bcmgenet.c revision 1.4 1 /* $NetBSD: bcmgenet.c,v 1.4 2020/03/29 13:04:15 jmcneill Exp $ */
2
3 /*-
4 * Copyright (c) 2020 Jared McNeill <jmcneill (at) invisible.ca>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 /*
30 * Broadcom GENETv5
31 */
32
33 #include "opt_net_mpsafe.h"
34 #include "opt_ddb.h"
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: bcmgenet.c,v 1.4 2020/03/29 13:04:15 jmcneill Exp $");
38
39 #include <sys/param.h>
40 #include <sys/bus.h>
41 #include <sys/device.h>
42 #include <sys/intr.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/mutex.h>
46 #include <sys/callout.h>
47 #include <sys/cprng.h>
48
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_ether.h>
52 #include <net/if_media.h>
53 #include <net/bpf.h>
54
55 #include <dev/mii/miivar.h>
56
57 #include <dev/ic/bcmgenetreg.h>
58 #include <dev/ic/bcmgenetvar.h>
59
60 CTASSERT(MCLBYTES == 2048);
61
62 #ifdef GENET_DEBUG
63 #define DPRINTF(...) printf(##__VA_ARGS__)
64 #else
65 #define DPRINTF(...) ((void)0)
66 #endif
67
68 #ifdef NET_MPSAFE
69 #define GENET_MPSAFE 1
70 #define CALLOUT_FLAGS CALLOUT_MPSAFE
71 #else
72 #define CALLOUT_FLAGS 0
73 #endif
74
75 #define TX_SKIP(n, o) (((n) + (o)) & (GENET_DMA_DESC_COUNT - 1))
76 #define TX_NEXT(n) TX_SKIP(n, 1)
77 #define RX_NEXT(n) (((n) + 1) & (GENET_DMA_DESC_COUNT - 1))
78
79 #define TX_MAX_SEGS 128
80 #define TX_DESC_COUNT GENET_DMA_DESC_COUNT
81 #define RX_DESC_COUNT GENET_DMA_DESC_COUNT
82 #define MII_BUSY_RETRY 1000
83 #define GENET_MAX_MDF_FILTER 17
84
85 #define GENET_LOCK(sc) mutex_enter(&(sc)->sc_lock)
86 #define GENET_UNLOCK(sc) mutex_exit(&(sc)->sc_lock)
87 #define GENET_ASSERT_LOCKED(sc) KASSERT(mutex_owned(&(sc)->sc_lock))
88
89 #define RD4(sc, reg) \
90 bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
91 #define WR4(sc, reg, val) \
92 bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
93
/*
 * Read a PHY register over the GENET MDIO interface.
 * Returns 0 on success, or ETIMEDOUT if the controller does not
 * clear the START_BUSY bit within MII_BUSY_RETRY polls (~10ms).
 */
static int
genet_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct genet_softc *sc = device_private(dev);
	int retry;

	/* Start the read cycle; hardware clears START_BUSY when done. */
	WR4(sc, GENET_MDIO_CMD,
	    GENET_MDIO_READ | GENET_MDIO_START_BUSY |
	    __SHIFTIN(phy, GENET_MDIO_PMD) |
	    __SHIFTIN(reg, GENET_MDIO_REG));
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if ((RD4(sc, GENET_MDIO_CMD) & GENET_MDIO_START_BUSY) == 0) {
			/* Result is in the low 16 bits of the command reg. */
			*val = RD4(sc, GENET_MDIO_CMD) & 0xffff;
			break;
		}
		delay(10);
	}

	if (retry == 0) {
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}
121
122 static int
123 genet_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
124 {
125 struct genet_softc *sc = device_private(dev);
126 int retry;
127
128 WR4(sc, GENET_MDIO_CMD,
129 val | GENET_MDIO_WRITE | GENET_MDIO_START_BUSY |
130 __SHIFTIN(phy, GENET_MDIO_PMD) |
131 __SHIFTIN(reg, GENET_MDIO_REG));
132 for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
133 if ((RD4(sc, GENET_MDIO_CMD) & GENET_MDIO_START_BUSY) == 0)
134 break;
135 delay(10);
136 }
137
138 if (retry == 0) {
139 device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
140 phy, reg);
141 return ETIMEDOUT;
142 }
143
144 return 0;
145 }
146
/*
 * Program the MAC speed and RGMII out-of-band control to match the
 * currently negotiated media.  Called from the MII statchg hook.
 */
static void
genet_update_link(struct genet_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	uint32_t val;
	u_int speed;

	/* Map the active media subtype onto the UMAC speed encoding. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		speed = GENET_UMAC_CMD_SPEED_1000;
	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		speed = GENET_UMAC_CMD_SPEED_100;
	else
		speed = GENET_UMAC_CMD_SPEED_10;

	/*
	 * Take link status from register writes rather than the
	 * out-of-band pins.
	 */
	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	/*
	 * NOTE(review): internal-delay mode is disabled only for plain
	 * RGMII (delays presumably supplied externally); confirm this
	 * against the board's phy-mode configuration.
	 */
	if (sc->sc_phy_mode == GENET_PHY_MODE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	/* Update the MAC speed field. */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= __SHIFTIN(speed, GENET_UMAC_CMD_SPEED);
	WR4(sc, GENET_UMAC_CMD, val);
}
175
176 static void
177 genet_mii_statchg(struct ifnet *ifp)
178 {
179 struct genet_softc * const sc = ifp->if_softc;
180
181 genet_update_link(sc);
182 }
183
/*
 * Write one TX descriptor: the buffer's physical address (split into
 * lo/hi 32-bit halves) and a status word combining the caller's flags
 * with the buffer length.  Also accounts the slot in sc_tx.queued.
 */
static void
genet_setup_txdesc(struct genet_softc *sc, int index, int flags,
    bus_addr_t paddr, u_int len)
{
	uint32_t status;

	status = flags | __SHIFTIN(len, GENET_TX_DESC_STATUS_BUFLEN);
	++sc->sc_tx.queued;

	WR4(sc, GENET_TX_DESC_ADDRESS_LO(index), (uint32_t)paddr);
	WR4(sc, GENET_TX_DESC_ADDRESS_HI(index), (uint32_t)(paddr >> 32));
	WR4(sc, GENET_TX_DESC_STATUS(index), status);
}
197
/*
 * Map an mbuf chain for transmit and fill one TX descriptor per DMA
 * segment, starting at ring slot "index".
 *
 * Returns:
 *   >0  number of descriptors consumed
 *    0  packet not queued (mapping failed; freed on EFBIG)
 *   -1  not enough free descriptors; caller should back off
 */
static int
genet_setup_txbuf(struct genet_softc *sc, int index, struct mbuf *m)
{
	bus_dma_segment_t *segs;
	int error, nsegs, cur, i;
	uint32_t flags;

	error = bus_dmamap_load_mbuf(sc->sc_tx.buf_tag,
	    sc->sc_tx.buf_map[index].map, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		device_printf(sc->sc_dev,
		    "TX packet needs too many DMA segments, dropping...\n");
		/*
		 * NOTE(review): "m" is freed here, but the caller
		 * (genet_start_locked) only IFQ_POLLed it and breaks
		 * out without dequeuing on a 0 return, so the freed
		 * mbuf can remain at the head of the send queue --
		 * verify for use-after-free.
		 */
		m_freem(m);
		return 0;
	}
	if (error != 0)
		return 0;

	segs = sc->sc_tx.buf_map[index].map->dm_segs;
	nsegs = sc->sc_tx.buf_map[index].map->dm_nsegs;

	/* Not enough room in the ring for this chain: back off. */
	if (sc->sc_tx.queued >= GENET_DMA_DESC_COUNT - nsegs) {
		bus_dmamap_unload(sc->sc_tx.buf_tag,
		    sc->sc_tx.buf_map[index].map);
		return -1;
	}

	/* First descriptor carries SOP and CRC; QTAG goes on all. */
	flags = GENET_TX_DESC_STATUS_SOP |
		GENET_TX_DESC_STATUS_CRC |
		GENET_TX_DESC_STATUS_QTAG;

	for (cur = index, i = 0; i < nsegs; i++) {
		/* Only the first slot of a chain keeps the mbuf pointer. */
		sc->sc_tx.buf_map[cur].mbuf = (i == 0 ? m : NULL);
		if (i == nsegs - 1)
			flags |= GENET_TX_DESC_STATUS_EOP;

		genet_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len);

		if (i == 0) {
			flags &= ~GENET_TX_DESC_STATUS_SOP;
			flags &= ~GENET_TX_DESC_STATUS_CRC;
		}
		cur = TX_NEXT(cur);
	}

	bus_dmamap_sync(sc->sc_tx.buf_tag, sc->sc_tx.buf_map[index].map,
	    0, sc->sc_tx.buf_map[index].map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return nsegs;
}
249
/*
 * Point one RX descriptor at the given buffer physical address.
 * "len" is currently unused: the per-buffer size is programmed
 * globally in genet_init_rings().
 */
static void
genet_setup_rxdesc(struct genet_softc *sc, int index,
    bus_addr_t paddr, bus_size_t len)
{
	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)paddr);
	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(paddr >> 32));
}
257
/*
 * Install an mbuf cluster into RX ring slot "index": map it for DMA,
 * record it in the softc and program the descriptor address.
 * Returns 0, or a bus_dma error (the mbuf is NOT consumed on error).
 */
static int
genet_setup_rxbuf(struct genet_softc *sc, int index, struct mbuf *m)
{
	int error;

	error = bus_dmamap_load_mbuf(sc->sc_rx.buf_tag,
	    sc->sc_rx.buf_map[index].map, m, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0)
		return error;

	bus_dmamap_sync(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map,
	    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->sc_rx.buf_map[index].mbuf = m;
	genet_setup_rxdesc(sc, index,
	    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_addr,
	    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_len);

	return 0;
}
279
280 static struct mbuf *
281 genet_alloc_mbufcl(struct genet_softc *sc)
282 {
283 struct mbuf *m;
284
285 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
286 if (m != NULL)
287 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
288
289 return m;
290 }
291
292 static void
293 genet_enable_intr(struct genet_softc *sc)
294 {
295 WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
296 GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
297 }
298
299 static void
300 genet_disable_intr(struct genet_softc *sc)
301 {
302 /* Disable interrupts */
303 WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff);
304 WR4(sc, GENET_INTRL2_CPU_CLEAR, 0xffffffff);
305 }
306
/*
 * 1 Hz callout: drive the MII autonegotiation state machine and
 * reschedule ourselves.
 */
static void
genet_tick(void *softc)
{
	struct genet_softc *sc = softc;
	struct mii_data *mii = &sc->sc_mii;
#ifndef GENET_MPSAFE
	/* Non-MPSAFE configuration still relies on splnet protection. */
	int s = splnet();
#endif

	GENET_LOCK(sc);
	mii_tick(mii);
	callout_schedule(&sc->sc_stat_ch, hz);
	GENET_UNLOCK(sc);

#ifndef GENET_MPSAFE
	splx(s);
#endif
}
325
326 static void
327 genet_setup_rxfilter_mdf(struct genet_softc *sc, u_int n, const uint8_t *ea)
328 {
329 uint32_t addr0 = (ea[0] << 8) | ea[1];
330 uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];
331
332 WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
333 WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
334 }
335
/*
 * Program the receive filter.  Our unicast address, the broadcast
 * address and each multicast group get a perfect-match MDF slot; if
 * the required slots exceed the hardware's capacity, or the interface
 * is promiscuous, fall back to promiscuous reception.
 */
static void
genet_setup_rxfilter(struct genet_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	GENET_ASSERT_LOCKED(sc);

	ETHER_LOCK(ec);

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters. We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 *
	 * NOTE(review): multicast *ranges* (enm_addrlo != enm_addrhi)
	 * are not special-cased here -- confirm whether they should
	 * force ALLMULTI.
	 */
	ETHER_FIRST_MULTI(step, ec, enm);
	for (n = 2; enm != NULL; n++)
		ETHER_NEXT_MULTI(step, enm);

	if (n > GENET_MAX_MDF_FILTER)
		ifp->if_flags |= IFF_ALLMULTI;
	else
		ifp->if_flags &= ~IFF_ALLMULTI;

	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		/* Accept everything; the MDF slots are all disabled. */
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		/* Slots 0 and 1: broadcast and our unicast address. */
		genet_setup_rxfilter_mdf(sc, 0, ifp->if_broadcastaddr);
		genet_setup_rxfilter_mdf(sc, 1, CLLADDR(ifp->if_sadl));
		ETHER_FIRST_MULTI(step, ec, enm);
		for (n = 2; enm != NULL; n++) {
			genet_setup_rxfilter_mdf(sc, n, enm->enm_addrlo);
			ETHER_NEXT_MULTI(step, enm);
		}
		/* Enable the n slots just written (MSB = slot 0). */
		mdf_ctrl = __BITS(GENET_MAX_MDF_FILTER - 1,
				  GENET_MAX_MDF_FILTER - n);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);

	ETHER_UNLOCK(ec);
}
387
/*
 * Soft-reset the MAC: flush the RX buffer logic, reset the UMAC and
 * MIB counters, and reprogram basic frame parameters.
 * Always returns 0.
 */
static int
genet_reset(struct genet_softc *sc)
{
	uint32_t val;

	/* Pulse the RBUF flush reset bit. */
	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	delay(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	delay(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	delay(10);

	/* Reset the UMAC (local loopback held during reset). */
	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	delay(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	/* Clear all MIB counters. */
	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);

	/* 1536 accommodates an Ethernet frame plus a VLAN tag. */
	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	/* 2-byte RX payload alignment (see the ETHER_ALIGN adjust). */
	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);

	return 0;
}
425
/*
 * Program the TX and RX DMA rings for queue "qid" and enable both
 * DMA engines.  Descriptors live in device register space, so only
 * indices, sizes and thresholds need to be written.
 */
static void
genet_init_rings(struct genet_softc *sc, int qid)
{
	uint32_t val;

	/* TX ring */

	sc->sc_tx.queued = 0;
	sc->sc_tx.cidx = sc->sc_tx.pidx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    __SHIFTIN(TX_DESC_COUNT, GENET_TX_DMA_RING_BUF_SIZE_DESC_COUNT) |
	    __SHIFTIN(MCLBYTES, GENET_TX_DMA_RING_BUF_SIZE_BUF_LENGTH));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	/* End address is expressed in 32-bit words, hence "/ 4". */
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	/* Interrupt as soon as a single buffer completes. */
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	/* RX ring */

	sc->sc_rx.cidx = sc->sc_rx.pidx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    __SHIFTIN(RX_DESC_COUNT, GENET_RX_DMA_RING_BUF_SIZE_DESC_COUNT) |
	    __SHIFTIN(MCLBYTES, GENET_RX_DMA_RING_BUF_SIZE_BUF_LENGTH));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	/* XON/XOFF flow-control thresholds. */
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    __SHIFTIN(5, GENET_RX_DMA_XON_XOFF_THRES_LO) |
	    __SHIFTIN(RX_DESC_COUNT >> 4, GENET_RX_DMA_XON_XOFF_THRES_HI));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}
495
/*
 * Bring the interface up: program the MAC address, RX filter and
 * DMA rings, enable TX/RX and interrupts, and start media
 * negotiation and the MII tick.  Called with the driver lock held.
 */
static int
genet_init_locked(struct genet_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t val;
	const uint8_t *enaddr = CLLADDR(ifp->if_sadl);

	GENET_ASSERT_LOCKED(sc);

	/* Already up: nothing to do. */
	if ((ifp->if_flags & IFF_RUNNING) != 0)
		return 0;

	if (sc->sc_phy_mode == GENET_PHY_MODE_RGMII ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_RXID)
		WR4(sc, GENET_SYS_PORT_CTRL,
		    GENET_SYS_PORT_MODE_EXT_GPHY);

	/*
	 * Write hardware address.
	 * NOTE(review): "enaddr[0] << 24" shifts a promoted int into
	 * the sign bit for bytes >= 0x80 -- technically undefined;
	 * consider casting to uint32_t.
	 */
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);

	/* Setup RX filter */
	genet_setup_rxfilter(sc);

	/* Setup TX/RX rings */
	genet_init_rings(sc, GENET_DMA_DEFAULT_QUEUE);

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	genet_enable_intr(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Kick off autonegotiation and the 1 Hz MII tick. */
	mii_mediachg(mii);
	callout_schedule(&sc->sc_stat_ch, hz);

	return 0;
}
544
545 static int
546 genet_init(struct ifnet *ifp)
547 {
548 struct genet_softc *sc = ifp->if_softc;
549 int error;
550
551 GENET_LOCK(sc);
552 error = genet_init_locked(sc);
553 GENET_UNLOCK(sc);
554
555 return error;
556 }
557
/*
 * Take the interface down: stop the MII tick, quiesce the MAC and
 * both DMA engines, flush the TX FIFO and mask interrupts.
 * "disable" is currently unused.  Called with the driver lock held.
 */
static void
genet_stop_locked(struct genet_softc *sc, int disable)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t val;

	GENET_ASSERT_LOCKED(sc);

	callout_stop(&sc->sc_stat_ch);

	mii_down(&sc->sc_mii);

	/* Disable receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Stop receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	WR4(sc, GENET_RX_DMA_CTRL, val);

	/* Stop transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	WR4(sc, GENET_TX_DMA_CTRL, val);

	/* Flush data in the TX FIFO */
	WR4(sc, GENET_UMAC_TX_FLUSH, 1);
	delay(10);
	WR4(sc, GENET_UMAC_TX_FLUSH, 0);

	/* Disable transmitter */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_TXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Disable interrupts */
	genet_disable_intr(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
600
601 static void
602 genet_stop(struct ifnet *ifp, int disable)
603 {
604 struct genet_softc * const sc = ifp->if_softc;
605
606 GENET_LOCK(sc);
607 genet_stop_locked(sc, disable);
608 GENET_UNLOCK(sc);
609 }
610
611 static void
612 genet_rxintr(struct genet_softc *sc, int qid)
613 {
614 struct ifnet *ifp = &sc->sc_ec.ec_if;
615 int error, index, len, n;
616 struct mbuf *m, *m0;
617 uint32_t status, pidx, total;
618
619 pidx = RD4(sc, GENET_RX_DMA_PROD_INDEX(qid)) & 0xffff;
620 total = (pidx - sc->sc_rx.cidx) & 0xffff;
621
622 DPRINTF("RX pidx=%08x total=%d\n", pidx, total);
623
624 index = sc->sc_rx.cidx & (RX_DESC_COUNT - 1);
625 for (n = 0; n < total; n++) {
626 status = RD4(sc, GENET_RX_DESC_STATUS(index));
627 len = __SHIFTOUT(status, GENET_RX_DESC_STATUS_BUFLEN);
628
629 m = sc->sc_rx.buf_map[index].mbuf;
630
631 if ((m0 = genet_alloc_mbufcl(sc)) == NULL) {
632 if_statinc(ifp, if_ierrors);
633 goto next;
634 }
635 error = genet_setup_rxbuf(sc, index, m0);
636 if (error != 0) {
637 if_statinc(ifp, if_ierrors);
638 goto next;
639 }
640
641 bus_dmamap_sync(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map,
642 0, sc->sc_rx.buf_map[index].map->dm_mapsize,
643 BUS_DMASYNC_POSTREAD);
644 bus_dmamap_unload(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map);
645
646 DPRINTF("RX [#%d] index=%02x status=%08x len=%d adj_len=%d\n",
647 n, index, status, len, len - ETHER_ALIGN);
648
649 if (len > ETHER_ALIGN) {
650 m_adj(m, ETHER_ALIGN);
651
652 m_set_rcvif(m, ifp);
653 m->m_len = m->m_pkthdr.len = len - ETHER_ALIGN;
654 m->m_nextpkt = NULL;
655
656 if_percpuq_enqueue(ifp->if_percpuq, m);
657 }
658
659 next:
660 index = RX_NEXT(index);
661
662 sc->sc_rx.cidx = (sc->sc_rx.cidx + 1) & 0xffff;
663 WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), sc->sc_rx.cidx);
664 }
665 }
666
/*
 * TX completion: reclaim the descriptors the hardware has consumed,
 * unmapping and freeing the transmitted mbufs.
 * Called with the driver lock held.
 */
static void
genet_txintr(struct genet_softc *sc, int qid)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct genet_bufmap *bmap;
	uint32_t cidx, total;
	int i;

	GENET_ASSERT_LOCKED(sc);

	/* Hardware consumer index is a free-running 16-bit counter. */
	cidx = RD4(sc, GENET_TX_DMA_CONS_INDEX(qid)) & 0xffff;
	total = (cidx - sc->sc_tx.cidx) & 0xffff;

	for (i = sc->sc_tx.next; sc->sc_tx.queued > 0 && total > 0; i = TX_NEXT(i), total--) {
		/* XXX check for errors */

		bmap = &sc->sc_tx.buf_map[i];
		/* Only the first slot of a chain holds the mbuf. */
		if (bmap->mbuf != NULL) {
			bus_dmamap_sync(sc->sc_tx.buf_tag, bmap->map,
			    0, bmap->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tx.buf_tag, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}

		--sc->sc_tx.queued;
		ifp->if_flags &= ~IFF_OACTIVE;
		/*
		 * NOTE(review): this increments once per descriptor,
		 * so multi-segment packets inflate if_opackets.
		 */
		if_statinc(ifp, if_opackets);
	}

	sc->sc_tx.next = i;
	sc->sc_tx.cidx = cidx;
}
701
/*
 * Move packets from the interface send queue onto the TX ring.
 * Called with the driver lock held.
 */
static void
genet_start_locked(struct genet_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mbuf *m;
	int nsegs, index, cnt;

	GENET_ASSERT_LOCKED(sc);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	const int qid = GENET_DMA_DEFAULT_QUEUE;

	index = sc->sc_tx.pidx & (TX_DESC_COUNT - 1);
	cnt = 0;

	for (;;) {
		/* Peek first; only dequeue once the packet is mapped. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		nsegs = genet_setup_txbuf(sc, index, m);
		if (nsegs <= 0) {
			/*
			 * NOTE(review): on a 0 return after EFBIG,
			 * genet_setup_txbuf() has already freed "m"
			 * while it is still at the head of if_snd --
			 * potential use-after-free on the next pass.
			 */
			if (nsegs == -1)
				ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m);
		bpf_mtap(ifp, m, BPF_D_OUT);

		index = TX_SKIP(index, nsegs);

		/* pidx is a free-running 16-bit producer counter. */
		sc->sc_tx.pidx = (sc->sc_tx.pidx + nsegs) & 0xffff;
		cnt++;
	}

	/* Kick the DMA engine once for the whole batch. */
	if (cnt != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), sc->sc_tx.pidx);
}
742
743 static void
744 genet_start(struct ifnet *ifp)
745 {
746 struct genet_softc *sc = ifp->if_softc;
747
748 GENET_LOCK(sc);
749 genet_start_locked(sc);
750 GENET_UNLOCK(sc);
751 }
752
753 int
754 genet_intr(void *arg)
755 {
756 struct genet_softc *sc = arg;
757 struct ifnet *ifp = &sc->sc_ec.ec_if;
758 uint32_t val;
759
760 GENET_LOCK(sc);
761
762 val = RD4(sc, GENET_INTRL2_CPU_STAT);
763 val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
764 WR4(sc, GENET_INTRL2_CPU_CLEAR, val);
765
766 if (val & GENET_IRQ_RXDMA_DONE)
767 genet_rxintr(sc, GENET_DMA_DEFAULT_QUEUE);
768
769 if (val & GENET_IRQ_TXDMA_DONE) {
770 genet_txintr(sc, GENET_DMA_DEFAULT_QUEUE);
771 if_schedule_deferred_start(ifp);
772 }
773
774 GENET_UNLOCK(sc);
775
776 return 1;
777 }
778
/*
 * Interface ioctl handler.  Everything is delegated to ether_ioctl();
 * on ENETRESET we reprogram what the request may have changed:
 * capabilities force a full reinit, multicast changes reprogram the
 * RX filter (only while running), anything else is a no-op.
 */
static int
genet_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct genet_softc *sc = ifp->if_softc;
	int error, s;

#ifndef GENET_MPSAFE
	/* Legacy configuration: whole handler runs at splnet. */
	s = splnet();
#endif

	switch (cmd) {
	default:
#ifdef GENET_MPSAFE
		/* MPSAFE: raise spl only around ether_ioctl(). */
		s = splnet();
#endif
		error = ether_ioctl(ifp, cmd, data);
#ifdef GENET_MPSAFE
		splx(s);
#endif
		if (error != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if ((ifp->if_flags & IFF_RUNNING) != 0) {
			/* Multicast list changed: redo the RX filter. */
			GENET_LOCK(sc);
			genet_setup_rxfilter(sc);
			GENET_UNLOCK(sc);
		}
		break;
	}

#ifndef GENET_MPSAFE
	splx(s);
#endif

	return error;
}
821
/*
 * Determine the MAC address: use the "mac-address" device property
 * when the platform provides one, otherwise generate a random
 * locally-administered unicast address.
 */
static void
genet_get_eaddr(struct genet_softc *sc, uint8_t *eaddr)
{
	prop_dictionary_t prop = device_properties(sc->sc_dev);
	uint32_t maclo, machi;
	prop_data_t eaprop;

	eaprop = prop_dictionary_get(prop, "mac-address");
	if (eaprop == NULL) {
		/* Create one: 0xf2 = locally administered, unicast. */
		maclo = 0x00f2 | (cprng_strong32() & 0xffff0000);
		machi = cprng_strong32() & 0xffff;

		eaddr[0] = maclo & 0xff;
		eaddr[1] = (maclo >> 8) & 0xff;
		eaddr[2] = (maclo >> 16) & 0xff;
		eaddr[3] = (maclo >> 24) & 0xff;
		eaddr[4] = machi & 0xff;
		eaddr[5] = (machi >> 8) & 0xff;
	} else {
		KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
		memcpy(eaddr, prop_data_data_nocopy(eaprop),
		    ETHER_ADDR_LEN);
	}

}
849
/*
 * Create DMA maps for every TX and RX ring slot and pre-load each RX
 * slot with an mbuf cluster.  Returns 0 or an errno; maps created
 * before a failure are not torn down here.
 */
static int
genet_setup_dma(struct genet_softc *sc, int qid)
{
	struct mbuf *m;
	int error, i;

	/* Setup TX ring */
	sc->sc_tx.buf_tag = sc->sc_dmat;
	for (i = 0; i < TX_DESC_COUNT; i++) {
		/* Up to TX_MAX_SEGS segments per packet chain. */
		error = bus_dmamap_create(sc->sc_tx.buf_tag, MCLBYTES,
		    TX_MAX_SEGS, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create TX buffer map\n");
			return error;
		}
	}

	/* Setup RX ring */
	sc->sc_rx.buf_tag = sc->sc_dmat;
	for (i = 0; i < RX_DESC_COUNT; i++) {
		/* One contiguous cluster per RX slot. */
		error = bus_dmamap_create(sc->sc_rx.buf_tag, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create RX buffer map\n");
			return error;
		}
		if ((m = genet_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->sc_dev, "cannot allocate RX mbuf\n");
			return ENOMEM;
		}
		error = genet_setup_rxbuf(sc, i, m);
		if (error != 0) {
			device_printf(sc->sc_dev, "cannot create RX buffer\n");
			return error;
		}
	}

	return 0;
}
893
/*
 * Common attachment code, called from the bus front-end after the
 * softc's register handles are set up: identify the controller
 * revision, reset it, allocate DMA resources and attach the network
 * interface and MII.  Returns 0 or an errno (no teardown of partial
 * state on failure).
 */
int
genet_attach(struct genet_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	u_int maj, min;

	/*
	 * Decode the revision register.
	 * NOTE(review): the raw major value is remapped (0 -> 1,
	 * 5/6 -> 4/5), presumably because the hardware encodes GENETv5
	 * as major 6 -- confirm against the datasheet.
	 */
	const uint32_t rev = RD4(sc, GENET_SYS_REV_CTRL);
	min = __SHIFTOUT(rev, SYS_REV_MINOR);
	maj = __SHIFTOUT(rev, SYS_REV_MAJOR);
	if (maj == 0)
		maj++;
	else if (maj == 5 || maj == 6)
		maj--;

	/* This driver only supports GENETv5. */
	if (maj != 5) {
		aprint_error(": GENETv%d.%d not supported\n", maj, min);
		return ENXIO;
	}

	aprint_naive("\n");
	aprint_normal(": GENETv%d.%d\n", maj, min);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NET);
	callout_init(&sc->sc_stat_ch, CALLOUT_FLAGS);
	callout_setfunc(&sc->sc_stat_ch, genet_tick, sc);

	genet_get_eaddr(sc, eaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", ether_sprintf(eaddr));

	/* Soft reset EMAC core */
	genet_reset(sc);

	/* Setup DMA descriptors */
	if (genet_setup_dma(sc, GENET_DMA_DEFAULT_QUEUE) != 0) {
		aprint_error_dev(sc->sc_dev, "failed to setup DMA descriptors\n");
		return EINVAL;
	}

	/* Setup ethernet interface */
	ifp->if_softc = sc;
	snprintf(ifp->if_xname, IFNAMSIZ, "%s", device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef GENET_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_start = genet_start;
	ifp->if_ioctl = genet_ioctl;
	ifp->if_init = genet_init;
	ifp->if_stop = genet_stop;
	ifp->if_capabilities = 0;
	ifp->if_capenable = ifp->if_capabilities;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/* 802.1Q VLAN-sized frames are supported */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* Attach MII driver */
	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = genet_mii_readreg;
	mii->mii_writereg = genet_mii_writereg;
	mii->mii_statchg = genet_mii_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id, MII_OFFSET_ANY,
	    0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		return ENOENT;
	}
	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/* Attach interface */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);

	/* Attach ethernet interface */
	ether_ifattach(ifp, eaddr);

	return 0;
}
978
#ifdef DDB
void	genet_debug(void);

/*
 * DDB helper: dump the software and hardware producer/consumer
 * indices of genet0's default queue for debugging ring stalls.
 */
void
genet_debug(void)
{
	device_t dev = device_find_by_xname("genet0");
	if (dev == NULL)
		return;

	struct genet_softc * const sc = device_private(dev);
	const int qid = GENET_DMA_DEFAULT_QUEUE;

	printf("TX CIDX = %08x (soft)\n", sc->sc_tx.cidx);
	printf("TX CIDX = %08x\n", RD4(sc, GENET_TX_DMA_CONS_INDEX(qid)));
	printf("TX PIDX = %08x (soft)\n", sc->sc_tx.pidx);
	printf("TX PIDX = %08x\n", RD4(sc, GENET_TX_DMA_PROD_INDEX(qid)));

	printf("RX CIDX = %08x (soft)\n", sc->sc_rx.cidx);
	printf("RX CIDX = %08x\n", RD4(sc, GENET_RX_DMA_CONS_INDEX(qid)));
	printf("RX PIDX = %08x (soft)\n", sc->sc_rx.pidx);
	printf("RX PIDX = %08x\n", RD4(sc, GENET_RX_DMA_PROD_INDEX(qid)));
}
#endif
1003