bcmgenet.c revision 1.1 1 /* $NetBSD: bcmgenet.c,v 1.1 2020/02/22 00:28:35 jmcneill Exp $ */
2
3 /*-
4 * Copyright (c) 2020 Jared McNeill <jmcneill (at) invisible.ca>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 /*
30 * Broadcom GENETv5
31 */
32
33 #include "opt_net_mpsafe.h"
34 #include "opt_ddb.h"
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: bcmgenet.c,v 1.1 2020/02/22 00:28:35 jmcneill Exp $");
38
39 #include <sys/param.h>
40 #include <sys/bus.h>
41 #include <sys/device.h>
42 #include <sys/intr.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/mutex.h>
46 #include <sys/callout.h>
47 #include <sys/cprng.h>
48
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_ether.h>
52 #include <net/if_media.h>
53 #include <net/bpf.h>
54
55 #include <dev/mii/miivar.h>
56
57 #include <dev/ic/bcmgenetreg.h>
58 #include <dev/ic/bcmgenetvar.h>
59
60 CTASSERT(MCLBYTES == 2048);
61
62 #ifdef GENET_DEBUG
63 #define DPRINTF(...) printf(##__VA_ARGS__)
64 #else
65 #define DPRINTF(...) ((void)0)
66 #endif
67
68 #ifdef NET_MPSAFE
69 #define GENET_MPSAFE 1
70 #define CALLOUT_FLAGS CALLOUT_MPSAFE
71 #else
72 #define CALLOUT_FLAGS 0
73 #endif
74
75 #define TX_SKIP(n, o) (((n) + (o)) & (GENET_DMA_DESC_COUNT - 1))
76 #define TX_NEXT(n) TX_SKIP(n, 1)
77 #define RX_NEXT(n) (((n) + 1) & (GENET_DMA_DESC_COUNT - 1))
78
79 #define TX_MAX_SEGS 128
80 #define TX_DESC_COUNT GENET_DMA_DESC_COUNT
81 #define RX_DESC_COUNT GENET_DMA_DESC_COUNT
82 #define MII_BUSY_RETRY 1000
83
84 #define GENET_LOCK(sc) mutex_enter(&(sc)->sc_lock)
85 #define GENET_UNLOCK(sc) mutex_exit(&(sc)->sc_lock)
86 #define GENET_ASSERT_LOCKED(sc) KASSERT(mutex_owned(&(sc)->sc_lock))
87
88 #define RD4(sc, reg) \
89 bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
90 #define WR4(sc, reg, val) \
91 bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
92
93 static int
94 genet_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
95 {
96 struct genet_softc *sc = device_private(dev);
97 int retry;
98
99 WR4(sc, GENET_MDIO_CMD,
100 GENET_MDIO_READ | GENET_MDIO_START_BUSY |
101 __SHIFTIN(phy, GENET_MDIO_PMD) |
102 __SHIFTIN(reg, GENET_MDIO_REG));
103 for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
104 if ((RD4(sc, GENET_MDIO_CMD) & GENET_MDIO_START_BUSY) == 0) {
105 *val = RD4(sc, GENET_MDIO_CMD) & 0xffff;
106 break;
107 }
108 delay(10);
109 }
110
111
112 if (retry == 0) {
113 device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
114 phy, reg);
115 return ETIMEDOUT;
116 }
117
118 return 0;
119 }
120
121 static int
122 genet_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
123 {
124 struct genet_softc *sc = device_private(dev);
125 int retry;
126
127 WR4(sc, GENET_MDIO_CMD,
128 val | GENET_MDIO_WRITE | GENET_MDIO_START_BUSY |
129 __SHIFTIN(phy, GENET_MDIO_PMD) |
130 __SHIFTIN(reg, GENET_MDIO_REG));
131 for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
132 if ((RD4(sc, GENET_MDIO_CMD) & GENET_MDIO_START_BUSY) == 0)
133 break;
134 delay(10);
135 }
136
137 if (retry == 0) {
138 device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
139 phy, reg);
140 return ETIMEDOUT;
141 }
142
143 return 0;
144 }
145
/*
 * Reprogram the RGMII OOB block and the MAC speed field to match the
 * media currently negotiated by the PHY (mii_media_active).
 */
static void
genet_update_link(struct genet_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	uint32_t val;
	u_int speed;

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		speed = GENET_UMAC_CMD_SPEED_1000;
	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		speed = GENET_UMAC_CMD_SPEED_100;
	else
		speed = GENET_UMAC_CMD_SPEED_10;	/* fallback for all others */

	/* Enable RGMII mode and assert link in the out-of-band control. */
	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	/*
	 * NOTE(review): ID_MODE_DISABLE is set only for plain RGMII;
	 * presumably RGMII_RXID relies on an internal delay instead --
	 * confirm against the platform's PHY delay configuration.
	 */
	if (sc->sc_phy_mode == GENET_PHY_MODE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	/* Update the MAC speed field to match. */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= __SHIFTIN(speed, GENET_UMAC_CMD_SPEED);
	WR4(sc, GENET_UMAC_CMD, val);
}
174
175 static void
176 genet_mii_statchg(struct ifnet *ifp)
177 {
178 struct genet_softc * const sc = ifp->if_softc;
179
180 genet_update_link(sc);
181 }
182
/*
 * Program TX descriptor `index' with the buffer address, length and
 * status flags, and account for it in the soft queued count.
 */
static void
genet_setup_txdesc(struct genet_softc *sc, int index, int flags,
    bus_addr_t paddr, u_int len)
{
	uint32_t status;

	status = flags | __SHIFTIN(len, GENET_TX_DESC_STATUS_BUFLEN);
	++sc->sc_tx.queued;

	/* The 64-bit DMA address is split across two 32-bit registers. */
	WR4(sc, GENET_TX_DESC_ADDRESS_LO(index), (uint32_t)paddr);
	WR4(sc, GENET_TX_DESC_ADDRESS_HI(index), (uint32_t)(paddr >> 32));
	WR4(sc, GENET_TX_DESC_STATUS(index), status);
}
196
/*
 * DMA-map mbuf chain `m' and fill one TX descriptor per segment,
 * starting at ring slot `index'.
 *
 * Returns the number of descriptors consumed, 0 if the mbuf could not
 * be mapped, or -1 if the ring lacks room for all segments (caller
 * should set IFF_OACTIVE and retry later).
 */
static int
genet_setup_txbuf(struct genet_softc *sc, int index, struct mbuf *m)
{
	bus_dma_segment_t *segs;
	int error, nsegs, cur, i;
	uint32_t flags;

	error = bus_dmamap_load_mbuf(sc->sc_tx.buf_tag,
	    sc->sc_tx.buf_map[index].map, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		device_printf(sc->sc_dev,
		    "TX packet needs too many DMA segments, dropping...\n");
		/*
		 * NOTE(review): `m' is freed here, but the caller only
		 * IFQ_POLLed it, so the freed mbuf stays at the head of
		 * the send queue -- the next start pass would touch a
		 * freed mbuf.  Confirm against genet_start_locked().
		 */
		m_freem(m);
		return 0;
	}
	if (error != 0)
		return 0;

	segs = sc->sc_tx.buf_map[index].map->dm_segs;
	nsegs = sc->sc_tx.buf_map[index].map->dm_nsegs;

	/* Bail before committing if the ring cannot hold every segment. */
	if (sc->sc_tx.queued >= GENET_DMA_DESC_COUNT - nsegs) {
		bus_dmamap_unload(sc->sc_tx.buf_tag,
		    sc->sc_tx.buf_map[index].map);
		return -1;
	}

	/* SOP/CRC apply to the first descriptor only; QTAG to all. */
	flags = GENET_TX_DESC_STATUS_SOP |
		GENET_TX_DESC_STATUS_CRC |
		GENET_TX_DESC_STATUS_QTAG;

	for (cur = index, i = 0; i < nsegs; i++) {
		/* Keep the mbuf pointer only in the first slot for reclaim. */
		sc->sc_tx.buf_map[cur].mbuf = (i == 0 ? m : NULL);
		if (i == nsegs - 1)
			flags |= GENET_TX_DESC_STATUS_EOP;

		genet_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len);

		if (i == 0) {
			flags &= ~GENET_TX_DESC_STATUS_SOP;
			flags &= ~GENET_TX_DESC_STATUS_CRC;
		}
		cur = TX_NEXT(cur);
	}

	bus_dmamap_sync(sc->sc_tx.buf_tag, sc->sc_tx.buf_map[index].map,
	    0, sc->sc_tx.buf_map[index].map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return nsegs;
}
248
/*
 * Load the buffer physical address into RX descriptor `index'.
 * The `len' argument is currently unused: the RX buffer size is fixed
 * ring-wide via GENET_RX_DMA_RING_BUF_SIZE.
 */
static void
genet_setup_rxdesc(struct genet_softc *sc, int index,
    bus_addr_t paddr, bus_size_t len)
{
	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)paddr);
	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(paddr >> 32));
}
256
/*
 * DMA-map mbuf `m' for receive, attach it to ring slot `index' and
 * hand its buffer to the hardware.  Returns 0 or a bus_dma error.
 */
static int
genet_setup_rxbuf(struct genet_softc *sc, int index, struct mbuf *m)
{
	int error;

	error = bus_dmamap_load_mbuf(sc->sc_rx.buf_tag,
	    sc->sc_rx.buf_map[index].map, m, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0)
		return error;

	bus_dmamap_sync(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map,
	    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->sc_rx.buf_map[index].mbuf = m;
	genet_setup_rxdesc(sc, index,
	    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_addr,
	    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_len);

	return 0;
}
278
279 static struct mbuf *
280 genet_alloc_mbufcl(struct genet_softc *sc)
281 {
282 struct mbuf *m;
283
284 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
285 if (m != NULL)
286 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
287
288 return m;
289 }
290
/*
 * Unmask the TX/RX DMA completion interrupts.  Writing a bit to the
 * CLEAR_MASK register removes it from the interrupt mask.
 */
static void
genet_enable_intr(struct genet_softc *sc)
{
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}
297
/*
 * Mask all interrupt sources and acknowledge anything still pending.
 */
static void
genet_disable_intr(struct genet_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, 0xffffffff);
}
305
/*
 * Periodic (1 Hz) callout: drive the MII autonegotiation state
 * machine and reschedule ourselves.
 */
static void
genet_tick(void *softc)
{
	struct genet_softc *sc = softc;
	struct mii_data *mii = &sc->sc_mii;
#ifndef GENET_MPSAFE
	/* Legacy configuration: block network interrupts around mii_tick. */
	int s = splnet();
#endif

	GENET_LOCK(sc);
	mii_tick(mii);
	callout_schedule(&sc->sc_stat_ch, hz);
	GENET_UNLOCK(sc);

#ifndef GENET_MPSAFE
	splx(s);
#endif
}
324
/*
 * Program the receive filter.  Multicast/unicast filtering is not
 * implemented yet: the MAC is left in promiscuous mode with the MDF
 * filters disabled, so all traffic is accepted and filtered in
 * software by the stack.
 */
static void
genet_setup_rxfilter(struct genet_softc *sc)
{
	uint32_t val;

	GENET_ASSERT_LOCKED(sc);

	/* Enable promiscuous mode */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_PROMISC;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Disable filters */
	WR4(sc, GENET_UMAC_MDF_CTRL, 0);
}
340
/*
 * Soft-reset the controller: flush the receive buffer block, reset
 * the UniMAC and the MIB counters, then program initial frame and
 * buffer parameters.  Register write order matters here.  Always
 * returns 0.
 */
static int
genet_reset(struct genet_softc *sc)
{
	uint32_t val;

	/* Pulse the RBUF flush-reset bit. */
	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	delay(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	delay(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	delay(10);

	/* Pulse the UniMAC software reset with local loopback enabled. */
	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	delay(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	/* Clear the hardware statistics counters. */
	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);

	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	/*
	 * 2-byte receive alignment so the IP header of an Ethernet
	 * frame lands on a natural boundary (see m_adj in rxintr).
	 */
	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);

	return 0;
}
378
/*
 * Initialize and enable the TX and RX DMA rings for queue `qid',
 * resetting the software producer/consumer indices to match the
 * hardware's zeroed ones.
 */
static void
genet_init_rings(struct genet_softc *sc, int qid)
{
	uint32_t val;

	/* TX ring */

	sc->sc_tx.queued = 0;
	sc->sc_tx.cidx = sc->sc_tx.pidx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    __SHIFTIN(TX_DESC_COUNT, GENET_TX_DMA_RING_BUF_SIZE_DESC_COUNT) |
	    __SHIFTIN(MCLBYTES, GENET_TX_DMA_RING_BUF_SIZE_BUF_LENGTH));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	/* End address apparently counts 32-bit words (hence the / 4). */
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	/* Interrupt as soon as a single buffer completes. */
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	/* RX ring */

	sc->sc_rx.cidx = sc->sc_rx.pidx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    __SHIFTIN(RX_DESC_COUNT, GENET_RX_DMA_RING_BUF_SIZE_DESC_COUNT) |
	    __SHIFTIN(MCLBYTES, GENET_RX_DMA_RING_BUF_SIZE_BUF_LENGTH));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	/* Flow-control (XON/XOFF pause) thresholds. */
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    __SHIFTIN(5, GENET_RX_DMA_XON_XOFF_THRES_LO) |
	    __SHIFTIN(RX_DESC_COUNT >> 4, GENET_RX_DMA_XON_XOFF_THRES_HI));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}
448
449 static int
450 genet_init_locked(struct genet_softc *sc)
451 {
452 struct ifnet *ifp = &sc->sc_ec.ec_if;
453 struct mii_data *mii = &sc->sc_mii;
454 uint32_t val;
455 const uint8_t *enaddr = CLLADDR(ifp->if_sadl);
456
457 GENET_ASSERT_LOCKED(sc);
458
459 if ((ifp->if_flags & IFF_RUNNING) != 0)
460 return 0;
461
462 if (sc->sc_phy_mode == GENET_PHY_MODE_RGMII ||
463 sc->sc_phy_mode == GENET_PHY_MODE_RGMII_RXID)
464 WR4(sc, GENET_SYS_PORT_CTRL,
465 GENET_SYS_PORT_MODE_EXT_GPHY);
466
467 /* Write hardware address */
468 val = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
469 (enaddr[3] << 24);
470 WR4(sc, GENET_UMAC_MAC0, val);
471 val = enaddr[4] | (enaddr[5] << 8);
472 WR4(sc, GENET_UMAC_MAC1, val);
473
474 /* Setup RX filter */
475 genet_setup_rxfilter(sc);
476
477 /* Setup TX/RX rings */
478 genet_init_rings(sc, GENET_DMA_DEFAULT_QUEUE);
479
480 /* Enable transmitter and receiver */
481 val = RD4(sc, GENET_UMAC_CMD);
482 val |= GENET_UMAC_CMD_TXEN;
483 val |= GENET_UMAC_CMD_RXEN;
484 WR4(sc, GENET_UMAC_CMD, val);
485
486 /* Enable interrupts */
487 genet_enable_intr(sc);
488
489 ifp->if_flags |= IFF_RUNNING;
490 ifp->if_flags &= ~IFF_OACTIVE;
491
492 mii_mediachg(mii);
493 callout_schedule(&sc->sc_stat_ch, hz);
494
495 return 0;
496 }
497
498 static int
499 genet_init(struct ifnet *ifp)
500 {
501 struct genet_softc *sc = ifp->if_softc;
502 int error;
503
504 GENET_LOCK(sc);
505 error = genet_init_locked(sc);
506 GENET_UNLOCK(sc);
507
508 return error;
509 }
510
/*
 * Bring the interface down: stop the tick and the PHY, then the
 * receiver, both DMA engines and the transmitter, flush the TX FIFO
 * and mask interrupts.  The `disable' argument is currently unused.
 * Caller holds the softc lock.
 */
static void
genet_stop_locked(struct genet_softc *sc, int disable)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t val;

	GENET_ASSERT_LOCKED(sc);

	callout_stop(&sc->sc_stat_ch);

	mii_down(&sc->sc_mii);

	/* Disable receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Stop receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	WR4(sc, GENET_RX_DMA_CTRL, val);

	/* Stop transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	WR4(sc, GENET_TX_DMA_CTRL, val);

	/* Flush data in the TX FIFO */
	WR4(sc, GENET_UMAC_TX_FLUSH, 1);
	delay(10);
	WR4(sc, GENET_UMAC_TX_FLUSH, 0);

	/* Disable transmitter */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_TXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Disable interrupts */
	genet_disable_intr(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
553
554 static void
555 genet_stop(struct ifnet *ifp, int disable)
556 {
557 struct genet_softc * const sc = ifp->if_softc;
558
559 GENET_LOCK(sc);
560 genet_stop_locked(sc, disable);
561 GENET_UNLOCK(sc);
562 }
563
/*
 * RX completion: drain descriptors between our consumer index and the
 * hardware producer index, pass frames up the stack, and reload each
 * slot with a fresh mbuf cluster.
 */
static void
genet_rxintr(struct genet_softc *sc, int qid)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int error, index, len, n;
	struct mbuf *m, *m0;
	uint32_t status, pidx, total;

	/* Producer/consumer indices are free-running 16-bit counters. */
	pidx = RD4(sc, GENET_RX_DMA_PROD_INDEX(qid)) & 0xffff;
	total = (pidx - sc->sc_rx.cidx) & 0xffff;

	DPRINTF("RX pidx=%08x total=%d\n", pidx, total);

	index = sc->sc_rx.cidx & (RX_DESC_COUNT - 1);
	for (n = 0; n < total; n++) {
		status = RD4(sc, GENET_RX_DESC_STATUS(index));
		len = __SHIFTOUT(status, GENET_RX_DESC_STATUS_BUFLEN);

		/* XXX check for errors */

		bus_dmamap_sync(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map,
		    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map);

		DPRINTF("RX [#%d] index=%02x status=%08x len=%d adj_len=%d\n",
		    n, index, status, len, len - ETHER_ALIGN);

		/*
		 * The 2-byte RBUF alignment (see genet_reset) presumably
		 * prepends two pad bytes, stripped here with m_adj.
		 * Frames no longer than the pad are silently dropped
		 * (no if_ierrors bump).
		 */
		if (len > ETHER_ALIGN) {
			m = sc->sc_rx.buf_map[index].mbuf;

			m_adj(m, ETHER_ALIGN);

			m_set_rcvif(m, ifp);
			m->m_len = m->m_pkthdr.len = len - ETHER_ALIGN;
			m->m_nextpkt = NULL;

			if_percpuq_enqueue(ifp->if_percpuq, m);
		}

		/* Reload the slot with a fresh cluster. */
		if ((m0 = genet_alloc_mbufcl(sc)) != NULL) {
			error = genet_setup_rxbuf(sc, index, m0);
			if (error != 0) {
				/* XXX hole in RX ring */
			}
		} else {
			if_statinc(ifp, if_ierrors);
		}

		index = RX_NEXT(index);

		/* Advance and publish the consumer index as we go. */
		sc->sc_rx.cidx = (sc->sc_rx.cidx + 1) & 0xffff;
		WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), sc->sc_rx.cidx);
	}
}
619
/*
 * TX completion: reclaim descriptors the hardware has consumed.  The
 * mbuf pointer is stored only in the first slot of each packet (see
 * genet_setup_txbuf), so intermediate slots just decrement the queued
 * count.  Clears IFF_OACTIVE so transmission can resume.
 */
static void
genet_txintr(struct genet_softc *sc, int qid)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct genet_bufmap *bmap;
	uint32_t cidx, total;
	int i;

	GENET_ASSERT_LOCKED(sc);

	/* Hardware consumer index is a free-running 16-bit counter. */
	cidx = RD4(sc, GENET_TX_DMA_CONS_INDEX(qid)) & 0xffff;
	total = (cidx - sc->sc_tx.cidx) & 0xffff;

	for (i = sc->sc_tx.next; sc->sc_tx.queued > 0 && total > 0; i = TX_NEXT(i), total--) {
		/* XXX check for errors */

		bmap = &sc->sc_tx.buf_map[i];
		if (bmap->mbuf != NULL) {
			bus_dmamap_sync(sc->sc_tx.buf_tag, bmap->map,
			    0, bmap->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tx.buf_tag, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}

		--sc->sc_tx.queued;
		ifp->if_flags &= ~IFF_OACTIVE;
		if_statinc(ifp, if_opackets);
	}

	sc->sc_tx.next = i;
	sc->sc_tx.cidx = cidx;
}
654
/*
 * Transmit as many queued packets as the descriptor ring allows.
 * The hardware producer index is pushed once, after the loop.
 * Caller holds the softc lock.
 */
static void
genet_start_locked(struct genet_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mbuf *m;
	int nsegs, index, cnt;

	GENET_ASSERT_LOCKED(sc);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	const int qid = GENET_DMA_DEFAULT_QUEUE;

	index = sc->sc_tx.pidx & (TX_DESC_COUNT - 1);
	cnt = 0;

	for (;;) {
		/* Peek first; dequeue only after mapping succeeds. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		nsegs = genet_setup_txbuf(sc, index, m);
		if (nsegs <= 0) {
			/*
			 * NOTE(review): in genet_setup_txbuf()'s EFBIG
			 * path `m' has already been freed but remains
			 * queued here -- the next start pass would poll
			 * a freed mbuf.  Fix together with that function.
			 */
			if (nsegs == -1)
				ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m);
		bpf_mtap(ifp, m, BPF_D_OUT);

		index = TX_SKIP(index, nsegs);

		/* Soft producer index is a free-running 16-bit counter. */
		sc->sc_tx.pidx = (sc->sc_tx.pidx + nsegs) & 0xffff;
		cnt++;
	}

	if (cnt != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), sc->sc_tx.pidx);
}
695
696 static void
697 genet_start(struct ifnet *ifp)
698 {
699 struct genet_softc *sc = ifp->if_softc;
700
701 GENET_LOCK(sc);
702 genet_start_locked(sc);
703 GENET_UNLOCK(sc);
704 }
705
/*
 * Interrupt handler: service pending (unmasked) TX/RX DMA completion
 * events and acknowledge them.  Returns 1 (interrupt handled).
 */
int
genet_intr(void *arg)
{
	struct genet_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t val;

	GENET_LOCK(sc);

	/* Only consider sources that are not masked, then ack them. */
	val = RD4(sc, GENET_INTRL2_CPU_STAT);
	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);

	if (val & GENET_IRQ_RXDMA_DONE)
		genet_rxintr(sc, GENET_DMA_DEFAULT_QUEUE);

	if (val & GENET_IRQ_TXDMA_DONE) {
		genet_txintr(sc, GENET_DMA_DEFAULT_QUEUE);
		/* Ring slots were reclaimed; restart transmission. */
		if_schedule_deferred_start(ifp);
	}

	GENET_UNLOCK(sc);

	return 1;
}
731
/*
 * Interface ioctl handler.  Everything is delegated to ether_ioctl();
 * when that returns ENETRESET we either reinitialize the interface
 * (SIOCSIFCAP) or just reload the RX filter (multicast changes while
 * running).
 */
static int
genet_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct genet_softc *sc = ifp->if_softc;
	int error, s;

#ifndef GENET_MPSAFE
	s = splnet();
#endif

	switch (cmd) {
	default:
#ifdef GENET_MPSAFE
		/* MP-safe config: raise spl only around ether_ioctl. */
		s = splnet();
#endif
		error = ether_ioctl(ifp, cmd, data);
#ifdef GENET_MPSAFE
		splx(s);
#endif
		if (error != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;	/* other ENETRESETs need no action */
		else if ((ifp->if_flags & IFF_RUNNING) != 0) {
			GENET_LOCK(sc);
			genet_setup_rxfilter(sc);
			GENET_UNLOCK(sc);
		}
		break;
	}

#ifndef GENET_MPSAFE
	splx(s);
#endif

	return error;
}
774
775 static void
776 genet_get_eaddr(struct genet_softc *sc, uint8_t *eaddr)
777 {
778 prop_dictionary_t prop = device_properties(sc->sc_dev);
779 uint32_t maclo, machi;
780 prop_data_t eaprop;
781
782 eaprop = prop_dictionary_get(prop, "mac-address");
783 if (eaprop == NULL) {
784 /* Create one */
785 maclo = 0x00f2 | (cprng_strong32() & 0xffff0000);
786 machi = cprng_strong32() & 0xffff;
787
788 eaddr[0] = maclo & 0xff;
789 eaddr[1] = (maclo >> 8) & 0xff;
790 eaddr[2] = (maclo >> 16) & 0xff;
791 eaddr[3] = (maclo >> 24) & 0xff;
792 eaddr[4] = machi & 0xff;
793 eaddr[5] = (machi >> 8) & 0xff;
794 } else {
795 KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
796 KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
797 memcpy(eaddr, prop_data_data_nocopy(eaprop),
798 ETHER_ADDR_LEN);
799 }
800
801 }
802
/*
 * Create the TX/RX DMA maps and pre-load every RX slot with an mbuf
 * cluster.  Returns 0 or an errno.  On failure, maps created so far
 * are not torn down -- callers treat this as fatal and abort
 * attachment.  The `qid' argument is currently unused.
 */
static int
genet_setup_dma(struct genet_softc *sc, int qid)
{
	struct mbuf *m;
	int error, i;

	/* Setup TX ring */
	sc->sc_tx.buf_tag = sc->sc_dmat;
	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_tx.buf_tag, MCLBYTES,
		    TX_MAX_SEGS, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create TX buffer map\n");
			return error;
		}
	}

	/* Setup RX ring */
	sc->sc_rx.buf_tag = sc->sc_dmat;
	for (i = 0; i < RX_DESC_COUNT; i++) {
		/* RX buffers are single clusters: one segment per map. */
		error = bus_dmamap_create(sc->sc_rx.buf_tag, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create RX buffer map\n");
			return error;
		}
		if ((m = genet_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->sc_dev, "cannot allocate RX mbuf\n");
			return ENOMEM;
		}
		error = genet_setup_rxbuf(sc, i, m);
		if (error != 0) {
			device_printf(sc->sc_dev, "cannot create RX buffer\n");
			return error;
		}
	}

	return 0;
}
846
847 int
848 genet_attach(struct genet_softc *sc)
849 {
850 struct mii_data *mii = &sc->sc_mii;
851 struct ifnet *ifp = &sc->sc_ec.ec_if;
852 uint8_t eaddr[ETHER_ADDR_LEN];
853 u_int maj, min;
854
855 const uint32_t rev = RD4(sc, GENET_SYS_REV_CTRL);
856 min = __SHIFTOUT(rev, SYS_REV_MINOR);
857 maj = __SHIFTOUT(rev, SYS_REV_MAJOR);
858 if (maj == 0)
859 maj++;
860 else if (maj == 5 || maj == 6)
861 maj--;
862
863 if (maj != 5) {
864 aprint_error(": GENETv%d.%d not supported\n", maj, min);
865 return ENXIO;
866 }
867
868 aprint_naive("\n");
869 aprint_normal(": GENETv%d.%d\n", maj, min);
870
871 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NET);
872 callout_init(&sc->sc_stat_ch, CALLOUT_FLAGS);
873 callout_setfunc(&sc->sc_stat_ch, genet_tick, sc);
874
875 genet_get_eaddr(sc, eaddr);
876 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", ether_sprintf(eaddr));
877
878 /* Soft reset EMAC core */
879 genet_reset(sc);
880
881 /* Setup DMA descriptors */
882 if (genet_setup_dma(sc, GENET_DMA_DEFAULT_QUEUE) != 0) {
883 aprint_error_dev(sc->sc_dev, "failed to setup DMA descriptors\n");
884 return EINVAL;
885 }
886
887 /* Setup ethernet interface */
888 ifp->if_softc = sc;
889 snprintf(ifp->if_xname, IFNAMSIZ, device_xname(sc->sc_dev));
890 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
891 #ifdef GENET_MPSAFE
892 ifp->if_extflags = IFEF_MPSAFE;
893 #endif
894 ifp->if_start = genet_start;
895 ifp->if_ioctl = genet_ioctl;
896 ifp->if_init = genet_init;
897 ifp->if_stop = genet_stop;
898 ifp->if_capabilities = 0;
899 ifp->if_capenable = ifp->if_capabilities;
900 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
901 IFQ_SET_READY(&ifp->if_snd);
902
903 /* 802.1Q VLAN-sized frames are supported */
904 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
905
906 /* Attach MII driver */
907 sc->sc_ec.ec_mii = mii;
908 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
909 mii->mii_ifp = ifp;
910 mii->mii_readreg = genet_mii_readreg;
911 mii->mii_writereg = genet_mii_writereg;
912 mii->mii_statchg = genet_mii_statchg;
913 mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id, MII_OFFSET_ANY,
914 0);
915
916 if (LIST_EMPTY(&mii->mii_phys)) {
917 aprint_error_dev(sc->sc_dev, "no PHY found!\n");
918 return ENOENT;
919 }
920 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
921
922 /* Attach interface */
923 if_attach(ifp);
924 if_deferred_start_init(ifp, NULL);
925
926 /* Attach ethernet interface */
927 ether_ifattach(ifp, eaddr);
928
929 return 0;
930 }
931
#ifdef DDB
void genet_debug(void);

/*
 * DDB helper: dump the software and hardware TX/RX ring indices of
 * "genet0" side by side, useful when diagnosing ring stalls.
 */
void
genet_debug(void)
{
	device_t dev = device_find_by_xname("genet0");
	if (dev == NULL)
		return;

	struct genet_softc * const sc = device_private(dev);
	const int qid = GENET_DMA_DEFAULT_QUEUE;

	printf("TX CIDX = %08x (soft)\n", sc->sc_tx.cidx);
	printf("TX CIDX = %08x\n", RD4(sc, GENET_TX_DMA_CONS_INDEX(qid)));
	printf("TX PIDX = %08x (soft)\n", sc->sc_tx.pidx);
	printf("TX PIDX = %08x\n", RD4(sc, GENET_TX_DMA_PROD_INDEX(qid)));

	printf("RX CIDX = %08x (soft)\n", sc->sc_rx.cidx);
	printf("RX CIDX = %08x\n", RD4(sc, GENET_RX_DMA_CONS_INDEX(qid)));
	printf("RX PIDX = %08x (soft)\n", sc->sc_rx.pidx);
	printf("RX PIDX = %08x\n", RD4(sc, GENET_RX_DMA_PROD_INDEX(qid)));
}
#endif
956