1 /*-
2 * Copyright (c) 2012 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas of 3am Software Foundry.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #define GMAC_PRIVATE
31
32 #include "locators.h"
33
34 #include <sys/cdefs.h>
35
36 __KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.7 2012/10/06 01:30:46 matt Exp $");
37
38 #include <sys/param.h>
39 #include <sys/atomic.h>
40 #include <sys/bus.h>
41 #include <sys/device.h>
42 #include <sys/ioctl.h>
43 #include <sys/intr.h>
44 #include <sys/kmem.h>
45 #include <sys/mutex.h>
46 #include <sys/socket.h>
47 #include <sys/systm.h>
48
49 #include <net/if.h>
50 #include <net/if_ether.h>
51 #include <net/if_media.h>
52
53 #include <net/if_dl.h>
54
55 #include <net/bpf.h>
56
57 #include <dev/mii/miivar.h>
58
59 #include <arm/broadcom/bcm53xx_reg.h>
60 #include <arm/broadcom/bcm53xx_var.h>
61
62 #define BCMETH_RCVOFFSET 6
63 #define BCMETH_MAXTXMBUFS 32
64 #define BCMETH_NTXSEGS 30
65 #define BCMETH_MAXRXMBUFS 255
66 #define BCMETH_MINRXMBUFS 32
67 #define BCMETH_NRXSEGS 1
68
69 static int bcmeth_ccb_match(device_t, cfdata_t, void *);
70 static void bcmeth_ccb_attach(device_t, device_t, void *);
71
72 struct bcmeth_txqueue {
73 bus_dmamap_t txq_descmap;
74 struct gmac_txdb *txq_consumer;
75 struct gmac_txdb *txq_producer;
76 struct gmac_txdb *txq_first;
77 struct gmac_txdb *txq_last;
78 struct ifqueue txq_mbufs;
79 struct mbuf *txq_next;
80 size_t txq_free;
81 size_t txq_threshold;
82 size_t txq_lastintr;
83 bus_size_t txq_reg_xmtaddrlo;
84 bus_size_t txq_reg_xmtptr;
85 bus_size_t txq_reg_xmtctl;
86 bus_size_t txq_reg_xmtsts0;
87 bus_dma_segment_t txq_descmap_seg;
88 };
89
90 struct bcmeth_rxqueue {
91 bus_dmamap_t rxq_descmap;
92 struct gmac_rxdb *rxq_consumer;
93 struct gmac_rxdb *rxq_producer;
94 struct gmac_rxdb *rxq_first;
95 struct gmac_rxdb *rxq_last;
96 struct mbuf *rxq_mhead;
97 struct mbuf **rxq_mtail;
98 struct mbuf *rxq_mconsumer;
99 size_t rxq_inuse;
100 size_t rxq_threshold;
101 bus_size_t rxq_reg_rcvaddrlo;
102 bus_size_t rxq_reg_rcvptr;
103 bus_size_t rxq_reg_rcvctl;
104 bus_size_t rxq_reg_rcvsts0;
105 bus_dma_segment_t rxq_descmap_seg;
106 };
107
108 struct bcmeth_mapcache {
109 u_int dmc_nmaps;
110 u_int dmc_maxseg;
111 u_int dmc_maxmaps;
112 u_int dmc_maxmapsize;
113 bus_dmamap_t dmc_maps[0];
114 };
115
116 struct bcmeth_softc {
117 device_t sc_dev;
118 bus_space_tag_t sc_bst;
119 bus_space_handle_t sc_bsh;
120 bus_dma_tag_t sc_dmat;
121 kmutex_t *sc_lock;
122 kmutex_t *sc_hwlock;
123 struct ethercom sc_ec;
124 #define sc_if sc_ec.ec_if
125 struct ifmedia sc_media;
126 void *sc_soft_ih;
127 void *sc_ih;
128
129 struct bcmeth_rxqueue sc_rxq;
130 struct bcmeth_txqueue sc_txq;
131
132 uint32_t sc_maxfrm;
133 uint32_t sc_cmdcfg;
134 uint32_t sc_intmask;
135 volatile uint32_t sc_soft_flags;
136 #define SOFT_RXINTR 0x01
137 #define SOFT_RXUNDERFLOW 0x02
138 #define SOFT_TXINTR 0x04
139 #define SOFT_REINIT 0x08
140
141 struct evcnt sc_ev_intr;
142 struct evcnt sc_ev_soft_intr;
143 struct evcnt sc_ev_tx_stall;
144
145 struct ifqueue sc_rx_bufcache;
146 struct bcmeth_mapcache *sc_rx_mapcache;
147 struct bcmeth_mapcache *sc_tx_mapcache;
148
149 uint8_t sc_enaddr[ETHER_ADDR_LEN];
150 };
151
152 static void bcmeth_ifstart(struct ifnet *);
153 static void bcmeth_ifwatchdog(struct ifnet *);
154 static int bcmeth_ifinit(struct ifnet *);
155 static void bcmeth_ifstop(struct ifnet *, int);
156 static int bcmeth_ifioctl(struct ifnet *, u_long, void *);
157
158 static int bcmeth_mapcache_create(struct bcmeth_softc *,
159 struct bcmeth_mapcache **, size_t, size_t, size_t);
160 static void bcmeth_mapcache_destroy(struct bcmeth_softc *,
161 struct bcmeth_mapcache *);
162 static bus_dmamap_t bcmeth_mapcache_get(struct bcmeth_softc *,
163 struct bcmeth_mapcache *);
164 static void bcmeth_mapcache_put(struct bcmeth_softc *,
165 struct bcmeth_mapcache *, bus_dmamap_t);
166
167 static int bcmeth_txq_attach(struct bcmeth_softc *,
168 struct bcmeth_txqueue *, u_int);
169 static void bcmeth_txq_purge(struct bcmeth_softc *,
170 struct bcmeth_txqueue *);
171 static void bcmeth_txq_reset(struct bcmeth_softc *,
172 struct bcmeth_txqueue *);
173 static bool bcmeth_txq_consume(struct bcmeth_softc *,
174 struct bcmeth_txqueue *);
175 static bool bcmeth_txq_produce(struct bcmeth_softc *,
176 struct bcmeth_txqueue *, struct mbuf *m);
177 static bool bcmeth_txq_active_p(struct bcmeth_softc *,
178 struct bcmeth_txqueue *);
179
180 static int bcmeth_rxq_attach(struct bcmeth_softc *,
181 struct bcmeth_rxqueue *, u_int);
182 static bool bcmeth_rxq_produce(struct bcmeth_softc *,
183 struct bcmeth_rxqueue *);
184 static void bcmeth_rxq_purge(struct bcmeth_softc *,
185 struct bcmeth_rxqueue *, bool);
186 static void bcmeth_rxq_reset(struct bcmeth_softc *,
187 struct bcmeth_rxqueue *);
188
189 static int bcmeth_intr(void *);
190 static void bcmeth_soft_intr(void *);
191
192 static int bcmeth_mediachange(struct ifnet *);
193 static void bcmeth_mediastatus(struct ifnet *, struct ifmediareq *);
194
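/*
 * 32-bit register accessors for the GMAC/UNIMAC register block mapped
 * (via bus_space_subregion) in bcmeth_ccb_attach().
 */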
195 static inline uint32_t
196 bcmeth_read_4(struct bcmeth_softc *sc, bus_size_t o)
197 {
198 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, o);
199 }
200
201 static inline void
202 bcmeth_write_4(struct bcmeth_softc *sc, bus_size_t o, uint32_t v)
203 {
204 bus_space_write_4(sc->sc_bst, sc->sc_bsh, o, v);
205 }
206
207 CFATTACH_DECL_NEW(bcmeth_ccb, sizeof(struct bcmeth_softc),
208 bcmeth_ccb_match, bcmeth_ccb_attach, NULL, NULL);
209
210 static int
211 bcmeth_ccb_match(device_t parent, cfdata_t cf, void *aux)
212 {
213 struct bcmccb_attach_args * const ccbaa = aux;
214 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;
215
216 if (strcmp(cf->cf_name, loc->loc_name))
217 return 0;
218
219 #ifdef DIAGNOSTIC
220 const int port = cf->cf_loc[BCMCCBCF_PORT];
221 #endif
222 KASSERT(port == BCMCCBCF_PORT_DEFAULT || port == loc->loc_port);
223
224 return 1;
225 }
226
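/*
 * Attach a GMAC instance: map its register subregion, determine the
 * Ethernet address (from the "mac-address" property or, failing that,
 * from whatever is already programmed into the UNIMAC MAC registers),
 * create the rx/tx descriptor rings and dmamap caches, establish the
 * hard and soft interrupts, and attach the network interface.
 */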
227 static void
228 bcmeth_ccb_attach(device_t parent, device_t self, void *aux)
229 {
230 struct bcmeth_softc * const sc = device_private(self);
231 struct ethercom * const ec = &sc->sc_ec;
232 struct ifnet * const ifp = &ec->ec_if;
233 struct bcmccb_attach_args * const ccbaa = aux;
234 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;
235 const char * const xname = device_xname(self);
236 prop_dictionary_t dict = device_properties(self);
237 int error;
238
239 sc->sc_bst = ccbaa->ccbaa_ccb_bst;
240 sc->sc_dmat = ccbaa->ccbaa_dmat;
241 bus_space_subregion(sc->sc_bst, ccbaa->ccbaa_ccb_bsh,
242 loc->loc_offset, loc->loc_size, &sc->sc_bsh);
243
244 prop_data_t eaprop = prop_dictionary_get(dict, "mac-address");
245 if (eaprop == NULL) {
246 uint32_t mac0 = bcmeth_read_4(sc, UNIMAC_MAC_0);
247 uint32_t mac1 = bcmeth_read_4(sc, UNIMAC_MAC_1);
248 if ((mac0 == 0 && mac1 == 0) || (mac1 & 1)) {
249 aprint_error(": mac-address property is missing\n");
250 return;
251 }
252 sc->sc_enaddr[0] = (mac0 >> 0) & 0xff;
253 sc->sc_enaddr[1] = (mac0 >> 8) & 0xff;
254 sc->sc_enaddr[2] = (mac0 >> 16) & 0xff;
255 sc->sc_enaddr[3] = (mac0 >> 24) & 0xff;
256 sc->sc_enaddr[4] = (mac1 >> 0) & 0xff;
257 sc->sc_enaddr[5] = (mac1 >> 8) & 0xff;
258 } else {
259 KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
260 KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
261 memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop),
262 ETHER_ADDR_LEN);
263 }
264 sc->sc_dev = self;
265 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
266 sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);
267
268 bcmeth_write_4(sc, GMAC_INTMASK, 0); // disable interrupts
269
270 aprint_naive("\n");
271 aprint_normal(": Gigabit Ethernet Controller\n");
272
273 error = bcmeth_rxq_attach(sc, &sc->sc_rxq, 0);
274 if (error) {
275 aprint_error(": failed to init rxq: %d\n", error);
276 return;
277 }
278
279 error = bcmeth_txq_attach(sc, &sc->sc_txq, 0);
280 if (error) {
281 aprint_error(": failed to init txq: %d\n", error);
282 return;
283 }
284
285 error = bcmeth_mapcache_create(sc, &sc->sc_rx_mapcache,
286 BCMETH_MAXRXMBUFS, MCLBYTES, BCMETH_NRXSEGS);
287 if (error) {
288 aprint_error(": failed to allocate rx dmamaps: %d\n", error);
289 return;
290 }
291
292 error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
293 BCMETH_MAXTXMBUFS, MCLBYTES, BCMETH_NTXSEGS);
294 if (error) {
295 aprint_error(": failed to allocate tx dmamaps: %d\n", error);
296 return;
297 }
298
299 sc->sc_soft_ih = softint_establish(SOFTINT_MPSAFE | SOFTINT_NET,
300 bcmeth_soft_intr, sc);
301
302 sc->sc_ih = intr_establish(loc->loc_intrs[0], IPL_VM, IST_LEVEL,
303 bcmeth_intr, sc);
304
305 if (sc->sc_ih == NULL) {
306 aprint_error_dev(self, "failed to establish interrupt %d\n",
307 loc->loc_intrs[0]);
308 } else {
309 aprint_normal_dev(self, "interrupting on irq %d\n",
310 loc->loc_intrs[0]);
311 }
312
313 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
314 ether_sprintf(sc->sc_enaddr));
315
316 /*
317 * Since each port is plugged into the switch/flow-accelerator,
318 * we hard code the media to 1000baseT full-duplex with flow control enabled.
319 */
320 int ifmedia = IFM_ETHER|IFM_1000_T|IFM_FDX;
321 //ifmedia |= IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE;
322 ifmedia_init(&sc->sc_media, IFM_IMASK, bcmeth_mediachange,
323 bcmeth_mediastatus);
324 ifmedia_add(&sc->sc_media, ifmedia, 0, NULL);
325 ifmedia_set(&sc->sc_media, ifmedia);
326
327 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
328
329 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
330 ifp->if_softc = sc;
331 ifp->if_baudrate = IF_Mbps(1000);
332 ifp->if_capabilities = 0;
333 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
334 ifp->if_ioctl = bcmeth_ifioctl;
335 ifp->if_start = bcmeth_ifstart;
336 ifp->if_watchdog = bcmeth_ifwatchdog;
337 ifp->if_init = bcmeth_ifinit;
338 ifp->if_stop = bcmeth_ifstop;
339 IFQ_SET_READY(&ifp->if_snd);
340
341 bcmeth_ifstop(ifp, true);
342
343 /*
344 * Attach the interface.
345 */
346 if_attach(ifp);
347 ether_ifattach(ifp, sc->sc_enaddr);
348
349 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
350 NULL, xname, "intr");
351 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
352 NULL, xname, "soft intr");
353 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
354 NULL, xname, "tx stalls");
355 }
356
357 static int
358 bcmeth_mediachange(struct ifnet *ifp)
359 {
360 //struct bcmeth_softc * const sc = ifp->if_softc;
361 return 0;
362 }
363
364 static void
365 bcmeth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifm)
366 {
367 //struct bcmeth_softc * const sc = ifp->if_softc;
368
369 ifm->ifm_status = IFM_AVALID | IFM_ACTIVE;
370 ifm->ifm_active = IFM_ETHER | IFM_FDX | IFM_1000_T;
371 }
372
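/*
 * Pack a 6-byte Ethernet address into the 48-bit value whose low 32
 * bits are written to UNIMAC_MAC_0 and whose upper 16 bits are written
 * to UNIMAC_MAC_1 by bcmeth_ifinit().
 */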
373 static uint64_t
374 bcmeth_macaddr_create(const uint8_t *enaddr)
375 {
376 return (enaddr[3] << 0) // UNIMAC_MAC_0
377 | (enaddr[2] << 8) // UNIMAC_MAC_0
378 | (enaddr[1] << 16) // UNIMAC_MAC_0
379 | (enaddr[0] << 24) // UNIMAC_MAC_0
380 | ((uint64_t)enaddr[5] << 32) // UNIMAC_MAC_1
381 | ((uint64_t)enaddr[4] << 40); // UNIMAC_MAC_1
382 }
383
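/*
 * Bring the interface up: (re)create the transmit mapcache if the
 * maximum frame size changed, program the UNIMAC and the rx/tx DMA
 * engines, refill the receive ring, and unmask the interrupts we
 * care about.
 */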
384 static int
385 bcmeth_ifinit(struct ifnet *ifp)
386 {
387 struct bcmeth_softc * const sc = ifp->if_softc;
388 int error = 0;
389
390 sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
391 if (ifp->if_mtu > ETHERMTU_JUMBO)
392 return error;
393
394 KASSERT(ifp->if_flags & IFF_UP);
395
396 /*
397 * Stop the interface
398 */
399 bcmeth_ifstop(ifp, 0);
400
401 /*
402 * If our frame size has changed (or this is our first time through),
403 * destroy any existing transmit mapcache so one sized for sc_maxfrm is created below.
404 */
405 if (sc->sc_tx_mapcache != NULL
406 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
407 bcmeth_mapcache_destroy(sc, sc->sc_tx_mapcache);
408 sc->sc_tx_mapcache = NULL;
409 }
410
411 if (sc->sc_tx_mapcache == NULL) {
412 error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
413 BCMETH_MAXTXMBUFS, sc->sc_maxfrm, BCMETH_NTXSEGS);
414 if (error)
415 return error;
416 }
417
418 sc->sc_cmdcfg = NO_LENGTH_CHECK | PAUSE_IGNORE
419 | __SHIFTIN(ETH_SPEED_1000, ETH_SPEED)
420 | RX_ENA | TX_ENA;
421
422 if (ifp->if_flags & IFF_PROMISC) {
423 sc->sc_cmdcfg |= PROMISC_EN;
424 } else {
425 sc->sc_cmdcfg &= ~PROMISC_EN;
426 }
427
428 const uint64_t macstnaddr =
429 bcmeth_macaddr_create(CLLADDR(ifp->if_sadl));
430
431 sc->sc_intmask = DESCPROTOERR|DATAERR|DESCERR;
432
433 /* 5. Load RCVADDR_LO with new pointer */
434 bcmeth_rxq_reset(sc, &sc->sc_rxq);
435
436 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
437 __SHIFTIN(BCMETH_RCVOFFSET, RCVCTL_RCVOFFSET)
438 | RCVCTL_PARITY_DIS
439 | RCVCTL_OFLOW_CONTINUE
440 | __SHIFTIN(4, RCVCTL_BURSTLEN));
441
442 /* 6. Load XMTADDR_LO with new pointer */
443 bcmeth_txq_reset(sc, &sc->sc_txq);
444
445 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, XMTCTL_DMA_ACT_INDEX
446 | XMTCTL_PARITY_DIS
447 | __SHIFTIN(4, XMTCTL_BURSTLEN));
448
449 /* 7. Setup other UNIMAC registers */
450 bcmeth_write_4(sc, UNIMAC_FRAME_LEN, sc->sc_maxfrm);
451 bcmeth_write_4(sc, UNIMAC_MAC_0, (uint32_t)(macstnaddr >> 0));
452 bcmeth_write_4(sc, UNIMAC_MAC_1, (uint32_t)(macstnaddr >> 32));
453 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, sc->sc_cmdcfg);
454
455 uint32_t devctl = bcmeth_read_4(sc, GMAC_DEVCONTROL);
456 devctl |= RGMII_LINK_STATUS_SEL | NWAY_AUTO_POLL_EN | TXARB_STRICT_MODE;
457 devctl &= ~FLOW_CTRL_MODE;
458 devctl &= ~MIB_RD_RESET_EN;
459 devctl &= ~RXQ_OVERFLOW_CTRL_SEL;
460 devctl &= ~CPU_FLOW_CTRL_ON;
461 bcmeth_write_4(sc, GMAC_DEVCONTROL, devctl);
462
463 /* Setup lazy receive (at most 1ms). */
464 bcmeth_write_4(sc, GMAC_INTRCVLAZY, __SHIFTIN(10, INTRCVLAZY_FRAMECOUNT)
465 | __SHIFTIN(125000000 / 1000, INTRCVLAZY_TIMEOUT));
466
467 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */
468 sc->sc_intmask |= XMTINT_0|XMTUF;
469 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl,
470 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl) | XMTCTL_ENABLE);
471
472
473 /* 12. Enable receive queues in RQUEUE, */
474 sc->sc_intmask |= RCVINT|RCVDESCUF|RCVFIFOOF;
475 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
476 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl) | RCVCTL_ENABLE);
477
478 bcmeth_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */
479
480 #if 0
481 aprint_normal_dev(sc->sc_dev,
482 "devctl=%#x ucmdcfg=%#x xmtctl=%#x rcvctl=%#x\n",
483 devctl, sc->sc_cmdcfg,
484 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl),
485 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl));
486 #endif
487
488 sc->sc_soft_flags = 0;
489
490 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
491
492 ifp->if_flags |= IFF_RUNNING;
493
494 return error;
495 }
496
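/*
 * Stop the interface: disable the rx/tx DMA engines, mask all
 * interrupts, wait for both DMA state machines to go idle, reset the
 * UNIMAC, and then reclaim (or recycle) any mbufs still owned by the
 * rings.
 */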
497 static void
498 bcmeth_ifstop(struct ifnet *ifp, int disable)
499 {
500 struct bcmeth_softc * const sc = ifp->if_softc;
501 struct bcmeth_txqueue * const txq = &sc->sc_txq;
502 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
503
504 KASSERT(!cpu_intr_p());
505
506 sc->sc_soft_flags = 0;
507
508 /* Disable Rx processing */
509 bcmeth_write_4(sc, rxq->rxq_reg_rcvctl,
510 bcmeth_read_4(sc, rxq->rxq_reg_rcvctl) & ~RCVCTL_ENABLE);
511
512 /* Disable Tx processing */
513 bcmeth_write_4(sc, txq->txq_reg_xmtctl,
514 bcmeth_read_4(sc, txq->txq_reg_xmtctl) & ~XMTCTL_ENABLE);
515
516 /* Disable all interrupts */
517 bcmeth_write_4(sc, GMAC_INTMASK, 0);
518
519 for (;;) {
520 uint32_t tx0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
521 uint32_t rx0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
522 if (__SHIFTOUT(tx0, XMTSTATE) == XMTSTATE_DIS
523 && __SHIFTOUT(rx0, RCVSTATE) == RCVSTATE_DIS)
524 break;
525 delay(50);
526 }
527 /*
528 * Now reset the controller.
529 *
530 * 3. Set SW_RESET bit in UNIMAC_COMMAND_CONFIG register
531 * 4. Clear SW_RESET bit in UNIMAC_COMMAND_CONFIG register
532 */
533 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, SW_RESET);
534 bcmeth_write_4(sc, GMAC_INTSTATUS, ~0);
535 sc->sc_intmask = 0;
536 ifp->if_flags &= ~IFF_RUNNING;
537
538 /*
539 * Consume any remaining transmitted packets, and if we are
540 * disabling the interface, also purge any untransmitted packets.
541 * Don't pass received packets up the stack: if we are disabling
542 * the interface drop them, otherwise save their mbufs in the rx
543 * bufcache for reuse.
544 */
545 bcmeth_rxq_purge(sc, &sc->sc_rxq, disable);
546 bcmeth_txq_consume(sc, &sc->sc_txq);
547 if (disable) {
548 bcmeth_txq_purge(sc, &sc->sc_txq);
549 IF_PURGE(&ifp->if_snd);
550 }
551
552 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, 0);
553 }
554
555 static void
556 bcmeth_ifwatchdog(struct ifnet *ifp)
557 {
558 }
559
560 static int
561 bcmeth_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
562 {
563 struct bcmeth_softc *sc = ifp->if_softc;
564 struct ifreq * const ifr = data;
565 const int s = splnet();
566 int error;
567
568 switch (cmd) {
569 case SIOCSIFMEDIA:
570 case SIOCGIFMEDIA:
571 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
572 break;
573
574 default:
575 error = ether_ioctl(ifp, cmd, data);
576 if (error != ENETRESET)
577 break;
578
579 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
580 error = 0;
581 break;
582 }
583 error = bcmeth_ifinit(ifp);
584 break;
585 }
586
587 splx(s);
588 return error;
589 }
590
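/*
 * Descriptor cache synchronization helpers.  Each syncs 'count'
 * consecutive descriptors starting at the given descriptor, using its
 * offset from the start of the ring as the offset into the descriptor
 * dmamap.
 */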
591 static void
592 bcmeth_rxq_desc_presync(
593 struct bcmeth_softc *sc,
594 struct bcmeth_rxqueue *rxq,
595 struct gmac_rxdb *rxdb,
596 size_t count)
597 {
598 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
599 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
600 BUS_DMASYNC_PREWRITE);
601 }
602
603 static void
604 bcmeth_rxq_desc_postsync(
605 struct bcmeth_softc *sc,
606 struct bcmeth_rxqueue *rxq,
607 struct gmac_rxdb *rxdb,
608 size_t count)
609 {
610 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
611 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
612 BUS_DMASYNC_POSTWRITE);
613 }
614
615 static void
616 bcmeth_txq_desc_presync(
617 struct bcmeth_softc *sc,
618 struct bcmeth_txqueue *txq,
619 struct gmac_txdb *txdb,
620 size_t count)
621 {
622 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
623 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
624 BUS_DMASYNC_PREWRITE);
625 }
626
627 static void
628 bcmeth_txq_desc_postsync(
629 struct bcmeth_softc *sc,
630 struct bcmeth_txqueue *txq,
631 struct gmac_txdb *txdb,
632 size_t count)
633 {
634 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
635 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
636 BUS_DMASYNC_POSTWRITE);
637 }
638
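/*
 * The mapcache is a simple LIFO stack of preallocated dmamaps, so maps
 * can be handed out and returned in the packet paths without calling
 * bus_dmamap_create()/bus_dmamap_destroy() for every packet.
 */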
639 static bus_dmamap_t
640 bcmeth_mapcache_get(
641 struct bcmeth_softc *sc,
642 struct bcmeth_mapcache *dmc)
643 {
644 KASSERT(dmc->dmc_nmaps > 0);
645 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
646 return dmc->dmc_maps[--dmc->dmc_nmaps];
647 }
648
649 static void
650 bcmeth_mapcache_put(
651 struct bcmeth_softc *sc,
652 struct bcmeth_mapcache *dmc,
653 bus_dmamap_t map)
654 {
655 KASSERT(map != NULL);
656 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
657 dmc->dmc_maps[dmc->dmc_nmaps++] = map;
658 }
659
660 static void
661 bcmeth_mapcache_destroy(
662 struct bcmeth_softc *sc,
663 struct bcmeth_mapcache *dmc)
664 {
665 const size_t dmc_size =
666 offsetof(struct bcmeth_mapcache, dmc_maps[dmc->dmc_maxmaps]);
667
668 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
669 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
670 }
671 kmem_intr_free(dmc, dmc_size);
672 }
673
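/*
 * Create a mapcache holding 'maxmaps' dmamaps, each able to map a
 * buffer of up to 'maxmapsize' bytes in at most 'maxseg' segments.
 */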
674 static int
675 bcmeth_mapcache_create(
676 struct bcmeth_softc *sc,
677 struct bcmeth_mapcache **dmc_p,
678 size_t maxmaps,
679 size_t maxmapsize,
680 size_t maxseg)
681 {
682 const size_t dmc_size =
683 offsetof(struct bcmeth_mapcache, dmc_maps[maxmaps]);
684 struct bcmeth_mapcache * const dmc =
685 kmem_intr_zalloc(dmc_size, KM_NOSLEEP);
686
687 dmc->dmc_maxmaps = maxmaps;
688 dmc->dmc_nmaps = maxmaps;
689 dmc->dmc_maxmapsize = maxmapsize;
690 dmc->dmc_maxseg = maxseg;
691
692 for (u_int i = 0; i < maxmaps; i++) {
693 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
694 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
695 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
696 if (error) {
697 aprint_error_dev(sc->sc_dev,
698 "failed to create dma map cache "
699 "entry %u of %zu: %d\n",
700 i, maxmaps, error);
701 while (i-- > 0) {
702 bus_dmamap_destroy(sc->sc_dmat,
703 dmc->dmc_maps[i]);
704 }
705 kmem_intr_free(dmc, dmc_size);
706 return error;
707 }
708 KASSERT(dmc->dmc_maps[i] != NULL);
709 }
710
711 *dmc_p = dmc;
712
713 return 0;
714 }
715
716 #if 0
717 static void
718 bcmeth_dmamem_free(
719 bus_dma_tag_t dmat,
720 size_t map_size,
721 bus_dma_segment_t *seg,
722 bus_dmamap_t map,
723 void *kvap)
724 {
725 bus_dmamap_destroy(dmat, map);
726 bus_dmamem_unmap(dmat, kvap, map_size);
727 bus_dmamem_free(dmat, seg, 1);
728 }
729 #endif
730
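/*
 * Allocate, map, and load a single physically contiguous chunk of DMA
 * memory for a descriptor ring; on success *kvap holds its kernel
 * virtual address and *map is loaded with its bus address.
 */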
731 static int
732 bcmeth_dmamem_alloc(
733 bus_dma_tag_t dmat,
734 size_t map_size,
735 bus_dma_segment_t *seg,
736 bus_dmamap_t *map,
737 void **kvap)
738 {
739 int error;
740 int nseg;
741
742 *kvap = NULL;
743 *map = NULL;
744
745 error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0,
746 seg, 1, &nseg, 0);
747 if (error)
748 return error;
749
750 KASSERT(nseg == 1);
751
752 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap,
753 BUS_DMA_COHERENT);
754 if (error == 0) {
755 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
756 map);
757 if (error == 0) {
758 error = bus_dmamap_load(dmat, *map, *kvap, map_size,
759 NULL, 0);
760 if (error == 0)
761 return 0;
762 bus_dmamap_destroy(dmat, *map);
763 *map = NULL;
764 }
765 bus_dmamem_unmap(dmat, *kvap, map_size);
766 *kvap = NULL;
767 }
768 bus_dmamem_free(dmat, seg, nseg);
769 return error;
770 }
771
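/*
 * Allocate a receive buffer: an mbuf cluster with a dmamap taken from
 * the rx mapcache stored as its context, loaded and presynced so the
 * receive DMA engine can write into it.
 */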
772 static struct mbuf *
773 bcmeth_rx_buf_alloc(
774 struct bcmeth_softc *sc)
775 {
776 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
777 if (m == NULL) {
778 printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
779 return NULL;
780 }
781 MCLGET(m, M_DONTWAIT);
782 if ((m->m_flags & M_EXT) == 0) {
783 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
784 m_freem(m);
785 return NULL;
786 }
787 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
788
789 bus_dmamap_t map = bcmeth_mapcache_get(sc, sc->sc_rx_mapcache);
790 if (map == NULL) {
791 printf("%s:%d: %s\n", __func__, __LINE__, "map get");
792 m_freem(m);
793 return NULL;
794 }
795 M_SETCTX(m, map);
796 m->m_len = m->m_pkthdr.len = MCLBYTES;
797 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
798 BUS_DMA_READ|BUS_DMA_NOWAIT);
799 if (error) {
800 aprint_error_dev(sc->sc_dev, "failed to load rx dmamap: %d\n",
801 error);
802 M_SETCTX(m, NULL);
803 m_freem(m);
804 bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
805 return NULL;
806 }
807 KASSERT(map->dm_mapsize == MCLBYTES);
808 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
809 BUS_DMASYNC_PREREAD);
810
811 return m;
812 }
813
814 static void
815 bcmeth_rx_map_unload(
816 struct bcmeth_softc *sc,
817 struct mbuf *m)
818 {
819 KASSERT(m);
820 for (; m != NULL; m = m->m_next) {
821 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
822 KASSERT(map);
823 KASSERT(map->dm_mapsize == MCLBYTES);
824 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
825 BUS_DMASYNC_POSTREAD);
826 bus_dmamap_unload(sc->sc_dmat, map);
827 bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
828 M_SETCTX(m, NULL);
829 }
830 }
831
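/*
 * Top up the receive ring: hand buffers (recycled from the rx bufcache
 * or freshly allocated) to the hardware until rxq_threshold descriptors
 * are in use, then advance RCVPTR so the DMA engine sees them.
 */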
832 static bool
833 bcmeth_rxq_produce(
834 struct bcmeth_softc *sc,
835 struct bcmeth_rxqueue *rxq)
836 {
837 struct gmac_rxdb *producer = rxq->rxq_producer;
838 bool produced = false;
839
840 while (rxq->rxq_inuse < rxq->rxq_threshold) {
841 struct mbuf *m;
842 IF_DEQUEUE(&sc->sc_rx_bufcache, m);
843 if (m == NULL) {
844 m = bcmeth_rx_buf_alloc(sc);
845 if (m == NULL) {
846 printf("%s: bcmeth_rx_buf_alloc failed\n", __func__);
847 break;
848 }
849 }
850 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
851 KASSERT(map);
852
853 producer->rxdb_buflen = MCLBYTES;
854 producer->rxdb_addrlo = map->dm_segs[0].ds_addr;
855 producer->rxdb_flags &= RXDB_FLAG_ET;
856 *rxq->rxq_mtail = m;
857 rxq->rxq_mtail = &m->m_next;
858 m->m_len = MCLBYTES;
859 m->m_next = NULL;
860 rxq->rxq_inuse++;
861 if (++producer == rxq->rxq_last) {
862 membar_producer();
863 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
864 rxq->rxq_last - rxq->rxq_producer);
865 producer = rxq->rxq_producer = rxq->rxq_first;
866 }
867 produced = true;
868 }
869 if (produced) {
870 membar_producer();
871 if (producer != rxq->rxq_producer) {
872 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
873 producer - rxq->rxq_producer);
874 rxq->rxq_producer = producer;
875 }
876 bcmeth_write_4(sc, rxq->rxq_reg_rcvptr,
877 rxq->rxq_descmap->dm_segs[0].ds_addr
878 + ((uintptr_t)producer & RCVPTR));
879 }
880 return true;
881 }
882
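/*
 * Pass a received packet up the stack: trim the RCVOFFSET prefix, set
 * the multicast/broadcast flags from the receive status word, update
 * the interface counters, and call if_input.
 */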
883 static void
884 bcmeth_rx_input(
885 struct bcmeth_softc *sc,
886 struct mbuf *m,
887 uint32_t rxdb_flags)
888 {
889 struct ifnet * const ifp = &sc->sc_if;
890
891 bcmeth_rx_map_unload(sc, m);
892
893 m_adj(m, BCMETH_RCVOFFSET);
894
895 switch (__SHIFTOUT(rxdb_flags, RXSTS_PKTTYPE)) {
896 case RXSTS_PKTTYPE_UC:
897 break;
898 case RXSTS_PKTTYPE_MC:
899 m->m_flags |= M_MCAST;
900 break;
901 case RXSTS_PKTTYPE_BC:
902 m->m_flags |= M_BCAST|M_MCAST;
903 break;
904 default:
905 if (sc->sc_cmdcfg & PROMISC_EN)
906 m->m_flags |= M_PROMISC;
907 break;
908 }
909 m->m_pkthdr.rcvif = ifp;
910
911 ifp->if_ipackets++;
912 ifp->if_ibytes += m->m_pkthdr.len;
913
914 /*
915 * Let's give it to the network subsystem to deal with.
916 */
917 int s = splnet();
918 bpf_mtap(ifp, m);
919 (*ifp->if_input)(ifp, m);
920 splx(s);
921 }
922
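/*
 * Drain completed receive descriptors: walk from the consumer index up
 * to the descriptor the hardware is currently working on, reassemble
 * multi-descriptor frames from the queued mbufs, and hand good frames
 * to bcmeth_rx_input().  Errored frames are recycled through the rx
 * bufcache instead.
 */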
923 static void
924 bcmeth_rxq_consume(
925 struct bcmeth_softc *sc,
926 struct bcmeth_rxqueue *rxq)
927 {
928 struct ifnet * const ifp = &sc->sc_if;
929 struct gmac_rxdb *consumer = rxq->rxq_consumer;
930 size_t rxconsumed = 0;
931
932 for (;;) {
933 if (consumer == rxq->rxq_producer) {
934 rxq->rxq_consumer = consumer;
935 rxq->rxq_inuse -= rxconsumed;
936 KASSERT(rxq->rxq_inuse == 0);
937 return;
938 }
939
940 uint32_t rcvsts0 = bcmeth_read_4(sc, GMAC_RCVSTATUS0);
941 uint32_t currdscr = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
942 if (consumer == rxq->rxq_first + currdscr) {
943 rxq->rxq_consumer = consumer;
944 rxq->rxq_inuse -= rxconsumed;
945 return;
946 }
947 bcmeth_rxq_desc_postsync(sc, rxq, consumer, 1);
948
949 /*
950 * We own this packet again. Copy the rxsts word from it.
951 */
952 rxconsumed++;
953 uint32_t rxsts;
954 KASSERT(rxq->rxq_mhead != NULL);
955 bus_dmamap_t map = M_GETCTX(rxq->rxq_mhead, bus_dmamap_t);
956 bus_dmamap_sync(sc->sc_dmat, map, 0, arm_dcache_align,
957 BUS_DMASYNC_POSTREAD);
958 memcpy(&rxsts, rxq->rxq_mhead->m_data, 4);
959
960 /*
961 * Get the count of descriptors. Fetch the correct number
962 * of mbufs.
963 */
964 size_t desc_count = __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1;
965 struct mbuf *m = rxq->rxq_mhead;
966 struct mbuf *m_last = m;
967 for (size_t i = 1; i < desc_count; i++) {
968 if (++consumer == rxq->rxq_last) {
969 consumer = rxq->rxq_first;
970 }
971 KASSERT(consumer != rxq->rxq_first + currdscr);
972 m_last = m_last->m_next;
973 }
974
975 /*
976 * Now remove it/them from the list of enqueued mbufs.
977 */
978 if ((rxq->rxq_mhead = m_last->m_next) == NULL)
979 rxq->rxq_mtail = &rxq->rxq_mhead;
980 m_last->m_next = NULL;
981
982 if (rxsts & (RXSTS_CRC_ERROR|RXSTS_OVERSIZED|RXSTS_PKT_OVERFLOW)) {
983 aprint_error_dev(sc->sc_dev, "[%zu]: count=%zu rxsts=%#x\n",
984 consumer - rxq->rxq_first, desc_count, rxsts);
985 /*
986 * We encountered an error, take the mbufs and add them
987 * to the rx bufcache so we can quickly reuse them.
988 */
989 ifp->if_ierrors++;
990 do {
991 struct mbuf *m0 = m->m_next;
992 m->m_next = NULL;
993 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
994 m = m0;
995 } while (m);
996 } else {
997 uint32_t framelen = __SHIFTOUT(rxsts, RXSTS_FRAMELEN);
998 framelen += BCMETH_RCVOFFSET;
999 m->m_pkthdr.len = framelen;
1000 if (desc_count == 1) {
1001 KASSERT(framelen <= MCLBYTES);
1002 m->m_len = framelen;
1003 } else {
1004 m_last->m_len = framelen & (MCLBYTES - 1);
1005 }
1006 bcmeth_rx_input(sc, m, rxsts);
1007 }
1008
1009 /*
1010 * Wrap at the last entry!
1011 */
1012 if (++consumer == rxq->rxq_last) {
1013 KASSERT(consumer[-1].rxdb_flags & RXDB_FLAG_ET);
1014 consumer = rxq->rxq_first;
1015 }
1016 }
1017 }
1018
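/*
 * Release every mbuf still queued on the receive ring: unload and free
 * them when discarding, otherwise keep them in the rx bufcache so
 * bcmeth_rxq_produce() can reuse them.
 */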
1019 static void
1020 bcmeth_rxq_purge(
1021 struct bcmeth_softc *sc,
1022 struct bcmeth_rxqueue *rxq,
1023 bool discard)
1024 {
1025 struct mbuf *m;
1026
1027 if ((m = rxq->rxq_mhead) != NULL) {
1028 if (discard) {
1029 bcmeth_rx_map_unload(sc, m);
1030 m_freem(m);
1031 } else {
1032 while (m != NULL) {
1033 struct mbuf *m0 = m->m_next;
1034 m->m_next = NULL;
1035 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1036 m = m0;
1037 }
1038 }
1039
1040 }
1041
1042 rxq->rxq_mhead = NULL;
1043 rxq->rxq_mtail = &rxq->rxq_mhead;
1044 rxq->rxq_inuse = 0;
1045 }
1046
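/*
 * Reinitialize the receive ring: reclaim ownership of every descriptor,
 * set the end-of-table (wrap) flag on the last one, reset the
 * producer/consumer indexes, and point the DMA engine back at the start
 * of the ring.
 */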
1047 static void
1048 bcmeth_rxq_reset(
1049 struct bcmeth_softc *sc,
1050 struct bcmeth_rxqueue *rxq)
1051 {
1052 /*
1053 * sync all the descriptors
1054 */
1055 bcmeth_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
1056 rxq->rxq_last - rxq->rxq_first);
1057
1058 /*
1059 * Make sure we own all descriptors in the ring.
1060 */
1061 struct gmac_rxdb *rxdb;
1062 for (rxdb = rxq->rxq_first; rxdb < rxq->rxq_last - 1; rxdb++) {
1063 rxdb->rxdb_flags = RXDB_FLAG_IC;
1064 }
1065
1066 /*
1067 * Last descriptor has the wrap flag.
1068 */
1069 rxdb->rxdb_flags = RXDB_FLAG_ET|RXDB_FLAG_IC;
1070
1071 /*
1072 * Reset the producer and consumer indexes.
1073 */
1074 rxq->rxq_consumer = rxq->rxq_first;
1075 rxq->rxq_producer = rxq->rxq_first;
1076 rxq->rxq_inuse = 0;
1077 if (rxq->rxq_threshold < BCMETH_MINRXMBUFS)
1078 rxq->rxq_threshold = BCMETH_MINRXMBUFS;
1079
1080 sc->sc_intmask |= RCVINT|RCVFIFOOF|RCVDESCUF;
1081
1082 /*
1083 * Restart the receiver at the first descriptor
1084 */
1085 bcmeth_write_4(sc, rxq->rxq_reg_rcvaddrlo,
1086 rxq->rxq_descmap->dm_segs[0].ds_addr);
1087 }
1088
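/*
 * One-time receive queue setup: allocate a page of receive descriptors
 * and record the GMAC register offsets used to drive the receive DMA
 * engine.
 */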
1089 static int
1090 bcmeth_rxq_attach(
1091 struct bcmeth_softc *sc,
1092 struct bcmeth_rxqueue *rxq,
1093 u_int qno)
1094 {
1095 size_t map_size = PAGE_SIZE;
1096 size_t desc_count = map_size / sizeof(rxq->rxq_first[0]);
1097 int error;
1098 void *descs;
1099
1100 KASSERT(desc_count == 256 || desc_count == 512);
1101
1102 error = bcmeth_dmamem_alloc(sc->sc_dmat, map_size,
1103 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
1104 if (error)
1105 return error;
1106
1107 memset(descs, 0, map_size);
1108 rxq->rxq_first = descs;
1109 rxq->rxq_last = rxq->rxq_first + desc_count;
1110 rxq->rxq_consumer = descs;
1111 rxq->rxq_producer = descs;
1112
1113 bcmeth_rxq_purge(sc, rxq, true);
1114 bcmeth_rxq_reset(sc, rxq);
1115
1116 rxq->rxq_reg_rcvaddrlo = GMAC_RCVADDR_LOW;
1117 rxq->rxq_reg_rcvctl = GMAC_RCVCONTROL;
1118 rxq->rxq_reg_rcvptr = GMAC_RCVPTR;
1119 rxq->rxq_reg_rcvsts0 = GMAC_RCVSTATUS0;
1120
1121 return 0;
1122 }
1123
1124 static bool
1125 bcmeth_txq_active_p(
1126 struct bcmeth_softc * const sc,
1127 struct bcmeth_txqueue *txq)
1128 {
1129 return !IF_IS_EMPTY(&txq->txq_mbufs);
1130 }
1131
1132 static bool
1133 bcmeth_txq_fillable_p(
1134 struct bcmeth_softc * const sc,
1135 struct bcmeth_txqueue *txq)
1136 {
1137 return txq->txq_free >= txq->txq_threshold;
1138 }
1139
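/*
 * One-time transmit queue setup: allocate a page of transmit
 * descriptors and record the GMAC register offsets used to drive the
 * transmit DMA engine.
 */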
1140 static int
1141 bcmeth_txq_attach(
1142 struct bcmeth_softc *sc,
1143 struct bcmeth_txqueue *txq,
1144 u_int qno)
1145 {
1146 size_t map_size = PAGE_SIZE;
1147 size_t desc_count = map_size / sizeof(txq->txq_first[0]);
1148 int error;
1149 void *descs;
1150
1151 KASSERT(desc_count == 256 || desc_count == 512);
1152
1153 error = bcmeth_dmamem_alloc(sc->sc_dmat, map_size,
1154 &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
1155 if (error)
1156 return error;
1157
1158 memset(descs, 0, map_size);
1159 txq->txq_first = descs;
1160 txq->txq_last = txq->txq_first + desc_count;
1161 txq->txq_consumer = descs;
1162 txq->txq_producer = descs;
1163
1164 IFQ_SET_MAXLEN(&txq->txq_mbufs, BCMETH_MAXTXMBUFS);
1165
1166 txq->txq_reg_xmtaddrlo = GMAC_XMTADDR_LOW;
1167 txq->txq_reg_xmtctl = GMAC_XMTCONTROL;
1168 txq->txq_reg_xmtptr = GMAC_XMTPTR;
1169 txq->txq_reg_xmtsts0 = GMAC_XMTSTATUS0;
1170
1171 bcmeth_txq_reset(sc, txq);
1172
1173 return 0;
1174 }
1175
1176 static int
1177 bcmeth_txq_map_load(
1178 struct bcmeth_softc *sc,
1179 struct bcmeth_txqueue *txq,
1180 struct mbuf *m)
1181 {
1182 bus_dmamap_t map;
1183 int error;
1184
1185 map = M_GETCTX(m, bus_dmamap_t);
1186 if (map != NULL)
1187 return 0;
1188
1189 map = bcmeth_mapcache_get(sc, sc->sc_tx_mapcache);
1190 if (map == NULL)
1191 return ENOMEM;
1192
1193 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1194 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1195 if (error)
1196 return error;
1197
1198 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
1199 BUS_DMASYNC_PREWRITE);
1200 M_SETCTX(m, map);
1201 return 0;
1202 }
1203
1204 static void
1205 bcmeth_txq_map_unload(
1206 struct bcmeth_softc *sc,
1207 struct bcmeth_txqueue *txq,
1208 struct mbuf *m)
1209 {
1210 KASSERT(m);
1211 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1212 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1213 BUS_DMASYNC_POSTWRITE);
1214 bus_dmamap_unload(sc->sc_dmat, map);
1215 bcmeth_mapcache_put(sc, sc->sc_tx_mapcache, map);
1216 }
1217
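/*
 * Post one dmamap-loaded mbuf chain to the transmit ring.  The first
 * descriptor gets the start-of-frame flag and the last the end-of-frame
 * flag (plus an interrupt request once enough descriptors have been
 * produced since the last interrupt); XMTPTR is then advanced so the
 * DMA engine sees the new work.  Returns false if the ring lacks room.
 */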
1218 static bool
1219 bcmeth_txq_produce(
1220 struct bcmeth_softc *sc,
1221 struct bcmeth_txqueue *txq,
1222 struct mbuf *m)
1223 {
1224 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1225
1226 if (map->dm_nsegs > txq->txq_free)
1227 return false;
1228
1229 /*
1230 * TCP Offload flag must be set in the first descriptor.
1231 */
1232 struct gmac_txdb *producer = txq->txq_producer;
1233 uint32_t first_flags = TXDB_FLAG_SF;
1234 uint32_t last_flags = TXDB_FLAG_EF;
1235
1236 /*
1237 * If we've produced enough descriptors without consuming any
1238 * we need to ask for an interrupt to reclaim some.
1239 */
1240 txq->txq_lastintr += map->dm_nsegs;
1241 if (txq->txq_lastintr >= txq->txq_threshold
1242 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
1243 txq->txq_lastintr = 0;
1244 last_flags |= TXDB_FLAG_IC;
1245 }
1246
1247 KASSERT(producer != txq->txq_last);
1248
1249 struct gmac_txdb *start = producer;
1250 size_t count = map->dm_nsegs;
1251 producer->txdb_flags |= first_flags;
1252 producer->txdb_addrlo = map->dm_segs[0].ds_addr;
1253 producer->txdb_buflen = map->dm_segs[0].ds_len;
1254 for (u_int i = 1; i < map->dm_nsegs; i++) {
1255 #if 0
1256 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
1257 producer->txdb_flags, producer->txdb_buflen,
1258 producer->txdb_addrlo, producer->txdb_addrhi);
1259 #endif
1260 if (__predict_false(++producer == txq->txq_last)) {
1261 bcmeth_txq_desc_presync(sc, txq, start,
1262 txq->txq_last - start);
1263 count -= txq->txq_last - start;
1264 producer = txq->txq_first;
1265 start = txq->txq_first;
1266 }
1267 producer->txdb_addrlo = map->dm_segs[i].ds_addr;
1268 producer->txdb_buflen = map->dm_segs[i].ds_len;
1269 }
1270 producer->txdb_flags |= last_flags;
1271 #if 0
1272 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
1273 producer->txdb_flags, producer->txdb_buflen,
1274 producer->txdb_addrlo, producer->txdb_addrhi);
1275 #endif
1276 bcmeth_txq_desc_presync(sc, txq, start, count);
1277
1278 /*
1279 * Reduce free count by the number of segments we consumed.
1280 */
1281 txq->txq_free -= map->dm_nsegs;
1282 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
1283 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txdb_flags & TXDB_FLAG_EF) == 0);
1284 KASSERT(producer->txdb_flags & TXDB_FLAG_EF);
1285
1286 #if 0
1287 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%zd..%zd)\n",
1288 __func__, m, m->m_pkthdr.len, map->dm_nsegs,
1289 txq->txq_producer - txq->txq_first, producer - txq->txq_first);
1290 #endif
1291
1292 if (++producer == txq->txq_last)
1293 txq->txq_producer = txq->txq_first;
1294 else
1295 txq->txq_producer = producer;
1296 IF_ENQUEUE(&txq->txq_mbufs, m);
1297 bpf_mtap(&sc->sc_if, m);
1298
1299 /*
1300 * Let the transmitter know there's more to do
1301 */
1302 bcmeth_write_4(sc, txq->txq_reg_xmtptr,
1303 txq->txq_descmap->dm_segs[0].ds_addr
1304 + ((uintptr_t)txq->txq_producer & XMT_LASTDSCR));
1305
1306 return true;
1307 }
1308
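/*
 * Move packets from the interface send queue onto the transmit ring
 * until the ring (or its mbuf queue) fills; a packet that cannot be
 * posted yet is parked in txq_next and retried on the next call.
 */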
1309 static bool
1310 bcmeth_txq_enqueue(
1311 struct bcmeth_softc *sc,
1312 struct bcmeth_txqueue *txq)
1313 {
1314 for (;;) {
1315 if (IF_QFULL(&txq->txq_mbufs))
1316 return false;
1317 struct mbuf *m = txq->txq_next;
1318 if (m == NULL) {
1319 int s = splnet();
1320 IF_DEQUEUE(&sc->sc_if.if_snd, m);
1321 splx(s);
1322 if (m == NULL)
1323 return true;
1324 M_SETCTX(m, NULL);
1325 } else {
1326 txq->txq_next = NULL;
1327 }
1328 int error = bcmeth_txq_map_load(sc, txq, m);
1329 if (error) {
1330 aprint_error_dev(sc->sc_dev,
1331 "discarded packet due to "
1332 "dmamap load failure: %d\n", error);
1333 m_freem(m);
1334 continue;
1335 }
1336 KASSERT(txq->txq_next == NULL);
1337 if (!bcmeth_txq_produce(sc, txq, m)) {
1338 txq->txq_next = m;
1339 return false;
1340 }
1341 KASSERT(txq->txq_next == NULL);
1342 }
1343 }
1344
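/*
 * Reclaim transmitted descriptors up to the one the hardware is
 * currently working on, unloading and freeing an mbuf chain each time
 * an end-of-frame descriptor is passed.  Returns true if the ring is
 * fillable again.
 */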
1345 static bool
1346 bcmeth_txq_consume(
1347 struct bcmeth_softc *sc,
1348 struct bcmeth_txqueue *txq)
1349 {
1350 struct ifnet * const ifp = &sc->sc_if;
1351 struct gmac_txdb *consumer = txq->txq_consumer;
1352 size_t txfree = 0;
1353
1354 #if 0
1355 printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
1356 #endif
1357
1358 for (;;) {
1359 if (consumer == txq->txq_producer) {
1360 txq->txq_consumer = consumer;
1361 txq->txq_free += txfree;
1362 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
1363 #if 0
1364 printf("%s: empty: freed %zu descriptors going from %zu to %zu\n",
1365 __func__, txfree, txq->txq_free - txfree, txq->txq_free);
1366 #endif
1367 KASSERT(txq->txq_lastintr == 0);
1368 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1);
1369 return true;
1370 }
1371 bcmeth_txq_desc_postsync(sc, txq, consumer, 1);
1372 uint32_t s0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
1373 if (consumer == txq->txq_first + __SHIFTOUT(s0, XMT_CURRDSCR)) {
1374 txq->txq_consumer = consumer;
1375 txq->txq_free += txfree;
1376 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
1377 #if 0
1378 printf("%s: freed %zu descriptors\n",
1379 __func__, txfree);
1380 #endif
1381 return bcmeth_txq_fillable_p(sc, txq);
1382 }
1383
1384 /*
1385 * If this is the last descriptor in the chain, get the
1386 * mbuf, free its dmamap, and free the mbuf chain itself.
1387 */
1388 const uint32_t txdb_flags = consumer->txdb_flags;
1389 if (txdb_flags & TXDB_FLAG_EF) {
1390 struct mbuf *m;
1391
1392 IF_DEQUEUE(&txq->txq_mbufs, m);
1393 KASSERT(m);
1394 bcmeth_txq_map_unload(sc, txq, m);
1395 #if 0
1396 printf("%s: mbuf %p: consumed a %u byte packet\n",
1397 __func__, m, m->m_pkthdr.len);
1398 #endif
1399 ifp->if_opackets++;
1400 ifp->if_obytes += m->m_pkthdr.len;
1401 if (m->m_flags & M_MCAST)
1402 ifp->if_omcasts++;
1403 m_freem(m);
1404 }
1405
1406 /*
1407 * We own this packet again. Clear all flags except wrap.
1408 */
1409 txfree++;
1410
1411 /*
1412 * Wrap at the last entry!
1413 */
1414 if (txdb_flags & TXDB_FLAG_ET) {
1415 consumer->txdb_flags = TXDB_FLAG_ET;
1416 KASSERT(consumer + 1 == txq->txq_last);
1417 consumer = txq->txq_first;
1418 } else {
1419 consumer->txdb_flags = 0;
1420 consumer++;
1421 KASSERT(consumer < txq->txq_last);
1422 }
1423 }
1424 }
1425
1426 static void
1427 bcmeth_txq_purge(
1428 struct bcmeth_softc *sc,
1429 struct bcmeth_txqueue *txq)
1430 {
1431 struct mbuf *m;
1432 KASSERT((bcmeth_read_4(sc, UNIMAC_COMMAND_CONFIG) & TX_ENA) == 0);
1433
1434 for (;;) {
1435 IF_DEQUEUE(&txq->txq_mbufs, m);
1436 if (m == NULL)
1437 break;
1438 bcmeth_txq_map_unload(sc, txq, m);
1439 m_freem(m);
1440 }
1441 if ((m = txq->txq_next) != NULL) {
1442 txq->txq_next = NULL;
1443 bcmeth_txq_map_unload(sc, txq, m);
1444 m_freem(m);
1445 }
1446 }
1447
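/*
 * Reinitialize the transmit ring: take back every descriptor, set the
 * end-of-table (wrap) flag on the last one, reset the ring bookkeeping,
 * and point the DMA engine at the start of the ring.
 */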
1448 static void
1449 bcmeth_txq_reset(
1450 struct bcmeth_softc *sc,
1451 struct bcmeth_txqueue *txq)
1452 {
1453 /*
1454 * sync all the descriptors
1455 */
1456 bcmeth_txq_desc_postsync(sc, txq, txq->txq_first,
1457 txq->txq_last - txq->txq_first);
1458
1459 /*
1460 * Make sure we own all descriptors in the ring.
1461 */
1462 struct gmac_txdb *txdb;
1463 for (txdb = txq->txq_first; txdb < txq->txq_last - 1; txdb++) {
1464 txdb->txdb_flags = 0;
1465 }
1466
1467 /*
1468 * Last descriptor has the wrap flag.
1469 */
1470 txdb->txdb_flags = TXDB_FLAG_ET;
1471
1472 /*
1473 * Reset the producer and consumer indexes.
1474 */
1475 txq->txq_consumer = txq->txq_first;
1476 txq->txq_producer = txq->txq_first;
1477 txq->txq_free = txq->txq_last - txq->txq_first - 1;
1478 txq->txq_threshold = txq->txq_free / 2;
1479 txq->txq_lastintr = 0;
1480
1481 /*
1482 * What do we want to get interrupted on?
1483 */
1484 sc->sc_intmask |= XMTINT_0 | XMTUF;
1485
1486 /*
1487 * Restart the transmitter at the first descriptor
1488 */
1489 bcmeth_write_4(sc, txq->txq_reg_xmtaddrlo,
1490 txq->txq_descmap->dm_segs->ds_addr);
1491 }
1492
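/*
 * if_start handler: the real work happens in the soft interrupt, so
 * just note that there is something to transmit and schedule it.
 */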
1493 static void
1494 bcmeth_ifstart(struct ifnet *ifp)
1495 {
1496 struct bcmeth_softc * const sc = ifp->if_softc;
1497
1498 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
1499 softint_schedule(sc->sc_soft_ih);
1500 }
1501
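/*
 * Hardware interrupt handler.  With the hwlock held, acknowledge the
 * pending causes (write-1-to-clear), mask the ones that will be
 * serviced, and defer the actual work to the soft interrupt handler.
 */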
1502 int
1503 bcmeth_intr(void *arg)
1504 {
1505 struct bcmeth_softc * const sc = arg;
1506 uint32_t soft_flags = 0;
1507 int rv = 0;
1508
1509 mutex_enter(sc->sc_hwlock);
1510
1511 sc->sc_ev_intr.ev_count++;
1512
1513 for (;;) {
1514 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
1515 intstatus &= sc->sc_intmask;
1516 bcmeth_write_4(sc, GMAC_INTSTATUS, intstatus); /* write 1 to clear */
1517 if (intstatus == 0) {
1518 break;
1519 }
1520 #if 0
1521 aprint_normal_dev(sc->sc_dev, "%s: intstatus=%#x intmask=%#x\n",
1522 __func__, intstatus, bcmeth_read_4(sc, GMAC_INTMASK));
1523 #endif
1524 if (intstatus & RCVINT) {
1525 intstatus &= ~RCVINT;
1526 sc->sc_intmask &= ~RCVINT;
1527 soft_flags |= SOFT_RXINTR;
1528 }
1529
1530 if (intstatus & XMTINT_0) {
1531 intstatus &= ~XMTINT_0;
1532 sc->sc_intmask &= ~XMTINT_0;
1533 soft_flags |= SOFT_TXINTR;
1534 }
1535
1536 if (intstatus & RCVDESCUF) {
1537 intstatus &= ~RCVDESCUF;
1538 sc->sc_intmask &= ~RCVDESCUF;
1539 soft_flags |= SOFT_RXUNDERFLOW;
1540 }
1541
1542 if (intstatus) {
1543 aprint_error_dev(sc->sc_dev, "intr: intstatus=%#x\n",
1544 intstatus);
1545 Debugger();
1546 sc->sc_intmask &= ~intstatus;
1547 soft_flags |= SOFT_REINIT;
1548 break;
1549 }
1550 }
1551
1552 if (soft_flags) {
1553 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1554 atomic_or_uint(&sc->sc_soft_flags, soft_flags);
1555 softint_schedule(sc->sc_soft_ih);
1556 rv = 1;
1557 }
1558
1559 mutex_exit(sc->sc_hwlock);
1560
1561 return rv;
1562 }
1563
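/*
 * Soft interrupt handler.  With sc_lock held, handle reinit and
 * rx-underflow recovery, reclaim transmitted packets and queue new
 * ones, consume received packets, refill the receive ring, and
 * re-enable the interrupts that bcmeth_intr() masked.
 */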
1564 void
1565 bcmeth_soft_intr(void *arg)
1566 {
1567 struct bcmeth_softc * const sc = arg;
1568 struct ifnet * const ifp = &sc->sc_if;
1569
1570 mutex_enter(sc->sc_lock);
1571
1572 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);
1573
1574 sc->sc_ev_soft_intr.ev_count++;
1575
1576 if (soft_flags & SOFT_REINIT) {
1577 int s = splnet();
1578 bcmeth_ifinit(ifp);
1579 splx(s);
1580 soft_flags = 0;
1581 }
1582
1583 if (soft_flags & SOFT_RXUNDERFLOW) {
1584 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
1585 size_t threshold = 5 * rxq->rxq_threshold / 4;
1586 if (threshold >= rxq->rxq_last - rxq->rxq_first) {
1587 threshold = rxq->rxq_last - rxq->rxq_first - 1;
1588 } else {
1589 sc->sc_intmask |= RCVDESCUF;
1590 }
1591 aprint_normal_dev(sc->sc_dev,
1592 "increasing receive buffers from %zu to %zu\n",
1593 rxq->rxq_threshold, threshold);
1594 rxq->rxq_threshold = threshold;
1595 }
1596
1597 if ((soft_flags & SOFT_TXINTR)
1598 || bcmeth_txq_active_p(sc, &sc->sc_txq)) {
1599 /*
1600 * Let's do what we came here for. Consume transmitted
1601 * packets off the transmit ring.
1602 */
1603 if (!bcmeth_txq_consume(sc, &sc->sc_txq)
1604 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
1605 sc->sc_ev_tx_stall.ev_count++;
1606 ifp->if_flags |= IFF_OACTIVE;
1607 } else {
1608 ifp->if_flags &= ~IFF_OACTIVE;
1609 }
1610 sc->sc_intmask |= XMTINT_0;
1611 }
1612
1613 if (soft_flags & (SOFT_RXINTR|SOFT_RXUNDERFLOW)) {
1614 /*
1615 * Let's consume
1616 */
1617 bcmeth_rxq_consume(sc, &sc->sc_rxq);
1618 sc->sc_intmask |= RCVINT;
1619 }
1620
1621 if (ifp->if_flags & IFF_RUNNING) {
1622 bcmeth_rxq_produce(sc, &sc->sc_rxq);
1623 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1624 } else {
1625 KASSERT((soft_flags & SOFT_RXUNDERFLOW) == 0);
1626 }
1627
1628 mutex_exit(sc->sc_lock);
1629 }
1630