     1 /*-
2 * Copyright (c) 2012 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas of 3am Software Foundry.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #define _ARM32_BUS_DMA_PRIVATE
31 #define GMAC_PRIVATE
32
33 #include "locators.h"
34 #include "opt_broadcom.h"
35
36 #include <sys/cdefs.h>
37
38 __KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.18 2012/12/07 22:21:03 matt Exp $");
39
40 #include <sys/param.h>
41 #include <sys/atomic.h>
42 #include <sys/bus.h>
43 #include <sys/device.h>
44 #include <sys/ioctl.h>
45 #include <sys/intr.h>
46 #include <sys/kmem.h>
47 #include <sys/mutex.h>
48 #include <sys/socket.h>
49 #include <sys/systm.h>
50 #include <sys/workqueue.h>
51
52 #include <net/if.h>
53 #include <net/if_ether.h>
54 #include <net/if_media.h>
55
56 #include <net/if_dl.h>
57
58 #include <net/bpf.h>
59
60 #include <dev/mii/miivar.h>
61
62 #include <arm/broadcom/bcm53xx_reg.h>
63 #include <arm/broadcom/bcm53xx_var.h>
64
65 //#define BCMETH_MPSAFE
66
67 #ifdef BCMETH_COUNTERS
68 #define BCMETH_EVCNT_ADD(a,b) ((void)((a).ev_count += (b)))
69 #else
70 #define BCMETH_EVCNT_ADD(a,b) do { } while (/*CONSTCOND*/0)
71 #endif
72 #define BCMETH_EVCNT_INCR(a) BCMETH_EVCNT_ADD((a), 1)
73
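/*
 * Receive/transmit tunables.  The DMA engine writes a 4-byte receive
 * status word at the start of each receive buffer (see
 * bcmeth_rxq_consume); BCMETH_RCVOFFSET reserves room for it and is
 * stripped again in bcmeth_rx_input, with the extra bytes presumably
 * there to keep the IP header 4-byte aligned.  Each descriptor ring is
 * one page; the attach routines assert this works out to 256 or 512
 * descriptors.
 */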
74 #define BCMETH_RCVOFFSET 10
75 #define BCMETH_MAXTXMBUFS 128
76 #define BCMETH_NTXSEGS 30
77 #define BCMETH_MAXRXMBUFS 255
78 #define BCMETH_MINRXMBUFS 64
79 #define BCMETH_NRXSEGS 1
80 #define BCMETH_RINGSIZE PAGE_SIZE
81
82 #if 0
83 #define BCMETH_RCVMAGIC 0xfeedface
84 #endif
85
86 static int bcmeth_ccb_match(device_t, cfdata_t, void *);
87 static void bcmeth_ccb_attach(device_t, device_t, void *);
88
89 struct bcmeth_txqueue {
90 bus_dmamap_t txq_descmap;
91 struct gmac_txdb *txq_consumer;
92 struct gmac_txdb *txq_producer;
93 struct gmac_txdb *txq_first;
94 struct gmac_txdb *txq_last;
95 struct ifqueue txq_mbufs;
96 struct mbuf *txq_next;
97 size_t txq_free;
98 size_t txq_threshold;
99 size_t txq_lastintr;
100 bus_size_t txq_reg_xmtaddrlo;
101 bus_size_t txq_reg_xmtptr;
102 bus_size_t txq_reg_xmtctl;
103 bus_size_t txq_reg_xmtsts0;
104 bus_size_t txq_reg_xmtsts1;
105 bus_dma_segment_t txq_descmap_seg;
106 };
107
108 struct bcmeth_rxqueue {
109 bus_dmamap_t rxq_descmap;
110 struct gmac_rxdb *rxq_consumer;
111 struct gmac_rxdb *rxq_producer;
112 struct gmac_rxdb *rxq_first;
113 struct gmac_rxdb *rxq_last;
114 struct mbuf *rxq_mhead;
115 struct mbuf **rxq_mtail;
116 struct mbuf *rxq_mconsumer;
117 size_t rxq_inuse;
118 size_t rxq_threshold;
119 bus_size_t rxq_reg_rcvaddrlo;
120 bus_size_t rxq_reg_rcvptr;
121 bus_size_t rxq_reg_rcvctl;
122 bus_size_t rxq_reg_rcvsts0;
123 bus_size_t rxq_reg_rcvsts1;
124 bus_dma_segment_t rxq_descmap_seg;
125 };
126
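/*
 * A mapcache is a simple LIFO stack of preallocated bus_dmamaps:
 * bcmeth_mapcache_get() pops a map off dmc_maps[] and
 * bcmeth_mapcache_put() pushes it back, so the rx/tx paths can grab a
 * map without calling bus_dmamap_create() at packet time.
 */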
127 struct bcmeth_mapcache {
128 u_int dmc_nmaps;
129 u_int dmc_maxseg;
130 u_int dmc_maxmaps;
131 u_int dmc_maxmapsize;
132 bus_dmamap_t dmc_maps[0];
133 };
134
135 struct bcmeth_softc {
136 device_t sc_dev;
137 bus_space_tag_t sc_bst;
138 bus_space_handle_t sc_bsh;
139 bus_dma_tag_t sc_dmat;
140 kmutex_t *sc_lock;
141 kmutex_t *sc_hwlock;
142 struct ethercom sc_ec;
143 #define sc_if sc_ec.ec_if
144 struct ifmedia sc_media;
145 void *sc_soft_ih;
146 void *sc_ih;
147
148 struct bcmeth_rxqueue sc_rxq;
149 struct bcmeth_txqueue sc_txq;
150
151 uint32_t sc_maxfrm;
152 uint32_t sc_cmdcfg;
153 uint32_t sc_intmask;
154 uint32_t sc_rcvlazy;
155 volatile uint32_t sc_soft_flags;
156 #define SOFT_RXINTR 0x01
157 #define SOFT_TXINTR 0x02
158
159 #ifdef BCMETH_COUNTERS
160 struct evcnt sc_ev_intr;
161 struct evcnt sc_ev_soft_intr;
162 struct evcnt sc_ev_work;
163 struct evcnt sc_ev_tx_stall;
164 struct evcnt sc_ev_rx_badmagic_lo;
165 struct evcnt sc_ev_rx_badmagic_hi;
166 #endif
167
168 struct ifqueue sc_rx_bufcache;
169 struct bcmeth_mapcache *sc_rx_mapcache;
170 struct bcmeth_mapcache *sc_tx_mapcache;
171
172 struct workqueue *sc_workq;
173 struct work sc_work;
174
175 volatile uint32_t sc_work_flags;
176 #define WORK_RXINTR 0x01
177 #define WORK_RXUNDERFLOW 0x02
178 #define WORK_REINIT 0x04
179
180 uint8_t sc_enaddr[ETHER_ADDR_LEN];
181 };
182
183 static void bcmeth_ifstart(struct ifnet *);
184 static void bcmeth_ifwatchdog(struct ifnet *);
185 static int bcmeth_ifinit(struct ifnet *);
186 static void bcmeth_ifstop(struct ifnet *, int);
187 static int bcmeth_ifioctl(struct ifnet *, u_long, void *);
188
189 static int bcmeth_mapcache_create(struct bcmeth_softc *,
190 struct bcmeth_mapcache **, size_t, size_t, size_t);
191 static void bcmeth_mapcache_destroy(struct bcmeth_softc *,
192 struct bcmeth_mapcache *);
193 static bus_dmamap_t bcmeth_mapcache_get(struct bcmeth_softc *,
194 struct bcmeth_mapcache *);
195 static void bcmeth_mapcache_put(struct bcmeth_softc *,
196 struct bcmeth_mapcache *, bus_dmamap_t);
197
198 static int bcmeth_txq_attach(struct bcmeth_softc *,
199 struct bcmeth_txqueue *, u_int);
200 static void bcmeth_txq_purge(struct bcmeth_softc *,
201 struct bcmeth_txqueue *);
202 static void bcmeth_txq_reset(struct bcmeth_softc *,
203 struct bcmeth_txqueue *);
204 static bool bcmeth_txq_consume(struct bcmeth_softc *,
205 struct bcmeth_txqueue *);
206 static bool bcmeth_txq_produce(struct bcmeth_softc *,
207 struct bcmeth_txqueue *, struct mbuf *m);
208 static bool bcmeth_txq_active_p(struct bcmeth_softc *,
209 struct bcmeth_txqueue *);
210
211 static int bcmeth_rxq_attach(struct bcmeth_softc *,
212 struct bcmeth_rxqueue *, u_int);
213 static bool bcmeth_rxq_produce(struct bcmeth_softc *,
214 struct bcmeth_rxqueue *);
215 static void bcmeth_rxq_purge(struct bcmeth_softc *,
216 struct bcmeth_rxqueue *, bool);
217 static void bcmeth_rxq_reset(struct bcmeth_softc *,
218 struct bcmeth_rxqueue *);
219
220 static int bcmeth_intr(void *);
221 #ifdef BCMETH_MPSAFETX
222 static void bcmeth_soft_txintr(struct bcmeth_softc *);
223 #endif
224 static void bcmeth_soft_intr(void *);
225 static void bcmeth_worker(struct work *, void *);
226
227 static int bcmeth_mediachange(struct ifnet *);
228 static void bcmeth_mediastatus(struct ifnet *, struct ifmediareq *);
229
230 static inline uint32_t
231 bcmeth_read_4(struct bcmeth_softc *sc, bus_size_t o)
232 {
233 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, o);
234 }
235
236 static inline void
237 bcmeth_write_4(struct bcmeth_softc *sc, bus_size_t o, uint32_t v)
238 {
239 bus_space_write_4(sc->sc_bst, sc->sc_bsh, o, v);
240 }
241
242 CFATTACH_DECL_NEW(bcmeth_ccb, sizeof(struct bcmeth_softc),
243 bcmeth_ccb_match, bcmeth_ccb_attach, NULL, NULL);
244
245 static int
246 bcmeth_ccb_match(device_t parent, cfdata_t cf, void *aux)
247 {
248 struct bcmccb_attach_args * const ccbaa = aux;
249 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;
250
251 if (strcmp(cf->cf_name, loc->loc_name))
252 return 0;
253
254 #ifdef DIAGNOSTIC
255 const int port = cf->cf_loc[BCMCCBCF_PORT];
256 #endif
257 KASSERT(port == BCMCCBCF_PORT_DEFAULT || port == loc->loc_port);
258
259 return 1;
260 }
261
262 static void
263 bcmeth_ccb_attach(device_t parent, device_t self, void *aux)
264 {
265 struct bcmeth_softc * const sc = device_private(self);
266 struct ethercom * const ec = &sc->sc_ec;
267 struct ifnet * const ifp = &ec->ec_if;
268 struct bcmccb_attach_args * const ccbaa = aux;
269 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;
270 const char * const xname = device_xname(self);
271 prop_dictionary_t dict = device_properties(self);
272 int error;
273
274 sc->sc_bst = ccbaa->ccbaa_ccb_bst;
275 sc->sc_dmat = ccbaa->ccbaa_dmat;
276 bus_space_subregion(sc->sc_bst, ccbaa->ccbaa_ccb_bsh,
277 loc->loc_offset, loc->loc_size, &sc->sc_bsh);
278
279 /*
280 * We need to use the coherent dma tag for the GMAC.
281 */
282 sc->sc_dmat = &bcm53xx_coherent_dma_tag;
283
284 prop_data_t eaprop = prop_dictionary_get(dict, "mac-address");
285 if (eaprop == NULL) {
286 uint32_t mac0 = bcmeth_read_4(sc, UNIMAC_MAC_0);
287 uint32_t mac1 = bcmeth_read_4(sc, UNIMAC_MAC_1);
288 if ((mac0 == 0 && mac1 == 0) || (mac1 & 1)) {
289 aprint_error(": mac-address property is missing\n");
290 return;
291 }
292 sc->sc_enaddr[0] = (mac0 >> 0) & 0xff;
293 sc->sc_enaddr[1] = (mac0 >> 8) & 0xff;
294 sc->sc_enaddr[2] = (mac0 >> 16) & 0xff;
295 sc->sc_enaddr[3] = (mac0 >> 24) & 0xff;
296 sc->sc_enaddr[4] = (mac1 >> 0) & 0xff;
297 sc->sc_enaddr[5] = (mac1 >> 8) & 0xff;
298 } else {
299 KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
300 KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
301 memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop),
302 ETHER_ADDR_LEN);
303 }
304 sc->sc_dev = self;
305 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
306 sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);
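	/*
	 * sc_lock (IPL_SOFTNET) serializes the softint and workqueue
	 * paths; sc_hwlock (IPL_VM) is the spin mutex taken by the hard
	 * interrupt handler and by the softint/worker when they re-enable
	 * interrupt sources in sc_intmask / GMAC_INTMASK.
	 */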
307
308 bcmeth_write_4(sc, GMAC_INTMASK, 0); // disable interrupts
309
310 aprint_naive("\n");
311 aprint_normal(": Gigabit Ethernet Controller\n");
312
313 error = bcmeth_rxq_attach(sc, &sc->sc_rxq, 0);
314 if (error) {
315 aprint_error(": failed to init rxq: %d\n", error);
316 return;
317 }
318
319 error = bcmeth_txq_attach(sc, &sc->sc_txq, 0);
320 if (error) {
321 aprint_error(": failed to init txq: %d\n", error);
322 return;
323 }
324
325 error = bcmeth_mapcache_create(sc, &sc->sc_rx_mapcache,
326 BCMETH_MAXRXMBUFS, MCLBYTES, BCMETH_NRXSEGS);
327 if (error) {
328 aprint_error(": failed to allocate rx dmamaps: %d\n", error);
329 return;
330 }
331
332 error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
333 BCMETH_MAXTXMBUFS, MCLBYTES, BCMETH_NTXSEGS);
334 if (error) {
335 aprint_error(": failed to allocate tx dmamaps: %d\n", error);
336 return;
337 }
338
339 error = workqueue_create(&sc->sc_workq, xname, bcmeth_worker, sc,
340 (PRI_USER + MAXPRI_USER) / 2, IPL_NET, WQ_MPSAFE|WQ_PERCPU);
341 if (error) {
342 aprint_error(": failed to create workqueue: %d\n", error);
343 return;
344 }
345
346 sc->sc_soft_ih = softint_establish(SOFTINT_MPSAFE | SOFTINT_NET,
347 bcmeth_soft_intr, sc);
348
349 sc->sc_ih = intr_establish(loc->loc_intrs[0], IPL_VM, IST_LEVEL,
350 bcmeth_intr, sc);
351
352 if (sc->sc_ih == NULL) {
353 aprint_error_dev(self, "failed to establish interrupt %d\n",
354 loc->loc_intrs[0]);
355 } else {
356 aprint_normal_dev(self, "interrupting on irq %d\n",
357 loc->loc_intrs[0]);
358 }
359
360 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
361 ether_sprintf(sc->sc_enaddr));
362
363 /*
   364 	 * Since each port is plugged into the switch/flow-accelerator,
   365 	 * we hard-code GigE Full-Duplex with Flow Control enabled.
366 */
367 int ifmedia = IFM_ETHER|IFM_1000_T|IFM_FDX;
368 //ifmedia |= IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE;
369 ifmedia_init(&sc->sc_media, IFM_IMASK, bcmeth_mediachange,
370 bcmeth_mediastatus);
371 ifmedia_add(&sc->sc_media, ifmedia, 0, NULL);
372 ifmedia_set(&sc->sc_media, ifmedia);
373
374 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
375
376 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
377 ifp->if_softc = sc;
378 ifp->if_baudrate = IF_Mbps(1000);
379 ifp->if_capabilities = 0;
380 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
381 #ifdef BCMETH_MPSAFE
382 ifp->if_flags2 = IFF2_MPSAFE;
383 #endif
384 ifp->if_ioctl = bcmeth_ifioctl;
385 ifp->if_start = bcmeth_ifstart;
386 ifp->if_watchdog = bcmeth_ifwatchdog;
387 ifp->if_init = bcmeth_ifinit;
388 ifp->if_stop = bcmeth_ifstop;
389 IFQ_SET_READY(&ifp->if_snd);
390
391 bcmeth_ifstop(ifp, true);
392
393 /*
394 * Attach the interface.
395 */
396 if_attach(ifp);
397 ether_ifattach(ifp, sc->sc_enaddr);
398
399 #ifdef BCMETH_COUNTERS
400 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
401 NULL, xname, "intr");
402 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
403 NULL, xname, "soft intr");
404 evcnt_attach_dynamic(&sc->sc_ev_work, EVCNT_TYPE_MISC,
405 NULL, xname, "work items");
406 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
407 NULL, xname, "tx stalls");
408 evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_lo, EVCNT_TYPE_MISC,
409 NULL, xname, "rx badmagic lo");
410 evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_hi, EVCNT_TYPE_MISC,
411 NULL, xname, "rx badmagic hi");
412 #endif
413 }
414
415 static int
416 bcmeth_mediachange(struct ifnet *ifp)
417 {
418 //struct bcmeth_softc * const sc = ifp->if_softc;
419 return 0;
420 }
421
422 static void
423 bcmeth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifm)
424 {
425 //struct bcmeth_softc * const sc = ifp->if_softc;
426
427 ifm->ifm_status = IFM_AVALID | IFM_ACTIVE;
428 ifm->ifm_active = IFM_ETHER | IFM_FDX | IFM_1000_T;
429 }
430
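/*
 * Pack an Ethernet address into the layout of the UNIMAC_MAC_0/1
 * registers: the first four octets go into MAC_0 with the first octet
 * in the most significant byte, the last two octets into the low half
 * of MAC_1.  For example, 00:11:22:33:44:55 becomes MAC_0 = 0x00112233
 * and MAC_1 = 0x4455.
 */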
431 static uint64_t
432 bcmeth_macaddr_create(const uint8_t *enaddr)
433 {
434 return (enaddr[3] << 0) // UNIMAC_MAC_0
435 | (enaddr[2] << 8) // UNIMAC_MAC_0
436 | (enaddr[1] << 16) // UNIMAC_MAC_0
437 | (enaddr[0] << 24) // UNIMAC_MAC_0
438 | ((uint64_t)enaddr[5] << 32) // UNIMAC_MAC_1
439 | ((uint64_t)enaddr[4] << 40); // UNIMAC_MAC_1
440 }
441
442 static int
443 bcmeth_ifinit(struct ifnet *ifp)
444 {
445 struct bcmeth_softc * const sc = ifp->if_softc;
446 int error = 0;
447
448 sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
449 if (ifp->if_mtu > ETHERMTU_JUMBO)
450 return error;
451
452 KASSERT(ifp->if_flags & IFF_UP);
453
454 /*
455 * Stop the interface
456 */
457 bcmeth_ifstop(ifp, 0);
458
459 /*
460 * If our frame size has changed (or it's our first time through)
461 * destroy the existing transmit mapcache.
462 */
463 if (sc->sc_tx_mapcache != NULL
464 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
465 bcmeth_mapcache_destroy(sc, sc->sc_tx_mapcache);
466 sc->sc_tx_mapcache = NULL;
467 }
468
469 if (sc->sc_tx_mapcache == NULL) {
470 error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
471 BCMETH_MAXTXMBUFS, sc->sc_maxfrm, BCMETH_NTXSEGS);
472 if (error)
473 return error;
474 }
475
476 sc->sc_cmdcfg = NO_LENGTH_CHECK | PAUSE_IGNORE
477 | __SHIFTIN(ETH_SPEED_1000, ETH_SPEED)
478 | RX_ENA | TX_ENA;
479
480 if (ifp->if_flags & IFF_PROMISC) {
481 sc->sc_cmdcfg |= PROMISC_EN;
482 } else {
483 sc->sc_cmdcfg &= ~PROMISC_EN;
484 }
485
486 const uint64_t macstnaddr =
487 bcmeth_macaddr_create(CLLADDR(ifp->if_sadl));
488
489 sc->sc_intmask = DESCPROTOERR|DATAERR|DESCERR;
490
491 /* 5. Load RCVADDR_LO with new pointer */
492 bcmeth_rxq_reset(sc, &sc->sc_rxq);
493
494 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
495 __SHIFTIN(BCMETH_RCVOFFSET, RCVCTL_RCVOFFSET)
496 | RCVCTL_PARITY_DIS
497 | RCVCTL_OFLOW_CONTINUE
498 | __SHIFTIN(3, RCVCTL_BURSTLEN));
499
500 /* 6. Load XMTADDR_LO with new pointer */
501 bcmeth_txq_reset(sc, &sc->sc_txq);
502
503 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, XMTCTL_DMA_ACT_INDEX
504 | XMTCTL_PARITY_DIS
505 | __SHIFTIN(3, XMTCTL_BURSTLEN));
506
507 /* 7. Setup other UNIMAC registers */
508 bcmeth_write_4(sc, UNIMAC_FRAME_LEN, sc->sc_maxfrm);
509 bcmeth_write_4(sc, UNIMAC_MAC_0, (uint32_t)(macstnaddr >> 0));
510 bcmeth_write_4(sc, UNIMAC_MAC_1, (uint32_t)(macstnaddr >> 32));
511 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, sc->sc_cmdcfg);
512
513 uint32_t devctl = bcmeth_read_4(sc, GMAC_DEVCONTROL);
514 devctl |= RGMII_LINK_STATUS_SEL | NWAY_AUTO_POLL_EN | TXARB_STRICT_MODE;
515 devctl &= ~FLOW_CTRL_MODE;
516 devctl &= ~MIB_RD_RESET_EN;
517 devctl &= ~RXQ_OVERFLOW_CTRL_SEL;
518 devctl &= ~CPU_FLOW_CTRL_ON;
519 bcmeth_write_4(sc, GMAC_DEVCONTROL, devctl);
520
521 /* Setup lazy receive (at most 1ms). */
522 sc->sc_rcvlazy = __SHIFTIN(4, INTRCVLAZY_FRAMECOUNT)
523 | __SHIFTIN(125000000 / 1000, INTRCVLAZY_TIMEOUT);
524 bcmeth_write_4(sc, GMAC_INTRCVLAZY, sc->sc_rcvlazy);
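	/*
	 * With a frame count of 4 and a timeout of 125000 ticks, the
	 * receive interrupt fires after 4 frames or, assuming the lazy
	 * timer counts a 125MHz clock, after roughly 1ms, whichever
	 * comes first.
	 */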
525
526 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */
527 sc->sc_intmask |= XMTINT_0|XMTUF;
528 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl,
529 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl) | XMTCTL_ENABLE);
530
531
532 /* 12. Enable receive queues in RQUEUE, */
533 sc->sc_intmask |= RCVINT|RCVDESCUF|RCVFIFOOF;
534 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
535 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl) | RCVCTL_ENABLE);
536
537 bcmeth_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */
538
539 #if 0
540 aprint_normal_dev(sc->sc_dev,
541 "devctl=%#x ucmdcfg=%#x xmtctl=%#x rcvctl=%#x\n",
542 devctl, sc->sc_cmdcfg,
543 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl),
544 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl));
545 #endif
546
547 sc->sc_soft_flags = 0;
548
549 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
550
551 ifp->if_flags |= IFF_RUNNING;
552
553 return error;
554 }
555
556 static void
557 bcmeth_ifstop(struct ifnet *ifp, int disable)
558 {
559 struct bcmeth_softc * const sc = ifp->if_softc;
560 struct bcmeth_txqueue * const txq = &sc->sc_txq;
561 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
562
563 KASSERT(!cpu_intr_p());
564
565 sc->sc_soft_flags = 0;
566 sc->sc_work_flags = 0;
567
568 /* Disable Rx processing */
569 bcmeth_write_4(sc, rxq->rxq_reg_rcvctl,
570 bcmeth_read_4(sc, rxq->rxq_reg_rcvctl) & ~RCVCTL_ENABLE);
571
572 /* Disable Tx processing */
573 bcmeth_write_4(sc, txq->txq_reg_xmtctl,
574 bcmeth_read_4(sc, txq->txq_reg_xmtctl) & ~XMTCTL_ENABLE);
575
576 /* Disable all interrupts */
577 bcmeth_write_4(sc, GMAC_INTMASK, 0);
578
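	/*
	 * Poll (every 50us, with no upper bound) until both the transmit
	 * and receive DMA engines report the disabled state before
	 * resetting the MAC.
	 */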
579 for (;;) {
580 uint32_t tx0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
581 uint32_t rx0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
582 if (__SHIFTOUT(tx0, XMTSTATE) == XMTSTATE_DIS
583 && __SHIFTOUT(rx0, RCVSTATE) == RCVSTATE_DIS)
584 break;
585 delay(50);
586 }
587 /*
588 * Now reset the controller.
589 *
590 * 3. Set SW_RESET bit in UNIMAC_COMMAND_CONFIG register
591 * 4. Clear SW_RESET bit in UNIMAC_COMMAND_CONFIG register
592 */
593 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, SW_RESET);
594 bcmeth_write_4(sc, GMAC_INTSTATUS, ~0);
595 sc->sc_intmask = 0;
596 ifp->if_flags &= ~IFF_RUNNING;
597
598 /*
599 * Let's consume any remaining transmitted packets. And if we are
600 * disabling the interface, purge ourselves of any untransmitted
601 * packets. But don't consume any received packets, just drop them.
602 * If we aren't disabling the interface, save the mbufs in the
603 * receive queue for reuse.
604 */
605 bcmeth_rxq_purge(sc, &sc->sc_rxq, disable);
606 bcmeth_txq_consume(sc, &sc->sc_txq);
607 if (disable) {
608 bcmeth_txq_purge(sc, &sc->sc_txq);
609 IF_PURGE(&ifp->if_snd);
610 }
611
612 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, 0);
613 }
614
615 static void
616 bcmeth_ifwatchdog(struct ifnet *ifp)
617 {
618 }
619
620 static int
621 bcmeth_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
622 {
623 struct bcmeth_softc *sc = ifp->if_softc;
624 struct ifreq * const ifr = data;
625 const int s = splnet();
626 int error;
627
628 switch (cmd) {
629 case SIOCSIFMEDIA:
630 case SIOCGIFMEDIA:
631 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
632 break;
633
634 default:
635 error = ether_ioctl(ifp, cmd, data);
636 if (error != ENETRESET)
637 break;
638
639 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
640 error = 0;
641 break;
642 }
643 error = bcmeth_ifinit(ifp);
644 break;
645 }
646
647 splx(s);
648 return error;
649 }
650
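/*
 * The following helpers sync "count" descriptors starting at the given
 * descriptor; the offset into the descriptor map is derived from the
 * descriptor's index within the ring.  Callers split syncs that would
 * wrap past the end of the ring into two calls.
 */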
651 static void
652 bcmeth_rxq_desc_presync(
653 struct bcmeth_softc *sc,
654 struct bcmeth_rxqueue *rxq,
655 struct gmac_rxdb *rxdb,
656 size_t count)
657 {
658 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
659 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
660 BUS_DMASYNC_PREWRITE);
661 }
662
663 static void
664 bcmeth_rxq_desc_postsync(
665 struct bcmeth_softc *sc,
666 struct bcmeth_rxqueue *rxq,
667 struct gmac_rxdb *rxdb,
668 size_t count)
669 {
670 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
671 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
672 BUS_DMASYNC_POSTWRITE);
673 }
674
675 static void
676 bcmeth_txq_desc_presync(
677 struct bcmeth_softc *sc,
678 struct bcmeth_txqueue *txq,
679 struct gmac_txdb *txdb,
680 size_t count)
681 {
682 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
683 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
684 BUS_DMASYNC_PREWRITE);
685 }
686
687 static void
688 bcmeth_txq_desc_postsync(
689 struct bcmeth_softc *sc,
690 struct bcmeth_txqueue *txq,
691 struct gmac_txdb *txdb,
692 size_t count)
693 {
694 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
695 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
696 BUS_DMASYNC_POSTWRITE);
697 }
698
699 static bus_dmamap_t
700 bcmeth_mapcache_get(
701 struct bcmeth_softc *sc,
702 struct bcmeth_mapcache *dmc)
703 {
704 KASSERT(dmc->dmc_nmaps > 0);
705 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
706 return dmc->dmc_maps[--dmc->dmc_nmaps];
707 }
708
709 static void
710 bcmeth_mapcache_put(
711 struct bcmeth_softc *sc,
712 struct bcmeth_mapcache *dmc,
713 bus_dmamap_t map)
714 {
715 KASSERT(map != NULL);
716 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
717 dmc->dmc_maps[dmc->dmc_nmaps++] = map;
718 }
719
720 static void
721 bcmeth_mapcache_destroy(
722 struct bcmeth_softc *sc,
723 struct bcmeth_mapcache *dmc)
724 {
725 const size_t dmc_size =
726 offsetof(struct bcmeth_mapcache, dmc_maps[dmc->dmc_maxmaps]);
727
728 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
729 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
730 }
731 kmem_intr_free(dmc, dmc_size);
732 }
733
734 static int
735 bcmeth_mapcache_create(
736 struct bcmeth_softc *sc,
737 struct bcmeth_mapcache **dmc_p,
738 size_t maxmaps,
739 size_t maxmapsize,
740 size_t maxseg)
741 {
742 const size_t dmc_size =
743 offsetof(struct bcmeth_mapcache, dmc_maps[maxmaps]);
744 struct bcmeth_mapcache * const dmc =
745 kmem_intr_zalloc(dmc_size, KM_NOSLEEP);
746
747 dmc->dmc_maxmaps = maxmaps;
748 dmc->dmc_nmaps = maxmaps;
749 dmc->dmc_maxmapsize = maxmapsize;
750 dmc->dmc_maxseg = maxseg;
751
752 for (u_int i = 0; i < maxmaps; i++) {
753 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
754 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
755 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
756 if (error) {
757 aprint_error_dev(sc->sc_dev,
   758 			    "failed to create dma map cache "
759 "entry %u of %zu: %d\n",
760 i, maxmaps, error);
761 while (i-- > 0) {
762 bus_dmamap_destroy(sc->sc_dmat,
763 dmc->dmc_maps[i]);
764 }
765 kmem_intr_free(dmc, dmc_size);
766 return error;
767 }
768 KASSERT(dmc->dmc_maps[i] != NULL);
769 }
770
771 *dmc_p = dmc;
772
773 return 0;
774 }
775
776 #if 0
777 static void
778 bcmeth_dmamem_free(
779 bus_dma_tag_t dmat,
780 size_t map_size,
781 bus_dma_segment_t *seg,
782 bus_dmamap_t map,
783 void *kvap)
784 {
785 bus_dmamap_destroy(dmat, map);
786 bus_dmamem_unmap(dmat, kvap, map_size);
787 bus_dmamem_free(dmat, seg, 1);
788 }
789 #endif
790
791 static int
792 bcmeth_dmamem_alloc(
793 bus_dma_tag_t dmat,
794 size_t map_size,
795 bus_dma_segment_t *seg,
796 bus_dmamap_t *map,
797 void **kvap)
798 {
799 int error;
800 int nseg;
801
802 *kvap = NULL;
803 *map = NULL;
804
805 error = bus_dmamem_alloc(dmat, map_size, 2*PAGE_SIZE, 0,
806 seg, 1, &nseg, 0);
807 if (error)
808 return error;
809
810 KASSERT(nseg == 1);
811
812 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 0);
813 if (error == 0) {
814 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
815 map);
816 if (error == 0) {
817 error = bus_dmamap_load(dmat, *map, *kvap, map_size,
818 NULL, 0);
819 if (error == 0)
820 return 0;
821 bus_dmamap_destroy(dmat, *map);
822 *map = NULL;
823 }
824 bus_dmamem_unmap(dmat, *kvap, map_size);
825 *kvap = NULL;
826 }
827 bus_dmamem_free(dmat, seg, nseg);
   828 	return error;
829 }
830
831 static struct mbuf *
832 bcmeth_rx_buf_alloc(
833 struct bcmeth_softc *sc)
834 {
835 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
836 if (m == NULL) {
837 printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
838 return NULL;
839 }
840 MCLGET(m, M_DONTWAIT);
841 if ((m->m_flags & M_EXT) == 0) {
842 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
843 m_freem(m);
844 return NULL;
845 }
846 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
847
848 bus_dmamap_t map = bcmeth_mapcache_get(sc, sc->sc_rx_mapcache);
849 if (map == NULL) {
850 printf("%s:%d: %s\n", __func__, __LINE__, "map get");
851 m_freem(m);
852 return NULL;
853 }
854 M_SETCTX(m, map);
855 m->m_len = m->m_pkthdr.len = MCLBYTES;
856 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
857 BUS_DMA_READ|BUS_DMA_NOWAIT);
858 if (error) {
859 aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n",
860 error);
861 M_SETCTX(m, NULL);
862 m_freem(m);
863 bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
864 return NULL;
865 }
866 KASSERT(((map->_dm_flags ^ sc->sc_dmat->_ranges[0].dr_flags) & _BUS_DMAMAP_COHERENT) == 0);
867 KASSERT(map->dm_mapsize == MCLBYTES);
868 #ifdef BCMETH_RCVMAGIC
869 *mtod(m, uint32_t *) = BCMETH_RCVMAGIC;
870 bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
871 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
872 bus_dmamap_sync(sc->sc_dmat, map, sizeof(uint32_t),
873 map->dm_mapsize - sizeof(uint32_t), BUS_DMASYNC_PREREAD);
874 #else
875 bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
876 BUS_DMASYNC_PREREAD);
877 #endif
878
879 return m;
880 }
881
882 static void
883 bcmeth_rx_map_unload(
884 struct bcmeth_softc *sc,
885 struct mbuf *m)
886 {
887 KASSERT(m);
888 for (; m != NULL; m = m->m_next) {
889 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
890 KASSERT(map);
891 KASSERT(map->dm_mapsize == MCLBYTES);
892 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
893 BUS_DMASYNC_POSTREAD);
894 bus_dmamap_unload(sc->sc_dmat, map);
895 bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
896 M_SETCTX(m, NULL);
897 }
898 }
899
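/*
 * Top up the receive ring: take mbufs from the rx bufcache (or allocate
 * fresh clusters) until rxq_threshold descriptors are in use, append
 * each mbuf to the rxq_mhead list in descriptor order, and finally
 * advance the hardware RCVPTR register past the newly valid descriptors.
 */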
900 static bool
901 bcmeth_rxq_produce(
902 struct bcmeth_softc *sc,
903 struct bcmeth_rxqueue *rxq)
904 {
905 struct gmac_rxdb *producer = rxq->rxq_producer;
906 bool produced = false;
907
908 while (rxq->rxq_inuse < rxq->rxq_threshold) {
909 struct mbuf *m;
910 IF_DEQUEUE(&sc->sc_rx_bufcache, m);
911 if (m == NULL) {
912 m = bcmeth_rx_buf_alloc(sc);
913 if (m == NULL) {
914 printf("%s: bcmeth_rx_buf_alloc failed\n", __func__);
915 break;
916 }
917 }
918 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
919 KASSERT(map);
920
921 producer->rxdb_buflen = MCLBYTES;
922 producer->rxdb_addrlo = map->dm_segs[0].ds_addr;
923 producer->rxdb_flags &= RXDB_FLAG_ET;
924 *rxq->rxq_mtail = m;
925 rxq->rxq_mtail = &m->m_next;
926 m->m_len = MCLBYTES;
927 m->m_next = NULL;
928 rxq->rxq_inuse++;
929 if (++producer == rxq->rxq_last) {
930 membar_producer();
931 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
932 rxq->rxq_last - rxq->rxq_producer);
933 producer = rxq->rxq_producer = rxq->rxq_first;
934 }
935 produced = true;
936 }
937 if (produced) {
938 membar_producer();
939 if (producer != rxq->rxq_producer) {
940 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
941 producer - rxq->rxq_producer);
942 rxq->rxq_producer = producer;
943 }
944 bcmeth_write_4(sc, rxq->rxq_reg_rcvptr,
945 rxq->rxq_descmap->dm_segs[0].ds_addr
946 + ((uintptr_t)producer & RCVPTR));
947 }
948 return true;
949 }
950
951 static void
952 bcmeth_rx_input(
953 struct bcmeth_softc *sc,
954 struct mbuf *m,
955 uint32_t rxdb_flags)
956 {
957 struct ifnet * const ifp = &sc->sc_if;
958
959 bcmeth_rx_map_unload(sc, m);
960
961 m_adj(m, BCMETH_RCVOFFSET);
962
963 switch (__SHIFTOUT(rxdb_flags, RXSTS_PKTTYPE)) {
964 case RXSTS_PKTTYPE_UC:
965 break;
966 case RXSTS_PKTTYPE_MC:
967 m->m_flags |= M_MCAST;
968 break;
969 case RXSTS_PKTTYPE_BC:
970 m->m_flags |= M_BCAST|M_MCAST;
971 break;
972 default:
973 if (sc->sc_cmdcfg & PROMISC_EN)
974 m->m_flags |= M_PROMISC;
975 break;
976 }
977 m->m_pkthdr.rcvif = ifp;
978
979 ifp->if_ipackets++;
980 ifp->if_ibytes += m->m_pkthdr.len;
981
982 /*
   983 	 * Let's give it to the network subsystem to deal with.
984 */
985 #ifdef BCMETH_MPSAFE
986 mutex_exit(sc->sc_lock);
987 (*ifp->if_input)(ifp, m);
988 mutex_enter(sc->sc_lock);
989 #else
990 int s = splnet();
991 bpf_mtap(ifp, m);
992 (*ifp->if_input)(ifp, m);
993 splx(s);
994 #endif
995 }
996
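/*
 * Drain completed receive descriptors.  The loop stops when it catches
 * up with either the software producer or the descriptor the hardware
 * is currently filling (RCV_CURRDSCR).  The 4-byte status word the
 * hardware writes at the head of the first buffer gives the frame
 * length and the number of descriptors the frame spans; errored frames
 * are recycled via sc_rx_bufcache, good frames go to bcmeth_rx_input().
 */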
997 static void
998 bcmeth_rxq_consume(
999 struct bcmeth_softc *sc,
1000 struct bcmeth_rxqueue *rxq)
1001 {
1002 struct ifnet * const ifp = &sc->sc_if;
1003 struct gmac_rxdb *consumer = rxq->rxq_consumer;
1004 size_t rxconsumed = 0;
1005
1006 for (;;) {
1007 if (consumer == rxq->rxq_producer) {
1008 rxq->rxq_consumer = consumer;
1009 rxq->rxq_inuse -= rxconsumed;
1010 KASSERT(rxq->rxq_inuse == 0);
1011 return;
1012 }
1013
1014 uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
1015 uint32_t currdscr = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
1016 if (consumer == rxq->rxq_first + currdscr) {
1017 rxq->rxq_consumer = consumer;
1018 rxq->rxq_inuse -= rxconsumed;
1019 return;
1020 }
1021 bcmeth_rxq_desc_postsync(sc, rxq, consumer, 1);
1022
1023 /*
1024 * We own this packet again. Copy the rxsts word from it.
1025 */
1026 rxconsumed++;
1027 uint32_t rxsts;
1028 KASSERT(rxq->rxq_mhead != NULL);
1029 bus_dmamap_t map = M_GETCTX(rxq->rxq_mhead, bus_dmamap_t);
1030 bus_dmamap_sync(sc->sc_dmat, map, 0, arm_dcache_align,
1031 BUS_DMASYNC_POSTREAD);
1032 memcpy(&rxsts, rxq->rxq_mhead->m_data, 4);
1033 #if 0
1034 KASSERTMSG(rxsts != BCMETH_RCVMAGIC, "currdscr=%u consumer=%zd",
1035 currdscr, consumer - rxq->rxq_first);
1036 #endif
1037
1038 /*
1039 * Get the count of descriptors. Fetch the correct number
1040 * of mbufs.
1041 */
1042 #ifdef BCMETH_RCVMAGIC
1043 size_t desc_count = rxsts != BCMETH_RCVMAGIC ? __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1 : 1;
1044 #else
1045 size_t desc_count = __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1;
1046 #endif
1047 struct mbuf *m = rxq->rxq_mhead;
1048 struct mbuf *m_last = m;
1049 for (size_t i = 1; i < desc_count; i++) {
1050 if (++consumer == rxq->rxq_last) {
1051 consumer = rxq->rxq_first;
1052 }
1053 KASSERTMSG(consumer != rxq->rxq_first + currdscr,
1054 "i=%zu rxsts=%#x desc_count=%zu currdscr=%u consumer=%zd",
1055 i, rxsts, desc_count, currdscr,
1056 consumer - rxq->rxq_first);
1057 m_last = m_last->m_next;
1058 }
1059
1060 /*
1061 * Now remove it/them from the list of enqueued mbufs.
1062 */
1063 if ((rxq->rxq_mhead = m_last->m_next) == NULL)
1064 rxq->rxq_mtail = &rxq->rxq_mhead;
1065 m_last->m_next = NULL;
1066
1067 #ifdef BCMETH_RCVMAGIC
1068 if (rxsts == BCMETH_RCVMAGIC) {
1069 ifp->if_ierrors++;
1070 if ((m->m_ext.ext_paddr >> 28) == 8) {
1071 BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_lo);
1072 } else {
  1073 				BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_hi);
1074 }
1075 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1076 } else
1077 #endif /* BCMETH_RCVMAGIC */
1078 if (rxsts & (RXSTS_CRC_ERROR|RXSTS_OVERSIZED|RXSTS_PKT_OVERFLOW)) {
1079 aprint_error_dev(sc->sc_dev, "[%zu]: count=%zu rxsts=%#x\n",
1080 consumer - rxq->rxq_first, desc_count, rxsts);
1081 /*
1082 * We encountered an error, take the mbufs and add them
1083 * to the rx bufcache so we can quickly reuse them.
1084 */
1085 ifp->if_ierrors++;
1086 do {
1087 struct mbuf *m0 = m->m_next;
1088 m->m_next = NULL;
1089 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1090 m = m0;
1091 } while (m);
1092 } else {
1093 uint32_t framelen = __SHIFTOUT(rxsts, RXSTS_FRAMELEN);
1094 framelen += BCMETH_RCVOFFSET;
1095 m->m_pkthdr.len = framelen;
1096 if (desc_count == 1) {
1097 KASSERT(framelen <= MCLBYTES);
1098 m->m_len = framelen;
1099 } else {
1100 m_last->m_len = framelen & (MCLBYTES - 1);
1101 }
1102
1103 #ifdef BCMETH_MPSAFE
1104 /*
1105 * Wrap at the last entry!
1106 */
1107 if (++consumer == rxq->rxq_last) {
1108 KASSERT(consumer[-1].rxdb_flags & RXDB_FLAG_ET);
1109 rxq->rxq_consumer = rxq->rxq_first;
1110 } else {
1111 rxq->rxq_consumer = consumer;
1112 }
1113 rxq->rxq_inuse -= rxconsumed;
1114 #endif /* BCMETH_MPSAFE */
1115
1116 /*
1117 * Receive the packet (which releases our lock)
1118 */
1119 bcmeth_rx_input(sc, m, rxsts);
1120
1121 #ifdef BCMETH_MPSAFE
1122 /*
1123 * Since we had to give up our lock, we need to
1124 * refresh these.
1125 */
1126 consumer = rxq->rxq_consumer;
1127 rxconsumed = 0;
1128 continue;
1129 #endif /* BCMETH_MPSAFE */
1130 }
1131
1132 /*
1133 * Wrap at the last entry!
1134 */
1135 if (++consumer == rxq->rxq_last) {
1136 KASSERT(consumer[-1].rxdb_flags & RXDB_FLAG_ET);
1137 consumer = rxq->rxq_first;
1138 }
1139 }
1140 }
1141
1142 static void
1143 bcmeth_rxq_purge(
1144 struct bcmeth_softc *sc,
1145 struct bcmeth_rxqueue *rxq,
1146 bool discard)
1147 {
1148 struct mbuf *m;
1149
1150 if ((m = rxq->rxq_mhead) != NULL) {
1151 if (discard) {
1152 bcmeth_rx_map_unload(sc, m);
1153 m_freem(m);
1154 } else {
1155 while (m != NULL) {
1156 struct mbuf *m0 = m->m_next;
1157 m->m_next = NULL;
1158 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1159 m = m0;
1160 }
1161 }
1162
1163 }
1164
1165 rxq->rxq_mhead = NULL;
1166 rxq->rxq_mtail = &rxq->rxq_mhead;
1167 rxq->rxq_inuse = 0;
1168 }
1169
1170 static void
1171 bcmeth_rxq_reset(
1172 struct bcmeth_softc *sc,
1173 struct bcmeth_rxqueue *rxq)
1174 {
1175 /*
1176 * sync all the descriptors
1177 */
1178 bcmeth_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
1179 rxq->rxq_last - rxq->rxq_first);
1180
1181 /*
1182 * Make sure we own all descriptors in the ring.
1183 */
1184 struct gmac_rxdb *rxdb;
1185 for (rxdb = rxq->rxq_first; rxdb < rxq->rxq_last - 1; rxdb++) {
1186 rxdb->rxdb_flags = RXDB_FLAG_IC;
1187 }
1188
1189 /*
1190 * Last descriptor has the wrap flag.
1191 */
1192 rxdb->rxdb_flags = RXDB_FLAG_ET|RXDB_FLAG_IC;
1193
1194 /*
  1195 	 * Reset the producer and consumer indices.
1196 */
1197 rxq->rxq_consumer = rxq->rxq_first;
1198 rxq->rxq_producer = rxq->rxq_first;
1199 rxq->rxq_inuse = 0;
1200 if (rxq->rxq_threshold < BCMETH_MINRXMBUFS)
1201 rxq->rxq_threshold = BCMETH_MINRXMBUFS;
1202
1203 sc->sc_intmask |= RCVINT|RCVFIFOOF|RCVDESCUF;
1204
1205 /*
1206 * Restart the receiver at the first descriptor
1207 */
1208 bcmeth_write_4(sc, rxq->rxq_reg_rcvaddrlo,
1209 rxq->rxq_descmap->dm_segs[0].ds_addr);
1210 }
1211
1212 static int
1213 bcmeth_rxq_attach(
1214 struct bcmeth_softc *sc,
1215 struct bcmeth_rxqueue *rxq,
1216 u_int qno)
1217 {
1218 size_t desc_count = BCMETH_RINGSIZE / sizeof(rxq->rxq_first[0]);
1219 int error;
1220 void *descs;
1221
1222 KASSERT(desc_count == 256 || desc_count == 512);
1223
1224 error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE,
1225 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
1226 if (error)
1227 return error;
1228
1229 memset(descs, 0, BCMETH_RINGSIZE);
1230 rxq->rxq_first = descs;
1231 rxq->rxq_last = rxq->rxq_first + desc_count;
1232 rxq->rxq_consumer = descs;
1233 rxq->rxq_producer = descs;
1234
1235 bcmeth_rxq_purge(sc, rxq, true);
1236 bcmeth_rxq_reset(sc, rxq);
1237
1238 rxq->rxq_reg_rcvaddrlo = GMAC_RCVADDR_LOW;
1239 rxq->rxq_reg_rcvctl = GMAC_RCVCONTROL;
1240 rxq->rxq_reg_rcvptr = GMAC_RCVPTR;
1241 rxq->rxq_reg_rcvsts0 = GMAC_RCVSTATUS0;
1242 rxq->rxq_reg_rcvsts1 = GMAC_RCVSTATUS1;
1243
1244 return 0;
1245 }
1246
1247 static bool
1248 bcmeth_txq_active_p(
1249 struct bcmeth_softc * const sc,
1250 struct bcmeth_txqueue *txq)
1251 {
1252 return !IF_IS_EMPTY(&txq->txq_mbufs);
1253 }
1254
1255 static bool
1256 bcmeth_txq_fillable_p(
1257 struct bcmeth_softc * const sc,
1258 struct bcmeth_txqueue *txq)
1259 {
1260 return txq->txq_free >= txq->txq_threshold;
1261 }
1262
1263 static int
1264 bcmeth_txq_attach(
1265 struct bcmeth_softc *sc,
1266 struct bcmeth_txqueue *txq,
1267 u_int qno)
1268 {
1269 size_t desc_count = BCMETH_RINGSIZE / sizeof(txq->txq_first[0]);
1270 int error;
1271 void *descs;
1272
1273 KASSERT(desc_count == 256 || desc_count == 512);
1274
1275 error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE,
1276 &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
1277 if (error)
1278 return error;
1279
1280 memset(descs, 0, BCMETH_RINGSIZE);
1281 txq->txq_first = descs;
1282 txq->txq_last = txq->txq_first + desc_count;
1283 txq->txq_consumer = descs;
1284 txq->txq_producer = descs;
1285
1286 IFQ_SET_MAXLEN(&txq->txq_mbufs, BCMETH_MAXTXMBUFS);
1287
1288 txq->txq_reg_xmtaddrlo = GMAC_XMTADDR_LOW;
1289 txq->txq_reg_xmtctl = GMAC_XMTCONTROL;
1290 txq->txq_reg_xmtptr = GMAC_XMTPTR;
1291 txq->txq_reg_xmtsts0 = GMAC_XMTSTATUS0;
1292 txq->txq_reg_xmtsts1 = GMAC_XMTSTATUS1;
1293
1294 bcmeth_txq_reset(sc, txq);
1295
1296 return 0;
1297 }
1298
1299 static int
1300 bcmeth_txq_map_load(
1301 struct bcmeth_softc *sc,
1302 struct bcmeth_txqueue *txq,
1303 struct mbuf *m)
1304 {
1305 bus_dmamap_t map;
1306 int error;
1307
1308 map = M_GETCTX(m, bus_dmamap_t);
1309 if (map != NULL)
1310 return 0;
1311
1312 map = bcmeth_mapcache_get(sc, sc->sc_tx_mapcache);
1313 if (map == NULL)
1314 return ENOMEM;
1315
1316 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1317 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1318 if (error)
1319 return error;
1320
1321 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
1322 BUS_DMASYNC_PREWRITE);
1323 M_SETCTX(m, map);
1324 return 0;
1325 }
1326
1327 static void
1328 bcmeth_txq_map_unload(
1329 struct bcmeth_softc *sc,
1330 struct bcmeth_txqueue *txq,
1331 struct mbuf *m)
1332 {
1333 KASSERT(m);
1334 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1335 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1336 BUS_DMASYNC_POSTWRITE);
1337 bus_dmamap_unload(sc->sc_dmat, map);
1338 bcmeth_mapcache_put(sc, sc->sc_tx_mapcache, map);
1339 }
1340
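/*
 * Turn a dmamap-loaded mbuf into transmit descriptors.  The first
 * descriptor gets TXDB_FLAG_SF and the last TXDB_FLAG_EF; a completion
 * interrupt (TXDB_FLAG_IC) is only requested once enough descriptors
 * have been produced since the last request, or when the mbuf queue is
 * about to fill up.
 */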
1341 static bool
1342 bcmeth_txq_produce(
1343 struct bcmeth_softc *sc,
1344 struct bcmeth_txqueue *txq,
1345 struct mbuf *m)
1346 {
1347 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1348
1349 if (map->dm_nsegs > txq->txq_free)
1350 return false;
1351
1352 /*
1353 * TCP Offload flag must be set in the first descriptor.
1354 */
1355 struct gmac_txdb *producer = txq->txq_producer;
1356 uint32_t first_flags = TXDB_FLAG_SF;
1357 uint32_t last_flags = TXDB_FLAG_EF;
1358
1359 /*
1360 * If we've produced enough descriptors without consuming any
1361 * we need to ask for an interrupt to reclaim some.
1362 */
1363 txq->txq_lastintr += map->dm_nsegs;
1364 if (txq->txq_lastintr >= txq->txq_threshold
1365 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
1366 txq->txq_lastintr = 0;
1367 last_flags |= TXDB_FLAG_IC;
1368 }
1369
1370 KASSERT(producer != txq->txq_last);
1371
1372 struct gmac_txdb *start = producer;
1373 size_t count = map->dm_nsegs;
1374 producer->txdb_flags |= first_flags;
1375 producer->txdb_addrlo = map->dm_segs[0].ds_addr;
1376 producer->txdb_buflen = map->dm_segs[0].ds_len;
1377 for (u_int i = 1; i < map->dm_nsegs; i++) {
1378 #if 0
1379 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
1380 producer->txdb_flags, producer->txdb_buflen,
1381 producer->txdb_addrlo, producer->txdb_addrhi);
1382 #endif
1383 if (__predict_false(++producer == txq->txq_last)) {
1384 bcmeth_txq_desc_presync(sc, txq, start,
1385 txq->txq_last - start);
1386 count -= txq->txq_last - start;
1387 producer = txq->txq_first;
1388 start = txq->txq_first;
1389 }
1390 producer->txdb_addrlo = map->dm_segs[i].ds_addr;
1391 producer->txdb_buflen = map->dm_segs[i].ds_len;
1392 }
1393 producer->txdb_flags |= last_flags;
1394 #if 0
1395 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
1396 producer->txdb_flags, producer->txdb_buflen,
1397 producer->txdb_addrlo, producer->txdb_addrhi);
1398 #endif
1399 if (count)
1400 bcmeth_txq_desc_presync(sc, txq, start, count);
1401
1402 /*
1403 * Reduce free count by the number of segments we consumed.
1404 */
1405 txq->txq_free -= map->dm_nsegs;
1406 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
1407 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txdb_flags & TXDB_FLAG_EF) == 0);
1408 KASSERT(producer->txdb_flags & TXDB_FLAG_EF);
1409
1410 #if 0
1411 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%zd..%zd)\n",
1412 __func__, m, m->m_pkthdr.len, map->dm_nsegs,
1413 txq->txq_producer - txq->txq_first, producer - txq->txq_first);
1414 #endif
1415
1416 if (producer + 1 == txq->txq_last)
1417 txq->txq_producer = txq->txq_first;
1418 else
1419 txq->txq_producer = producer + 1;
1420 IF_ENQUEUE(&txq->txq_mbufs, m);
1421
1422 /*
1423 * Let the transmitter know there's more to do
1424 */
1425 bcmeth_write_4(sc, txq->txq_reg_xmtptr,
1426 txq->txq_descmap->dm_segs[0].ds_addr
1427 + ((uintptr_t)txq->txq_producer & XMT_LASTDSCR));
1428
1429 return true;
1430 }
1431
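/*
 * Consolidate an mbuf chain into a single mbuf (a cluster if the packet
 * doesn't fit in MHLEN), used from bcmeth_txq_enqueue() when IFF_LINK2
 * is set so a packet consumes fewer transmit segments.  The in-place
 * compaction path below is currently disabled ("&& false").
 */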
1432 static struct mbuf *
1433 bcmeth_copy_packet(struct mbuf *m)
1434 {
1435 struct mbuf *mext = NULL;
1436 size_t misalignment = 0;
1437 size_t hlen = 0;
1438
1439 for (mext = m; mext != NULL; mext = mext->m_next) {
1440 if (mext->m_flags & M_EXT) {
1441 misalignment = mtod(mext, vaddr_t) & arm_dcache_align;
1442 break;
1443 }
  1444 		hlen += mext->m_len;
1445 }
1446
1447 struct mbuf *n = m->m_next;
1448 if (m != mext && hlen + misalignment <= MHLEN && false) {
1449 KASSERT(m->m_pktdat <= m->m_data && m->m_data <= &m->m_pktdat[MHLEN - m->m_len]);
1450 size_t oldoff = m->m_data - m->m_pktdat;
1451 size_t off;
1452 if (mext == NULL) {
1453 off = (oldoff + hlen > MHLEN) ? 0 : oldoff;
1454 } else {
1455 off = MHLEN - (hlen + misalignment);
1456 }
1457 KASSERT(off + hlen + misalignment <= MHLEN);
1458 if (((oldoff ^ off) & arm_dcache_align) != 0 || off < oldoff) {
1459 memmove(&m->m_pktdat[off], m->m_data, m->m_len);
1460 m->m_data = &m->m_pktdat[off];
1461 }
1462 m_copydata(n, 0, hlen - m->m_len, &m->m_data[m->m_len]);
1463 m->m_len = hlen;
1464 m->m_next = mext;
1465 while (n != mext) {
1466 n = m_free(n);
1467 }
1468 return m;
1469 }
1470
1471 struct mbuf *m0 = m_gethdr(M_DONTWAIT, m->m_type);
1472 if (m0 == NULL) {
1473 return NULL;
1474 }
1475 M_COPY_PKTHDR(m0, m);
1476 MCLAIM(m0, m->m_owner);
1477 if (m0->m_pkthdr.len > MHLEN) {
1478 MCLGET(m0, M_DONTWAIT);
1479 if ((m0->m_flags & M_EXT) == 0) {
1480 m_freem(m0);
1481 return NULL;
1482 }
1483 }
1484 m0->m_len = m->m_pkthdr.len;
1485 m_copydata(m, 0, m0->m_len, mtod(m0, void *));
1486 m_freem(m);
1487 return m0;
1488 }
1489
1490 static bool
1491 bcmeth_txq_enqueue(
1492 struct bcmeth_softc *sc,
1493 struct bcmeth_txqueue *txq)
1494 {
1495 for (;;) {
1496 if (IF_QFULL(&txq->txq_mbufs))
1497 return false;
1498 struct mbuf *m = txq->txq_next;
1499 if (m == NULL) {
1500 int s = splnet();
1501 IF_DEQUEUE(&sc->sc_if.if_snd, m);
1502 splx(s);
1503 if (m == NULL)
1504 return true;
1505 M_SETCTX(m, NULL);
1506 } else {
1507 txq->txq_next = NULL;
1508 }
1509 /*
1510 * If LINK2 is set and this packet uses multiple mbufs,
1511 * consolidate it into a single mbuf.
1512 */
1513 if (m->m_next != NULL && (sc->sc_if.if_flags & IFF_LINK2)) {
1514 struct mbuf *m0 = bcmeth_copy_packet(m);
1515 if (m0 == NULL) {
1516 txq->txq_next = m;
1517 return true;
1518 }
1519 m = m0;
1520 }
1521 int error = bcmeth_txq_map_load(sc, txq, m);
1522 if (error) {
1523 aprint_error_dev(sc->sc_dev,
1524 "discarded packet due to "
1525 "dmamap load failure: %d\n", error);
1526 m_freem(m);
1527 continue;
1528 }
1529 KASSERT(txq->txq_next == NULL);
1530 if (!bcmeth_txq_produce(sc, txq, m)) {
1531 txq->txq_next = m;
1532 return false;
1533 }
1534 KASSERT(txq->txq_next == NULL);
1535 }
1536 }
1537
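/*
 * Reclaim transmitted descriptors.  Walk from txq_consumer towards the
 * descriptor the hardware is currently on (XMT_CURRDSCR); for each
 * descriptor carrying TXDB_FLAG_EF the corresponding mbuf is unloaded,
 * counted in the interface statistics and freed.  Returns true if the
 * ring is now empty or at least txq_threshold descriptors are free.
 */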
1538 static bool
1539 bcmeth_txq_consume(
1540 struct bcmeth_softc *sc,
1541 struct bcmeth_txqueue *txq)
1542 {
1543 struct ifnet * const ifp = &sc->sc_if;
1544 struct gmac_txdb *consumer = txq->txq_consumer;
1545 size_t txfree = 0;
1546
1547 #if 0
1548 printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
1549 #endif
1550
1551 for (;;) {
1552 if (consumer == txq->txq_producer) {
1553 txq->txq_consumer = consumer;
1554 txq->txq_free += txfree;
1555 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
1556 #if 0
1557 printf("%s: empty: freed %zu descriptors going from %zu to %zu\n",
1558 __func__, txfree, txq->txq_free - txfree, txq->txq_free);
1559 #endif
1560 KASSERT(txq->txq_lastintr == 0);
1561 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1);
1562 return true;
1563 }
1564 bcmeth_txq_desc_postsync(sc, txq, consumer, 1);
1565 uint32_t s0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
1566 if (consumer == txq->txq_first + __SHIFTOUT(s0, XMT_CURRDSCR)) {
1567 txq->txq_consumer = consumer;
1568 txq->txq_free += txfree;
1569 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
1570 #if 0
1571 printf("%s: freed %zu descriptors\n",
1572 __func__, txfree);
1573 #endif
1574 return bcmeth_txq_fillable_p(sc, txq);
1575 }
1576
1577 /*
1578 * If this is the last descriptor in the chain, get the
1579 * mbuf, free its dmamap, and free the mbuf chain itself.
1580 */
1581 const uint32_t txdb_flags = consumer->txdb_flags;
1582 if (txdb_flags & TXDB_FLAG_EF) {
1583 struct mbuf *m;
1584
1585 IF_DEQUEUE(&txq->txq_mbufs, m);
1586 KASSERT(m);
1587 bcmeth_txq_map_unload(sc, txq, m);
1588 #if 0
1589 printf("%s: mbuf %p: consumed a %u byte packet\n",
1590 __func__, m, m->m_pkthdr.len);
1591 #endif
1592 bpf_mtap(ifp, m);
1593 ifp->if_opackets++;
1594 ifp->if_obytes += m->m_pkthdr.len;
1595 if (m->m_flags & M_MCAST)
1596 ifp->if_omcasts++;
1597 m_freem(m);
1598 }
1599
1600 /*
1601 * We own this packet again. Clear all flags except wrap.
1602 */
1603 txfree++;
1604
1605 /*
1606 * Wrap at the last entry!
1607 */
1608 if (txdb_flags & TXDB_FLAG_ET) {
1609 consumer->txdb_flags = TXDB_FLAG_ET;
1610 KASSERT(consumer + 1 == txq->txq_last);
1611 consumer = txq->txq_first;
1612 } else {
1613 consumer->txdb_flags = 0;
1614 consumer++;
1615 KASSERT(consumer < txq->txq_last);
1616 }
1617 }
1618 }
1619
1620 static void
1621 bcmeth_txq_purge(
1622 struct bcmeth_softc *sc,
1623 struct bcmeth_txqueue *txq)
1624 {
1625 struct mbuf *m;
1626 KASSERT((bcmeth_read_4(sc, UNIMAC_COMMAND_CONFIG) & TX_ENA) == 0);
1627
1628 for (;;) {
1629 IF_DEQUEUE(&txq->txq_mbufs, m);
1630 if (m == NULL)
1631 break;
1632 bcmeth_txq_map_unload(sc, txq, m);
1633 m_freem(m);
1634 }
1635 if ((m = txq->txq_next) != NULL) {
1636 txq->txq_next = NULL;
1637 bcmeth_txq_map_unload(sc, txq, m);
1638 m_freem(m);
1639 }
1640 }
1641
1642 static void
1643 bcmeth_txq_reset(
1644 struct bcmeth_softc *sc,
1645 struct bcmeth_txqueue *txq)
1646 {
1647 /*
1648 * sync all the descriptors
1649 */
1650 bcmeth_txq_desc_postsync(sc, txq, txq->txq_first,
1651 txq->txq_last - txq->txq_first);
1652
1653 /*
1654 * Make sure we own all descriptors in the ring.
1655 */
1656 struct gmac_txdb *txdb;
1657 for (txdb = txq->txq_first; txdb < txq->txq_last - 1; txdb++) {
1658 txdb->txdb_flags = 0;
1659 }
1660
1661 /*
1662 * Last descriptor has the wrap flag.
1663 */
1664 txdb->txdb_flags = TXDB_FLAG_ET;
1665
1666 /*
  1667 	 * Reset the producer and consumer indices.
1668 */
1669 txq->txq_consumer = txq->txq_first;
1670 txq->txq_producer = txq->txq_first;
1671 txq->txq_free = txq->txq_last - txq->txq_first - 1;
1672 txq->txq_threshold = txq->txq_free / 2;
1673 txq->txq_lastintr = 0;
1674
1675 /*
1676 * What do we want to get interrupted on?
1677 */
1678 sc->sc_intmask |= XMTINT_0 | XMTUF;
1679
1680 /*
  1681 	 * Restart the transmitter at the first descriptor
1682 */
1683 bcmeth_write_4(sc, txq->txq_reg_xmtaddrlo,
1684 txq->txq_descmap->dm_segs->ds_addr);
1685 }
1686
1687 static void
1688 bcmeth_ifstart(struct ifnet *ifp)
1689 {
1690 struct bcmeth_softc * const sc = ifp->if_softc;
1691
1692 if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) {
1693 return;
1694 }
1695
1696 #ifdef BCMETH_MPSAFETX
1697 if (cpu_intr_p()) {
1698 #endif
1699 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
1700 softint_schedule(sc->sc_soft_ih);
1701 #ifdef BCMETH_MPSAFETX
1702 } else {
1703 /*
  1704 		 * Either we are already in a softint thread or in some other
  1705 		 * thread context, so just borrow it to do the send and save
  1706 		 * ourselves the overhead of scheduling a soft interrupt.
1707 */
1708 bcmeth_soft_txintr(sc);
1709 }
1710 #endif
1711 }
1712
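/*
 * Hard interrupt handler.  Interrupt causes are acknowledged and masked
 * here, and the actual work is pushed either to the softint (transmit
 * completions, light receive load) or to the workqueue (heavy receive
 * load, receive descriptor underflow, fatal errors that need a reinit).
 * The masked causes are re-enabled by the softint/worker once serviced.
 */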
1713 int
1714 bcmeth_intr(void *arg)
1715 {
1716 struct bcmeth_softc * const sc = arg;
1717 uint32_t soft_flags = 0;
1718 uint32_t work_flags = 0;
1719 int rv = 0;
1720
1721 mutex_enter(sc->sc_hwlock);
1722
1723 uint32_t intmask = sc->sc_intmask;
1724 BCMETH_EVCNT_INCR(sc->sc_ev_intr);
1725
1726 for (;;) {
1727 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
1728 intstatus &= intmask;
1729 bcmeth_write_4(sc, GMAC_INTSTATUS, intstatus); /* write 1 to clear */
1730 if (intstatus == 0) {
1731 break;
1732 }
1733 #if 0
1734 aprint_normal_dev(sc->sc_dev, "%s: intstatus=%#x intmask=%#x\n",
1735 __func__, intstatus, bcmeth_read_4(sc, GMAC_INTMASK));
1736 #endif
1737 if (intstatus & RCVINT) {
1738 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
1739 intmask &= ~RCVINT;
1740
1741 uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
1742 uint32_t descs = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
1743 if (descs < rxq->rxq_consumer - rxq->rxq_first) {
1744 /*
1745 * We wrapped at the end so count how far
1746 * we are from the end.
1747 */
1748 descs += rxq->rxq_last - rxq->rxq_consumer;
1749 } else {
1750 descs -= rxq->rxq_consumer - rxq->rxq_first;
1751 }
1752 /*
  1753 			 * If we "timed out" we can't be hogging the CPU, so use
  1754 			 * softints.  If we exceeded the frame count, we might be
  1755 			 * hogging, so let the workqueue deal with them.
1756 */
1757 const uint32_t framecount = __SHIFTOUT(sc->sc_rcvlazy, INTRCVLAZY_FRAMECOUNT);
1758 if (descs < framecount
1759 || (curcpu()->ci_curlwp->l_flag & LW_IDLE)) {
1760 soft_flags |= SOFT_RXINTR;
1761 } else {
1762 work_flags |= WORK_RXINTR;
1763 }
1764 }
1765
1766 if (intstatus & XMTINT_0) {
1767 intmask &= ~XMTINT_0;
1768 soft_flags |= SOFT_TXINTR;
1769 }
1770
1771 if (intstatus & RCVDESCUF) {
1772 intmask &= ~RCVDESCUF;
1773 work_flags |= WORK_RXUNDERFLOW;
1774 }
1775
1776 intstatus &= intmask;
1777 if (intstatus) {
1778 aprint_error_dev(sc->sc_dev,
1779 "intr: intstatus=%#x\n", intstatus);
1780 aprint_error_dev(sc->sc_dev,
1781 "rcvbase=%p/%#lx rcvptr=%#x rcvsts=%#x/%#x\n",
1782 sc->sc_rxq.rxq_first,
1783 sc->sc_rxq.rxq_descmap->dm_segs[0].ds_addr,
1784 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvptr),
1785 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts0),
1786 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts1));
1787 aprint_error_dev(sc->sc_dev,
1788 "xmtbase=%p/%#lx xmtptr=%#x xmtsts=%#x/%#x\n",
1789 sc->sc_txq.txq_first,
1790 sc->sc_txq.txq_descmap->dm_segs[0].ds_addr,
1791 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtptr),
1792 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts0),
1793 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts1));
1794 intmask &= ~intstatus;
1795 work_flags |= WORK_REINIT;
1796 break;
1797 }
1798 }
1799
1800 if (intmask != sc->sc_intmask) {
1801 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1802 }
1803
1804 if (work_flags) {
1805 if (sc->sc_work_flags == 0) {
1806 workqueue_enqueue(sc->sc_workq, &sc->sc_work, NULL);
1807 }
1808 atomic_or_32(&sc->sc_work_flags, work_flags);
1809 rv = 1;
1810 }
1811
1812 if (soft_flags) {
1813 if (sc->sc_soft_flags == 0) {
1814 softint_schedule(sc->sc_soft_ih);
1815 }
1816 atomic_or_32(&sc->sc_soft_flags, soft_flags);
1817 rv = 1;
1818 }
1819
1820 mutex_exit(sc->sc_hwlock);
1821
1822 return rv;
1823 }
1824
1825 #ifdef BCMETH_MPSAFETX
1826 void
1827 bcmeth_soft_txintr(struct bcmeth_softc *sc)
1828 {
1829 mutex_enter(sc->sc_lock);
1830 /*
1831 * Let's do what we came here for. Consume transmitted
  1832 	 * packets off the transmit ring.
1833 */
1834 if (!bcmeth_txq_consume(sc, &sc->sc_txq)
1835 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
1836 BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall);
1837 sc->sc_if.if_flags |= IFF_OACTIVE;
1838 } else {
1839 sc->sc_if.if_flags &= ~IFF_OACTIVE;
1840 }
1841 if (sc->sc_if.if_flags & IFF_RUNNING) {
1842 mutex_spin_enter(sc->sc_hwlock);
1843 sc->sc_intmask |= XMTINT_0;
1844 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1845 mutex_spin_exit(sc->sc_hwlock);
1846 }
1847 mutex_exit(sc->sc_lock);
1848 }
1849 #endif /* BCMETH_MPSAFETX */
1850
1851 void
1852 bcmeth_soft_intr(void *arg)
1853 {
1854 struct bcmeth_softc * const sc = arg;
1855 struct ifnet * const ifp = &sc->sc_if;
1856 uint32_t intmask = 0;
1857
1858 mutex_enter(sc->sc_lock);
1859
1860 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);
1861
1862 BCMETH_EVCNT_INCR(sc->sc_ev_soft_intr);
1863
1864 if ((soft_flags & SOFT_TXINTR)
1865 || bcmeth_txq_active_p(sc, &sc->sc_txq)) {
1866 /*
1867 * Let's do what we came here for. Consume transmitted
  1868 		 * packets off the transmit ring.
1869 */
1870 if (!bcmeth_txq_consume(sc, &sc->sc_txq)
1871 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
1872 BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall);
1873 ifp->if_flags |= IFF_OACTIVE;
1874 } else {
1875 ifp->if_flags &= ~IFF_OACTIVE;
1876 }
1877 intmask |= XMTINT_0;
1878 }
1879
1880 if (soft_flags & SOFT_RXINTR) {
1881 /*
1882 * Let's consume
1883 */
1884 bcmeth_rxq_consume(sc, &sc->sc_rxq);
1885 intmask |= RCVINT;
1886 }
1887
1888 if (ifp->if_flags & IFF_RUNNING) {
1889 bcmeth_rxq_produce(sc, &sc->sc_rxq);
1890 mutex_spin_enter(sc->sc_hwlock);
1891 sc->sc_intmask |= intmask;
1892 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1893 mutex_spin_exit(sc->sc_hwlock);
1894 }
1895
1896 mutex_exit(sc->sc_lock);
1897 }
1898
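/*
 * Workqueue handler, running in thread context.  It reinitializes the
 * interface after a fatal interrupt (WORK_REINIT), grows the receive
 * buffer threshold by 25% after a descriptor underflow (WORK_RXUNDERFLOW),
 * and drains/refills the receive ring when the interrupt handler decided
 * the receive load was too heavy for a softint (WORK_RXINTR).
 */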
1899 void
1900 bcmeth_worker(struct work *wk, void *arg)
1901 {
1902 struct bcmeth_softc * const sc = arg;
1903 struct ifnet * const ifp = &sc->sc_if;
1904 uint32_t intmask = 0;
1905
1906 mutex_enter(sc->sc_lock);
1907
1908 BCMETH_EVCNT_INCR(sc->sc_ev_work);
1909
1910 uint32_t work_flags = atomic_swap_32(&sc->sc_work_flags, 0);
1911 if (work_flags & WORK_REINIT) {
1912 int s = splnet();
1913 sc->sc_soft_flags = 0;
1914 bcmeth_ifinit(ifp);
1915 splx(s);
1916 work_flags &= ~WORK_RXUNDERFLOW;
1917 }
1918
1919 if (work_flags & WORK_RXUNDERFLOW) {
1920 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
1921 size_t threshold = 5 * rxq->rxq_threshold / 4;
1922 if (threshold >= rxq->rxq_last - rxq->rxq_first) {
1923 threshold = rxq->rxq_last - rxq->rxq_first - 1;
1924 } else {
1925 intmask |= RCVDESCUF;
1926 }
1927 aprint_normal_dev(sc->sc_dev,
1928 "increasing receive buffers from %zu to %zu\n",
1929 rxq->rxq_threshold, threshold);
1930 rxq->rxq_threshold = threshold;
1931 }
1932
1933 if (work_flags & WORK_RXINTR) {
1934 /*
1935 * Let's consume
1936 */
1937 bcmeth_rxq_consume(sc, &sc->sc_rxq);
1938 intmask |= RCVINT;
1939 }
1940
1941 if (ifp->if_flags & IFF_RUNNING) {
1942 bcmeth_rxq_produce(sc, &sc->sc_rxq);
1943 #if 0
1944 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
1945 if (intstatus & RCVINT) {
1946 bcmeth_write_4(sc, GMAC_INTSTATUS, RCVINT);
1947 work_flags |= WORK_RXINTR;
1948 continue;
1949 }
1950 #endif
1951 mutex_spin_enter(sc->sc_hwlock);
1952 sc->sc_intmask |= intmask;
1953 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1954 mutex_spin_exit(sc->sc_hwlock);
1955 }
1956
1957 mutex_exit(sc->sc_lock);
1958 }
1959