1 /*-
2 * Copyright (c) 2012 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas of 3am Software Foundry.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #define _ARM32_BUS_DMA_PRIVATE
31 #define GMAC_PRIVATE
32
33 #include "locators.h"
34 #include "opt_broadcom.h"
35
36 #include <sys/cdefs.h>
37
38 __KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.36 2019/05/29 06:21:56 msaitoh Exp $");
39
40 #include <sys/param.h>
41 #include <sys/atomic.h>
42 #include <sys/bus.h>
43 #include <sys/device.h>
44 #include <sys/ioctl.h>
45 #include <sys/intr.h>
46 #include <sys/kmem.h>
47 #include <sys/mutex.h>
48 #include <sys/socket.h>
49 #include <sys/systm.h>
50 #include <sys/workqueue.h>
51
52 #include <net/if.h>
53 #include <net/if_ether.h>
54 #include <net/if_media.h>
55 #include <net/if_dl.h>
56 #include <net/bpf.h>
57
58 #include <dev/mii/miivar.h>
59
60 #include <arm/locore.h>
61
62 #include <arm/broadcom/bcm53xx_reg.h>
63 #include <arm/broadcom/bcm53xx_var.h>
64
65 //#define BCMETH_MPSAFE
66
67 #ifdef BCMETH_COUNTERS
68 #define BCMETH_EVCNT_ADD(a, b) ((void)((a).ev_count += (b)))
69 #else
70 #define BCMETH_EVCNT_ADD(a, b) do { } while (/*CONSTCOND*/0)
71 #endif
72 #define BCMETH_EVCNT_INCR(a) BCMETH_EVCNT_ADD((a), 1)
73
74 #define BCMETH_MAXTXMBUFS 128
75 #define BCMETH_NTXSEGS 30
76 #define BCMETH_MAXRXMBUFS 255
77 #define BCMETH_MINRXMBUFS 64
78 #define BCMETH_NRXSEGS 1
79 #define BCMETH_RINGSIZE PAGE_SIZE
80
81 #if 1
82 #define BCMETH_RCVMAGIC 0xfeedface
83 #endif
84
85 static int bcmeth_ccb_match(device_t, cfdata_t, void *);
86 static void bcmeth_ccb_attach(device_t, device_t, void *);
87
88 struct bcmeth_txqueue {
89 bus_dmamap_t txq_descmap;
90 struct gmac_txdb *txq_consumer;
91 struct gmac_txdb *txq_producer;
92 struct gmac_txdb *txq_first;
93 struct gmac_txdb *txq_last;
94 struct ifqueue txq_mbufs;
95 struct mbuf *txq_next;
96 size_t txq_free;
97 size_t txq_threshold;
98 size_t txq_lastintr;
99 bus_size_t txq_reg_xmtaddrlo;
100 bus_size_t txq_reg_xmtptr;
101 bus_size_t txq_reg_xmtctl;
102 bus_size_t txq_reg_xmtsts0;
103 bus_size_t txq_reg_xmtsts1;
104 bus_dma_segment_t txq_descmap_seg;
105 };
106
107 struct bcmeth_rxqueue {
108 bus_dmamap_t rxq_descmap;
109 struct gmac_rxdb *rxq_consumer;
110 struct gmac_rxdb *rxq_producer;
111 struct gmac_rxdb *rxq_first;
112 struct gmac_rxdb *rxq_last;
113 struct mbuf *rxq_mhead;
114 struct mbuf **rxq_mtail;
115 struct mbuf *rxq_mconsumer;
116 size_t rxq_inuse;
117 size_t rxq_threshold;
118 bus_size_t rxq_reg_rcvaddrlo;
119 bus_size_t rxq_reg_rcvptr;
120 bus_size_t rxq_reg_rcvctl;
121 bus_size_t rxq_reg_rcvsts0;
122 bus_size_t rxq_reg_rcvsts1;
123 bus_dma_segment_t rxq_descmap_seg;
124 };
125
126 struct bcmeth_mapcache {
127 u_int dmc_nmaps;
128 u_int dmc_maxseg;
129 u_int dmc_maxmaps;
130 u_int dmc_maxmapsize;
131 bus_dmamap_t dmc_maps[0];
132 };
133
134 struct bcmeth_softc {
135 device_t sc_dev;
136 bus_space_tag_t sc_bst;
137 bus_space_handle_t sc_bsh;
138 bus_dma_tag_t sc_dmat;
139 kmutex_t *sc_lock;
140 kmutex_t *sc_hwlock;
141 struct ethercom sc_ec;
142 #define sc_if sc_ec.ec_if
143 struct ifmedia sc_media;
144 void *sc_soft_ih;
145 void *sc_ih;
146
147 struct bcmeth_rxqueue sc_rxq;
148 struct bcmeth_txqueue sc_txq;
149
150 size_t sc_rcvoffset;
151 uint32_t sc_macaddr[2];
152 uint32_t sc_maxfrm;
153 uint32_t sc_cmdcfg;
154 uint32_t sc_intmask;
155 uint32_t sc_rcvlazy;
156 volatile uint32_t sc_soft_flags;
157 #define SOFT_RXINTR 0x01
158 #define SOFT_TXINTR 0x02
159
160 #ifdef BCMETH_COUNTERS
161 struct evcnt sc_ev_intr;
162 struct evcnt sc_ev_soft_intr;
163 struct evcnt sc_ev_work;
164 struct evcnt sc_ev_tx_stall;
165 struct evcnt sc_ev_rx_badmagic_lo;
166 struct evcnt sc_ev_rx_badmagic_hi;
167 #endif
168
169 struct ifqueue sc_rx_bufcache;
170 struct bcmeth_mapcache *sc_rx_mapcache;
171 struct bcmeth_mapcache *sc_tx_mapcache;
172
173 struct workqueue *sc_workq;
174 struct work sc_work;
175
176 volatile uint32_t sc_work_flags;
177 #define WORK_RXINTR 0x01
178 #define WORK_RXUNDERFLOW 0x02
179 #define WORK_REINIT 0x04
180
181 uint8_t sc_enaddr[ETHER_ADDR_LEN];
182 };
183
184 static void bcmeth_ifstart(struct ifnet *);
185 static void bcmeth_ifwatchdog(struct ifnet *);
186 static int bcmeth_ifinit(struct ifnet *);
187 static void bcmeth_ifstop(struct ifnet *, int);
188 static int bcmeth_ifioctl(struct ifnet *, u_long, void *);
189
190 static int bcmeth_mapcache_create(struct bcmeth_softc *,
191 struct bcmeth_mapcache **, size_t, size_t, size_t);
192 static void bcmeth_mapcache_destroy(struct bcmeth_softc *,
193 struct bcmeth_mapcache *);
194 static bus_dmamap_t bcmeth_mapcache_get(struct bcmeth_softc *,
195 struct bcmeth_mapcache *);
196 static void bcmeth_mapcache_put(struct bcmeth_softc *,
197 struct bcmeth_mapcache *, bus_dmamap_t);
198
199 static int bcmeth_txq_attach(struct bcmeth_softc *,
200 struct bcmeth_txqueue *, u_int);
201 static void bcmeth_txq_purge(struct bcmeth_softc *,
202 struct bcmeth_txqueue *);
203 static void bcmeth_txq_reset(struct bcmeth_softc *,
204 struct bcmeth_txqueue *);
205 static bool bcmeth_txq_consume(struct bcmeth_softc *,
206 struct bcmeth_txqueue *);
207 static bool bcmeth_txq_produce(struct bcmeth_softc *,
208 struct bcmeth_txqueue *, struct mbuf *m);
209 static bool bcmeth_txq_active_p(struct bcmeth_softc *,
210 struct bcmeth_txqueue *);
211
212 static int bcmeth_rxq_attach(struct bcmeth_softc *,
213 struct bcmeth_rxqueue *, u_int);
214 static bool bcmeth_rxq_produce(struct bcmeth_softc *,
215 struct bcmeth_rxqueue *);
216 static void bcmeth_rxq_purge(struct bcmeth_softc *,
217 struct bcmeth_rxqueue *, bool);
218 static void bcmeth_rxq_reset(struct bcmeth_softc *,
219 struct bcmeth_rxqueue *);
220
221 static int bcmeth_intr(void *);
222 #ifdef BCMETH_MPSAFETX
223 static void bcmeth_soft_txintr(struct bcmeth_softc *);
224 #endif
225 static void bcmeth_soft_intr(void *);
226 static void bcmeth_worker(struct work *, void *);
227
228 static int bcmeth_mediachange(struct ifnet *);
229 static void bcmeth_mediastatus(struct ifnet *, struct ifmediareq *);
230
231 static inline uint32_t
232 bcmeth_read_4(struct bcmeth_softc *sc, bus_size_t o)
233 {
234 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, o);
235 }
236
237 static inline void
238 bcmeth_write_4(struct bcmeth_softc *sc, bus_size_t o, uint32_t v)
239 {
240 bus_space_write_4(sc->sc_bst, sc->sc_bsh, o, v);
241 }
242
243 CFATTACH_DECL_NEW(bcmeth_ccb, sizeof(struct bcmeth_softc),
244 bcmeth_ccb_match, bcmeth_ccb_attach, NULL, NULL);
245
246 static int
247 bcmeth_ccb_match(device_t parent, cfdata_t cf, void *aux)
248 {
249 struct bcmccb_attach_args * const ccbaa = aux;
250 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;
251
252 if (strcmp(cf->cf_name, loc->loc_name))
253 return 0;
254
255 #ifdef DIAGNOSTIC
256 const int port = cf->cf_loc[BCMCCBCF_PORT];
257 #endif
258 KASSERT(port == BCMCCBCF_PORT_DEFAULT || port == loc->loc_port);
259
260 return 1;
261 }
262
263 static void
264 bcmeth_ccb_attach(device_t parent, device_t self, void *aux)
265 {
266 struct bcmeth_softc * const sc = device_private(self);
267 struct ethercom * const ec = &sc->sc_ec;
268 struct ifnet * const ifp = &ec->ec_if;
269 struct bcmccb_attach_args * const ccbaa = aux;
270 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;
271 const char * const xname = device_xname(self);
272 prop_dictionary_t dict = device_properties(self);
273 int error;
274
275 sc->sc_bst = ccbaa->ccbaa_ccb_bst;
276 sc->sc_dmat = ccbaa->ccbaa_dmat;
277 bus_space_subregion(sc->sc_bst, ccbaa->ccbaa_ccb_bsh,
278 loc->loc_offset, loc->loc_size, &sc->sc_bsh);
279
280 /*
281 * We need to use the coherent dma tag for the GMAC.
282 */
283 sc->sc_dmat = &bcm53xx_coherent_dma_tag;
284 #if _ARM32_NEED_BUS_DMA_BOUNCE
285 if (device_cfdata(self)->cf_flags & 2) {
286 sc->sc_dmat = &bcm53xx_bounce_dma_tag;
287 }
288 #endif
289
290 prop_data_t eaprop = prop_dictionary_get(dict, "mac-address");
291 if (eaprop == NULL) {
292 uint32_t mac0 = bcmeth_read_4(sc, UNIMAC_MAC_0);
293 uint32_t mac1 = bcmeth_read_4(sc, UNIMAC_MAC_1);
294 if ((mac0 == 0 && mac1 == 0) || (mac1 & 1)) {
295 aprint_error(": mac-address property is missing\n");
296 return;
297 }
298 		sc->sc_enaddr[0] = (mac0 >> 24) & 0xff;
299 		sc->sc_enaddr[1] = (mac0 >> 16) & 0xff;
300 		sc->sc_enaddr[2] = (mac0 >> 8) & 0xff;
301 		sc->sc_enaddr[3] = (mac0 >> 0) & 0xff;
302 		sc->sc_enaddr[4] = (mac1 >> 8) & 0xff;
303 		sc->sc_enaddr[5] = (mac1 >> 0) & 0xff;
304 } else {
305 KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
306 KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
307 memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop),
308 ETHER_ADDR_LEN);
309 }
310 sc->sc_dev = self;
311 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
312 sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);
313
314 bcmeth_write_4(sc, GMAC_INTMASK, 0); // disable interrupts
315
316 aprint_naive("\n");
317 aprint_normal(": Gigabit Ethernet Controller\n");
318
319 error = bcmeth_rxq_attach(sc, &sc->sc_rxq, 0);
320 if (error) {
321 aprint_error(": failed to init rxq: %d\n", error);
322 goto fail_1;
323 }
324
325 error = bcmeth_txq_attach(sc, &sc->sc_txq, 0);
326 if (error) {
327 aprint_error(": failed to init txq: %d\n", error);
328 goto fail_1;
329 }
330
331 error = bcmeth_mapcache_create(sc, &sc->sc_rx_mapcache,
332 BCMETH_MAXRXMBUFS, MCLBYTES, BCMETH_NRXSEGS);
333 if (error) {
334 aprint_error(": failed to allocate rx dmamaps: %d\n", error);
335 goto fail_1;
336 }
337
338 error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
339 BCMETH_MAXTXMBUFS, MCLBYTES, BCMETH_NTXSEGS);
340 if (error) {
341 aprint_error(": failed to allocate tx dmamaps: %d\n", error);
342 goto fail_1;
343 }
344
345 error = workqueue_create(&sc->sc_workq, xname, bcmeth_worker, sc,
346 (PRI_USER + MAXPRI_USER) / 2, IPL_NET, WQ_MPSAFE|WQ_PERCPU);
347 if (error) {
348 aprint_error(": failed to create workqueue: %d\n", error);
349 goto fail_2;
350 }
351
352 sc->sc_soft_ih = softint_establish(SOFTINT_MPSAFE | SOFTINT_NET,
353 bcmeth_soft_intr, sc);
354
355 	if (sc->sc_soft_ih == NULL) {
356 		aprint_error_dev(self,
357 		    "failed to establish soft interrupt\n");
358 		goto fail_2;
359 	}
360
361 sc->sc_ih = intr_establish(loc->loc_intrs[0], IPL_VM, IST_LEVEL,
362 bcmeth_intr, sc);
363
364 if (sc->sc_ih == NULL) {
365 aprint_error_dev(self, "failed to establish interrupt %d\n",
366 loc->loc_intrs[0]);
367 		goto fail_3;
368 } else {
369 aprint_normal_dev(self, "interrupting on irq %d\n",
370 loc->loc_intrs[0]);
371 }
372
373 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
374 ether_sprintf(sc->sc_enaddr));
375
376 /*
377 	 * Since each port is plugged into the switch/flow-accelerator,
378 	 * we hard code GigE Full-Duplex with Flow Control enabled.
379 */
380 int ifmedia = IFM_ETHER | IFM_1000_T | IFM_FDX;
381 //ifmedia |= IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
382 ifmedia_init(&sc->sc_media, IFM_IMASK, bcmeth_mediachange,
383 bcmeth_mediastatus);
384 ifmedia_add(&sc->sc_media, ifmedia, 0, NULL);
385 ifmedia_set(&sc->sc_media, ifmedia);
386
387 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
388
389 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
390 ifp->if_softc = sc;
391 ifp->if_baudrate = IF_Mbps(1000);
392 ifp->if_capabilities = 0;
393 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
394 #ifdef BCMETH_MPSAFE
395 ifp->if_flags2 = IFF2_MPSAFE;
396 #endif
397 ifp->if_ioctl = bcmeth_ifioctl;
398 ifp->if_start = bcmeth_ifstart;
399 ifp->if_watchdog = bcmeth_ifwatchdog;
400 ifp->if_init = bcmeth_ifinit;
401 ifp->if_stop = bcmeth_ifstop;
402 IFQ_SET_READY(&ifp->if_snd);
403
404 bcmeth_ifstop(ifp, true);
405
406 /*
407 * Attach the interface.
408 */
409 error = if_initialize(ifp);
410 if (error != 0) {
411 aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
412 error);
413 goto fail_5;
414 }
415 ether_ifattach(ifp, sc->sc_enaddr);
416 if_register(ifp);
417
418 #ifdef BCMETH_COUNTERS
419 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
420 NULL, xname, "intr");
421 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
422 NULL, xname, "soft intr");
423 evcnt_attach_dynamic(&sc->sc_ev_work, EVCNT_TYPE_MISC,
424 NULL, xname, "work items");
425 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
426 NULL, xname, "tx stalls");
427 evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_lo, EVCNT_TYPE_MISC,
428 NULL, xname, "rx badmagic lo");
429 evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_hi, EVCNT_TYPE_MISC,
430 NULL, xname, "rx badmagic hi");
431 #endif
432
433 return;
434
435 fail_5:
436 ifmedia_removeall(&sc->sc_media);
437 fail_4:
438 intr_disestablish(sc->sc_ih);
439 fail_3:
440 softint_disestablish(sc->sc_soft_ih);
441 fail_2:
442 workqueue_destroy(sc->sc_workq);
443 fail_1:
444 mutex_obj_free(sc->sc_lock);
445 mutex_obj_free(sc->sc_hwlock);
446 }
447
448 static int
449 bcmeth_mediachange(struct ifnet *ifp)
450 {
451 //struct bcmeth_softc * const sc = ifp->if_softc;
452 return 0;
453 }
454
455 static void
456 bcmeth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifm)
457 {
458 //struct bcmeth_softc * const sc = ifp->if_softc;
459
460 ifm->ifm_status = IFM_AVALID | IFM_ACTIVE;
461 ifm->ifm_active = IFM_ETHER | IFM_FDX | IFM_1000_T;
462 }
463
464 static uint64_t
465 bcmeth_macaddr_create(const uint8_t *enaddr)
466 {
467 return (enaddr[3] << 0) // UNIMAC_MAC_0
468 | (enaddr[2] << 8) // UNIMAC_MAC_0
469 | (enaddr[1] << 16) // UNIMAC_MAC_0
470 | ((uint64_t)enaddr[0] << 24) // UNIMAC_MAC_0
471 | ((uint64_t)enaddr[5] << 32) // UNIMAC_MAC_1
472 | ((uint64_t)enaddr[4] << 40); // UNIMAC_MAC_1
473 }
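
/*
 * A minimal sanity-check sketch (never compiled) illustrating the packing
 * above: for 00:11:22:33:44:55 the packed value is 0x445500112233, so
 * bcmeth_ifinit() writes 0x00112233 to UNIMAC_MAC_0 and 0x4455 to
 * UNIMAC_MAC_1, i.e. the UniMAC "byte 0 in the MSB" register layout.
 */
#if 0
static void
bcmeth_macaddr_example(void)
{
	static const uint8_t ea[ETHER_ADDR_LEN] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55
	};
	const uint64_t macstnaddr = bcmeth_macaddr_create(ea);

	KASSERT((uint32_t)(macstnaddr >> 0) == 0x00112233);
	KASSERT((uint32_t)(macstnaddr >> 32) == 0x4455);
}
#endif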
474
475 static int
476 bcmeth_ifinit(struct ifnet *ifp)
477 {
478 struct bcmeth_softc * const sc = ifp->if_softc;
479 int error = 0;
480
481 sc->sc_maxfrm = uimax(ifp->if_mtu + 32, MCLBYTES);
482 if (ifp->if_mtu > ETHERMTU_JUMBO)
483 return error;
484
485 KASSERT(ifp->if_flags & IFF_UP);
486
487 /*
488 * Stop the interface
489 */
490 bcmeth_ifstop(ifp, 0);
491
492 /*
493 * Reserve enough space at the front so that we can insert a maxsized
494 * link header and a VLAN tag. Also make sure we have enough room for
495 * the rcvsts field as well.
496 */
497 KASSERT(ALIGN(max_linkhdr) == max_linkhdr);
498 KASSERTMSG(max_linkhdr > sizeof(struct ether_header), "%u > %zu",
499 max_linkhdr, sizeof(struct ether_header));
500 sc->sc_rcvoffset = max_linkhdr + 4 - sizeof(struct ether_header);
501 if (sc->sc_rcvoffset <= 4)
502 sc->sc_rcvoffset += 4;
503 KASSERT((sc->sc_rcvoffset & 3) == 2);
504 KASSERT(sc->sc_rcvoffset <= __SHIFTOUT(RCVCTL_RCVOFFSET, RCVCTL_RCVOFFSET));
505 KASSERT(sc->sc_rcvoffset >= 6);
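
	/*
	 * A worked example, assuming a max_linkhdr of 16: the offset comes
	 * out as 16 + 4 - 14 = 6, which is > 4, keeps the payload
	 * word-aligned (6 & 3 == 2) and satisfies the assertions above.
	 */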
506
507 /*
508 * If our frame size has changed (or it's our first time through)
509 * destroy the existing transmit mapcache.
510 */
511 if (sc->sc_tx_mapcache != NULL
512 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
513 bcmeth_mapcache_destroy(sc, sc->sc_tx_mapcache);
514 sc->sc_tx_mapcache = NULL;
515 }
516
517 if (sc->sc_tx_mapcache == NULL) {
518 error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
519 BCMETH_MAXTXMBUFS, sc->sc_maxfrm, BCMETH_NTXSEGS);
520 if (error)
521 return error;
522 }
523
524 sc->sc_cmdcfg = NO_LENGTH_CHECK | PAUSE_IGNORE
525 | __SHIFTIN(ETH_SPEED_1000, ETH_SPEED)
526 | RX_ENA | TX_ENA;
527
528 if (ifp->if_flags & IFF_PROMISC) {
529 sc->sc_cmdcfg |= PROMISC_EN;
530 } else {
531 sc->sc_cmdcfg &= ~PROMISC_EN;
532 }
533
534 const uint8_t * const lladdr = CLLADDR(ifp->if_sadl);
535 const uint64_t macstnaddr = bcmeth_macaddr_create(lladdr);
536
537 /*
538 	 * We make sure that a received Ethernet packet starts on a non-word
539 	 * boundary so that the packet payload will be on a word boundary.
540 	 * To check the destination address quickly, we keep around two
541 	 * pre-packed words to compare with.
542 */
543 #if __ARMEL__
544 sc->sc_macaddr[0] = lladdr[0] | (lladdr[1] << 8);
545 sc->sc_macaddr[1] = lladdr[2] | (lladdr[3] << 8)
546 | (lladdr[4] << 16) | (lladdr[5] << 24);
547 #else
548 sc->sc_macaddr[0] = lladdr[1] | (lladdr[0] << 8);
549 sc->sc_macaddr[1] = lladdr[5] | (lladdr[4] << 8)
550 	    | (lladdr[3] << 16) | (lladdr[2] << 24);
551 #endif
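
	/*
	 * For example, 00:11:22:33:44:55 yields sc_macaddr[0] = 0x1100 and
	 * sc_macaddr[1] = 0x55443322 on a little-endian kernel, which is
	 * exactly what the 16-bit and 32-bit loads in bcmeth_rx_input()
	 * return for a matching destination address.
	 */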
552
553 sc->sc_intmask = DESCPROTOERR | DATAERR | DESCERR;
554
555 /* 5. Load RCVADDR_LO with new pointer */
556 bcmeth_rxq_reset(sc, &sc->sc_rxq);
557
558 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
559 __SHIFTIN(sc->sc_rcvoffset, RCVCTL_RCVOFFSET)
560 | RCVCTL_PARITY_DIS
561 | RCVCTL_OFLOW_CONTINUE
562 | __SHIFTIN(3, RCVCTL_BURSTLEN));
563
564 /* 6. Load XMTADDR_LO with new pointer */
565 bcmeth_txq_reset(sc, &sc->sc_txq);
566
567 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, XMTCTL_DMA_ACT_INDEX
568 | XMTCTL_PARITY_DIS
569 | __SHIFTIN(3, XMTCTL_BURSTLEN));
570
571 /* 7. Setup other UNIMAC registers */
572 bcmeth_write_4(sc, UNIMAC_FRAME_LEN, sc->sc_maxfrm);
573 bcmeth_write_4(sc, UNIMAC_MAC_0, (uint32_t)(macstnaddr >> 0));
574 bcmeth_write_4(sc, UNIMAC_MAC_1, (uint32_t)(macstnaddr >> 32));
575 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, sc->sc_cmdcfg);
576
577 uint32_t devctl = bcmeth_read_4(sc, GMAC_DEVCONTROL);
578 devctl |= RGMII_LINK_STATUS_SEL | NWAY_AUTO_POLL_EN | TXARB_STRICT_MODE;
579 devctl &= ~FLOW_CTRL_MODE;
580 devctl &= ~MIB_RD_RESET_EN;
581 devctl &= ~RXQ_OVERFLOW_CTRL_SEL;
582 devctl &= ~CPU_FLOW_CTRL_ON;
583 bcmeth_write_4(sc, GMAC_DEVCONTROL, devctl);
584
585 /* Setup lazy receive (at most 1ms). */
586 const struct cpu_softc * const cpu = curcpu()->ci_softc;
587 sc->sc_rcvlazy = __SHIFTIN(4, INTRCVLAZY_FRAMECOUNT)
588 | __SHIFTIN(cpu->cpu_clk.clk_apb / 1000, INTRCVLAZY_TIMEOUT);
589 bcmeth_write_4(sc, GMAC_INTRCVLAZY, sc->sc_rcvlazy);
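
	/*
	 * For example, with a 250 MHz APB clock the timeout field gets
	 * 250000000 / 1000 = 250000 ticks, so receive interrupts are
	 * coalesced until 4 frames have arrived or 1ms has passed,
	 * whichever comes first.
	 */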
590
591 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */
592 sc->sc_intmask |= XMTINT_0 | XMTUF;
593 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl,
594 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl) | XMTCTL_ENABLE);
595
596
597 /* 12. Enable receive queues in RQUEUE, */
598 sc->sc_intmask |= RCVINT | RCVDESCUF | RCVFIFOOF;
599 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
600 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl) | RCVCTL_ENABLE);
601
602 bcmeth_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */
603
604 #if 0
605 aprint_normal_dev(sc->sc_dev,
606 "devctl=%#x ucmdcfg=%#x xmtctl=%#x rcvctl=%#x\n",
607 devctl, sc->sc_cmdcfg,
608 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl),
609 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl));
610 #endif
611
612 sc->sc_soft_flags = 0;
613
614 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
615
616 ifp->if_flags |= IFF_RUNNING;
617
618 return error;
619 }
620
621 static void
622 bcmeth_ifstop(struct ifnet *ifp, int disable)
623 {
624 struct bcmeth_softc * const sc = ifp->if_softc;
625 struct bcmeth_txqueue * const txq = &sc->sc_txq;
626 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
627
628 KASSERT(!cpu_intr_p());
629
630 sc->sc_soft_flags = 0;
631 sc->sc_work_flags = 0;
632
633 /* Disable Rx processing */
634 bcmeth_write_4(sc, rxq->rxq_reg_rcvctl,
635 bcmeth_read_4(sc, rxq->rxq_reg_rcvctl) & ~RCVCTL_ENABLE);
636
637 /* Disable Tx processing */
638 bcmeth_write_4(sc, txq->txq_reg_xmtctl,
639 bcmeth_read_4(sc, txq->txq_reg_xmtctl) & ~XMTCTL_ENABLE);
640
641 /* Disable all interrupts */
642 bcmeth_write_4(sc, GMAC_INTMASK, 0);
643
644 for (;;) {
645 uint32_t tx0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
646 uint32_t rx0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
647 if (__SHIFTOUT(tx0, XMTSTATE) == XMTSTATE_DIS
648 && __SHIFTOUT(rx0, RCVSTATE) == RCVSTATE_DIS)
649 break;
650 delay(50);
651 }
652 /*
653 * Now reset the controller.
654 *
655 * 3. Set SW_RESET bit in UNIMAC_COMMAND_CONFIG register
656 * 4. Clear SW_RESET bit in UNIMAC_COMMAND_CONFIG register
657 */
658 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, SW_RESET);
659 bcmeth_write_4(sc, GMAC_INTSTATUS, ~0);
660 sc->sc_intmask = 0;
661 ifp->if_flags &= ~IFF_RUNNING;
662
663 /*
664 * Let's consume any remaining transmitted packets. And if we are
665 * disabling the interface, purge ourselves of any untransmitted
666 * packets. But don't consume any received packets, just drop them.
667 * If we aren't disabling the interface, save the mbufs in the
668 * receive queue for reuse.
669 */
670 bcmeth_rxq_purge(sc, &sc->sc_rxq, disable);
671 bcmeth_txq_consume(sc, &sc->sc_txq);
672 if (disable) {
673 bcmeth_txq_purge(sc, &sc->sc_txq);
674 IF_PURGE(&ifp->if_snd);
675 }
676
677 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, 0);
678 }
679
680 static void
681 bcmeth_ifwatchdog(struct ifnet *ifp)
682 {
683 }
684
685 static int
686 bcmeth_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
687 {
688 struct bcmeth_softc *sc = ifp->if_softc;
689 struct ifreq * const ifr = data;
690 const int s = splnet();
691 int error;
692
693 switch (cmd) {
694 case SIOCSIFMEDIA:
695 case SIOCGIFMEDIA:
696 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
697 break;
698
699 default:
700 error = ether_ioctl(ifp, cmd, data);
701 if (error != ENETRESET)
702 break;
703
704 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
705 error = 0;
706 break;
707 }
708 error = bcmeth_ifinit(ifp);
709 break;
710 }
711
712 splx(s);
713 return error;
714 }
715
716 static void
717 bcmeth_rxq_desc_presync(
718 struct bcmeth_softc *sc,
719 struct bcmeth_rxqueue *rxq,
720 struct gmac_rxdb *rxdb,
721 size_t count)
722 {
723 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
724 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
725 BUS_DMASYNC_PREWRITE);
726 }
727
728 static void
729 bcmeth_rxq_desc_postsync(
730 struct bcmeth_softc *sc,
731 struct bcmeth_rxqueue *rxq,
732 struct gmac_rxdb *rxdb,
733 size_t count)
734 {
735 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
736 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
737 BUS_DMASYNC_POSTWRITE);
738 }
739
740 static void
741 bcmeth_txq_desc_presync(
742 struct bcmeth_softc *sc,
743 struct bcmeth_txqueue *txq,
744 struct gmac_txdb *txdb,
745 size_t count)
746 {
747 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
748 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
749 BUS_DMASYNC_PREWRITE);
750 }
751
752 static void
753 bcmeth_txq_desc_postsync(
754 struct bcmeth_softc *sc,
755 struct bcmeth_txqueue *txq,
756 struct gmac_txdb *txdb,
757 size_t count)
758 {
759 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
760 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
761 BUS_DMASYNC_POSTWRITE);
762 }
763
764 static bus_dmamap_t
765 bcmeth_mapcache_get(
766 struct bcmeth_softc *sc,
767 struct bcmeth_mapcache *dmc)
768 {
769 KASSERT(dmc->dmc_nmaps > 0);
770 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
771 return dmc->dmc_maps[--dmc->dmc_nmaps];
772 }
773
774 static void
775 bcmeth_mapcache_put(
776 struct bcmeth_softc *sc,
777 struct bcmeth_mapcache *dmc,
778 bus_dmamap_t map)
779 {
780 KASSERT(map != NULL);
781 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
782 dmc->dmc_maps[dmc->dmc_nmaps++] = map;
783 }
784
785 static void
786 bcmeth_mapcache_destroy(
787 struct bcmeth_softc *sc,
788 struct bcmeth_mapcache *dmc)
789 {
790 const size_t dmc_size =
791 offsetof(struct bcmeth_mapcache, dmc_maps[dmc->dmc_maxmaps]);
792
793 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
794 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
795 }
796 kmem_intr_free(dmc, dmc_size);
797 }
798
799 static int
800 bcmeth_mapcache_create(
801 struct bcmeth_softc *sc,
802 struct bcmeth_mapcache **dmc_p,
803 size_t maxmaps,
804 size_t maxmapsize,
805 size_t maxseg)
806 {
807 const size_t dmc_size =
808 offsetof(struct bcmeth_mapcache, dmc_maps[maxmaps]);
809 struct bcmeth_mapcache * const dmc =
810 kmem_intr_zalloc(dmc_size, KM_NOSLEEP);
811
812 dmc->dmc_maxmaps = maxmaps;
813 dmc->dmc_nmaps = maxmaps;
814 dmc->dmc_maxmapsize = maxmapsize;
815 dmc->dmc_maxseg = maxseg;
816
817 for (u_int i = 0; i < maxmaps; i++) {
818 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
819 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
820 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
821 if (error) {
822 aprint_error_dev(sc->sc_dev,
823 			    "failed to create dma map cache "
824 "entry %u of %zu: %d\n",
825 i, maxmaps, error);
826 while (i-- > 0) {
827 bus_dmamap_destroy(sc->sc_dmat,
828 dmc->dmc_maps[i]);
829 }
830 kmem_intr_free(dmc, dmc_size);
831 return error;
832 }
833 KASSERT(dmc->dmc_maps[i] != NULL);
834 }
835
836 *dmc_p = dmc;
837
838 return 0;
839 }
840
841 #if 0
842 static void
843 bcmeth_dmamem_free(
844 bus_dma_tag_t dmat,
845 size_t map_size,
846 bus_dma_segment_t *seg,
847 bus_dmamap_t map,
848 void *kvap)
849 {
850 bus_dmamap_destroy(dmat, map);
851 bus_dmamem_unmap(dmat, kvap, map_size);
852 bus_dmamem_free(dmat, seg, 1);
853 }
854 #endif
855
856 static int
857 bcmeth_dmamem_alloc(
858 bus_dma_tag_t dmat,
859 size_t map_size,
860 bus_dma_segment_t *seg,
861 bus_dmamap_t *map,
862 void **kvap)
863 {
864 int error;
865 int nseg;
866
867 *kvap = NULL;
868 *map = NULL;
869
870 error = bus_dmamem_alloc(dmat, map_size, 2*PAGE_SIZE, 0,
871 seg, 1, &nseg, 0);
872 if (error)
873 return error;
874
875 KASSERT(nseg == 1);
876
877 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 0);
878 if (error == 0) {
879 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
880 map);
881 if (error == 0) {
882 error = bus_dmamap_load(dmat, *map, *kvap, map_size,
883 NULL, 0);
884 if (error == 0)
885 return 0;
886 bus_dmamap_destroy(dmat, *map);
887 *map = NULL;
888 }
889 bus_dmamem_unmap(dmat, *kvap, map_size);
890 *kvap = NULL;
891 }
892 bus_dmamem_free(dmat, seg, nseg);
893 	return error;
894 }
895
896 static struct mbuf *
897 bcmeth_rx_buf_alloc(
898 struct bcmeth_softc *sc)
899 {
900 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
901 if (m == NULL) {
902 printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
903 return NULL;
904 }
905 MCLGET(m, M_DONTWAIT);
906 if ((m->m_flags & M_EXT) == 0) {
907 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
908 m_freem(m);
909 return NULL;
910 }
911 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
912
913 bus_dmamap_t map = bcmeth_mapcache_get(sc, sc->sc_rx_mapcache);
914 if (map == NULL) {
915 printf("%s:%d: %s\n", __func__, __LINE__, "map get");
916 m_freem(m);
917 return NULL;
918 }
919 M_SETCTX(m, map);
920 m->m_len = m->m_pkthdr.len = MCLBYTES;
921 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
922 BUS_DMA_READ | BUS_DMA_NOWAIT);
923 if (error) {
924 aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n",
925 error);
926 M_SETCTX(m, NULL);
927 m_freem(m);
928 bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
929 return NULL;
930 }
931 KASSERT(map->dm_mapsize == MCLBYTES);
932 #ifdef BCMETH_RCVMAGIC
933 *mtod(m, uint32_t *) = htole32(BCMETH_RCVMAGIC);
934 bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
935 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
936 bus_dmamap_sync(sc->sc_dmat, map, sizeof(uint32_t),
937 map->dm_mapsize - sizeof(uint32_t), BUS_DMASYNC_PREREAD);
938 #else
939 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
940 BUS_DMASYNC_PREREAD);
941 #endif
942
943 return m;
944 }
945
946 static void
947 bcmeth_rx_map_unload(
948 struct bcmeth_softc *sc,
949 struct mbuf *m)
950 {
951 KASSERT(m);
952 for (; m != NULL; m = m->m_next) {
953 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
954 KASSERT(map);
955 KASSERT(map->dm_mapsize == MCLBYTES);
956 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
957 BUS_DMASYNC_POSTREAD);
958 bus_dmamap_unload(sc->sc_dmat, map);
959 bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
960 M_SETCTX(m, NULL);
961 }
962 }
963
964 static bool
965 bcmeth_rxq_produce(
966 struct bcmeth_softc *sc,
967 struct bcmeth_rxqueue *rxq)
968 {
969 struct gmac_rxdb *producer = rxq->rxq_producer;
970 bool produced = false;
971
972 while (rxq->rxq_inuse < rxq->rxq_threshold) {
973 struct mbuf *m;
974 IF_DEQUEUE(&sc->sc_rx_bufcache, m);
975 if (m == NULL) {
976 m = bcmeth_rx_buf_alloc(sc);
977 if (m == NULL) {
978 printf("%s: bcmeth_rx_buf_alloc failed\n",
979 __func__);
980 break;
981 }
982 }
983 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
984 KASSERT(map);
985
986 producer->rxdb_buflen = htole32(MCLBYTES);
987 producer->rxdb_addrlo = htole32(map->dm_segs[0].ds_addr);
988 producer->rxdb_flags &= htole32(RXDB_FLAG_ET);
989 *rxq->rxq_mtail = m;
990 rxq->rxq_mtail = &m->m_next;
991 m->m_len = MCLBYTES;
992 m->m_next = NULL;
993 rxq->rxq_inuse++;
994 if (++producer == rxq->rxq_last) {
995 membar_producer();
996 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
997 rxq->rxq_last - rxq->rxq_producer);
998 producer = rxq->rxq_producer = rxq->rxq_first;
999 }
1000 produced = true;
1001 }
1002 if (produced) {
1003 membar_producer();
1004 if (producer != rxq->rxq_producer) {
1005 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
1006 producer - rxq->rxq_producer);
1007 rxq->rxq_producer = producer;
1008 }
1009 bcmeth_write_4(sc, rxq->rxq_reg_rcvptr,
1010 rxq->rxq_descmap->dm_segs[0].ds_addr
1011 + ((uintptr_t)producer & RCVPTR));
1012 }
1013 return true;
1014 }
1015
1016 static void
1017 bcmeth_rx_input(
1018 struct bcmeth_softc *sc,
1019 struct mbuf *m,
1020 uint32_t rxdb_flags)
1021 {
1022 struct ifnet * const ifp = &sc->sc_if;
1023
1024 bcmeth_rx_map_unload(sc, m);
1025
1026 m_adj(m, sc->sc_rcvoffset);
1027
1028 /*
1029 * If we are in promiscuous mode and this isn't a multicast, check the
1030 * destination address to make sure it matches our own. If it doesn't,
1031 * mark the packet as being received promiscuously.
1032 */
1033 if ((sc->sc_cmdcfg & PROMISC_EN)
1034 && (m->m_data[0] & 1) == 0
1035 && (*(uint16_t *)&m->m_data[0] != sc->sc_macaddr[0]
1036 || *(uint32_t *)&m->m_data[2] != sc->sc_macaddr[1])) {
1037 m->m_flags |= M_PROMISC;
1038 }
1039 m_set_rcvif(m, ifp);
1040
1041 ifp->if_ibytes += m->m_pkthdr.len;
1042
1043 /*
1044 	 * Let's give it to the network subsystem to deal with.
1045 */
1046 #ifdef BCMETH_MPSAFE
1047 mutex_exit(sc->sc_lock);
1048 if_input(ifp, m);
1049 mutex_enter(sc->sc_lock);
1050 #else
1051 int s = splnet();
1052 if_input(ifp, m);
1053 splx(s);
1054 #endif
1055 }
1056
1057 static bool
1058 bcmeth_rxq_consume(
1059 struct bcmeth_softc *sc,
1060 struct bcmeth_rxqueue *rxq,
1061 size_t atmost)
1062 {
1063 struct ifnet * const ifp = &sc->sc_if;
1064 struct gmac_rxdb *consumer = rxq->rxq_consumer;
1065 size_t rxconsumed = 0;
1066 bool didconsume = false;
1067
1068 while (atmost-- > 0) {
1069 if (consumer == rxq->rxq_producer) {
1070 KASSERT(rxq->rxq_inuse == 0);
1071 break;
1072 }
1073
1074 uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
1075 uint32_t currdscr = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
1076 if (consumer == rxq->rxq_first + currdscr) {
1077 break;
1078 }
1079 bcmeth_rxq_desc_postsync(sc, rxq, consumer, 1);
1080
1081 /*
1082 * We own this packet again. Copy the rxsts word from it.
1083 */
1084 rxconsumed++;
1085 didconsume = true;
1086 uint32_t rxsts;
1087 KASSERT(rxq->rxq_mhead != NULL);
1088 bus_dmamap_t map = M_GETCTX(rxq->rxq_mhead, bus_dmamap_t);
1089 bus_dmamap_sync(sc->sc_dmat, map, 0, arm_dcache_align,
1090 BUS_DMASYNC_POSTREAD);
1091 memcpy(&rxsts, rxq->rxq_mhead->m_data, 4);
1092 rxsts = le32toh(rxsts);
1093 #if 0
1094 KASSERTMSG(rxsts != BCMETH_RCVMAGIC, "currdscr=%u consumer=%zd",
1095 currdscr, consumer - rxq->rxq_first);
1096 #endif
1097
1098 /*
1099 * Get the count of descriptors. Fetch the correct number
1100 * of mbufs.
1101 */
1102 #ifdef BCMETH_RCVMAGIC
1103 size_t desc_count = rxsts != BCMETH_RCVMAGIC
1104 ? __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1 : 1;
1105 #else
1106 size_t desc_count = __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1;
1107 #endif
1108 struct mbuf *m = rxq->rxq_mhead;
1109 struct mbuf *m_last = m;
1110 for (size_t i = 1; i < desc_count; i++) {
1111 if (++consumer == rxq->rxq_last) {
1112 consumer = rxq->rxq_first;
1113 }
1114 KASSERTMSG(consumer != rxq->rxq_first + currdscr,
1115 "i=%zu rxsts=%#x desc_count=%zu currdscr=%u "
1116 "consumer=%zd", i, rxsts, desc_count, currdscr,
1117 consumer - rxq->rxq_first);
1118 m_last = m_last->m_next;
1119 }
1120
1121 /*
1122 * Now remove it/them from the list of enqueued mbufs.
1123 */
1124 if ((rxq->rxq_mhead = m_last->m_next) == NULL)
1125 rxq->rxq_mtail = &rxq->rxq_mhead;
1126 m_last->m_next = NULL;
1127
1128 #ifdef BCMETH_RCVMAGIC
1129 if (rxsts == BCMETH_RCVMAGIC) {
1130 ifp->if_ierrors++;
1131 if ((m->m_ext.ext_paddr >> 28) == 8) {
1132 BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_lo);
1133 } else {
1134 				BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_hi);
1135 }
1136 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1137 } else
1138 #endif /* BCMETH_RCVMAGIC */
1139 if (rxsts
1140 		    & (RXSTS_CRC_ERROR | RXSTS_OVERSIZED | RXSTS_PKT_OVERFLOW)) {
1141 aprint_error_dev(sc->sc_dev,
1142 "[%zu]: count=%zu rxsts=%#x\n",
1143 consumer - rxq->rxq_first, desc_count, rxsts);
1144 /*
1145 * We encountered an error, take the mbufs and add them
1146 * to the rx bufcache so we can quickly reuse them.
1147 */
1148 ifp->if_ierrors++;
1149 do {
1150 struct mbuf *m0 = m->m_next;
1151 m->m_next = NULL;
1152 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1153 m = m0;
1154 } while (m);
1155 } else {
1156 uint32_t framelen = __SHIFTOUT(rxsts, RXSTS_FRAMELEN);
1157 framelen += sc->sc_rcvoffset;
1158 m->m_pkthdr.len = framelen;
1159 if (desc_count == 1) {
1160 KASSERT(framelen <= MCLBYTES);
1161 m->m_len = framelen;
1162 } else {
1163 m_last->m_len = framelen & (MCLBYTES - 1);
1164 }
1165
1166 #ifdef BCMETH_MPSAFE
1167 /*
1168 * Wrap at the last entry!
1169 */
1170 if (++consumer == rxq->rxq_last) {
1171 KASSERT(consumer[-1].rxdb_flags
1172 & htole32(RXDB_FLAG_ET));
1173 rxq->rxq_consumer = rxq->rxq_first;
1174 } else {
1175 rxq->rxq_consumer = consumer;
1176 }
1177 rxq->rxq_inuse -= rxconsumed;
1178 #endif /* BCMETH_MPSAFE */
1179
1180 /*
1181 * Receive the packet (which releases our lock)
1182 */
1183 bcmeth_rx_input(sc, m, rxsts);
1184
1185 #ifdef BCMETH_MPSAFE
1186 /*
1187 * Since we had to give up our lock, we need to
1188 * refresh these.
1189 */
1190 consumer = rxq->rxq_consumer;
1191 rxconsumed = 0;
1192 continue;
1193 #endif /* BCMETH_MPSAFE */
1194 }
1195
1196 /*
1197 * Wrap at the last entry!
1198 */
1199 if (++consumer == rxq->rxq_last) {
1200 KASSERT(consumer[-1].rxdb_flags & htole32(RXDB_FLAG_ET));
1201 consumer = rxq->rxq_first;
1202 }
1203 }
1204
1205 /*
1206 * Update queue info.
1207 */
1208 rxq->rxq_consumer = consumer;
1209 rxq->rxq_inuse -= rxconsumed;
1210
1211 /*
1212 * Did we consume anything?
1213 */
1214 return didconsume;
1215 }
1216
1217 static void
1218 bcmeth_rxq_purge(
1219 struct bcmeth_softc *sc,
1220 struct bcmeth_rxqueue *rxq,
1221 bool discard)
1222 {
1223 struct mbuf *m;
1224
1225 if ((m = rxq->rxq_mhead) != NULL) {
1226 if (discard) {
1227 bcmeth_rx_map_unload(sc, m);
1228 m_freem(m);
1229 } else {
1230 while (m != NULL) {
1231 struct mbuf *m0 = m->m_next;
1232 m->m_next = NULL;
1233 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1234 m = m0;
1235 }
1236 }
1237 }
1238
1239 rxq->rxq_mhead = NULL;
1240 rxq->rxq_mtail = &rxq->rxq_mhead;
1241 rxq->rxq_inuse = 0;
1242 }
1243
1244 static void
1245 bcmeth_rxq_reset(
1246 struct bcmeth_softc *sc,
1247 struct bcmeth_rxqueue *rxq)
1248 {
1249 /*
1250 * sync all the descriptors
1251 */
1252 bcmeth_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
1253 rxq->rxq_last - rxq->rxq_first);
1254
1255 /*
1256 * Make sure we own all descriptors in the ring.
1257 */
1258 struct gmac_rxdb *rxdb;
1259 for (rxdb = rxq->rxq_first; rxdb < rxq->rxq_last - 1; rxdb++) {
1260 rxdb->rxdb_flags = htole32(RXDB_FLAG_IC);
1261 }
1262
1263 /*
1264 * Last descriptor has the wrap flag.
1265 */
1266 rxdb->rxdb_flags = htole32(RXDB_FLAG_ET | RXDB_FLAG_IC);
1267
1268 /*
1269 * Reset the producer consumer indexes.
1270 */
1271 rxq->rxq_consumer = rxq->rxq_first;
1272 rxq->rxq_producer = rxq->rxq_first;
1273 rxq->rxq_inuse = 0;
1274 if (rxq->rxq_threshold < BCMETH_MINRXMBUFS)
1275 rxq->rxq_threshold = BCMETH_MINRXMBUFS;
1276
1277 sc->sc_intmask |= RCVINT | RCVFIFOOF | RCVDESCUF;
1278
1279 /*
1280 * Restart the receiver at the first descriptor
1281 */
1282 bcmeth_write_4(sc, rxq->rxq_reg_rcvaddrlo,
1283 rxq->rxq_descmap->dm_segs[0].ds_addr);
1284 }
1285
1286 static int
1287 bcmeth_rxq_attach(
1288 struct bcmeth_softc *sc,
1289 struct bcmeth_rxqueue *rxq,
1290 u_int qno)
1291 {
1292 size_t desc_count = BCMETH_RINGSIZE / sizeof(rxq->rxq_first[0]);
1293 int error;
1294 void *descs;
1295
1296 KASSERT(desc_count == 256 || desc_count == 512);
1297
1298 error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE,
1299 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
1300 if (error)
1301 return error;
1302
1303 memset(descs, 0, BCMETH_RINGSIZE);
1304 rxq->rxq_first = descs;
1305 rxq->rxq_last = rxq->rxq_first + desc_count;
1306 rxq->rxq_consumer = descs;
1307 rxq->rxq_producer = descs;
1308
1309 bcmeth_rxq_purge(sc, rxq, true);
1310 bcmeth_rxq_reset(sc, rxq);
1311
1312 rxq->rxq_reg_rcvaddrlo = GMAC_RCVADDR_LOW;
1313 rxq->rxq_reg_rcvctl = GMAC_RCVCONTROL;
1314 rxq->rxq_reg_rcvptr = GMAC_RCVPTR;
1315 rxq->rxq_reg_rcvsts0 = GMAC_RCVSTATUS0;
1316 rxq->rxq_reg_rcvsts1 = GMAC_RCVSTATUS1;
1317
1318 return 0;
1319 }
1320
1321 static bool
1322 bcmeth_txq_active_p(
1323 struct bcmeth_softc * const sc,
1324 struct bcmeth_txqueue *txq)
1325 {
1326 return !IF_IS_EMPTY(&txq->txq_mbufs);
1327 }
1328
1329 static bool
1330 bcmeth_txq_fillable_p(
1331 struct bcmeth_softc * const sc,
1332 struct bcmeth_txqueue *txq)
1333 {
1334 return txq->txq_free >= txq->txq_threshold;
1335 }
1336
1337 static int
1338 bcmeth_txq_attach(
1339 struct bcmeth_softc *sc,
1340 struct bcmeth_txqueue *txq,
1341 u_int qno)
1342 {
1343 size_t desc_count = BCMETH_RINGSIZE / sizeof(txq->txq_first[0]);
1344 int error;
1345 void *descs;
1346
1347 KASSERT(desc_count == 256 || desc_count == 512);
1348
1349 error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE,
1350 &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
1351 if (error)
1352 return error;
1353
1354 memset(descs, 0, BCMETH_RINGSIZE);
1355 txq->txq_first = descs;
1356 txq->txq_last = txq->txq_first + desc_count;
1357 txq->txq_consumer = descs;
1358 txq->txq_producer = descs;
1359
1360 IFQ_SET_MAXLEN(&txq->txq_mbufs, BCMETH_MAXTXMBUFS);
1361
1362 txq->txq_reg_xmtaddrlo = GMAC_XMTADDR_LOW;
1363 txq->txq_reg_xmtctl = GMAC_XMTCONTROL;
1364 txq->txq_reg_xmtptr = GMAC_XMTPTR;
1365 txq->txq_reg_xmtsts0 = GMAC_XMTSTATUS0;
1366 txq->txq_reg_xmtsts1 = GMAC_XMTSTATUS1;
1367
1368 bcmeth_txq_reset(sc, txq);
1369
1370 return 0;
1371 }
1372
1373 static int
1374 bcmeth_txq_map_load(
1375 struct bcmeth_softc *sc,
1376 struct bcmeth_txqueue *txq,
1377 struct mbuf *m)
1378 {
1379 bus_dmamap_t map;
1380 int error;
1381
1382 map = M_GETCTX(m, bus_dmamap_t);
1383 if (map != NULL)
1384 return 0;
1385
1386 map = bcmeth_mapcache_get(sc, sc->sc_tx_mapcache);
1387 if (map == NULL)
1388 return ENOMEM;
1389
1390 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1391 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1392 if (error)
1393 return error;
1394
1395 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
1396 BUS_DMASYNC_PREWRITE);
1397 M_SETCTX(m, map);
1398 return 0;
1399 }
1400
1401 static void
1402 bcmeth_txq_map_unload(
1403 struct bcmeth_softc *sc,
1404 struct bcmeth_txqueue *txq,
1405 struct mbuf *m)
1406 {
1407 KASSERT(m);
1408 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1409 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1410 BUS_DMASYNC_POSTWRITE);
1411 bus_dmamap_unload(sc->sc_dmat, map);
1412 bcmeth_mapcache_put(sc, sc->sc_tx_mapcache, map);
1413 }
1414
1415 static bool
1416 bcmeth_txq_produce(
1417 struct bcmeth_softc *sc,
1418 struct bcmeth_txqueue *txq,
1419 struct mbuf *m)
1420 {
1421 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1422
1423 if (map->dm_nsegs > txq->txq_free)
1424 return false;
1425
1426 /*
1427 * TCP Offload flag must be set in the first descriptor.
1428 */
1429 struct gmac_txdb *producer = txq->txq_producer;
1430 uint32_t first_flags = TXDB_FLAG_SF;
1431 uint32_t last_flags = TXDB_FLAG_EF;
1432
1433 /*
1434 * If we've produced enough descriptors without consuming any
1435 * we need to ask for an interrupt to reclaim some.
1436 */
1437 txq->txq_lastintr += map->dm_nsegs;
1438 if (txq->txq_lastintr >= txq->txq_threshold
1439 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
1440 txq->txq_lastintr = 0;
1441 last_flags |= TXDB_FLAG_IC;
1442 }
1443
1444 KASSERT(producer != txq->txq_last);
1445
1446 struct gmac_txdb *start = producer;
1447 size_t count = map->dm_nsegs;
1448 producer->txdb_flags |= htole32(first_flags);
1449 producer->txdb_addrlo = htole32(map->dm_segs[0].ds_addr);
1450 producer->txdb_buflen = htole32(map->dm_segs[0].ds_len);
1451 for (u_int i = 1; i < map->dm_nsegs; i++) {
1452 #if 0
1453 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
1454 le32toh(producer->txdb_flags),
1455 le32toh(producer->txdb_buflen),
1456 le32toh(producer->txdb_addrlo),
1457 le32toh(producer->txdb_addrhi));
1458 #endif
1459 if (__predict_false(++producer == txq->txq_last)) {
1460 bcmeth_txq_desc_presync(sc, txq, start,
1461 txq->txq_last - start);
1462 count -= txq->txq_last - start;
1463 producer = txq->txq_first;
1464 start = txq->txq_first;
1465 }
1466 producer->txdb_addrlo = htole32(map->dm_segs[i].ds_addr);
1467 producer->txdb_buflen = htole32(map->dm_segs[i].ds_len);
1468 }
1469 producer->txdb_flags |= htole32(last_flags);
1470 #if 0
1471 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
1472 le32toh(producer->txdb_flags), le32toh(producer->txdb_buflen),
1473 le32toh(producer->txdb_addrlo), le32toh(producer->txdb_addrhi));
1474 #endif
1475 if (count)
1476 bcmeth_txq_desc_presync(sc, txq, start, count);
1477
1478 /*
1479 * Reduce free count by the number of segments we consumed.
1480 */
1481 txq->txq_free -= map->dm_nsegs;
1482 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
1483 KASSERT(map->dm_nsegs == 1
1484 || (txq->txq_producer->txdb_flags & htole32(TXDB_FLAG_EF)) == 0);
1485 KASSERT(producer->txdb_flags & htole32(TXDB_FLAG_EF));
1486
1487 #if 0
1488 printf("%s: mbuf %p: produced a %u byte packet in %u segments "
1489 "(%zd..%zd)\n", __func__, m, m->m_pkthdr.len, map->dm_nsegs,
1490 txq->txq_producer - txq->txq_first, producer - txq->txq_first);
1491 #endif
1492
1493 if (producer + 1 == txq->txq_last)
1494 txq->txq_producer = txq->txq_first;
1495 else
1496 txq->txq_producer = producer + 1;
1497 IF_ENQUEUE(&txq->txq_mbufs, m);
1498
1499 /*
1500 * Let the transmitter know there's more to do
1501 */
1502 bcmeth_write_4(sc, txq->txq_reg_xmtptr,
1503 txq->txq_descmap->dm_segs[0].ds_addr
1504 + ((uintptr_t)txq->txq_producer & XMT_LASTDSCR));
1505
1506 return true;
1507 }
1508
1509 static struct mbuf *
1510 bcmeth_copy_packet(struct mbuf *m)
1511 {
1512 struct mbuf *mext = NULL;
1513 size_t misalignment = 0;
1514 size_t hlen = 0;
1515
1516 for (mext = m; mext != NULL; mext = mext->m_next) {
1517 if (mext->m_flags & M_EXT) {
1518 misalignment = mtod(mext, vaddr_t) & arm_dcache_align;
1519 break;
1520 }
1521 		hlen += mext->m_len;
1522 }
1523
1524 struct mbuf *n = m->m_next;
1525 if (m != mext && hlen + misalignment <= MHLEN && false) {
1526 KASSERT(m->m_pktdat <= m->m_data
1527 && m->m_data <= &m->m_pktdat[MHLEN - m->m_len]);
1528 size_t oldoff = m->m_data - m->m_pktdat;
1529 size_t off;
1530 if (mext == NULL) {
1531 off = (oldoff + hlen > MHLEN) ? 0 : oldoff;
1532 } else {
1533 off = MHLEN - (hlen + misalignment);
1534 }
1535 KASSERT(off + hlen + misalignment <= MHLEN);
1536 if (((oldoff ^ off) & arm_dcache_align) != 0 || off < oldoff) {
1537 memmove(&m->m_pktdat[off], m->m_data, m->m_len);
1538 m->m_data = &m->m_pktdat[off];
1539 }
1540 m_copydata(n, 0, hlen - m->m_len, &m->m_data[m->m_len]);
1541 m->m_len = hlen;
1542 m->m_next = mext;
1543 while (n != mext) {
1544 n = m_free(n);
1545 }
1546 return m;
1547 }
1548
1549 struct mbuf *m0 = m_gethdr(M_DONTWAIT, m->m_type);
1550 if (m0 == NULL) {
1551 return NULL;
1552 }
1553 m_copy_pkthdr(m0, m);
1554 MCLAIM(m0, m->m_owner);
1555 if (m0->m_pkthdr.len > MHLEN) {
1556 MCLGET(m0, M_DONTWAIT);
1557 if ((m0->m_flags & M_EXT) == 0) {
1558 m_freem(m0);
1559 return NULL;
1560 }
1561 }
1562 m0->m_len = m->m_pkthdr.len;
1563 m_copydata(m, 0, m0->m_len, mtod(m0, void *));
1564 m_freem(m);
1565 return m0;
1566 }
1567
1568 static bool
1569 bcmeth_txq_enqueue(
1570 struct bcmeth_softc *sc,
1571 struct bcmeth_txqueue *txq)
1572 {
1573 for (;;) {
1574 if (IF_QFULL(&txq->txq_mbufs))
1575 return false;
1576 struct mbuf *m = txq->txq_next;
1577 if (m == NULL) {
1578 int s = splnet();
1579 IF_DEQUEUE(&sc->sc_if.if_snd, m);
1580 splx(s);
1581 if (m == NULL)
1582 return true;
1583 M_SETCTX(m, NULL);
1584 } else {
1585 txq->txq_next = NULL;
1586 }
1587 /*
1588 * If LINK2 is set and this packet uses multiple mbufs,
1589 * consolidate it into a single mbuf.
1590 */
1591 if (m->m_next != NULL && (sc->sc_if.if_flags & IFF_LINK2)) {
1592 struct mbuf *m0 = bcmeth_copy_packet(m);
1593 if (m0 == NULL) {
1594 txq->txq_next = m;
1595 return true;
1596 }
1597 m = m0;
1598 }
1599 int error = bcmeth_txq_map_load(sc, txq, m);
1600 if (error) {
1601 aprint_error_dev(sc->sc_dev,
1602 "discarded packet due to "
1603 "dmamap load failure: %d\n", error);
1604 m_freem(m);
1605 continue;
1606 }
1607 KASSERT(txq->txq_next == NULL);
1608 if (!bcmeth_txq_produce(sc, txq, m)) {
1609 txq->txq_next = m;
1610 return false;
1611 }
1612 KASSERT(txq->txq_next == NULL);
1613 }
1614 }
1615
1616 static bool
1617 bcmeth_txq_consume(
1618 struct bcmeth_softc *sc,
1619 struct bcmeth_txqueue *txq)
1620 {
1621 struct ifnet * const ifp = &sc->sc_if;
1622 struct gmac_txdb *consumer = txq->txq_consumer;
1623 size_t txfree = 0;
1624
1625 #if 0
1626 printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
1627 #endif
1628
1629 for (;;) {
1630 if (consumer == txq->txq_producer) {
1631 txq->txq_consumer = consumer;
1632 txq->txq_free += txfree;
1633 txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree);
1634 #if 0
1635 printf("%s: empty: freed %zu descriptors going from "
1636 "%zu to %zu\n", __func__, txfree,
1637 txq->txq_free - txfree, txq->txq_free);
1638 #endif
1639 KASSERT(txq->txq_lastintr == 0);
1640 KASSERT(txq->txq_free
1641 == txq->txq_last - txq->txq_first - 1);
1642 return true;
1643 }
1644 bcmeth_txq_desc_postsync(sc, txq, consumer, 1);
1645 uint32_t s0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
1646 if (consumer == txq->txq_first + __SHIFTOUT(s0, XMT_CURRDSCR)) {
1647 txq->txq_consumer = consumer;
1648 txq->txq_free += txfree;
1649 txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree);
1650 #if 0
1651 printf("%s: freed %zu descriptors\n",
1652 __func__, txfree);
1653 #endif
1654 return bcmeth_txq_fillable_p(sc, txq);
1655 }
1656
1657 /*
1658 * If this is the last descriptor in the chain, get the
1659 * mbuf, free its dmamap, and free the mbuf chain itself.
1660 */
1661 const uint32_t txdb_flags = le32toh(consumer->txdb_flags);
1662 if (txdb_flags & TXDB_FLAG_EF) {
1663 struct mbuf *m;
1664
1665 IF_DEQUEUE(&txq->txq_mbufs, m);
1666 KASSERT(m);
1667 bcmeth_txq_map_unload(sc, txq, m);
1668 #if 0
1669 printf("%s: mbuf %p: consumed a %u byte packet\n",
1670 __func__, m, m->m_pkthdr.len);
1671 #endif
1672 bpf_mtap(ifp, m, BPF_D_OUT);
1673 ifp->if_opackets++;
1674 ifp->if_obytes += m->m_pkthdr.len;
1675 if (m->m_flags & M_MCAST)
1676 ifp->if_omcasts++;
1677 m_freem(m);
1678 }
1679
1680 /*
1681 * We own this packet again. Clear all flags except wrap.
1682 */
1683 txfree++;
1684
1685 /*
1686 * Wrap at the last entry!
1687 */
1688 if (txdb_flags & TXDB_FLAG_ET) {
1689 consumer->txdb_flags = htole32(TXDB_FLAG_ET);
1690 KASSERT(consumer + 1 == txq->txq_last);
1691 consumer = txq->txq_first;
1692 } else {
1693 consumer->txdb_flags = 0;
1694 consumer++;
1695 KASSERT(consumer < txq->txq_last);
1696 }
1697 }
1698 }
1699
1700 static void
1701 bcmeth_txq_purge(
1702 struct bcmeth_softc *sc,
1703 struct bcmeth_txqueue *txq)
1704 {
1705 struct mbuf *m;
1706 KASSERT((bcmeth_read_4(sc, UNIMAC_COMMAND_CONFIG) & TX_ENA) == 0);
1707
1708 for (;;) {
1709 IF_DEQUEUE(&txq->txq_mbufs, m);
1710 if (m == NULL)
1711 break;
1712 bcmeth_txq_map_unload(sc, txq, m);
1713 m_freem(m);
1714 }
1715 if ((m = txq->txq_next) != NULL) {
1716 txq->txq_next = NULL;
1717 bcmeth_txq_map_unload(sc, txq, m);
1718 m_freem(m);
1719 }
1720 }
1721
1722 static void
1723 bcmeth_txq_reset(
1724 struct bcmeth_softc *sc,
1725 struct bcmeth_txqueue *txq)
1726 {
1727 /*
1728 * sync all the descriptors
1729 */
1730 bcmeth_txq_desc_postsync(sc, txq, txq->txq_first,
1731 txq->txq_last - txq->txq_first);
1732
1733 /*
1734 * Make sure we own all descriptors in the ring.
1735 */
1736 struct gmac_txdb *txdb;
1737 for (txdb = txq->txq_first; txdb < txq->txq_last - 1; txdb++) {
1738 txdb->txdb_flags = 0;
1739 }
1740
1741 /*
1742 * Last descriptor has the wrap flag.
1743 */
1744 txdb->txdb_flags = htole32(TXDB_FLAG_ET);
1745
1746 /*
1747 * Reset the producer consumer indexes.
1748 */
1749 txq->txq_consumer = txq->txq_first;
1750 txq->txq_producer = txq->txq_first;
1751 txq->txq_free = txq->txq_last - txq->txq_first - 1;
1752 txq->txq_threshold = txq->txq_free / 2;
1753 txq->txq_lastintr = 0;
1754
1755 /*
1756 * What do we want to get interrupted on?
1757 */
1758 sc->sc_intmask |= XMTINT_0 | XMTUF;
1759
1760 /*
1761 	 * Restart the transmitter at the first descriptor
1762 */
1763 bcmeth_write_4(sc, txq->txq_reg_xmtaddrlo,
1764 txq->txq_descmap->dm_segs->ds_addr);
1765 }
1766
1767 static void
1768 bcmeth_ifstart(struct ifnet *ifp)
1769 {
1770 struct bcmeth_softc * const sc = ifp->if_softc;
1771
1772 if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) {
1773 return;
1774 }
1775
1776 #ifdef BCMETH_MPSAFETX
1777 if (cpu_intr_p()) {
1778 #endif
1779 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
1780 softint_schedule(sc->sc_soft_ih);
1781 #ifdef BCMETH_MPSAFETX
1782 } else {
1783 /*
1784 * Either we are in a softintr thread already or some other
1785 * thread so just borrow it to do the send and save ourselves
1786 * the overhead of a fast soft int.
1787 */
1788 bcmeth_soft_txintr(sc);
1789 }
1790 #endif
1791 }
1792
1793 int
1794 bcmeth_intr(void *arg)
1795 {
1796 struct bcmeth_softc * const sc = arg;
1797 uint32_t soft_flags = 0;
1798 uint32_t work_flags = 0;
1799 int rv = 0;
1800
1801 mutex_enter(sc->sc_hwlock);
1802
1803 uint32_t intmask = sc->sc_intmask;
1804 BCMETH_EVCNT_INCR(sc->sc_ev_intr);
1805
1806 for (;;) {
1807 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
1808 intstatus &= intmask;
1809 bcmeth_write_4(sc, GMAC_INTSTATUS, intstatus); /* write 1 to clear */
1810 if (intstatus == 0) {
1811 break;
1812 }
1813 #if 0
1814 aprint_normal_dev(sc->sc_dev, "%s: intstatus=%#x intmask=%#x\n",
1815 __func__, intstatus, bcmeth_read_4(sc, GMAC_INTMASK));
1816 #endif
1817 if (intstatus & RCVINT) {
1818 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
1819 intmask &= ~RCVINT;
1820
1821 uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
1822 uint32_t descs = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
1823 if (descs < rxq->rxq_consumer - rxq->rxq_first) {
1824 /*
1825 * We wrapped at the end so count how far
1826 * we are from the end.
1827 */
1828 descs += rxq->rxq_last - rxq->rxq_consumer;
1829 } else {
1830 descs -= rxq->rxq_consumer - rxq->rxq_first;
1831 }
1832 /*
1833 			 * If we "timed out" we can't be hogging the CPU, so
1834 			 * use softints. If we exceeded the frame count then
1835 			 * we might be hogging, so let the workqueue deal with them.
1836 */
1837 const uint32_t framecount = __SHIFTOUT(sc->sc_rcvlazy,
1838 INTRCVLAZY_FRAMECOUNT);
1839 if (descs < framecount
1840 || (curcpu()->ci_curlwp->l_flag & LW_IDLE)) {
1841 soft_flags |= SOFT_RXINTR;
1842 } else {
1843 work_flags |= WORK_RXINTR;
1844 }
1845 }
1846
1847 if (intstatus & XMTINT_0) {
1848 intmask &= ~XMTINT_0;
1849 soft_flags |= SOFT_TXINTR;
1850 }
1851
1852 if (intstatus & RCVDESCUF) {
1853 intmask &= ~RCVDESCUF;
1854 work_flags |= WORK_RXUNDERFLOW;
1855 }
1856
1857 intstatus &= intmask;
1858 if (intstatus) {
1859 aprint_error_dev(sc->sc_dev,
1860 "intr: intstatus=%#x\n", intstatus);
1861 aprint_error_dev(sc->sc_dev,
1862 "rcvbase=%p/%#lx rcvptr=%#x rcvsts=%#x/%#x\n",
1863 sc->sc_rxq.rxq_first,
1864 sc->sc_rxq.rxq_descmap->dm_segs[0].ds_addr,
1865 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvptr),
1866 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts0),
1867 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts1));
1868 aprint_error_dev(sc->sc_dev,
1869 "xmtbase=%p/%#lx xmtptr=%#x xmtsts=%#x/%#x\n",
1870 sc->sc_txq.txq_first,
1871 sc->sc_txq.txq_descmap->dm_segs[0].ds_addr,
1872 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtptr),
1873 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts0),
1874 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts1));
1875 intmask &= ~intstatus;
1876 work_flags |= WORK_REINIT;
1877 break;
1878 }
1879 }
1880
1881 if (intmask != sc->sc_intmask) {
1882 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1883 }
1884
1885 if (work_flags) {
1886 if (sc->sc_work_flags == 0) {
1887 workqueue_enqueue(sc->sc_workq, &sc->sc_work, NULL);
1888 }
1889 atomic_or_32(&sc->sc_work_flags, work_flags);
1890 rv = 1;
1891 }
1892
1893 if (soft_flags) {
1894 if (sc->sc_soft_flags == 0) {
1895 softint_schedule(sc->sc_soft_ih);
1896 }
1897 atomic_or_32(&sc->sc_soft_flags, soft_flags);
1898 rv = 1;
1899 }
1900
1901 mutex_exit(sc->sc_hwlock);
1902
1903 return rv;
1904 }
1905
1906 #ifdef BCMETH_MPSAFETX
1907 void
1908 bcmeth_soft_txintr(struct bcmeth_softc *sc)
1909 {
1910 mutex_enter(sc->sc_lock);
1911 /*
1912 * Let's do what we came here for. Consume transmitted
1913 * packets off the transmit ring.
1914 */
1915 if (!bcmeth_txq_consume(sc, &sc->sc_txq)
1916 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
1917 BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall);
1918 sc->sc_if.if_flags |= IFF_OACTIVE;
1919 } else {
1920 sc->sc_if.if_flags &= ~IFF_OACTIVE;
1921 }
1922 if (sc->sc_if.if_flags & IFF_RUNNING) {
1923 mutex_spin_enter(sc->sc_hwlock);
1924 sc->sc_intmask |= XMTINT_0;
1925 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1926 mutex_spin_exit(sc->sc_hwlock);
1927 }
1928 mutex_exit(sc->sc_lock);
1929 }
1930 #endif /* BCMETH_MPSAFETX */
1931
1932 void
1933 bcmeth_soft_intr(void *arg)
1934 {
1935 struct bcmeth_softc * const sc = arg;
1936 struct ifnet * const ifp = &sc->sc_if;
1937 uint32_t intmask = 0;
1938
1939 mutex_enter(sc->sc_lock);
1940
1941 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);
1942
1943 BCMETH_EVCNT_INCR(sc->sc_ev_soft_intr);
1944
1945 if ((soft_flags & SOFT_TXINTR)
1946 || bcmeth_txq_active_p(sc, &sc->sc_txq)) {
1947 /*
1948 * Let's do what we came here for. Consume transmitted
1949 * packets off the transmit ring.
1950 */
1951 if (!bcmeth_txq_consume(sc, &sc->sc_txq)
1952 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
1953 BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall);
1954 ifp->if_flags |= IFF_OACTIVE;
1955 } else {
1956 ifp->if_flags &= ~IFF_OACTIVE;
1957 }
1958 intmask |= XMTINT_0;
1959 }
1960
1961 if (soft_flags & SOFT_RXINTR) {
1962 /*
1963 * Let's consume
1964 */
1965 while (bcmeth_rxq_consume(sc, &sc->sc_rxq,
1966 sc->sc_rxq.rxq_threshold / 4)) {
1967 /*
1968 * We've consumed a quarter of the ring and still have
1969 * more to do. Refill the ring.
1970 */
1971 bcmeth_rxq_produce(sc, &sc->sc_rxq);
1972 }
1973 intmask |= RCVINT;
1974 }
1975
1976 if (ifp->if_flags & IFF_RUNNING) {
1977 bcmeth_rxq_produce(sc, &sc->sc_rxq);
1978 mutex_spin_enter(sc->sc_hwlock);
1979 sc->sc_intmask |= intmask;
1980 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1981 mutex_spin_exit(sc->sc_hwlock);
1982 }
1983
1984 mutex_exit(sc->sc_lock);
1985 }
1986
1987 void
1988 bcmeth_worker(struct work *wk, void *arg)
1989 {
1990 struct bcmeth_softc * const sc = arg;
1991 struct ifnet * const ifp = &sc->sc_if;
1992 uint32_t intmask = 0;
1993
1994 mutex_enter(sc->sc_lock);
1995
1996 BCMETH_EVCNT_INCR(sc->sc_ev_work);
1997
1998 uint32_t work_flags = atomic_swap_32(&sc->sc_work_flags, 0);
1999 if (work_flags & WORK_REINIT) {
2000 int s = splnet();
2001 sc->sc_soft_flags = 0;
2002 bcmeth_ifinit(ifp);
2003 splx(s);
2004 work_flags &= ~WORK_RXUNDERFLOW;
2005 }
2006
2007 if (work_flags & WORK_RXUNDERFLOW) {
2008 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
2009 size_t threshold = 5 * rxq->rxq_threshold / 4;
2010 if (threshold >= rxq->rxq_last - rxq->rxq_first) {
2011 threshold = rxq->rxq_last - rxq->rxq_first - 1;
2012 } else {
2013 intmask |= RCVDESCUF;
2014 }
2015 aprint_normal_dev(sc->sc_dev,
2016 "increasing receive buffers from %zu to %zu\n",
2017 rxq->rxq_threshold, threshold);
2018 rxq->rxq_threshold = threshold;
2019 }
2020
2021 if (work_flags & WORK_RXINTR) {
2022 /*
2023 * Let's consume
2024 */
2025 while (bcmeth_rxq_consume(sc, &sc->sc_rxq,
2026 sc->sc_rxq.rxq_threshold / 4)) {
2027 /*
2028 * We've consumed a quarter of the ring and still have
2029 * more to do. Refill the ring.
2030 */
2031 bcmeth_rxq_produce(sc, &sc->sc_rxq);
2032 }
2033 intmask |= RCVINT;
2034 }
2035
2036 if (ifp->if_flags & IFF_RUNNING) {
2037 bcmeth_rxq_produce(sc, &sc->sc_rxq);
2038 #if 0
2039 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
2040 if (intstatus & RCVINT) {
2041 bcmeth_write_4(sc, GMAC_INTSTATUS, RCVINT);
2042 work_flags |= WORK_RXINTR;
2043 continue;
2044 }
2045 #endif
2046 mutex_spin_enter(sc->sc_hwlock);
2047 sc->sc_intmask |= intmask;
2048 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
2049 mutex_spin_exit(sc->sc_hwlock);
2050 }
2051
2052 mutex_exit(sc->sc_lock);
2053 }
2054