/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ARM32_BUS_DMA_PRIVATE
#define GMAC_PRIVATE

#include "locators.h"
#include "opt_broadcom.h"

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: bcm53xx_eth.c,v 1.22 2013/01/10 22:07:19 matt Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <net/if_dl.h>

#include <net/bpf.h>

#include <dev/mii/miivar.h>

#include <arm/broadcom/bcm53xx_reg.h>
#include <arm/broadcom/bcm53xx_var.h>

//#define BCMETH_MPSAFE

#ifdef BCMETH_COUNTERS
#define	BCMETH_EVCNT_ADD(a,b)	((void)((a).ev_count += (b)))
#else
#define	BCMETH_EVCNT_ADD(a,b)	do { } while (/*CONSTCOND*/0)
#endif
#define	BCMETH_EVCNT_INCR(a)	BCMETH_EVCNT_ADD((a), 1)

#define	BCMETH_MAXTXMBUFS	128
#define	BCMETH_NTXSEGS		30
#define	BCMETH_MAXRXMBUFS	255
#define	BCMETH_MINRXMBUFS	64
#define	BCMETH_NRXSEGS		1
#define	BCMETH_RINGSIZE		PAGE_SIZE
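/*
 * BCMETH_RINGSIZE: one page of descriptors per ring.  Assuming a 4 KiB
 * page and 16-byte gmac descriptors, that works out to 256 entries; the
 * queue attach routines below KASSERT that it is exactly 256 or 512.
 */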

#if 1
#define	BCMETH_RCVMAGIC		0xfeedface
#endif
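
/*
 * When BCMETH_RCVMAGIC is defined, each freshly loaded receive buffer has
 * its first word seeded with this magic value.  The rx consumer uses it to
 * detect descriptors the DMA engine says are done but whose status word has
 * not actually been written back yet (see bcmeth_rxq_consume).
 */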

static int bcmeth_ccb_match(device_t, cfdata_t, void *);
static void bcmeth_ccb_attach(device_t, device_t, void *);

struct bcmeth_txqueue {
	bus_dmamap_t txq_descmap;
	struct gmac_txdb *txq_consumer;
	struct gmac_txdb *txq_producer;
	struct gmac_txdb *txq_first;
	struct gmac_txdb *txq_last;
	struct ifqueue txq_mbufs;
	struct mbuf *txq_next;
	size_t txq_free;
	size_t txq_threshold;
	size_t txq_lastintr;
	bus_size_t txq_reg_xmtaddrlo;
	bus_size_t txq_reg_xmtptr;
	bus_size_t txq_reg_xmtctl;
	bus_size_t txq_reg_xmtsts0;
	bus_size_t txq_reg_xmtsts1;
	bus_dma_segment_t txq_descmap_seg;
};

struct bcmeth_rxqueue {
	bus_dmamap_t rxq_descmap;
	struct gmac_rxdb *rxq_consumer;
	struct gmac_rxdb *rxq_producer;
	struct gmac_rxdb *rxq_first;
	struct gmac_rxdb *rxq_last;
	struct mbuf *rxq_mhead;
	struct mbuf **rxq_mtail;
	struct mbuf *rxq_mconsumer;
	size_t rxq_inuse;
	size_t rxq_threshold;
	bus_size_t rxq_reg_rcvaddrlo;
	bus_size_t rxq_reg_rcvptr;
	bus_size_t rxq_reg_rcvctl;
	bus_size_t rxq_reg_rcvsts0;
	bus_size_t rxq_reg_rcvsts1;
	bus_dma_segment_t rxq_descmap_seg;
};

struct bcmeth_mapcache {
	u_int dmc_nmaps;
	u_int dmc_maxseg;
	u_int dmc_maxmaps;
	u_int dmc_maxmapsize;
	bus_dmamap_t dmc_maps[0];
};

struct bcmeth_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_dma_tag_t sc_dmat;
	kmutex_t *sc_lock;
	kmutex_t *sc_hwlock;
	struct ethercom sc_ec;
#define	sc_if		sc_ec.ec_if
	struct ifmedia sc_media;
	void *sc_soft_ih;
	void *sc_ih;

	struct bcmeth_rxqueue sc_rxq;
	struct bcmeth_txqueue sc_txq;

	size_t sc_rcvoffset;
	uint32_t sc_macaddr[2];
	uint32_t sc_maxfrm;
	uint32_t sc_cmdcfg;
	uint32_t sc_intmask;
	uint32_t sc_rcvlazy;
	volatile uint32_t sc_soft_flags;
#define	SOFT_RXINTR		0x01
#define	SOFT_TXINTR		0x02

#ifdef BCMETH_COUNTERS
	struct evcnt sc_ev_intr;
	struct evcnt sc_ev_soft_intr;
	struct evcnt sc_ev_work;
	struct evcnt sc_ev_tx_stall;
	struct evcnt sc_ev_rx_badmagic_lo;
	struct evcnt sc_ev_rx_badmagic_hi;
#endif

	struct ifqueue sc_rx_bufcache;
	struct bcmeth_mapcache *sc_rx_mapcache;
	struct bcmeth_mapcache *sc_tx_mapcache;

	struct workqueue *sc_workq;
	struct work sc_work;

	volatile uint32_t sc_work_flags;
#define	WORK_RXINTR		0x01
#define	WORK_RXUNDERFLOW	0x02
#define	WORK_REINIT		0x04

	uint8_t sc_enaddr[ETHER_ADDR_LEN];
};

static void bcmeth_ifstart(struct ifnet *);
static void bcmeth_ifwatchdog(struct ifnet *);
static int bcmeth_ifinit(struct ifnet *);
static void bcmeth_ifstop(struct ifnet *, int);
static int bcmeth_ifioctl(struct ifnet *, u_long, void *);

static int bcmeth_mapcache_create(struct bcmeth_softc *,
    struct bcmeth_mapcache **, size_t, size_t, size_t);
static void bcmeth_mapcache_destroy(struct bcmeth_softc *,
    struct bcmeth_mapcache *);
static bus_dmamap_t bcmeth_mapcache_get(struct bcmeth_softc *,
    struct bcmeth_mapcache *);
static void bcmeth_mapcache_put(struct bcmeth_softc *,
    struct bcmeth_mapcache *, bus_dmamap_t);

static int bcmeth_txq_attach(struct bcmeth_softc *,
    struct bcmeth_txqueue *, u_int);
static void bcmeth_txq_purge(struct bcmeth_softc *,
    struct bcmeth_txqueue *);
static void bcmeth_txq_reset(struct bcmeth_softc *,
    struct bcmeth_txqueue *);
static bool bcmeth_txq_consume(struct bcmeth_softc *,
    struct bcmeth_txqueue *);
static bool bcmeth_txq_produce(struct bcmeth_softc *,
    struct bcmeth_txqueue *, struct mbuf *m);
static bool bcmeth_txq_active_p(struct bcmeth_softc *,
    struct bcmeth_txqueue *);

static int bcmeth_rxq_attach(struct bcmeth_softc *,
    struct bcmeth_rxqueue *, u_int);
static bool bcmeth_rxq_produce(struct bcmeth_softc *,
    struct bcmeth_rxqueue *);
static void bcmeth_rxq_purge(struct bcmeth_softc *,
    struct bcmeth_rxqueue *, bool);
static void bcmeth_rxq_reset(struct bcmeth_softc *,
    struct bcmeth_rxqueue *);

static int bcmeth_intr(void *);
#ifdef BCMETH_MPSAFETX
static void bcmeth_soft_txintr(struct bcmeth_softc *);
#endif
static void bcmeth_soft_intr(void *);
static void bcmeth_worker(struct work *, void *);

static int bcmeth_mediachange(struct ifnet *);
static void bcmeth_mediastatus(struct ifnet *, struct ifmediareq *);

static inline uint32_t
bcmeth_read_4(struct bcmeth_softc *sc, bus_size_t o)
{
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh, o);
}

static inline void
bcmeth_write_4(struct bcmeth_softc *sc, bus_size_t o, uint32_t v)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, o, v);
}

CFATTACH_DECL_NEW(bcmeth_ccb, sizeof(struct bcmeth_softc),
    bcmeth_ccb_match, bcmeth_ccb_attach, NULL, NULL);

static int
bcmeth_ccb_match(device_t parent, cfdata_t cf, void *aux)
{
	struct bcmccb_attach_args * const ccbaa = aux;
	const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;

	if (strcmp(cf->cf_name, loc->loc_name))
		return 0;

#ifdef DIAGNOSTIC
	const int port = cf->cf_loc[BCMCCBCF_PORT];
#endif
	KASSERT(port == BCMCCBCF_PORT_DEFAULT || port == loc->loc_port);

	return 1;
}

static void
bcmeth_ccb_attach(device_t parent, device_t self, void *aux)
{
	struct bcmeth_softc * const sc = device_private(self);
	struct ethercom * const ec = &sc->sc_ec;
	struct ifnet * const ifp = &ec->ec_if;
	struct bcmccb_attach_args * const ccbaa = aux;
	const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;
	const char * const xname = device_xname(self);
	prop_dictionary_t dict = device_properties(self);
	int error;

	sc->sc_bst = ccbaa->ccbaa_ccb_bst;
	sc->sc_dmat = ccbaa->ccbaa_dmat;
	bus_space_subregion(sc->sc_bst, ccbaa->ccbaa_ccb_bsh,
	    loc->loc_offset, loc->loc_size, &sc->sc_bsh);

	/*
	 * We need to use the coherent dma tag for the GMAC.
	 */
	sc->sc_dmat = &bcm53xx_coherent_dma_tag;

	prop_data_t eaprop = prop_dictionary_get(dict, "mac-address");
	if (eaprop == NULL) {
		uint32_t mac0 = bcmeth_read_4(sc, UNIMAC_MAC_0);
		uint32_t mac1 = bcmeth_read_4(sc, UNIMAC_MAC_1);
		if ((mac0 == 0 && mac1 == 0) || (mac1 & 1)) {
			aprint_error(": mac-address property is missing\n");
			return;
		}
		sc->sc_enaddr[0] = (mac0 >> 0) & 0xff;
		sc->sc_enaddr[1] = (mac0 >> 8) & 0xff;
		sc->sc_enaddr[2] = (mac0 >> 16) & 0xff;
		sc->sc_enaddr[3] = (mac0 >> 24) & 0xff;
		sc->sc_enaddr[4] = (mac1 >> 0) & 0xff;
		sc->sc_enaddr[5] = (mac1 >> 8) & 0xff;
	} else {
		KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
		memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop),
		    ETHER_ADDR_LEN);
	}
	sc->sc_dev = self;
	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
	sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);

	bcmeth_write_4(sc, GMAC_INTMASK, 0);	// disable interrupts

	aprint_naive("\n");
	aprint_normal(": Gigabit Ethernet Controller\n");

	error = bcmeth_rxq_attach(sc, &sc->sc_rxq, 0);
	if (error) {
		aprint_error(": failed to init rxq: %d\n", error);
		return;
	}

	error = bcmeth_txq_attach(sc, &sc->sc_txq, 0);
	if (error) {
		aprint_error(": failed to init txq: %d\n", error);
		return;
	}

	error = bcmeth_mapcache_create(sc, &sc->sc_rx_mapcache,
	    BCMETH_MAXRXMBUFS, MCLBYTES, BCMETH_NRXSEGS);
	if (error) {
		aprint_error(": failed to allocate rx dmamaps: %d\n", error);
		return;
	}

	error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
	    BCMETH_MAXTXMBUFS, MCLBYTES, BCMETH_NTXSEGS);
	if (error) {
		aprint_error(": failed to allocate tx dmamaps: %d\n", error);
		return;
	}

	error = workqueue_create(&sc->sc_workq, xname, bcmeth_worker, sc,
	    (PRI_USER + MAXPRI_USER) / 2, IPL_NET, WQ_MPSAFE|WQ_PERCPU);
	if (error) {
		aprint_error(": failed to create workqueue: %d\n", error);
		return;
	}

	sc->sc_soft_ih = softint_establish(SOFTINT_MPSAFE | SOFTINT_NET,
	    bcmeth_soft_intr, sc);

	sc->sc_ih = intr_establish(loc->loc_intrs[0], IPL_VM, IST_LEVEL,
	    bcmeth_intr, sc);

	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt %d\n",
		    loc->loc_intrs[0]);
	} else {
		aprint_normal_dev(self, "interrupting on irq %d\n",
		    loc->loc_intrs[0]);
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Since each port is plugged into the switch/flow-accelerator,
	 * we hard-code GigE full-duplex with flow control enabled.
	 */
	int ifmedia = IFM_ETHER|IFM_1000_T|IFM_FDX;
	//ifmedia |= IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE;
	ifmedia_init(&sc->sc_media, IFM_IMASK, bcmeth_mediachange,
	    bcmeth_mediastatus);
	ifmedia_add(&sc->sc_media, ifmedia, 0, NULL);
	ifmedia_set(&sc->sc_media, ifmedia);

	ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_baudrate = IF_Mbps(1000);
	ifp->if_capabilities = 0;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef BCMETH_MPSAFE
	ifp->if_flags2 = IFF2_MPSAFE;
#endif
	ifp->if_ioctl = bcmeth_ifioctl;
	ifp->if_start = bcmeth_ifstart;
	ifp->if_watchdog = bcmeth_ifwatchdog;
	ifp->if_init = bcmeth_ifinit;
	ifp->if_stop = bcmeth_ifstop;
	IFQ_SET_READY(&ifp->if_snd);

	bcmeth_ifstop(ifp, true);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

#ifdef BCMETH_COUNTERS
	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "intr");
	evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "soft intr");
	evcnt_attach_dynamic(&sc->sc_ev_work, EVCNT_TYPE_MISC,
	    NULL, xname, "work items");
	evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "tx stalls");
	evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_lo, EVCNT_TYPE_MISC,
	    NULL, xname, "rx badmagic lo");
	evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_hi, EVCNT_TYPE_MISC,
	    NULL, xname, "rx badmagic hi");
#endif
}

static int
bcmeth_mediachange(struct ifnet *ifp)
{
	//struct bcmeth_softc * const sc = ifp->if_softc;
	return 0;
}

static void
bcmeth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifm)
{
	//struct bcmeth_softc * const sc = ifp->if_softc;

	ifm->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifm->ifm_active = IFM_ETHER | IFM_FDX | IFM_1000_T;
}

static uint64_t
bcmeth_macaddr_create(const uint8_t *enaddr)
{
	return (enaddr[3] << 0)			// UNIMAC_MAC_0
	    | (enaddr[2] << 8)			// UNIMAC_MAC_0
	    | (enaddr[1] << 16)			// UNIMAC_MAC_0
	    | ((uint64_t)enaddr[0] << 24)	// UNIMAC_MAC_0
	    | ((uint64_t)enaddr[5] << 32)	// UNIMAC_MAC_1
	    | ((uint64_t)enaddr[4] << 40);	// UNIMAC_MAC_1
}
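
/*
 * For example, for the address 00:01:02:03:04:05 the value above is
 * 0x0000040500010203: the low word 0x00010203 is what gets written to
 * UNIMAC_MAC_0 and the high word 0x00000405 to UNIMAC_MAC_1 (see
 * bcmeth_ifinit below).
 */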

static int
bcmeth_ifinit(struct ifnet *ifp)
{
	struct bcmeth_softc * const sc = ifp->if_softc;
	int error = 0;

	sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
	if (ifp->if_mtu > ETHERMTU_JUMBO)
		return error;

	KASSERT(ifp->if_flags & IFF_UP);

	/*
	 * Stop the interface.
	 */
	bcmeth_ifstop(ifp, 0);

	/*
	 * Reserve enough space at the front so that we can insert a
	 * max-sized link header and a VLAN tag.  Also make sure we have
	 * enough room for the rcvsts field.
	 */
	KASSERT(ALIGN(max_linkhdr) == max_linkhdr);
	KASSERTMSG(max_linkhdr > sizeof(struct ether_header), "%u > %zu",
	    max_linkhdr, sizeof(struct ether_header));
	sc->sc_rcvoffset = max_linkhdr + 4 - sizeof(struct ether_header);
	if (sc->sc_rcvoffset <= 4)
		sc->sc_rcvoffset += 4;
	KASSERT((sc->sc_rcvoffset & 3) == 2);
	KASSERT(sc->sc_rcvoffset <= __SHIFTOUT(RCVCTL_RCVOFFSET, RCVCTL_RCVOFFSET));
	KASSERT(sc->sc_rcvoffset >= 6);
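
	/*
	 * Worked example (illustrative): with max_linkhdr == 16,
	 * sc_rcvoffset = 16 + 4 - 14 = 6, which satisfies the assertions
	 * above (6 > 4, 6 & 3 == 2, 6 >= 6).  The frame then starts 6
	 * bytes into the buffer, so the 14-byte Ethernet header ends on a
	 * word boundary and the payload behind it is word-aligned.
	 */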

	/*
	 * If our frame size has changed (or it's our first time through)
	 * destroy the existing transmit mapcache.
	 */
	if (sc->sc_tx_mapcache != NULL
	    && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
		bcmeth_mapcache_destroy(sc, sc->sc_tx_mapcache);
		sc->sc_tx_mapcache = NULL;
	}

	if (sc->sc_tx_mapcache == NULL) {
		error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
		    BCMETH_MAXTXMBUFS, sc->sc_maxfrm, BCMETH_NTXSEGS);
		if (error)
			return error;
	}

	sc->sc_cmdcfg = NO_LENGTH_CHECK | PAUSE_IGNORE
	    | __SHIFTIN(ETH_SPEED_1000, ETH_SPEED)
	    | RX_ENA | TX_ENA;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_cmdcfg |= PROMISC_EN;
	} else {
		sc->sc_cmdcfg &= ~PROMISC_EN;
	}

	const uint8_t * const lladdr = CLLADDR(ifp->if_sadl);
	const uint64_t macstnaddr = bcmeth_macaddr_create(lladdr);

	/*
	 * We make sure that a received Ethernet packet starts on a non-word
	 * boundary so that the packet payload will be on a word boundary.
	 * To check the destination address, we keep two words around to
	 * compare against quickly.
	 */
#if __ARMEL__
	sc->sc_macaddr[0] = lladdr[0] | (lladdr[1] << 8);
	sc->sc_macaddr[1] = lladdr[2] | (lladdr[3] << 8)
	    | (lladdr[4] << 16) | (lladdr[5] << 24);
#else
	sc->sc_macaddr[0] = lladdr[1] | (lladdr[0] << 8);
	sc->sc_macaddr[1] = lladdr[5] | (lladdr[4] << 8)
	    | (lladdr[3] << 16) | (lladdr[2] << 24);
#endif

	sc->sc_intmask = DESCPROTOERR|DATAERR|DESCERR;

	/* 5. Load RCVADDR_LO with new pointer */
	bcmeth_rxq_reset(sc, &sc->sc_rxq);

	bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
	    __SHIFTIN(sc->sc_rcvoffset, RCVCTL_RCVOFFSET)
	    | RCVCTL_PARITY_DIS
	    | RCVCTL_OFLOW_CONTINUE
	    | __SHIFTIN(3, RCVCTL_BURSTLEN));

	/* 6. Load XMTADDR_LO with new pointer */
	bcmeth_txq_reset(sc, &sc->sc_txq);

	bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, XMTCTL_DMA_ACT_INDEX
	    | XMTCTL_PARITY_DIS
	    | __SHIFTIN(3, XMTCTL_BURSTLEN));

	/* 7. Setup other UNIMAC registers */
	bcmeth_write_4(sc, UNIMAC_FRAME_LEN, sc->sc_maxfrm);
	bcmeth_write_4(sc, UNIMAC_MAC_0, (uint32_t)(macstnaddr >> 0));
	bcmeth_write_4(sc, UNIMAC_MAC_1, (uint32_t)(macstnaddr >> 32));
	bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, sc->sc_cmdcfg);

	uint32_t devctl = bcmeth_read_4(sc, GMAC_DEVCONTROL);
	devctl |= RGMII_LINK_STATUS_SEL | NWAY_AUTO_POLL_EN | TXARB_STRICT_MODE;
	devctl &= ~FLOW_CTRL_MODE;
	devctl &= ~MIB_RD_RESET_EN;
	devctl &= ~RXQ_OVERFLOW_CTRL_SEL;
	devctl &= ~CPU_FLOW_CTRL_ON;
	bcmeth_write_4(sc, GMAC_DEVCONTROL, devctl);

	/* Setup lazy receive (at most 1ms). */
	const struct cpu_softc * const cpu = curcpu()->ci_softc;
	sc->sc_rcvlazy = __SHIFTIN(4, INTRCVLAZY_FRAMECOUNT)
	    | __SHIFTIN(cpu->cpu_clk.clk_apb / 1000, INTRCVLAZY_TIMEOUT);
	bcmeth_write_4(sc, GMAC_INTRCVLAZY, sc->sc_rcvlazy);
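
	/*
	 * Illustrative numbers: the timeout field counts APB clock ticks,
	 * so clk_apb / 1000 is one millisecond's worth (e.g. 250000 ticks
	 * for a hypothetical 250 MHz APB clock).  The interrupt then fires
	 * after 4 received frames or 1 ms, whichever comes first.
	 */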

	/*
	 * 11. Enable transmit queues in TQUEUE, and ensure that the
	 * transmit scheduling mode is correctly set in TCTRL.
	 */
	sc->sc_intmask |= XMTINT_0|XMTUF;
	bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl,
	    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl) | XMTCTL_ENABLE);

	/* 12. Enable receive queues in RQUEUE, */
	sc->sc_intmask |= RCVINT|RCVDESCUF|RCVFIFOOF;
	bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
	    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl) | RCVCTL_ENABLE);

	bcmeth_rxq_produce(sc, &sc->sc_rxq);	/* fill with rx buffers */

#if 0
	aprint_normal_dev(sc->sc_dev,
	    "devctl=%#x ucmdcfg=%#x xmtctl=%#x rcvctl=%#x\n",
	    devctl, sc->sc_cmdcfg,
	    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl),
	    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl));
#endif

	sc->sc_soft_flags = 0;

	bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);

	ifp->if_flags |= IFF_RUNNING;

	return error;
}

static void
bcmeth_ifstop(struct ifnet *ifp, int disable)
{
	struct bcmeth_softc * const sc = ifp->if_softc;
	struct bcmeth_txqueue * const txq = &sc->sc_txq;
	struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;

	KASSERT(!cpu_intr_p());

	sc->sc_soft_flags = 0;
	sc->sc_work_flags = 0;

	/* Disable Rx processing */
	bcmeth_write_4(sc, rxq->rxq_reg_rcvctl,
	    bcmeth_read_4(sc, rxq->rxq_reg_rcvctl) & ~RCVCTL_ENABLE);

	/* Disable Tx processing */
	bcmeth_write_4(sc, txq->txq_reg_xmtctl,
	    bcmeth_read_4(sc, txq->txq_reg_xmtctl) & ~XMTCTL_ENABLE);

	/* Disable all interrupts */
	bcmeth_write_4(sc, GMAC_INTMASK, 0);

	for (;;) {
		uint32_t tx0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
		uint32_t rx0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
		if (__SHIFTOUT(tx0, XMTSTATE) == XMTSTATE_DIS
		    && __SHIFTOUT(rx0, RCVSTATE) == RCVSTATE_DIS)
			break;
		delay(50);
	}

	/*
	 * Now reset the controller.
	 *
	 * 3. Set SW_RESET bit in UNIMAC_COMMAND_CONFIG register
	 * 4. Clear SW_RESET bit in UNIMAC_COMMAND_CONFIG register
	 */
	bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, SW_RESET);
	bcmeth_write_4(sc, GMAC_INTSTATUS, ~0);
	sc->sc_intmask = 0;
	ifp->if_flags &= ~IFF_RUNNING;

	/*
	 * Let's consume any remaining transmitted packets.  And if we are
	 * disabling the interface, purge ourselves of any untransmitted
	 * packets.  But don't consume any received packets, just drop them.
	 * If we aren't disabling the interface, save the mbufs in the
	 * receive queue for reuse.
	 */
	bcmeth_rxq_purge(sc, &sc->sc_rxq, disable);
	bcmeth_txq_consume(sc, &sc->sc_txq);
	if (disable) {
		bcmeth_txq_purge(sc, &sc->sc_txq);
		IF_PURGE(&ifp->if_snd);
	}

	bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, 0);
}

static void
bcmeth_ifwatchdog(struct ifnet *ifp)
{
}

static int
bcmeth_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct bcmeth_softc *sc = ifp->if_softc;
	struct ifreq * const ifr = data;
	const int s = splnet();
	int error;

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error != ENETRESET)
			break;

		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
			error = 0;
			break;
		}
		error = bcmeth_ifinit(ifp);
		break;
	}

	splx(s);
	return error;
}

static void
bcmeth_rxq_desc_presync(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	struct gmac_rxdb *rxdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
	    (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
	    BUS_DMASYNC_PREWRITE);
}

static void
bcmeth_rxq_desc_postsync(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	struct gmac_rxdb *rxdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
	    (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
	    BUS_DMASYNC_POSTWRITE);
}

static void
bcmeth_txq_desc_presync(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct gmac_txdb *txdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
	    (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
	    BUS_DMASYNC_PREWRITE);
}

static void
bcmeth_txq_desc_postsync(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct gmac_txdb *txdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
	    (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
	    BUS_DMASYNC_POSTWRITE);
}

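/*
 * The dmamap cache is a simple LIFO stack of preallocated maps.  There is
 * no locking at this level; callers are assumed to serialize get/put (the
 * rx/tx paths that use these run under the softc's locks).
 */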
static bus_dmamap_t
bcmeth_mapcache_get(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache *dmc)
{
	KASSERT(dmc->dmc_nmaps > 0);
	KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
	return dmc->dmc_maps[--dmc->dmc_nmaps];
}

static void
bcmeth_mapcache_put(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache *dmc,
	bus_dmamap_t map)
{
	KASSERT(map != NULL);
	KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
	dmc->dmc_maps[dmc->dmc_nmaps++] = map;
}

static void
bcmeth_mapcache_destroy(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache *dmc)
{
	const size_t dmc_size =
	    offsetof(struct bcmeth_mapcache, dmc_maps[dmc->dmc_maxmaps]);

	for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
		bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
	}
	kmem_intr_free(dmc, dmc_size);
}

static int
bcmeth_mapcache_create(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache **dmc_p,
	size_t maxmaps,
	size_t maxmapsize,
	size_t maxseg)
{
	const size_t dmc_size =
	    offsetof(struct bcmeth_mapcache, dmc_maps[maxmaps]);
	struct bcmeth_mapcache * const dmc =
	    kmem_intr_zalloc(dmc_size, KM_NOSLEEP);

	if (dmc == NULL)
		return ENOMEM;

	dmc->dmc_maxmaps = maxmaps;
	dmc->dmc_nmaps = maxmaps;
	dmc->dmc_maxmapsize = maxmapsize;
	dmc->dmc_maxseg = maxseg;

	for (u_int i = 0; i < maxmaps; i++) {
		int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
		    dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
		    BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create dma map cache "
			    "entry %u of %zu: %d\n",
			    i, maxmaps, error);
			while (i-- > 0) {
				bus_dmamap_destroy(sc->sc_dmat,
				    dmc->dmc_maps[i]);
			}
			kmem_intr_free(dmc, dmc_size);
			return error;
		}
		KASSERT(dmc->dmc_maps[i] != NULL);
	}

	*dmc_p = dmc;

	return 0;
}

#if 0
static void
bcmeth_dmamem_free(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t map,
	void *kvap)
{
	bus_dmamap_destroy(dmat, map);
	bus_dmamem_unmap(dmat, kvap, map_size);
	bus_dmamem_free(dmat, seg, 1);
}
#endif

static int
bcmeth_dmamem_alloc(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t *map,
	void **kvap)
{
	int error;
	int nseg;

	*kvap = NULL;
	*map = NULL;

	error = bus_dmamem_alloc(dmat, map_size, 2*PAGE_SIZE, 0,
	    seg, 1, &nseg, 0);
	if (error)
		return error;

	KASSERT(nseg == 1);

	error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 0);
	if (error == 0) {
		error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
		    map);
		if (error == 0) {
			error = bus_dmamap_load(dmat, *map, *kvap, map_size,
			    NULL, 0);
			if (error == 0)
				return 0;
			bus_dmamap_destroy(dmat, *map);
			*map = NULL;
		}
		bus_dmamem_unmap(dmat, *kvap, map_size);
		*kvap = NULL;
	}
	bus_dmamem_free(dmat, seg, nseg);
	return error;
}

static struct mbuf *
bcmeth_rx_buf_alloc(
	struct bcmeth_softc *sc)
{
	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
		return NULL;
	}
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
		m_freem(m);
		return NULL;
	}
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	bus_dmamap_t map = bcmeth_mapcache_get(sc, sc->sc_rx_mapcache);
	if (map == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "map get");
		m_freem(m);
		return NULL;
	}
	M_SETCTX(m, map);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "failed to load rx dmamap: %d\n",
		    error);
		M_SETCTX(m, NULL);
		m_freem(m);
		bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
		return NULL;
	}
	KASSERT(((map->_dm_flags ^ sc->sc_dmat->_ranges[0].dr_flags)
	    & _BUS_DMAMAP_COHERENT) == 0);
	KASSERT(map->dm_mapsize == MCLBYTES);
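	/*
	 * Seed the first word of the cluster with the magic value and sync
	 * it PREREAD|PREWRITE (the CPU wrote it and the device will
	 * overwrite it); the remainder is only ever written by the device,
	 * so a plain PREREAD sync is enough for it.
	 */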
#ifdef BCMETH_RCVMAGIC
	*mtod(m, uint32_t *) = BCMETH_RCVMAGIC;
	bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, map, sizeof(uint32_t),
	    map->dm_mapsize - sizeof(uint32_t), BUS_DMASYNC_PREREAD);
#else
	bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
	    BUS_DMASYNC_PREREAD);
#endif

	return m;
}

static void
bcmeth_rx_map_unload(
	struct bcmeth_softc *sc,
	struct mbuf *m)
{
	KASSERT(m);
	for (; m != NULL; m = m->m_next) {
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);
		KASSERT(map->dm_mapsize == MCLBYTES);
		bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);
		bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
		M_SETCTX(m, NULL);
	}
}

static bool
bcmeth_rxq_produce(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq)
{
	struct gmac_rxdb *producer = rxq->rxq_producer;
	bool produced = false;

	while (rxq->rxq_inuse < rxq->rxq_threshold) {
		struct mbuf *m;
		IF_DEQUEUE(&sc->sc_rx_bufcache, m);
		if (m == NULL) {
			m = bcmeth_rx_buf_alloc(sc);
			if (m == NULL) {
				printf("%s: bcmeth_rx_buf_alloc failed\n",
				    __func__);
				break;
			}
		}
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);

		producer->rxdb_buflen = MCLBYTES;
		producer->rxdb_addrlo = map->dm_segs[0].ds_addr;
		producer->rxdb_flags &= RXDB_FLAG_ET;
		*rxq->rxq_mtail = m;
		rxq->rxq_mtail = &m->m_next;
		m->m_len = MCLBYTES;
		m->m_next = NULL;
		rxq->rxq_inuse++;
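		/*
		 * On wrap, flush (presync) the descriptors written so far;
		 * membar_producer() orders the descriptor stores ahead of
		 * the sync and of the rcvptr doorbell write below.
		 */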
		if (++producer == rxq->rxq_last) {
			membar_producer();
			bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
			    rxq->rxq_last - rxq->rxq_producer);
			producer = rxq->rxq_producer = rxq->rxq_first;
		}
		produced = true;
	}
	if (produced) {
		membar_producer();
		if (producer != rxq->rxq_producer) {
			bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
			    producer - rxq->rxq_producer);
			rxq->rxq_producer = producer;
		}
		bcmeth_write_4(sc, rxq->rxq_reg_rcvptr,
		    rxq->rxq_descmap->dm_segs[0].ds_addr
		    + ((uintptr_t)producer & RCVPTR));
	}
	return true;
}

static void
bcmeth_rx_input(
	struct bcmeth_softc *sc,
	struct mbuf *m,
	uint32_t rxdb_flags)
{
	struct ifnet * const ifp = &sc->sc_if;

	bcmeth_rx_map_unload(sc, m);

	m_adj(m, sc->sc_rcvoffset);

	/*
	 * If we are in promiscuous mode and this isn't a multicast, check the
	 * destination address to make sure it matches our own.  If it doesn't,
	 * mark the packet as being received promiscuously.
	 */
	if ((sc->sc_cmdcfg & PROMISC_EN)
	    && (m->m_data[0] & 1) == 0
	    && (*(uint16_t *)&m->m_data[0] != sc->sc_macaddr[0]
		|| *(uint32_t *)&m->m_data[2] != sc->sc_macaddr[1])) {
		m->m_flags |= M_PROMISC;
	}
	m->m_pkthdr.rcvif = ifp;

	ifp->if_ipackets++;
	ifp->if_ibytes += m->m_pkthdr.len;

	/*
	 * Let's give it to the network subsystem to deal with.
	 */
#ifdef BCMETH_MPSAFE
	mutex_exit(sc->sc_lock);
	(*ifp->if_input)(ifp, m);
	mutex_enter(sc->sc_lock);
#else
	int s = splnet();
	bpf_mtap(ifp, m);
	(*ifp->if_input)(ifp, m);
	splx(s);
#endif
}

static bool
bcmeth_rxq_consume(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	size_t atmost)
{
	struct ifnet * const ifp = &sc->sc_if;
	struct gmac_rxdb *consumer = rxq->rxq_consumer;
	size_t rxconsumed = 0;
	bool didconsume = false;

	while (atmost-- > 0) {
		if (consumer == rxq->rxq_producer) {
			KASSERT(rxq->rxq_inuse == 0);
			break;
		}

		uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
		uint32_t currdscr = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
		if (consumer == rxq->rxq_first + currdscr) {
			break;
		}
		bcmeth_rxq_desc_postsync(sc, rxq, consumer, 1);

		/*
		 * We own this packet again.  Copy the rxsts word from it.
		 */
		rxconsumed++;
		didconsume = true;
		uint32_t rxsts;
		KASSERT(rxq->rxq_mhead != NULL);
		bus_dmamap_t map = M_GETCTX(rxq->rxq_mhead, bus_dmamap_t);
		bus_dmamap_sync(sc->sc_dmat, map, 0, arm_dcache_align,
		    BUS_DMASYNC_POSTREAD);
		memcpy(&rxsts, rxq->rxq_mhead->m_data, 4);
#if 0
		KASSERTMSG(rxsts != BCMETH_RCVMAGIC, "currdscr=%u consumer=%zd",
		    currdscr, consumer - rxq->rxq_first);
#endif

		/*
		 * Get the count of descriptors.  Fetch the correct number
		 * of mbufs.
		 */
#ifdef BCMETH_RCVMAGIC
		size_t desc_count = rxsts != BCMETH_RCVMAGIC
		    ? __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1 : 1;
#else
		size_t desc_count = __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1;
#endif
		struct mbuf *m = rxq->rxq_mhead;
		struct mbuf *m_last = m;
		for (size_t i = 1; i < desc_count; i++) {
			if (++consumer == rxq->rxq_last) {
				consumer = rxq->rxq_first;
			}
			KASSERTMSG(consumer != rxq->rxq_first + currdscr,
			    "i=%zu rxsts=%#x desc_count=%zu currdscr=%u "
			    "consumer=%zd",
			    i, rxsts, desc_count, currdscr,
			    consumer - rxq->rxq_first);
			m_last = m_last->m_next;
		}

		/*
		 * Now remove it/them from the list of enqueued mbufs.
		 */
		if ((rxq->rxq_mhead = m_last->m_next) == NULL)
			rxq->rxq_mtail = &rxq->rxq_mhead;
		m_last->m_next = NULL;

#ifdef BCMETH_RCVMAGIC
		if (rxsts == BCMETH_RCVMAGIC) {
			ifp->if_ierrors++;
			if ((m->m_ext.ext_paddr >> 28) == 8) {
				BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_lo);
			} else {
				BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_hi);
			}
			IF_ENQUEUE(&sc->sc_rx_bufcache, m);
		} else
#endif /* BCMETH_RCVMAGIC */
		if (rxsts & (RXSTS_CRC_ERROR|RXSTS_OVERSIZED|RXSTS_PKT_OVERFLOW)) {
			aprint_error_dev(sc->sc_dev,
			    "[%zu]: count=%zu rxsts=%#x\n",
			    consumer - rxq->rxq_first, desc_count, rxsts);
			/*
			 * We encountered an error, take the mbufs and add
			 * them to the rx bufcache so we can quickly reuse
			 * them.
			 */
			ifp->if_ierrors++;
			do {
				struct mbuf *m0 = m->m_next;
				m->m_next = NULL;
				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
				m = m0;
			} while (m);
		} else {
			uint32_t framelen = __SHIFTOUT(rxsts, RXSTS_FRAMELEN);
			framelen += sc->sc_rcvoffset;
			m->m_pkthdr.len = framelen;
			if (desc_count == 1) {
				KASSERT(framelen <= MCLBYTES);
				m->m_len = framelen;
			} else {
				m_last->m_len = framelen & (MCLBYTES - 1);
			}

#ifdef BCMETH_MPSAFE
			/*
			 * Wrap at the last entry!
			 */
			if (++consumer == rxq->rxq_last) {
				KASSERT(consumer[-1].rxdb_flags & RXDB_FLAG_ET);
				rxq->rxq_consumer = rxq->rxq_first;
			} else {
				rxq->rxq_consumer = consumer;
			}
			rxq->rxq_inuse -= rxconsumed;
#endif /* BCMETH_MPSAFE */

			/*
			 * Receive the packet (which releases our lock)
			 */
			bcmeth_rx_input(sc, m, rxsts);

#ifdef BCMETH_MPSAFE
			/*
			 * Since we had to give up our lock, we need to
			 * refresh these.
			 */
			consumer = rxq->rxq_consumer;
			rxconsumed = 0;
			continue;
#endif /* BCMETH_MPSAFE */
		}

		/*
		 * Wrap at the last entry!
		 */
		if (++consumer == rxq->rxq_last) {
			KASSERT(consumer[-1].rxdb_flags & RXDB_FLAG_ET);
			consumer = rxq->rxq_first;
		}
	}

	/*
	 * Update queue info.
	 */
	rxq->rxq_consumer = consumer;
	rxq->rxq_inuse -= rxconsumed;

	/*
	 * Did we consume anything?
	 */
	return didconsume;
}

static void
bcmeth_rxq_purge(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	bool discard)
{
	struct mbuf *m;

	if ((m = rxq->rxq_mhead) != NULL) {
		if (discard) {
			bcmeth_rx_map_unload(sc, m);
			m_freem(m);
		} else {
			while (m != NULL) {
				struct mbuf *m0 = m->m_next;
				m->m_next = NULL;
				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
				m = m0;
			}
		}
	}

	rxq->rxq_mhead = NULL;
	rxq->rxq_mtail = &rxq->rxq_mhead;
	rxq->rxq_inuse = 0;
}

static void
bcmeth_rxq_reset(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq)
{
	/*
	 * Sync all the descriptors.
	 */
	bcmeth_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
	    rxq->rxq_last - rxq->rxq_first);

	/*
	 * Make sure we own all descriptors in the ring.
	 */
	struct gmac_rxdb *rxdb;
	for (rxdb = rxq->rxq_first; rxdb < rxq->rxq_last - 1; rxdb++) {
		rxdb->rxdb_flags = RXDB_FLAG_IC;
	}

	/*
	 * Last descriptor has the wrap flag.
	 */
	rxdb->rxdb_flags = RXDB_FLAG_ET|RXDB_FLAG_IC;

	/*
	 * Reset the producer consumer indexes.
	 */
	rxq->rxq_consumer = rxq->rxq_first;
	rxq->rxq_producer = rxq->rxq_first;
	rxq->rxq_inuse = 0;
	if (rxq->rxq_threshold < BCMETH_MINRXMBUFS)
		rxq->rxq_threshold = BCMETH_MINRXMBUFS;

	sc->sc_intmask |= RCVINT|RCVFIFOOF|RCVDESCUF;

	/*
	 * Restart the receiver at the first descriptor.
	 */
	bcmeth_write_4(sc, rxq->rxq_reg_rcvaddrlo,
	    rxq->rxq_descmap->dm_segs[0].ds_addr);
}

static int
bcmeth_rxq_attach(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	u_int qno)
{
	size_t desc_count = BCMETH_RINGSIZE / sizeof(rxq->rxq_first[0]);
	int error;
	void *descs;

	KASSERT(desc_count == 256 || desc_count == 512);

	error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE,
	    &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
	if (error)
		return error;

	memset(descs, 0, BCMETH_RINGSIZE);
	rxq->rxq_first = descs;
	rxq->rxq_last = rxq->rxq_first + desc_count;
	rxq->rxq_consumer = descs;
	rxq->rxq_producer = descs;

	bcmeth_rxq_purge(sc, rxq, true);
	bcmeth_rxq_reset(sc, rxq);

	rxq->rxq_reg_rcvaddrlo = GMAC_RCVADDR_LOW;
	rxq->rxq_reg_rcvctl = GMAC_RCVCONTROL;
	rxq->rxq_reg_rcvptr = GMAC_RCVPTR;
	rxq->rxq_reg_rcvsts0 = GMAC_RCVSTATUS0;
	rxq->rxq_reg_rcvsts1 = GMAC_RCVSTATUS1;

	return 0;
}

static bool
bcmeth_txq_active_p(
	struct bcmeth_softc * const sc,
	struct bcmeth_txqueue *txq)
{
	return !IF_IS_EMPTY(&txq->txq_mbufs);
}

static bool
bcmeth_txq_fillable_p(
	struct bcmeth_softc * const sc,
	struct bcmeth_txqueue *txq)
{
	return txq->txq_free >= txq->txq_threshold;
}

static int
bcmeth_txq_attach(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	u_int qno)
{
	size_t desc_count = BCMETH_RINGSIZE / sizeof(txq->txq_first[0]);
	int error;
	void *descs;

	KASSERT(desc_count == 256 || desc_count == 512);

	error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE,
	    &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
	if (error)
		return error;

	memset(descs, 0, BCMETH_RINGSIZE);
	txq->txq_first = descs;
	txq->txq_last = txq->txq_first + desc_count;
	txq->txq_consumer = descs;
	txq->txq_producer = descs;

	IFQ_SET_MAXLEN(&txq->txq_mbufs, BCMETH_MAXTXMBUFS);

	txq->txq_reg_xmtaddrlo = GMAC_XMTADDR_LOW;
	txq->txq_reg_xmtctl = GMAC_XMTCONTROL;
	txq->txq_reg_xmtptr = GMAC_XMTPTR;
	txq->txq_reg_xmtsts0 = GMAC_XMTSTATUS0;
	txq->txq_reg_xmtsts1 = GMAC_XMTSTATUS1;

	bcmeth_txq_reset(sc, txq);

	return 0;
}

static int
bcmeth_txq_map_load(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct mbuf *m)
{
	bus_dmamap_t map;
	int error;

	map = M_GETCTX(m, bus_dmamap_t);
	if (map != NULL)
		return 0;

	map = bcmeth_mapcache_get(sc, sc->sc_tx_mapcache);
	if (map == NULL)
		return ENOMEM;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error)
		return error;

	bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);
	M_SETCTX(m, map);
	return 0;
}

static void
bcmeth_txq_map_unload(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct mbuf *m)
{
	KASSERT(m);
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, map);
	bcmeth_mapcache_put(sc, sc->sc_tx_mapcache, map);
}

static bool
bcmeth_txq_produce(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct mbuf *m)
{
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);

	if (map->dm_nsegs > txq->txq_free)
		return false;

	/*
	 * TCP Offload flag must be set in the first descriptor.
	 */
	struct gmac_txdb *producer = txq->txq_producer;
	uint32_t first_flags = TXDB_FLAG_SF;
	uint32_t last_flags = TXDB_FLAG_EF;

	/*
	 * If we've produced enough descriptors without consuming any
	 * we need to ask for an interrupt to reclaim some.
	 */
	txq->txq_lastintr += map->dm_nsegs;
	if (txq->txq_lastintr >= txq->txq_threshold
	    || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
		txq->txq_lastintr = 0;
		last_flags |= TXDB_FLAG_IC;
	}

	KASSERT(producer != txq->txq_last);

	struct gmac_txdb *start = producer;
	size_t count = map->dm_nsegs;
	producer->txdb_flags |= first_flags;
	producer->txdb_addrlo = map->dm_segs[0].ds_addr;
	producer->txdb_buflen = map->dm_segs[0].ds_len;
	for (u_int i = 1; i < map->dm_nsegs; i++) {
#if 0
		printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
		    producer->txdb_flags, producer->txdb_buflen,
		    producer->txdb_addrlo, producer->txdb_addrhi);
#endif
		if (__predict_false(++producer == txq->txq_last)) {
			bcmeth_txq_desc_presync(sc, txq, start,
			    txq->txq_last - start);
			count -= txq->txq_last - start;
			producer = txq->txq_first;
			start = txq->txq_first;
		}
		producer->txdb_addrlo = map->dm_segs[i].ds_addr;
		producer->txdb_buflen = map->dm_segs[i].ds_len;
	}
	producer->txdb_flags |= last_flags;
#if 0
	printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
	    producer->txdb_flags, producer->txdb_buflen,
	    producer->txdb_addrlo, producer->txdb_addrhi);
#endif
	if (count)
		bcmeth_txq_desc_presync(sc, txq, start, count);

	/*
	 * Reduce free count by the number of segments we consumed.
	 */
	txq->txq_free -= map->dm_nsegs;
	KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
	KASSERT(map->dm_nsegs == 1
	    || (txq->txq_producer->txdb_flags & TXDB_FLAG_EF) == 0);
	KASSERT(producer->txdb_flags & TXDB_FLAG_EF);

#if 0
	printf("%s: mbuf %p: produced a %u byte packet in %u segments "
	    "(%zd..%zd)\n",
	    __func__, m, m->m_pkthdr.len, map->dm_nsegs,
	    txq->txq_producer - txq->txq_first, producer - txq->txq_first);
#endif

	if (producer + 1 == txq->txq_last)
		txq->txq_producer = txq->txq_first;
	else
		txq->txq_producer = producer + 1;
	IF_ENQUEUE(&txq->txq_mbufs, m);

	/*
	 * Let the transmitter know there's more to do.
	 */
	bcmeth_write_4(sc, txq->txq_reg_xmtptr,
	    txq->txq_descmap->dm_segs[0].ds_addr
	    + ((uintptr_t)txq->txq_producer & XMT_LASTDSCR));
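
	/*
	 * The low bits written to the pointer register are the byte offset
	 * of txq_producer within the ring, i.e. one descriptor past the
	 * last one just filled; the DMA engine is assumed to process
	 * descriptors up to (but not including) that offset.
	 */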

	return true;
}

static struct mbuf *
bcmeth_copy_packet(struct mbuf *m)
{
	struct mbuf *mext = NULL;
	size_t misalignment = 0;
	size_t hlen = 0;

	for (mext = m; mext != NULL; mext = mext->m_next) {
		if (mext->m_flags & M_EXT) {
			misalignment = mtod(mext, vaddr_t) & arm_dcache_align;
			break;
		}
		hlen += mext->m_len;
	}

	struct mbuf *n = m->m_next;
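	/*
	 * NB: the in-place compaction path below is deliberately disabled
	 * (note the "&& false"), so every packet handed to this function
	 * currently takes the m_gethdr()/m_copydata() path at the bottom.
	 */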
	if (m != mext && hlen + misalignment <= MHLEN && false) {
		KASSERT(m->m_pktdat <= m->m_data
		    && m->m_data <= &m->m_pktdat[MHLEN - m->m_len]);
		size_t oldoff = m->m_data - m->m_pktdat;
		size_t off;
		if (mext == NULL) {
			off = (oldoff + hlen > MHLEN) ? 0 : oldoff;
		} else {
			off = MHLEN - (hlen + misalignment);
		}
		KASSERT(off + hlen + misalignment <= MHLEN);
		if (((oldoff ^ off) & arm_dcache_align) != 0 || off < oldoff) {
			memmove(&m->m_pktdat[off], m->m_data, m->m_len);
			m->m_data = &m->m_pktdat[off];
		}
		m_copydata(n, 0, hlen - m->m_len, &m->m_data[m->m_len]);
		m->m_len = hlen;
		m->m_next = mext;
		while (n != mext) {
			n = m_free(n);
		}
		return m;
	}

	struct mbuf *m0 = m_gethdr(M_DONTWAIT, m->m_type);
	if (m0 == NULL) {
		return NULL;
	}
	M_COPY_PKTHDR(m0, m);
	MCLAIM(m0, m->m_owner);
	if (m0->m_pkthdr.len > MHLEN) {
		MCLGET(m0, M_DONTWAIT);
		if ((m0->m_flags & M_EXT) == 0) {
			m_freem(m0);
			return NULL;
		}
	}
	m0->m_len = m->m_pkthdr.len;
	m_copydata(m, 0, m0->m_len, mtod(m0, void *));
	m_freem(m);
	return m0;
}

static bool
bcmeth_txq_enqueue(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq)
{
	for (;;) {
		if (IF_QFULL(&txq->txq_mbufs))
			return false;
		struct mbuf *m = txq->txq_next;
		if (m == NULL) {
			int s = splnet();
			IF_DEQUEUE(&sc->sc_if.if_snd, m);
			splx(s);
			if (m == NULL)
				return true;
			M_SETCTX(m, NULL);
		} else {
			txq->txq_next = NULL;
		}
		/*
		 * If LINK2 is set and this packet uses multiple mbufs,
		 * consolidate it into a single mbuf.
		 */
		if (m->m_next != NULL && (sc->sc_if.if_flags & IFF_LINK2)) {
			struct mbuf *m0 = bcmeth_copy_packet(m);
			if (m0 == NULL) {
				txq->txq_next = m;
				return true;
			}
			m = m0;
		}
		int error = bcmeth_txq_map_load(sc, txq, m);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "discarded packet due to "
			    "dmamap load failure: %d\n", error);
			m_freem(m);
			continue;
		}
		KASSERT(txq->txq_next == NULL);
		if (!bcmeth_txq_produce(sc, txq, m)) {
			txq->txq_next = m;
			return false;
		}
		KASSERT(txq->txq_next == NULL);
	}
}

static bool
bcmeth_txq_consume(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq)
{
	struct ifnet * const ifp = &sc->sc_if;
	struct gmac_txdb *consumer = txq->txq_consumer;
	size_t txfree = 0;

#if 0
	printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
#endif

	for (;;) {
		if (consumer == txq->txq_producer) {
			txq->txq_consumer = consumer;
			txq->txq_free += txfree;
			txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
#if 0
			printf("%s: empty: freed %zu descriptors going from "
			    "%zu to %zu\n",
			    __func__, txfree, txq->txq_free - txfree,
			    txq->txq_free);
#endif
			KASSERT(txq->txq_lastintr == 0);
			KASSERT(txq->txq_free
			    == txq->txq_last - txq->txq_first - 1);
			return true;
		}
		bcmeth_txq_desc_postsync(sc, txq, consumer, 1);
		uint32_t s0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
		if (consumer == txq->txq_first + __SHIFTOUT(s0, XMT_CURRDSCR)) {
			txq->txq_consumer = consumer;
			txq->txq_free += txfree;
			txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
#if 0
			printf("%s: freed %zu descriptors\n",
			    __func__, txfree);
#endif
			return bcmeth_txq_fillable_p(sc, txq);
		}

		/*
		 * If this is the last descriptor in the chain, get the
		 * mbuf, free its dmamap, and free the mbuf chain itself.
		 */
		const uint32_t txdb_flags = consumer->txdb_flags;
		if (txdb_flags & TXDB_FLAG_EF) {
			struct mbuf *m;

			IF_DEQUEUE(&txq->txq_mbufs, m);
			KASSERT(m);
			bcmeth_txq_map_unload(sc, txq, m);
#if 0
			printf("%s: mbuf %p: consumed a %u byte packet\n",
			    __func__, m, m->m_pkthdr.len);
#endif
			bpf_mtap(ifp, m);
			ifp->if_opackets++;
			ifp->if_obytes += m->m_pkthdr.len;
			if (m->m_flags & M_MCAST)
				ifp->if_omcasts++;
			m_freem(m);
		}

		/*
		 * We own this packet again.  Clear all flags except wrap.
		 */
		txfree++;

		/*
		 * Wrap at the last entry!
		 */
		if (txdb_flags & TXDB_FLAG_ET) {
			consumer->txdb_flags = TXDB_FLAG_ET;
			KASSERT(consumer + 1 == txq->txq_last);
			consumer = txq->txq_first;
		} else {
			consumer->txdb_flags = 0;
			consumer++;
			KASSERT(consumer < txq->txq_last);
		}
	}
}

static void
bcmeth_txq_purge(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq)
{
	struct mbuf *m;
	KASSERT((bcmeth_read_4(sc, UNIMAC_COMMAND_CONFIG) & TX_ENA) == 0);

	for (;;) {
		IF_DEQUEUE(&txq->txq_mbufs, m);
		if (m == NULL)
			break;
		bcmeth_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
	if ((m = txq->txq_next) != NULL) {
		txq->txq_next = NULL;
		bcmeth_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
}

static void
bcmeth_txq_reset(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq)
{
	/*
	 * Sync all the descriptors.
	 */
	bcmeth_txq_desc_postsync(sc, txq, txq->txq_first,
	    txq->txq_last - txq->txq_first);

	/*
	 * Make sure we own all descriptors in the ring.
	 */
	struct gmac_txdb *txdb;
	for (txdb = txq->txq_first; txdb < txq->txq_last - 1; txdb++) {
		txdb->txdb_flags = 0;
	}

	/*
	 * Last descriptor has the wrap flag.
	 */
	txdb->txdb_flags = TXDB_FLAG_ET;

	/*
	 * Reset the producer consumer indexes.
	 */
	txq->txq_consumer = txq->txq_first;
	txq->txq_producer = txq->txq_first;
	txq->txq_free = txq->txq_last - txq->txq_first - 1;
	txq->txq_threshold = txq->txq_free / 2;
	txq->txq_lastintr = 0;
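
	/*
	 * For a 256-entry ring this leaves 255 usable descriptors (one slot
	 * stays free so a full ring can be told apart from an empty one)
	 * and a refill threshold of 127.
	 */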

	/*
	 * What do we want to get interrupted on?
	 */
	sc->sc_intmask |= XMTINT_0 | XMTUF;

	/*
	 * Restart the transmitter at the first descriptor.
	 */
	bcmeth_write_4(sc, txq->txq_reg_xmtaddrlo,
	    txq->txq_descmap->dm_segs->ds_addr);
}

static void
bcmeth_ifstart(struct ifnet *ifp)
{
	struct bcmeth_softc * const sc = ifp->if_softc;

	if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) {
		return;
	}

#ifdef BCMETH_MPSAFETX
	if (cpu_intr_p()) {
#endif
		atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
		softint_schedule(sc->sc_soft_ih);
#ifdef BCMETH_MPSAFETX
	} else {
		/*
		 * Either we are in a softintr thread already or in some
		 * other thread, so just borrow it to do the send and save
		 * ourselves the overhead of a fast soft int.
		 */
		bcmeth_soft_txintr(sc);
	}
#endif
}

int
bcmeth_intr(void *arg)
{
	struct bcmeth_softc * const sc = arg;
	uint32_t soft_flags = 0;
	uint32_t work_flags = 0;
	int rv = 0;

	mutex_enter(sc->sc_hwlock);

	uint32_t intmask = sc->sc_intmask;
	BCMETH_EVCNT_INCR(sc->sc_ev_intr);

	for (;;) {
		uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
		intstatus &= intmask;
		bcmeth_write_4(sc, GMAC_INTSTATUS, intstatus);	/* write 1 to clear */
		if (intstatus == 0) {
			break;
		}
#if 0
		aprint_normal_dev(sc->sc_dev, "%s: intstatus=%#x intmask=%#x\n",
		    __func__, intstatus, bcmeth_read_4(sc, GMAC_INTMASK));
#endif
		if (intstatus & RCVINT) {
			struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
			intmask &= ~RCVINT;
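			/*
			 * RCVINT stays masked from here on; the softint or
			 * workqueue handler re-enables it once the receive
			 * ring has been drained and refilled.
			 */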

			uint32_t rcvsts0 = bcmeth_read_4(sc,
			    rxq->rxq_reg_rcvsts0);
			uint32_t descs = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
			if (descs < rxq->rxq_consumer - rxq->rxq_first) {
				/*
				 * We wrapped at the end so count how far
				 * we are from the end.
				 */
				descs += rxq->rxq_last - rxq->rxq_consumer;
			} else {
				descs -= rxq->rxq_consumer - rxq->rxq_first;
			}
			/*
			 * If we "timed out" we can't be hogging, so use
			 * softints.  If we exceeded the frame count then
			 * we might be hogging, so let the workqueue deal
			 * with them.
			 */
			const uint32_t framecount =
			    __SHIFTOUT(sc->sc_rcvlazy, INTRCVLAZY_FRAMECOUNT);
			if (descs < framecount
			    || (curcpu()->ci_curlwp->l_flag & LW_IDLE)) {
				soft_flags |= SOFT_RXINTR;
			} else {
				work_flags |= WORK_RXINTR;
			}
		}

		if (intstatus & XMTINT_0) {
			intmask &= ~XMTINT_0;
			soft_flags |= SOFT_TXINTR;
		}

		if (intstatus & RCVDESCUF) {
			intmask &= ~RCVDESCUF;
			work_flags |= WORK_RXUNDERFLOW;
		}

		intstatus &= intmask;
		if (intstatus) {
			aprint_error_dev(sc->sc_dev,
			    "intr: intstatus=%#x\n", intstatus);
			aprint_error_dev(sc->sc_dev,
			    "rcvbase=%p/%#lx rcvptr=%#x rcvsts=%#x/%#x\n",
			    sc->sc_rxq.rxq_first,
			    sc->sc_rxq.rxq_descmap->dm_segs[0].ds_addr,
			    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvptr),
			    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts0),
			    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts1));
			aprint_error_dev(sc->sc_dev,
			    "xmtbase=%p/%#lx xmtptr=%#x xmtsts=%#x/%#x\n",
			    sc->sc_txq.txq_first,
			    sc->sc_txq.txq_descmap->dm_segs[0].ds_addr,
			    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtptr),
			    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts0),
			    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts1));
			intmask &= ~intstatus;
			work_flags |= WORK_REINIT;
			break;
		}
	}

	if (intmask != sc->sc_intmask) {
		sc->sc_intmask = intmask;
		bcmeth_write_4(sc, GMAC_INTMASK, intmask);
	}

	if (work_flags) {
		if (sc->sc_work_flags == 0) {
			workqueue_enqueue(sc->sc_workq, &sc->sc_work, NULL);
		}
		atomic_or_32(&sc->sc_work_flags, work_flags);
		rv = 1;
	}

	if (soft_flags) {
		if (sc->sc_soft_flags == 0) {
			softint_schedule(sc->sc_soft_ih);
		}
		atomic_or_32(&sc->sc_soft_flags, soft_flags);
		rv = 1;
	}

	mutex_exit(sc->sc_hwlock);

	return rv;
}

#ifdef BCMETH_MPSAFETX
void
bcmeth_soft_txintr(struct bcmeth_softc *sc)
{
	mutex_enter(sc->sc_lock);
	/*
	 * Let's do what we came here for.  Consume transmitted
	 * packets off the transmit ring.
	 */
	if (!bcmeth_txq_consume(sc, &sc->sc_txq)
	    || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
		BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall);
		sc->sc_if.if_flags |= IFF_OACTIVE;
	} else {
		sc->sc_if.if_flags &= ~IFF_OACTIVE;
	}
	if (sc->sc_if.if_flags & IFF_RUNNING) {
		mutex_spin_enter(sc->sc_hwlock);
		sc->sc_intmask |= XMTINT_0;
		bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
		mutex_spin_exit(sc->sc_hwlock);
	}
	mutex_exit(sc->sc_lock);
}
#endif /* BCMETH_MPSAFETX */

void
bcmeth_soft_intr(void *arg)
{
	struct bcmeth_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_if;
	uint32_t intmask = 0;

	mutex_enter(sc->sc_lock);

	u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);

	BCMETH_EVCNT_INCR(sc->sc_ev_soft_intr);

	if ((soft_flags & SOFT_TXINTR)
	    || bcmeth_txq_active_p(sc, &sc->sc_txq)) {
		/*
		 * Let's do what we came here for.  Consume transmitted
		 * packets off the transmit ring.
		 */
		if (!bcmeth_txq_consume(sc, &sc->sc_txq)
		    || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
			BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall);
			ifp->if_flags |= IFF_OACTIVE;
		} else {
			ifp->if_flags &= ~IFF_OACTIVE;
		}
		intmask |= XMTINT_0;
	}

	if (soft_flags & SOFT_RXINTR) {
		/*
		 * Let's consume.
		 */
		while (bcmeth_rxq_consume(sc, &sc->sc_rxq,
		    sc->sc_rxq.rxq_threshold / 4)) {
			/*
			 * We've consumed a quarter of the ring and still have
			 * more to do.  Refill the ring.
			 */
			bcmeth_rxq_produce(sc, &sc->sc_rxq);
		}
		intmask |= RCVINT;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		bcmeth_rxq_produce(sc, &sc->sc_rxq);
		mutex_spin_enter(sc->sc_hwlock);
		sc->sc_intmask |= intmask;
		bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
		mutex_spin_exit(sc->sc_hwlock);
	}

	mutex_exit(sc->sc_lock);
}

void
bcmeth_worker(struct work *wk, void *arg)
{
	struct bcmeth_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_if;
	uint32_t intmask = 0;

	mutex_enter(sc->sc_lock);

	BCMETH_EVCNT_INCR(sc->sc_ev_work);

	uint32_t work_flags = atomic_swap_32(&sc->sc_work_flags, 0);
	if (work_flags & WORK_REINIT) {
		int s = splnet();
		sc->sc_soft_flags = 0;
		bcmeth_ifinit(ifp);
		splx(s);
		work_flags &= ~WORK_RXUNDERFLOW;
	}

	if (work_flags & WORK_RXUNDERFLOW) {
		struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
		size_t threshold = 5 * rxq->rxq_threshold / 4;
		if (threshold >= rxq->rxq_last - rxq->rxq_first) {
			threshold = rxq->rxq_last - rxq->rxq_first - 1;
		} else {
			intmask |= RCVDESCUF;
		}
		aprint_normal_dev(sc->sc_dev,
		    "increasing receive buffers from %zu to %zu\n",
		    rxq->rxq_threshold, threshold);
		rxq->rxq_threshold = threshold;
	}

	if (work_flags & WORK_RXINTR) {
		/*
		 * Let's consume.
		 */
		while (bcmeth_rxq_consume(sc, &sc->sc_rxq,
		    sc->sc_rxq.rxq_threshold / 4)) {
			/*
			 * We've consumed a quarter of the ring and still have
			 * more to do.  Refill the ring.
			 */
			bcmeth_rxq_produce(sc, &sc->sc_rxq);
		}
		intmask |= RCVINT;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		bcmeth_rxq_produce(sc, &sc->sc_rxq);
#if 0
		uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
		if (intstatus & RCVINT) {
			bcmeth_write_4(sc, GMAC_INTSTATUS, RCVINT);
			work_flags |= WORK_RXINTR;
			continue;
		}
#endif
		mutex_spin_enter(sc->sc_hwlock);
		sc->sc_intmask |= intmask;
		bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
		mutex_spin_exit(sc->sc_hwlock);
	}

	mutex_exit(sc->sc_lock);
}