bcm53xx_eth.c revision 1.19

1 /*-
2 * Copyright (c) 2012 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas of 3am Software Foundry.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #define _ARM32_BUS_DMA_PRIVATE
31 #define GMAC_PRIVATE
32
33 #include "locators.h"
34 #include "opt_broadcom.h"
35
36 #include <sys/cdefs.h>
37
38 __KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.19 2012/12/19 02:44:39 matt Exp $");
39
40 #include <sys/param.h>
41 #include <sys/atomic.h>
42 #include <sys/bus.h>
43 #include <sys/device.h>
44 #include <sys/ioctl.h>
45 #include <sys/intr.h>
46 #include <sys/kmem.h>
47 #include <sys/mutex.h>
48 #include <sys/socket.h>
49 #include <sys/systm.h>
50 #include <sys/workqueue.h>
51
52 #include <net/if.h>
53 #include <net/if_ether.h>
54 #include <net/if_media.h>
55
56 #include <net/if_dl.h>
57
58 #include <net/bpf.h>
59
60 #include <dev/mii/miivar.h>
61
62 #include <arm/broadcom/bcm53xx_reg.h>
63 #include <arm/broadcom/bcm53xx_var.h>
64
65 //#define BCMETH_MPSAFE
66
67 #ifdef BCMETH_COUNTERS
68 #define BCMETH_EVCNT_ADD(a,b) ((void)((a).ev_count += (b)))
69 #else
70 #define BCMETH_EVCNT_ADD(a,b) do { } while (/*CONSTCOND*/0)
71 #endif
72 #define BCMETH_EVCNT_INCR(a) BCMETH_EVCNT_ADD((a), 1)
73
74 #define BCMETH_MAXTXMBUFS 128
75 #define BCMETH_NTXSEGS 30
76 #define BCMETH_MAXRXMBUFS 255
77 #define BCMETH_MINRXMBUFS 64
78 #define BCMETH_NRXSEGS 1
79 #define BCMETH_RINGSIZE PAGE_SIZE
80
81 #if 1
82 #define BCMETH_RCVMAGIC 0xfeedface
83 #endif
84
85 static int bcmeth_ccb_match(device_t, cfdata_t, void *);
86 static void bcmeth_ccb_attach(device_t, device_t, void *);
87
88 struct bcmeth_txqueue {
89 bus_dmamap_t txq_descmap;
90 struct gmac_txdb *txq_consumer;
91 struct gmac_txdb *txq_producer;
92 struct gmac_txdb *txq_first;
93 struct gmac_txdb *txq_last;
94 struct ifqueue txq_mbufs;
95 struct mbuf *txq_next;
96 size_t txq_free;
97 size_t txq_threshold;
98 size_t txq_lastintr;
99 bus_size_t txq_reg_xmtaddrlo;
100 bus_size_t txq_reg_xmtptr;
101 bus_size_t txq_reg_xmtctl;
102 bus_size_t txq_reg_xmtsts0;
103 bus_size_t txq_reg_xmtsts1;
104 bus_dma_segment_t txq_descmap_seg;
105 };
106
107 struct bcmeth_rxqueue {
108 bus_dmamap_t rxq_descmap;
109 struct gmac_rxdb *rxq_consumer;
110 struct gmac_rxdb *rxq_producer;
111 struct gmac_rxdb *rxq_first;
112 struct gmac_rxdb *rxq_last;
113 struct mbuf *rxq_mhead;
114 struct mbuf **rxq_mtail;
115 struct mbuf *rxq_mconsumer;
116 size_t rxq_inuse;
117 size_t rxq_threshold;
118 bus_size_t rxq_reg_rcvaddrlo;
119 bus_size_t rxq_reg_rcvptr;
120 bus_size_t rxq_reg_rcvctl;
121 bus_size_t rxq_reg_rcvsts0;
122 bus_size_t rxq_reg_rcvsts1;
123 bus_dma_segment_t rxq_descmap_seg;
124 };
125
126 struct bcmeth_mapcache {
127 u_int dmc_nmaps;
128 u_int dmc_maxseg;
129 u_int dmc_maxmaps;
130 u_int dmc_maxmapsize;
131 bus_dmamap_t dmc_maps[0];
132 };
133
134 struct bcmeth_softc {
135 device_t sc_dev;
136 bus_space_tag_t sc_bst;
137 bus_space_handle_t sc_bsh;
138 bus_dma_tag_t sc_dmat;
139 kmutex_t *sc_lock;
140 kmutex_t *sc_hwlock;
141 struct ethercom sc_ec;
142 #define sc_if sc_ec.ec_if
143 struct ifmedia sc_media;
144 void *sc_soft_ih;
145 void *sc_ih;
146
147 struct bcmeth_rxqueue sc_rxq;
148 struct bcmeth_txqueue sc_txq;
149
150 size_t sc_rcvoffset;
151 uint32_t sc_maxfrm;
152 uint32_t sc_cmdcfg;
153 uint32_t sc_intmask;
154 uint32_t sc_rcvlazy;
155 volatile uint32_t sc_soft_flags;
156 #define SOFT_RXINTR 0x01
157 #define SOFT_TXINTR 0x02
158
159 #ifdef BCMETH_COUNTERS
160 struct evcnt sc_ev_intr;
161 struct evcnt sc_ev_soft_intr;
162 struct evcnt sc_ev_work;
163 struct evcnt sc_ev_tx_stall;
164 struct evcnt sc_ev_rx_badmagic_lo;
165 struct evcnt sc_ev_rx_badmagic_hi;
166 #endif
167
168 struct ifqueue sc_rx_bufcache;
169 struct bcmeth_mapcache *sc_rx_mapcache;
170 struct bcmeth_mapcache *sc_tx_mapcache;
171
172 struct workqueue *sc_workq;
173 struct work sc_work;
174
175 volatile uint32_t sc_work_flags;
176 #define WORK_RXINTR 0x01
177 #define WORK_RXUNDERFLOW 0x02
178 #define WORK_REINIT 0x04
179
180 uint8_t sc_enaddr[ETHER_ADDR_LEN];
181 };
182
183 static void bcmeth_ifstart(struct ifnet *);
184 static void bcmeth_ifwatchdog(struct ifnet *);
185 static int bcmeth_ifinit(struct ifnet *);
186 static void bcmeth_ifstop(struct ifnet *, int);
187 static int bcmeth_ifioctl(struct ifnet *, u_long, void *);
188
189 static int bcmeth_mapcache_create(struct bcmeth_softc *,
190 struct bcmeth_mapcache **, size_t, size_t, size_t);
191 static void bcmeth_mapcache_destroy(struct bcmeth_softc *,
192 struct bcmeth_mapcache *);
193 static bus_dmamap_t bcmeth_mapcache_get(struct bcmeth_softc *,
194 struct bcmeth_mapcache *);
195 static void bcmeth_mapcache_put(struct bcmeth_softc *,
196 struct bcmeth_mapcache *, bus_dmamap_t);
197
198 static int bcmeth_txq_attach(struct bcmeth_softc *,
199 struct bcmeth_txqueue *, u_int);
200 static void bcmeth_txq_purge(struct bcmeth_softc *,
201 struct bcmeth_txqueue *);
202 static void bcmeth_txq_reset(struct bcmeth_softc *,
203 struct bcmeth_txqueue *);
204 static bool bcmeth_txq_consume(struct bcmeth_softc *,
205 struct bcmeth_txqueue *);
206 static bool bcmeth_txq_produce(struct bcmeth_softc *,
207 struct bcmeth_txqueue *, struct mbuf *m);
208 static bool bcmeth_txq_active_p(struct bcmeth_softc *,
209 struct bcmeth_txqueue *);
210
211 static int bcmeth_rxq_attach(struct bcmeth_softc *,
212 struct bcmeth_rxqueue *, u_int);
213 static bool bcmeth_rxq_produce(struct bcmeth_softc *,
214 struct bcmeth_rxqueue *);
215 static void bcmeth_rxq_purge(struct bcmeth_softc *,
216 struct bcmeth_rxqueue *, bool);
217 static void bcmeth_rxq_reset(struct bcmeth_softc *,
218 struct bcmeth_rxqueue *);
219
220 static int bcmeth_intr(void *);
221 #ifdef BCMETH_MPSAFETX
222 static void bcmeth_soft_txintr(struct bcmeth_softc *);
223 #endif
224 static void bcmeth_soft_intr(void *);
225 static void bcmeth_worker(struct work *, void *);
226
227 static int bcmeth_mediachange(struct ifnet *);
228 static void bcmeth_mediastatus(struct ifnet *, struct ifmediareq *);
229
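/*
 * Register accessors: all GMAC/UNIMAC registers are accessed as 32-bit
 * reads and writes through the bus space handle mapped at attach time.
 */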
230 static inline uint32_t
231 bcmeth_read_4(struct bcmeth_softc *sc, bus_size_t o)
232 {
233 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, o);
234 }
235
236 static inline void
237 bcmeth_write_4(struct bcmeth_softc *sc, bus_size_t o, uint32_t v)
238 {
239 bus_space_write_4(sc->sc_bst, sc->sc_bsh, o, v);
240 }
241
242 CFATTACH_DECL_NEW(bcmeth_ccb, sizeof(struct bcmeth_softc),
243 bcmeth_ccb_match, bcmeth_ccb_attach, NULL, NULL);
244
245 static int
246 bcmeth_ccb_match(device_t parent, cfdata_t cf, void *aux)
247 {
248 struct bcmccb_attach_args * const ccbaa = aux;
249 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;
250
251 if (strcmp(cf->cf_name, loc->loc_name))
252 return 0;
253
254 #ifdef DIAGNOSTIC
255 const int port = cf->cf_loc[BCMCCBCF_PORT];
256 #endif
257 KASSERT(port == BCMCCBCF_PORT_DEFAULT || port == loc->loc_port);
258
259 return 1;
260 }
261
262 static void
263 bcmeth_ccb_attach(device_t parent, device_t self, void *aux)
264 {
265 struct bcmeth_softc * const sc = device_private(self);
266 struct ethercom * const ec = &sc->sc_ec;
267 struct ifnet * const ifp = &ec->ec_if;
268 struct bcmccb_attach_args * const ccbaa = aux;
269 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;
270 const char * const xname = device_xname(self);
271 prop_dictionary_t dict = device_properties(self);
272 int error;
273
274 sc->sc_bst = ccbaa->ccbaa_ccb_bst;
275 sc->sc_dmat = ccbaa->ccbaa_dmat;
276 bus_space_subregion(sc->sc_bst, ccbaa->ccbaa_ccb_bsh,
277 loc->loc_offset, loc->loc_size, &sc->sc_bsh);
278
279 /*
280 * We need to use the coherent dma tag for the GMAC.
281 */
282 sc->sc_dmat = &bcm53xx_coherent_dma_tag;
283
284 prop_data_t eaprop = prop_dictionary_get(dict, "mac-address");
285 if (eaprop == NULL) {
286 uint32_t mac0 = bcmeth_read_4(sc, UNIMAC_MAC_0);
287 uint32_t mac1 = bcmeth_read_4(sc, UNIMAC_MAC_1);
288 if ((mac0 == 0 && mac1 == 0) || (mac1 & 1)) {
289 aprint_error(": mac-address property is missing\n");
290 return;
291 }
292 sc->sc_enaddr[0] = (mac0 >> 24) & 0xff;
293 sc->sc_enaddr[1] = (mac0 >> 16) & 0xff;
294 sc->sc_enaddr[2] = (mac0 >> 8) & 0xff;
295 sc->sc_enaddr[3] = (mac0 >> 0) & 0xff;
296 sc->sc_enaddr[4] = (mac1 >> 8) & 0xff;
297 sc->sc_enaddr[5] = (mac1 >> 0) & 0xff;
298 } else {
299 KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
300 KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
301 memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop),
302 ETHER_ADDR_LEN);
303 }
304 sc->sc_dev = self;
305 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
306 sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);
307
308 bcmeth_write_4(sc, GMAC_INTMASK, 0); // disable interrupts
309
310 aprint_naive("\n");
311 aprint_normal(": Gigabit Ethernet Controller\n");
312
313 error = bcmeth_rxq_attach(sc, &sc->sc_rxq, 0);
314 if (error) {
315 aprint_error(": failed to init rxq: %d\n", error);
316 return;
317 }
318
319 error = bcmeth_txq_attach(sc, &sc->sc_txq, 0);
320 if (error) {
321 aprint_error(": failed to init txq: %d\n", error);
322 return;
323 }
324
325 error = bcmeth_mapcache_create(sc, &sc->sc_rx_mapcache,
326 BCMETH_MAXRXMBUFS, MCLBYTES, BCMETH_NRXSEGS);
327 if (error) {
328 aprint_error(": failed to allocate rx dmamaps: %d\n", error);
329 return;
330 }
331
332 error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
333 BCMETH_MAXTXMBUFS, MCLBYTES, BCMETH_NTXSEGS);
334 if (error) {
335 aprint_error(": failed to allocate tx dmamaps: %d\n", error);
336 return;
337 }
338
339 error = workqueue_create(&sc->sc_workq, xname, bcmeth_worker, sc,
340 (PRI_USER + MAXPRI_USER) / 2, IPL_NET, WQ_MPSAFE|WQ_PERCPU);
341 if (error) {
342 aprint_error(": failed to create workqueue: %d\n", error);
343 return;
344 }
345
346 sc->sc_soft_ih = softint_establish(SOFTINT_MPSAFE | SOFTINT_NET,
347 bcmeth_soft_intr, sc);
348
349 sc->sc_ih = intr_establish(loc->loc_intrs[0], IPL_VM, IST_LEVEL,
350 bcmeth_intr, sc);
351
352 if (sc->sc_ih == NULL) {
353 aprint_error_dev(self, "failed to establish interrupt %d\n",
354 loc->loc_intrs[0]);
355 } else {
356 aprint_normal_dev(self, "interrupting on irq %d\n",
357 loc->loc_intrs[0]);
358 }
359
360 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
361 ether_sprintf(sc->sc_enaddr));
362
363 /*
364 * Since each port is plugged into the switch/flow-accelerator,
365 * we hard-code GigE Full-Duplex with Flow Control enabled.
366 */
367 int ifmedia = IFM_ETHER|IFM_1000_T|IFM_FDX;
368 //ifmedia |= IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE;
369 ifmedia_init(&sc->sc_media, IFM_IMASK, bcmeth_mediachange,
370 bcmeth_mediastatus);
371 ifmedia_add(&sc->sc_media, ifmedia, 0, NULL);
372 ifmedia_set(&sc->sc_media, ifmedia);
373
374 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
375
376 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
377 ifp->if_softc = sc;
378 ifp->if_baudrate = IF_Mbps(1000);
379 ifp->if_capabilities = 0;
380 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
381 #ifdef BCMETH_MPSAFE
382 ifp->if_flags2 = IFF2_MPSAFE;
383 #endif
384 ifp->if_ioctl = bcmeth_ifioctl;
385 ifp->if_start = bcmeth_ifstart;
386 ifp->if_watchdog = bcmeth_ifwatchdog;
387 ifp->if_init = bcmeth_ifinit;
388 ifp->if_stop = bcmeth_ifstop;
389 IFQ_SET_READY(&ifp->if_snd);
390
391 bcmeth_ifstop(ifp, true);
392
393 /*
394 * Attach the interface.
395 */
396 if_attach(ifp);
397 ether_ifattach(ifp, sc->sc_enaddr);
398
399 #ifdef BCMETH_COUNTERS
400 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
401 NULL, xname, "intr");
402 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
403 NULL, xname, "soft intr");
404 evcnt_attach_dynamic(&sc->sc_ev_work, EVCNT_TYPE_MISC,
405 NULL, xname, "work items");
406 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
407 NULL, xname, "tx stalls");
408 evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_lo, EVCNT_TYPE_MISC,
409 NULL, xname, "rx badmagic lo");
410 evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_hi, EVCNT_TYPE_MISC,
411 NULL, xname, "rx badmagic hi");
412 #endif
413 }
414
415 static int
416 bcmeth_mediachange(struct ifnet *ifp)
417 {
418 //struct bcmeth_softc * const sc = ifp->if_softc;
419 return 0;
420 }
421
422 static void
423 bcmeth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifm)
424 {
425 //struct bcmeth_softc * const sc = ifp->if_softc;
426
427 ifm->ifm_status = IFM_AVALID | IFM_ACTIVE;
428 ifm->ifm_active = IFM_ETHER | IFM_FDX | IFM_1000_T;
429 }
430
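/*
 * Pack the 6-byte Ethernet address into a 48-bit value laid out the way
 * the UniMAC expects it: the low 32 bits are written to UNIMAC_MAC_0 and
 * the upper 16 bits to UNIMAC_MAC_1 (see bcmeth_ifinit below).
 */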
431 static uint64_t
432 bcmeth_macaddr_create(const uint8_t *enaddr)
433 {
434 return (enaddr[3] << 0) // UNIMAC_MAC_0
435 | (enaddr[2] << 8) // UNIMAC_MAC_0
436 | (enaddr[1] << 16) // UNIMAC_MAC_0
437 | ((uint64_t)enaddr[0] << 24) // UNIMAC_MAC_0
438 | ((uint64_t)enaddr[5] << 32) // UNIMAC_MAC_1
439 | ((uint64_t)enaddr[4] << 40); // UNIMAC_MAC_1
440 }
441
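/*
 * Bring the interface up: stop the hardware, compute the receive offset,
 * (re)create the transmit map cache for the current frame size, program
 * the UniMAC and DMA engines, reset both descriptor rings, enable the
 * queues, prime the receive ring, and finally unmask interrupts.
 */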
442 static int
443 bcmeth_ifinit(struct ifnet *ifp)
444 {
445 struct bcmeth_softc * const sc = ifp->if_softc;
446 int error = 0;
447
448 sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
449 if (ifp->if_mtu > ETHERMTU_JUMBO)
450 return error;
451
452 KASSERT(ifp->if_flags & IFF_UP);
453
454 /*
455 * Stop the interface
456 */
457 bcmeth_ifstop(ifp, 0);
458
459 /*
460 * Reserve enough space at the front so that we can insert a maxsized
461 * link header and a VLAN tag. Also make sure we have enough room for
462 * the rcvsts field.
463 */
464 KASSERT(ALIGN(max_linkhdr) == max_linkhdr);
465 KASSERTMSG(max_linkhdr > sizeof(struct ether_header), "%u > %zu",
466 max_linkhdr, sizeof(struct ether_header));
467 sc->sc_rcvoffset = max_linkhdr + 4 - sizeof(struct ether_header);
468 if (sc->sc_rcvoffset <= 4)
469 sc->sc_rcvoffset += 4;
470 KASSERT((sc->sc_rcvoffset & 3) == 2);
471 KASSERT(sc->sc_rcvoffset <= __SHIFTOUT(RCVCTL_RCVOFFSET, RCVCTL_RCVOFFSET));
472 KASSERT(sc->sc_rcvoffset >= 6);
473
474 /*
475 * If our frame size has changed (or it's our first time through)
476 * destroy the existing transmit mapcache.
477 */
478 if (sc->sc_tx_mapcache != NULL
479 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
480 bcmeth_mapcache_destroy(sc, sc->sc_tx_mapcache);
481 sc->sc_tx_mapcache = NULL;
482 }
483
484 if (sc->sc_tx_mapcache == NULL) {
485 error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
486 BCMETH_MAXTXMBUFS, sc->sc_maxfrm, BCMETH_NTXSEGS);
487 if (error)
488 return error;
489 }
490
491 sc->sc_cmdcfg = NO_LENGTH_CHECK | PAUSE_IGNORE
492 | __SHIFTIN(ETH_SPEED_1000, ETH_SPEED)
493 | RX_ENA | TX_ENA;
494
495 if (ifp->if_flags & IFF_PROMISC) {
496 sc->sc_cmdcfg |= PROMISC_EN;
497 } else {
498 sc->sc_cmdcfg &= ~PROMISC_EN;
499 }
500
501 const uint64_t macstnaddr =
502 bcmeth_macaddr_create(CLLADDR(ifp->if_sadl));
503
504 sc->sc_intmask = DESCPROTOERR|DATAERR|DESCERR;
505
506 /* 5. Load RCVADDR_LO with new pointer */
507 bcmeth_rxq_reset(sc, &sc->sc_rxq);
508
509 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
510 __SHIFTIN(sc->sc_rcvoffset, RCVCTL_RCVOFFSET)
511 | RCVCTL_PARITY_DIS
512 | RCVCTL_OFLOW_CONTINUE
513 | __SHIFTIN(3, RCVCTL_BURSTLEN));
514
515 /* 6. Load XMTADDR_LO with new pointer */
516 bcmeth_txq_reset(sc, &sc->sc_txq);
517
518 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, XMTCTL_DMA_ACT_INDEX
519 | XMTCTL_PARITY_DIS
520 | __SHIFTIN(3, XMTCTL_BURSTLEN));
521
522 /* 7. Setup other UNIMAC registers */
523 bcmeth_write_4(sc, UNIMAC_FRAME_LEN, sc->sc_maxfrm);
524 bcmeth_write_4(sc, UNIMAC_MAC_0, (uint32_t)(macstnaddr >> 0));
525 bcmeth_write_4(sc, UNIMAC_MAC_1, (uint32_t)(macstnaddr >> 32));
526 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, sc->sc_cmdcfg);
527
528 uint32_t devctl = bcmeth_read_4(sc, GMAC_DEVCONTROL);
529 devctl |= RGMII_LINK_STATUS_SEL | NWAY_AUTO_POLL_EN | TXARB_STRICT_MODE;
530 devctl &= ~FLOW_CTRL_MODE;
531 devctl &= ~MIB_RD_RESET_EN;
532 devctl &= ~RXQ_OVERFLOW_CTRL_SEL;
533 devctl &= ~CPU_FLOW_CTRL_ON;
534 bcmeth_write_4(sc, GMAC_DEVCONTROL, devctl);
535
536 /* Setup lazy receive (at most 1ms). */
537 sc->sc_rcvlazy = __SHIFTIN(4, INTRCVLAZY_FRAMECOUNT)
538 | __SHIFTIN(125000000 / 1000, INTRCVLAZY_TIMEOUT);
539 bcmeth_write_4(sc, GMAC_INTRCVLAZY, sc->sc_rcvlazy);
540
541 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */
542 sc->sc_intmask |= XMTINT_0|XMTUF;
543 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl,
544 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl) | XMTCTL_ENABLE);
545
546
547 /* 12. Enable receive queues in RQUEUE, */
548 sc->sc_intmask |= RCVINT|RCVDESCUF|RCVFIFOOF;
549 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
550 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl) | RCVCTL_ENABLE);
551
552 bcmeth_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */
553
554 #if 0
555 aprint_normal_dev(sc->sc_dev,
556 "devctl=%#x ucmdcfg=%#x xmtctl=%#x rcvctl=%#x\n",
557 devctl, sc->sc_cmdcfg,
558 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl),
559 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl));
560 #endif
561
562 sc->sc_soft_flags = 0;
563
564 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
565
566 ifp->if_flags |= IFF_RUNNING;
567
568 return error;
569 }
570
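/*
 * Stop the interface: disable the receive and transmit DMA engines and
 * all interrupts, wait for both state machines to report disabled,
 * soft-reset the UniMAC, then reclaim transmitted packets and either
 * purge or recycle the receive buffers depending on `disable'.
 */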
571 static void
572 bcmeth_ifstop(struct ifnet *ifp, int disable)
573 {
574 struct bcmeth_softc * const sc = ifp->if_softc;
575 struct bcmeth_txqueue * const txq = &sc->sc_txq;
576 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
577
578 KASSERT(!cpu_intr_p());
579
580 sc->sc_soft_flags = 0;
581 sc->sc_work_flags = 0;
582
583 /* Disable Rx processing */
584 bcmeth_write_4(sc, rxq->rxq_reg_rcvctl,
585 bcmeth_read_4(sc, rxq->rxq_reg_rcvctl) & ~RCVCTL_ENABLE);
586
587 /* Disable Tx processing */
588 bcmeth_write_4(sc, txq->txq_reg_xmtctl,
589 bcmeth_read_4(sc, txq->txq_reg_xmtctl) & ~XMTCTL_ENABLE);
590
591 /* Disable all interrupts */
592 bcmeth_write_4(sc, GMAC_INTMASK, 0);
593
594 for (;;) {
595 uint32_t tx0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
596 uint32_t rx0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
597 if (__SHIFTOUT(tx0, XMTSTATE) == XMTSTATE_DIS
598 && __SHIFTOUT(rx0, RCVSTATE) == RCVSTATE_DIS)
599 break;
600 delay(50);
601 }
602 /*
603 * Now reset the controller.
604 *
605 * 3. Set SW_RESET bit in UNIMAC_COMMAND_CONFIG register
606 * 4. Clear SW_RESET bit in UNIMAC_COMMAND_CONFIG register
607 */
608 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, SW_RESET);
609 bcmeth_write_4(sc, GMAC_INTSTATUS, ~0);
610 sc->sc_intmask = 0;
611 ifp->if_flags &= ~IFF_RUNNING;
612
613 /*
614 * Let's consume any remaining transmitted packets. And if we are
615 * disabling the interface, purge ourselves of any untransmitted
616 * packets. But don't consume any received packets, just drop them.
617 * If we aren't disabling the interface, save the mbufs in the
618 * receive queue for reuse.
619 */
620 bcmeth_rxq_purge(sc, &sc->sc_rxq, disable);
621 bcmeth_txq_consume(sc, &sc->sc_txq);
622 if (disable) {
623 bcmeth_txq_purge(sc, &sc->sc_txq);
624 IF_PURGE(&ifp->if_snd);
625 }
626
627 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, 0);
628 }
629
630 static void
631 bcmeth_ifwatchdog(struct ifnet *ifp)
632 {
633 }
634
635 static int
636 bcmeth_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
637 {
638 struct bcmeth_softc *sc = ifp->if_softc;
639 struct ifreq * const ifr = data;
640 const int s = splnet();
641 int error;
642
643 switch (cmd) {
644 case SIOCSIFMEDIA:
645 case SIOCGIFMEDIA:
646 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
647 break;
648
649 default:
650 error = ether_ioctl(ifp, cmd, data);
651 if (error != ENETRESET)
652 break;
653
654 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
655 error = 0;
656 break;
657 }
658 error = bcmeth_ifinit(ifp);
659 break;
660 }
661
662 splx(s);
663 return error;
664 }
665
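/*
 * Descriptor ring sync helpers.  Each one syncs the `count' descriptors
 * starting at the given descriptor within the ring's DMA map.
 */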
666 static void
667 bcmeth_rxq_desc_presync(
668 struct bcmeth_softc *sc,
669 struct bcmeth_rxqueue *rxq,
670 struct gmac_rxdb *rxdb,
671 size_t count)
672 {
673 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
674 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
675 BUS_DMASYNC_PREWRITE);
676 }
677
678 static void
679 bcmeth_rxq_desc_postsync(
680 struct bcmeth_softc *sc,
681 struct bcmeth_rxqueue *rxq,
682 struct gmac_rxdb *rxdb,
683 size_t count)
684 {
685 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
686 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
687 BUS_DMASYNC_POSTWRITE);
688 }
689
690 static void
691 bcmeth_txq_desc_presync(
692 struct bcmeth_softc *sc,
693 struct bcmeth_txqueue *txq,
694 struct gmac_txdb *txdb,
695 size_t count)
696 {
697 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
698 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
699 BUS_DMASYNC_PREWRITE);
700 }
701
702 static void
703 bcmeth_txq_desc_postsync(
704 struct bcmeth_softc *sc,
705 struct bcmeth_txqueue *txq,
706 struct gmac_txdb *txdb,
707 size_t count)
708 {
709 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
710 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
711 BUS_DMASYNC_POSTWRITE);
712 }
713
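/*
 * The map cache is a simple LIFO stack of preallocated bus_dma maps so
 * the rx/tx paths never have to create or destroy maps at run time.
 */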
714 static bus_dmamap_t
715 bcmeth_mapcache_get(
716 struct bcmeth_softc *sc,
717 struct bcmeth_mapcache *dmc)
718 {
719 KASSERT(dmc->dmc_nmaps > 0);
720 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
721 return dmc->dmc_maps[--dmc->dmc_nmaps];
722 }
723
724 static void
725 bcmeth_mapcache_put(
726 struct bcmeth_softc *sc,
727 struct bcmeth_mapcache *dmc,
728 bus_dmamap_t map)
729 {
730 KASSERT(map != NULL);
731 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
732 dmc->dmc_maps[dmc->dmc_nmaps++] = map;
733 }
734
735 static void
736 bcmeth_mapcache_destroy(
737 struct bcmeth_softc *sc,
738 struct bcmeth_mapcache *dmc)
739 {
740 const size_t dmc_size =
741 offsetof(struct bcmeth_mapcache, dmc_maps[dmc->dmc_maxmaps]);
742
743 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
744 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
745 }
746 kmem_intr_free(dmc, dmc_size);
747 }
748
749 static int
750 bcmeth_mapcache_create(
751 struct bcmeth_softc *sc,
752 struct bcmeth_mapcache **dmc_p,
753 size_t maxmaps,
754 size_t maxmapsize,
755 size_t maxseg)
756 {
757 const size_t dmc_size =
758 offsetof(struct bcmeth_mapcache, dmc_maps[maxmaps]);
759 struct bcmeth_mapcache * const dmc =
760 kmem_intr_zalloc(dmc_size, KM_NOSLEEP);
761
762 dmc->dmc_maxmaps = maxmaps;
763 dmc->dmc_nmaps = maxmaps;
764 dmc->dmc_maxmapsize = maxmapsize;
765 dmc->dmc_maxseg = maxseg;
766
767 for (u_int i = 0; i < maxmaps; i++) {
768 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
769 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
770 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
771 if (error) {
772 aprint_error_dev(sc->sc_dev,
773 "failed to create dma map cache "
774 "entry %u of %zu: %d\n",
775 i, maxmaps, error);
776 while (i-- > 0) {
777 bus_dmamap_destroy(sc->sc_dmat,
778 dmc->dmc_maps[i]);
779 }
780 kmem_intr_free(dmc, dmc_size);
781 return error;
782 }
783 KASSERT(dmc->dmc_maps[i] != NULL);
784 }
785
786 *dmc_p = dmc;
787
788 return 0;
789 }
790
791 #if 0
792 static void
793 bcmeth_dmamem_free(
794 bus_dma_tag_t dmat,
795 size_t map_size,
796 bus_dma_segment_t *seg,
797 bus_dmamap_t map,
798 void *kvap)
799 {
800 bus_dmamap_destroy(dmat, map);
801 bus_dmamem_unmap(dmat, kvap, map_size);
802 bus_dmamem_free(dmat, seg, 1);
803 }
804 #endif
805
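/*
 * Allocate, map, and load a single physically contiguous chunk of DMA
 * memory; used for the transmit and receive descriptor rings.
 */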
806 static int
807 bcmeth_dmamem_alloc(
808 bus_dma_tag_t dmat,
809 size_t map_size,
810 bus_dma_segment_t *seg,
811 bus_dmamap_t *map,
812 void **kvap)
813 {
814 int error;
815 int nseg;
816
817 *kvap = NULL;
818 *map = NULL;
819
820 error = bus_dmamem_alloc(dmat, map_size, 2*PAGE_SIZE, 0,
821 seg, 1, &nseg, 0);
822 if (error)
823 return error;
824
825 KASSERT(nseg == 1);
826
827 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 0);
828 if (error == 0) {
829 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
830 map);
831 if (error == 0) {
832 error = bus_dmamap_load(dmat, *map, *kvap, map_size,
833 NULL, 0);
834 if (error == 0)
835 return 0;
836 bus_dmamap_destroy(dmat, *map);
837 *map = NULL;
838 }
839 bus_dmamem_unmap(dmat, *kvap, map_size);
840 *kvap = NULL;
841 }
842 bus_dmamem_free(dmat, seg, nseg);
843 return error;
844 }
845
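/*
 * Allocate a receive buffer: an mbuf cluster with a dmamap taken from
 * the rx map cache, loaded and pre-synced for reading.  Returns NULL on
 * any failure.
 */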
846 static struct mbuf *
847 bcmeth_rx_buf_alloc(
848 struct bcmeth_softc *sc)
849 {
850 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
851 if (m == NULL) {
852 printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
853 return NULL;
854 }
855 MCLGET(m, M_DONTWAIT);
856 if ((m->m_flags & M_EXT) == 0) {
857 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
858 m_freem(m);
859 return NULL;
860 }
861 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
862
863 bus_dmamap_t map = bcmeth_mapcache_get(sc, sc->sc_rx_mapcache);
864 if (map == NULL) {
865 printf("%s:%d: %s\n", __func__, __LINE__, "map get");
866 m_freem(m);
867 return NULL;
868 }
869 M_SETCTX(m, map);
870 m->m_len = m->m_pkthdr.len = MCLBYTES;
871 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
872 BUS_DMA_READ|BUS_DMA_NOWAIT);
873 if (error) {
874 aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n",
875 error);
876 M_SETCTX(m, NULL);
877 m_freem(m);
878 bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
879 return NULL;
880 }
881 KASSERT(((map->_dm_flags ^ sc->sc_dmat->_ranges[0].dr_flags) & _BUS_DMAMAP_COHERENT) == 0);
882 KASSERT(map->dm_mapsize == MCLBYTES);
883 #ifdef BCMETH_RCVMAGIC
884 *mtod(m, uint32_t *) = BCMETH_RCVMAGIC;
885 bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
886 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
887 bus_dmamap_sync(sc->sc_dmat, map, sizeof(uint32_t),
888 map->dm_mapsize - sizeof(uint32_t), BUS_DMASYNC_PREREAD);
889 #else
890 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
891 BUS_DMASYNC_PREREAD);
892 #endif
893
894 return m;
895 }
896
897 static void
898 bcmeth_rx_map_unload(
899 struct bcmeth_softc *sc,
900 struct mbuf *m)
901 {
902 KASSERT(m);
903 for (; m != NULL; m = m->m_next) {
904 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
905 KASSERT(map);
906 KASSERT(map->dm_mapsize == MCLBYTES);
907 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
908 BUS_DMASYNC_POSTREAD);
909 bus_dmamap_unload(sc->sc_dmat, map);
910 bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
911 M_SETCTX(m, NULL);
912 }
913 }
914
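/*
 * Refill the receive ring up to its threshold, reusing buffers from the
 * rx bufcache when possible, then sync the new descriptors and advance
 * the hardware receive pointer.
 */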
915 static bool
916 bcmeth_rxq_produce(
917 struct bcmeth_softc *sc,
918 struct bcmeth_rxqueue *rxq)
919 {
920 struct gmac_rxdb *producer = rxq->rxq_producer;
921 bool produced = false;
922
923 while (rxq->rxq_inuse < rxq->rxq_threshold) {
924 struct mbuf *m;
925 IF_DEQUEUE(&sc->sc_rx_bufcache, m);
926 if (m == NULL) {
927 m = bcmeth_rx_buf_alloc(sc);
928 if (m == NULL) {
929 printf("%s: bcmeth_rx_buf_alloc failed\n", __func__);
930 break;
931 }
932 }
933 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
934 KASSERT(map);
935
936 producer->rxdb_buflen = MCLBYTES;
937 producer->rxdb_addrlo = map->dm_segs[0].ds_addr;
938 producer->rxdb_flags &= RXDB_FLAG_ET;
939 *rxq->rxq_mtail = m;
940 rxq->rxq_mtail = &m->m_next;
941 m->m_len = MCLBYTES;
942 m->m_next = NULL;
943 rxq->rxq_inuse++;
944 if (++producer == rxq->rxq_last) {
945 membar_producer();
946 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
947 rxq->rxq_last - rxq->rxq_producer);
948 producer = rxq->rxq_producer = rxq->rxq_first;
949 }
950 produced = true;
951 }
952 if (produced) {
953 membar_producer();
954 if (producer != rxq->rxq_producer) {
955 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
956 producer - rxq->rxq_producer);
957 rxq->rxq_producer = producer;
958 }
959 bcmeth_write_4(sc, rxq->rxq_reg_rcvptr,
960 rxq->rxq_descmap->dm_segs[0].ds_addr
961 + ((uintptr_t)producer & RCVPTR));
962 }
963 return true;
964 }
965
966 static void
967 bcmeth_rx_input(
968 struct bcmeth_softc *sc,
969 struct mbuf *m,
970 uint32_t rxdb_flags)
971 {
972 struct ifnet * const ifp = &sc->sc_if;
973
974 bcmeth_rx_map_unload(sc, m);
975
976 m_adj(m, sc->sc_rcvoffset);
977
978 switch (__SHIFTOUT(rxdb_flags, RXSTS_PKTTYPE)) {
979 case RXSTS_PKTTYPE_UC:
980 break;
981 case RXSTS_PKTTYPE_MC:
982 m->m_flags |= M_MCAST;
983 break;
984 case RXSTS_PKTTYPE_BC:
985 m->m_flags |= M_BCAST|M_MCAST;
986 break;
987 default:
988 if (sc->sc_cmdcfg & PROMISC_EN)
989 m->m_flags |= M_PROMISC;
990 break;
991 }
992 m->m_pkthdr.rcvif = ifp;
993
994 ifp->if_ipackets++;
995 ifp->if_ibytes += m->m_pkthdr.len;
996
997 /*
998 * Let's give it to the network subsystem to deal with.
999 */
1000 #ifdef BCMETH_MPSAFE
1001 mutex_exit(sc->sc_lock);
1002 (*ifp->if_input)(ifp, m);
1003 mutex_enter(sc->sc_lock);
1004 #else
1005 int s = splnet();
1006 bpf_mtap(ifp, m);
1007 (*ifp->if_input)(ifp, m);
1008 splx(s);
1009 #endif
1010 }
1011
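/*
 * Walk the receive ring up to the descriptor the hardware is currently
 * working on, reassemble multi-descriptor frames, recycle buffers from
 * errored frames, and pass good frames to bcmeth_rx_input().
 */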
1012 static void
1013 bcmeth_rxq_consume(
1014 struct bcmeth_softc *sc,
1015 struct bcmeth_rxqueue *rxq)
1016 {
1017 struct ifnet * const ifp = &sc->sc_if;
1018 struct gmac_rxdb *consumer = rxq->rxq_consumer;
1019 size_t rxconsumed = 0;
1020
1021 for (;;) {
1022 if (consumer == rxq->rxq_producer) {
1023 rxq->rxq_consumer = consumer;
1024 rxq->rxq_inuse -= rxconsumed;
1025 KASSERT(rxq->rxq_inuse == 0);
1026 return;
1027 }
1028
1029 uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
1030 uint32_t currdscr = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
1031 if (consumer == rxq->rxq_first + currdscr) {
1032 rxq->rxq_consumer = consumer;
1033 rxq->rxq_inuse -= rxconsumed;
1034 return;
1035 }
1036 bcmeth_rxq_desc_postsync(sc, rxq, consumer, 1);
1037
1038 /*
1039 * We own this packet again. Copy the rxsts word from it.
1040 */
1041 rxconsumed++;
1042 uint32_t rxsts;
1043 KASSERT(rxq->rxq_mhead != NULL);
1044 bus_dmamap_t map = M_GETCTX(rxq->rxq_mhead, bus_dmamap_t);
1045 bus_dmamap_sync(sc->sc_dmat, map, 0, arm_dcache_align,
1046 BUS_DMASYNC_POSTREAD);
1047 memcpy(&rxsts, rxq->rxq_mhead->m_data, 4);
1048 #if 0
1049 KASSERTMSG(rxsts != BCMETH_RCVMAGIC, "currdscr=%u consumer=%zd",
1050 currdscr, consumer - rxq->rxq_first);
1051 #endif
1052
1053 /*
1054 * Get the count of descriptors. Fetch the correct number
1055 * of mbufs.
1056 */
1057 #ifdef BCMETH_RCVMAGIC
1058 size_t desc_count = rxsts != BCMETH_RCVMAGIC ? __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1 : 1;
1059 #else
1060 size_t desc_count = __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1;
1061 #endif
1062 struct mbuf *m = rxq->rxq_mhead;
1063 struct mbuf *m_last = m;
1064 for (size_t i = 1; i < desc_count; i++) {
1065 if (++consumer == rxq->rxq_last) {
1066 consumer = rxq->rxq_first;
1067 }
1068 KASSERTMSG(consumer != rxq->rxq_first + currdscr,
1069 "i=%zu rxsts=%#x desc_count=%zu currdscr=%u consumer=%zd",
1070 i, rxsts, desc_count, currdscr,
1071 consumer - rxq->rxq_first);
1072 m_last = m_last->m_next;
1073 }
1074
1075 /*
1076 * Now remove it/them from the list of enqueued mbufs.
1077 */
1078 if ((rxq->rxq_mhead = m_last->m_next) == NULL)
1079 rxq->rxq_mtail = &rxq->rxq_mhead;
1080 m_last->m_next = NULL;
1081
1082 #ifdef BCMETH_RCVMAGIC
1083 if (rxsts == BCMETH_RCVMAGIC) {
1084 ifp->if_ierrors++;
1085 if ((m->m_ext.ext_paddr >> 28) == 8) {
1086 BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_lo);
1087 } else {
1088 BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_hi);
1089 }
1090 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1091 } else
1092 #endif /* BCMETH_RCVMAGIC */
1093 if (rxsts & (RXSTS_CRC_ERROR|RXSTS_OVERSIZED|RXSTS_PKT_OVERFLOW)) {
1094 aprint_error_dev(sc->sc_dev, "[%zu]: count=%zu rxsts=%#x\n",
1095 consumer - rxq->rxq_first, desc_count, rxsts);
1096 /*
1097 * We encountered an error, take the mbufs and add them
1098 * to the rx bufcache so we can quickly reuse them.
1099 */
1100 ifp->if_ierrors++;
1101 do {
1102 struct mbuf *m0 = m->m_next;
1103 m->m_next = NULL;
1104 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1105 m = m0;
1106 } while (m);
1107 } else {
1108 uint32_t framelen = __SHIFTOUT(rxsts, RXSTS_FRAMELEN);
1109 framelen += sc->sc_rcvoffset;
1110 m->m_pkthdr.len = framelen;
1111 if (desc_count == 1) {
1112 KASSERT(framelen <= MCLBYTES);
1113 m->m_len = framelen;
1114 } else {
1115 m_last->m_len = framelen & (MCLBYTES - 1);
1116 }
1117
1118 #ifdef BCMETH_MPSAFE
1119 /*
1120 * Wrap at the last entry!
1121 */
1122 if (++consumer == rxq->rxq_last) {
1123 KASSERT(consumer[-1].rxdb_flags & RXDB_FLAG_ET);
1124 rxq->rxq_consumer = rxq->rxq_first;
1125 } else {
1126 rxq->rxq_consumer = consumer;
1127 }
1128 rxq->rxq_inuse -= rxconsumed;
1129 #endif /* BCMETH_MPSAFE */
1130
1131 /*
1132 * Receive the packet (which releases our lock)
1133 */
1134 bcmeth_rx_input(sc, m, rxsts);
1135
1136 #ifdef BCMETH_MPSAFE
1137 /*
1138 * Since we had to give up our lock, we need to
1139 * refresh these.
1140 */
1141 consumer = rxq->rxq_consumer;
1142 rxconsumed = 0;
1143 continue;
1144 #endif /* BCMETH_MPSAFE */
1145 }
1146
1147 /*
1148 * Wrap at the last entry!
1149 */
1150 if (++consumer == rxq->rxq_last) {
1151 KASSERT(consumer[-1].rxdb_flags & RXDB_FLAG_ET);
1152 consumer = rxq->rxq_first;
1153 }
1154 }
1155 }
1156
1157 static void
1158 bcmeth_rxq_purge(
1159 struct bcmeth_softc *sc,
1160 struct bcmeth_rxqueue *rxq,
1161 bool discard)
1162 {
1163 struct mbuf *m;
1164
1165 if ((m = rxq->rxq_mhead) != NULL) {
1166 if (discard) {
1167 bcmeth_rx_map_unload(sc, m);
1168 m_freem(m);
1169 } else {
1170 while (m != NULL) {
1171 struct mbuf *m0 = m->m_next;
1172 m->m_next = NULL;
1173 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1174 m = m0;
1175 }
1176 }
1177
1178 }
1179
1180 rxq->rxq_mhead = NULL;
1181 rxq->rxq_mtail = &rxq->rxq_mhead;
1182 rxq->rxq_inuse = 0;
1183 }
1184
1185 static void
1186 bcmeth_rxq_reset(
1187 struct bcmeth_softc *sc,
1188 struct bcmeth_rxqueue *rxq)
1189 {
1190 /*
1191 * sync all the descriptors
1192 */
1193 bcmeth_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
1194 rxq->rxq_last - rxq->rxq_first);
1195
1196 /*
1197 * Make sure we own all descriptors in the ring.
1198 */
1199 struct gmac_rxdb *rxdb;
1200 for (rxdb = rxq->rxq_first; rxdb < rxq->rxq_last - 1; rxdb++) {
1201 rxdb->rxdb_flags = RXDB_FLAG_IC;
1202 }
1203
1204 /*
1205 * Last descriptor has the wrap flag.
1206 */
1207 rxdb->rxdb_flags = RXDB_FLAG_ET|RXDB_FLAG_IC;
1208
1209 /*
1210 * Reset the producer consumer indexes.
1211 */
1212 rxq->rxq_consumer = rxq->rxq_first;
1213 rxq->rxq_producer = rxq->rxq_first;
1214 rxq->rxq_inuse = 0;
1215 if (rxq->rxq_threshold < BCMETH_MINRXMBUFS)
1216 rxq->rxq_threshold = BCMETH_MINRXMBUFS;
1217
1218 sc->sc_intmask |= RCVINT|RCVFIFOOF|RCVDESCUF;
1219
1220 /*
1221 * Restart the receiver at the first descriptor
1222 */
1223 bcmeth_write_4(sc, rxq->rxq_reg_rcvaddrlo,
1224 rxq->rxq_descmap->dm_segs[0].ds_addr);
1225 }
1226
1227 static int
1228 bcmeth_rxq_attach(
1229 struct bcmeth_softc *sc,
1230 struct bcmeth_rxqueue *rxq,
1231 u_int qno)
1232 {
1233 size_t desc_count = BCMETH_RINGSIZE / sizeof(rxq->rxq_first[0]);
1234 int error;
1235 void *descs;
1236
1237 KASSERT(desc_count == 256 || desc_count == 512);
1238
1239 error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE,
1240 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
1241 if (error)
1242 return error;
1243
1244 memset(descs, 0, BCMETH_RINGSIZE);
1245 rxq->rxq_first = descs;
1246 rxq->rxq_last = rxq->rxq_first + desc_count;
1247 rxq->rxq_consumer = descs;
1248 rxq->rxq_producer = descs;
1249
1250 bcmeth_rxq_purge(sc, rxq, true);
1251 bcmeth_rxq_reset(sc, rxq);
1252
1253 rxq->rxq_reg_rcvaddrlo = GMAC_RCVADDR_LOW;
1254 rxq->rxq_reg_rcvctl = GMAC_RCVCONTROL;
1255 rxq->rxq_reg_rcvptr = GMAC_RCVPTR;
1256 rxq->rxq_reg_rcvsts0 = GMAC_RCVSTATUS0;
1257 rxq->rxq_reg_rcvsts1 = GMAC_RCVSTATUS1;
1258
1259 return 0;
1260 }
1261
1262 static bool
1263 bcmeth_txq_active_p(
1264 struct bcmeth_softc * const sc,
1265 struct bcmeth_txqueue *txq)
1266 {
1267 return !IF_IS_EMPTY(&txq->txq_mbufs);
1268 }
1269
1270 static bool
1271 bcmeth_txq_fillable_p(
1272 struct bcmeth_softc * const sc,
1273 struct bcmeth_txqueue *txq)
1274 {
1275 return txq->txq_free >= txq->txq_threshold;
1276 }
1277
1278 static int
1279 bcmeth_txq_attach(
1280 struct bcmeth_softc *sc,
1281 struct bcmeth_txqueue *txq,
1282 u_int qno)
1283 {
1284 size_t desc_count = BCMETH_RINGSIZE / sizeof(txq->txq_first[0]);
1285 int error;
1286 void *descs;
1287
1288 KASSERT(desc_count == 256 || desc_count == 512);
1289
1290 error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE,
1291 &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
1292 if (error)
1293 return error;
1294
1295 memset(descs, 0, BCMETH_RINGSIZE);
1296 txq->txq_first = descs;
1297 txq->txq_last = txq->txq_first + desc_count;
1298 txq->txq_consumer = descs;
1299 txq->txq_producer = descs;
1300
1301 IFQ_SET_MAXLEN(&txq->txq_mbufs, BCMETH_MAXTXMBUFS);
1302
1303 txq->txq_reg_xmtaddrlo = GMAC_XMTADDR_LOW;
1304 txq->txq_reg_xmtctl = GMAC_XMTCONTROL;
1305 txq->txq_reg_xmtptr = GMAC_XMTPTR;
1306 txq->txq_reg_xmtsts0 = GMAC_XMTSTATUS0;
1307 txq->txq_reg_xmtsts1 = GMAC_XMTSTATUS1;
1308
1309 bcmeth_txq_reset(sc, txq);
1310
1311 return 0;
1312 }
1313
1314 static int
1315 bcmeth_txq_map_load(
1316 struct bcmeth_softc *sc,
1317 struct bcmeth_txqueue *txq,
1318 struct mbuf *m)
1319 {
1320 bus_dmamap_t map;
1321 int error;
1322
1323 map = M_GETCTX(m, bus_dmamap_t);
1324 if (map != NULL)
1325 return 0;
1326
1327 map = bcmeth_mapcache_get(sc, sc->sc_tx_mapcache);
1328 if (map == NULL)
1329 return ENOMEM;
1330
1331 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1332 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1333 if (error)
1334 return error;
1335
1336 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
1337 BUS_DMASYNC_PREWRITE);
1338 M_SETCTX(m, map);
1339 return 0;
1340 }
1341
1342 static void
1343 bcmeth_txq_map_unload(
1344 struct bcmeth_softc *sc,
1345 struct bcmeth_txqueue *txq,
1346 struct mbuf *m)
1347 {
1348 KASSERT(m);
1349 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1350 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1351 BUS_DMASYNC_POSTWRITE);
1352 bus_dmamap_unload(sc->sc_dmat, map);
1353 bcmeth_mapcache_put(sc, sc->sc_tx_mapcache, map);
1354 }
1355
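/*
 * Copy the packet's DMA segments into transmit descriptors, marking the
 * first with SF and the last with EF (plus IC when we want a completion
 * interrupt), then advance the hardware transmit pointer.
 */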
1356 static bool
1357 bcmeth_txq_produce(
1358 struct bcmeth_softc *sc,
1359 struct bcmeth_txqueue *txq,
1360 struct mbuf *m)
1361 {
1362 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1363
1364 if (map->dm_nsegs > txq->txq_free)
1365 return false;
1366
1367 /*
1368 * TCP Offload flag must be set in the first descriptor.
1369 */
1370 struct gmac_txdb *producer = txq->txq_producer;
1371 uint32_t first_flags = TXDB_FLAG_SF;
1372 uint32_t last_flags = TXDB_FLAG_EF;
1373
1374 /*
1375 * If we've produced enough descriptors without consuming any
1376 * we need to ask for an interrupt to reclaim some.
1377 */
1378 txq->txq_lastintr += map->dm_nsegs;
1379 if (txq->txq_lastintr >= txq->txq_threshold
1380 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
1381 txq->txq_lastintr = 0;
1382 last_flags |= TXDB_FLAG_IC;
1383 }
1384
1385 KASSERT(producer != txq->txq_last);
1386
1387 struct gmac_txdb *start = producer;
1388 size_t count = map->dm_nsegs;
1389 producer->txdb_flags |= first_flags;
1390 producer->txdb_addrlo = map->dm_segs[0].ds_addr;
1391 producer->txdb_buflen = map->dm_segs[0].ds_len;
1392 for (u_int i = 1; i < map->dm_nsegs; i++) {
1393 #if 0
1394 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
1395 producer->txdb_flags, producer->txdb_buflen,
1396 producer->txdb_addrlo, producer->txdb_addrhi);
1397 #endif
1398 if (__predict_false(++producer == txq->txq_last)) {
1399 bcmeth_txq_desc_presync(sc, txq, start,
1400 txq->txq_last - start);
1401 count -= txq->txq_last - start;
1402 producer = txq->txq_first;
1403 start = txq->txq_first;
1404 }
1405 producer->txdb_addrlo = map->dm_segs[i].ds_addr;
1406 producer->txdb_buflen = map->dm_segs[i].ds_len;
1407 }
1408 producer->txdb_flags |= last_flags;
1409 #if 0
1410 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
1411 producer->txdb_flags, producer->txdb_buflen,
1412 producer->txdb_addrlo, producer->txdb_addrhi);
1413 #endif
1414 if (count)
1415 bcmeth_txq_desc_presync(sc, txq, start, count);
1416
1417 /*
1418 * Reduce free count by the number of segments we consumed.
1419 */
1420 txq->txq_free -= map->dm_nsegs;
1421 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
1422 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txdb_flags & TXDB_FLAG_EF) == 0);
1423 KASSERT(producer->txdb_flags & TXDB_FLAG_EF);
1424
1425 #if 0
1426 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%zd..%zd)\n",
1427 __func__, m, m->m_pkthdr.len, map->dm_nsegs,
1428 txq->txq_producer - txq->txq_first, producer - txq->txq_first);
1429 #endif
1430
1431 if (producer + 1 == txq->txq_last)
1432 txq->txq_producer = txq->txq_first;
1433 else
1434 txq->txq_producer = producer + 1;
1435 IF_ENQUEUE(&txq->txq_mbufs, m);
1436
1437 /*
1438 * Let the transmitter know there's more to do
1439 */
1440 bcmeth_write_4(sc, txq->txq_reg_xmtptr,
1441 txq->txq_descmap->dm_segs[0].ds_addr
1442 + ((uintptr_t)txq->txq_producer & XMT_LASTDSCR));
1443
1444 return true;
1445 }
1446
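/*
 * Consolidate a multi-mbuf packet into a single mbuf (a cluster if it
 * does not fit in a header mbuf) so it loads into fewer DMA segments.
 * Returns NULL if no memory was available.
 */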
1447 static struct mbuf *
1448 bcmeth_copy_packet(struct mbuf *m)
1449 {
1450 struct mbuf *mext = NULL;
1451 size_t misalignment = 0;
1452 size_t hlen = 0;
1453
1454 for (mext = m; mext != NULL; mext = mext->m_next) {
1455 if (mext->m_flags & M_EXT) {
1456 misalignment = mtod(mext, vaddr_t) & arm_dcache_align;
1457 break;
1458 }
1459 hlen += mext->m_len;
1460 }
1461
1462 struct mbuf *n = m->m_next;
1463 if (m != mext && hlen + misalignment <= MHLEN && false) {
1464 KASSERT(m->m_pktdat <= m->m_data && m->m_data <= &m->m_pktdat[MHLEN - m->m_len]);
1465 size_t oldoff = m->m_data - m->m_pktdat;
1466 size_t off;
1467 if (mext == NULL) {
1468 off = (oldoff + hlen > MHLEN) ? 0 : oldoff;
1469 } else {
1470 off = MHLEN - (hlen + misalignment);
1471 }
1472 KASSERT(off + hlen + misalignment <= MHLEN);
1473 if (((oldoff ^ off) & arm_dcache_align) != 0 || off < oldoff) {
1474 memmove(&m->m_pktdat[off], m->m_data, m->m_len);
1475 m->m_data = &m->m_pktdat[off];
1476 }
1477 m_copydata(n, 0, hlen - m->m_len, &m->m_data[m->m_len]);
1478 m->m_len = hlen;
1479 m->m_next = mext;
1480 while (n != mext) {
1481 n = m_free(n);
1482 }
1483 return m;
1484 }
1485
1486 struct mbuf *m0 = m_gethdr(M_DONTWAIT, m->m_type);
1487 if (m0 == NULL) {
1488 return NULL;
1489 }
1490 M_COPY_PKTHDR(m0, m);
1491 MCLAIM(m0, m->m_owner);
1492 if (m0->m_pkthdr.len > MHLEN) {
1493 MCLGET(m0, M_DONTWAIT);
1494 if ((m0->m_flags & M_EXT) == 0) {
1495 m_freem(m0);
1496 return NULL;
1497 }
1498 }
1499 m0->m_len = m->m_pkthdr.len;
1500 m_copydata(m, 0, m0->m_len, mtod(m0, void *));
1501 m_freem(m);
1502 return m0;
1503 }
1504
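/*
 * Move packets from the interface send queue (or the deferred txq_next
 * packet) onto the transmit ring.  Returns false once the descriptor
 * ring or the transmit mbuf queue has filled up.
 */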
1505 static bool
1506 bcmeth_txq_enqueue(
1507 struct bcmeth_softc *sc,
1508 struct bcmeth_txqueue *txq)
1509 {
1510 for (;;) {
1511 if (IF_QFULL(&txq->txq_mbufs))
1512 return false;
1513 struct mbuf *m = txq->txq_next;
1514 if (m == NULL) {
1515 int s = splnet();
1516 IF_DEQUEUE(&sc->sc_if.if_snd, m);
1517 splx(s);
1518 if (m == NULL)
1519 return true;
1520 M_SETCTX(m, NULL);
1521 } else {
1522 txq->txq_next = NULL;
1523 }
1524 /*
1525 * If LINK2 is set and this packet uses multiple mbufs,
1526 * consolidate it into a single mbuf.
1527 */
1528 if (m->m_next != NULL && (sc->sc_if.if_flags & IFF_LINK2)) {
1529 struct mbuf *m0 = bcmeth_copy_packet(m);
1530 if (m0 == NULL) {
1531 txq->txq_next = m;
1532 return true;
1533 }
1534 m = m0;
1535 }
1536 int error = bcmeth_txq_map_load(sc, txq, m);
1537 if (error) {
1538 aprint_error_dev(sc->sc_dev,
1539 "discarded packet due to "
1540 "dmamap load failure: %d\n", error);
1541 m_freem(m);
1542 continue;
1543 }
1544 KASSERT(txq->txq_next == NULL);
1545 if (!bcmeth_txq_produce(sc, txq, m)) {
1546 txq->txq_next = m;
1547 return false;
1548 }
1549 KASSERT(txq->txq_next == NULL);
1550 }
1551 }
1552
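/*
 * Reclaim descriptors the hardware has finished transmitting, unload
 * and free their mbufs, and report whether the ring has drained enough
 * to accept more work.
 */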
1553 static bool
1554 bcmeth_txq_consume(
1555 struct bcmeth_softc *sc,
1556 struct bcmeth_txqueue *txq)
1557 {
1558 struct ifnet * const ifp = &sc->sc_if;
1559 struct gmac_txdb *consumer = txq->txq_consumer;
1560 size_t txfree = 0;
1561
1562 #if 0
1563 printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
1564 #endif
1565
1566 for (;;) {
1567 if (consumer == txq->txq_producer) {
1568 txq->txq_consumer = consumer;
1569 txq->txq_free += txfree;
1570 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
1571 #if 0
1572 printf("%s: empty: freed %zu descriptors going from %zu to %zu\n",
1573 __func__, txfree, txq->txq_free - txfree, txq->txq_free);
1574 #endif
1575 KASSERT(txq->txq_lastintr == 0);
1576 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1);
1577 return true;
1578 }
1579 bcmeth_txq_desc_postsync(sc, txq, consumer, 1);
1580 uint32_t s0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
1581 if (consumer == txq->txq_first + __SHIFTOUT(s0, XMT_CURRDSCR)) {
1582 txq->txq_consumer = consumer;
1583 txq->txq_free += txfree;
1584 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
1585 #if 0
1586 printf("%s: freed %zu descriptors\n",
1587 __func__, txfree);
1588 #endif
1589 return bcmeth_txq_fillable_p(sc, txq);
1590 }
1591
1592 /*
1593 * If this is the last descriptor in the chain, get the
1594 * mbuf, free its dmamap, and free the mbuf chain itself.
1595 */
1596 const uint32_t txdb_flags = consumer->txdb_flags;
1597 if (txdb_flags & TXDB_FLAG_EF) {
1598 struct mbuf *m;
1599
1600 IF_DEQUEUE(&txq->txq_mbufs, m);
1601 KASSERT(m);
1602 bcmeth_txq_map_unload(sc, txq, m);
1603 #if 0
1604 printf("%s: mbuf %p: consumed a %u byte packet\n",
1605 __func__, m, m->m_pkthdr.len);
1606 #endif
1607 bpf_mtap(ifp, m);
1608 ifp->if_opackets++;
1609 ifp->if_obytes += m->m_pkthdr.len;
1610 if (m->m_flags & M_MCAST)
1611 ifp->if_omcasts++;
1612 m_freem(m);
1613 }
1614
1615 /*
1616 * We own this packet again. Clear all flags except wrap.
1617 */
1618 txfree++;
1619
1620 /*
1621 * Wrap at the last entry!
1622 */
1623 if (txdb_flags & TXDB_FLAG_ET) {
1624 consumer->txdb_flags = TXDB_FLAG_ET;
1625 KASSERT(consumer + 1 == txq->txq_last);
1626 consumer = txq->txq_first;
1627 } else {
1628 consumer->txdb_flags = 0;
1629 consumer++;
1630 KASSERT(consumer < txq->txq_last);
1631 }
1632 }
1633 }
1634
1635 static void
1636 bcmeth_txq_purge(
1637 struct bcmeth_softc *sc,
1638 struct bcmeth_txqueue *txq)
1639 {
1640 struct mbuf *m;
1641 KASSERT((bcmeth_read_4(sc, UNIMAC_COMMAND_CONFIG) & TX_ENA) == 0);
1642
1643 for (;;) {
1644 IF_DEQUEUE(&txq->txq_mbufs, m);
1645 if (m == NULL)
1646 break;
1647 bcmeth_txq_map_unload(sc, txq, m);
1648 m_freem(m);
1649 }
1650 if ((m = txq->txq_next) != NULL) {
1651 txq->txq_next = NULL;
1652 bcmeth_txq_map_unload(sc, txq, m);
1653 m_freem(m);
1654 }
1655 }
1656
1657 static void
1658 bcmeth_txq_reset(
1659 struct bcmeth_softc *sc,
1660 struct bcmeth_txqueue *txq)
1661 {
1662 /*
1663 * sync all the descriptors
1664 */
1665 bcmeth_txq_desc_postsync(sc, txq, txq->txq_first,
1666 txq->txq_last - txq->txq_first);
1667
1668 /*
1669 * Make sure we own all descriptors in the ring.
1670 */
1671 struct gmac_txdb *txdb;
1672 for (txdb = txq->txq_first; txdb < txq->txq_last - 1; txdb++) {
1673 txdb->txdb_flags = 0;
1674 }
1675
1676 /*
1677 * Last descriptor has the wrap flag.
1678 */
1679 txdb->txdb_flags = TXDB_FLAG_ET;
1680
1681 /*
1682 * Reset the producer consumer indexes.
1683 */
1684 txq->txq_consumer = txq->txq_first;
1685 txq->txq_producer = txq->txq_first;
1686 txq->txq_free = txq->txq_last - txq->txq_first - 1;
1687 txq->txq_threshold = txq->txq_free / 2;
1688 txq->txq_lastintr = 0;
1689
1690 /*
1691 * What do we want to get interrupted on?
1692 */
1693 sc->sc_intmask |= XMTINT_0 | XMTUF;
1694
1695 /*
1696 * Restart the transmitter at the first descriptor
1697 */
1698 bcmeth_write_4(sc, txq->txq_reg_xmtaddrlo,
1699 txq->txq_descmap->dm_segs->ds_addr);
1700 }
1701
1702 static void
1703 bcmeth_ifstart(struct ifnet *ifp)
1704 {
1705 struct bcmeth_softc * const sc = ifp->if_softc;
1706
1707 if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) {
1708 return;
1709 }
1710
1711 #ifdef BCMETH_MPSAFETX
1712 if (cpu_intr_p()) {
1713 #endif
1714 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
1715 softint_schedule(sc->sc_soft_ih);
1716 #ifdef BCMETH_MPSAFETX
1717 } else {
1718 /*
1719 * We are either already in a softint thread or in some other
1720 * thread context, so just borrow this context to do the send and
1721 * save ourselves the overhead of scheduling a fast soft interrupt.
1722 */
1723 bcmeth_soft_txintr(sc);
1724 }
1725 #endif
1726 }
1727
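/*
 * Hardware interrupt handler.  Acknowledge and decode GMAC_INTSTATUS,
 * mask the sources that will be serviced later, and hand the real work
 * to the soft interrupt (light load) or the workqueue (heavy load,
 * underflows, and fatal errors).
 */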
1728 int
1729 bcmeth_intr(void *arg)
1730 {
1731 struct bcmeth_softc * const sc = arg;
1732 uint32_t soft_flags = 0;
1733 uint32_t work_flags = 0;
1734 int rv = 0;
1735
1736 mutex_enter(sc->sc_hwlock);
1737
1738 uint32_t intmask = sc->sc_intmask;
1739 BCMETH_EVCNT_INCR(sc->sc_ev_intr);
1740
1741 for (;;) {
1742 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
1743 intstatus &= intmask;
1744 bcmeth_write_4(sc, GMAC_INTSTATUS, intstatus); /* write 1 to clear */
1745 if (intstatus == 0) {
1746 break;
1747 }
1748 #if 0
1749 aprint_normal_dev(sc->sc_dev, "%s: intstatus=%#x intmask=%#x\n",
1750 __func__, intstatus, bcmeth_read_4(sc, GMAC_INTMASK));
1751 #endif
1752 if (intstatus & RCVINT) {
1753 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
1754 intmask &= ~RCVINT;
1755
1756 uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
1757 uint32_t descs = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
1758 if (descs < rxq->rxq_consumer - rxq->rxq_first) {
1759 /*
1760 * We wrapped at the end so count how far
1761 * we are from the end.
1762 */
1763 descs += rxq->rxq_last - rxq->rxq_consumer;
1764 } else {
1765 descs -= rxq->rxq_consumer - rxq->rxq_first;
1766 }
1767 /*
1768 * If we "timed out" we can't be hogging the CPU, so use
1769 * softints. If we exceeded the frame count then we might be
1770 * hogging, so let the workqueue deal with them.
1771 */
1772 const uint32_t framecount = __SHIFTOUT(sc->sc_rcvlazy, INTRCVLAZY_FRAMECOUNT);
1773 if (descs < framecount
1774 || (curcpu()->ci_curlwp->l_flag & LW_IDLE)) {
1775 soft_flags |= SOFT_RXINTR;
1776 } else {
1777 work_flags |= WORK_RXINTR;
1778 }
1779 }
1780
1781 if (intstatus & XMTINT_0) {
1782 intmask &= ~XMTINT_0;
1783 soft_flags |= SOFT_TXINTR;
1784 }
1785
1786 if (intstatus & RCVDESCUF) {
1787 intmask &= ~RCVDESCUF;
1788 work_flags |= WORK_RXUNDERFLOW;
1789 }
1790
1791 intstatus &= intmask;
1792 if (intstatus) {
1793 aprint_error_dev(sc->sc_dev,
1794 "intr: intstatus=%#x\n", intstatus);
1795 aprint_error_dev(sc->sc_dev,
1796 "rcvbase=%p/%#lx rcvptr=%#x rcvsts=%#x/%#x\n",
1797 sc->sc_rxq.rxq_first,
1798 sc->sc_rxq.rxq_descmap->dm_segs[0].ds_addr,
1799 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvptr),
1800 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts0),
1801 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts1));
1802 aprint_error_dev(sc->sc_dev,
1803 "xmtbase=%p/%#lx xmtptr=%#x xmtsts=%#x/%#x\n",
1804 sc->sc_txq.txq_first,
1805 sc->sc_txq.txq_descmap->dm_segs[0].ds_addr,
1806 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtptr),
1807 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts0),
1808 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts1));
1809 intmask &= ~intstatus;
1810 work_flags |= WORK_REINIT;
1811 break;
1812 }
1813 }
1814
1815 if (intmask != sc->sc_intmask) {
1816 sc->sc_intmask = intmask;
1817 bcmeth_write_4(sc, GMAC_INTMASK, intmask);
}
1818
1819 if (work_flags) {
1820 if (sc->sc_work_flags == 0) {
1821 workqueue_enqueue(sc->sc_workq, &sc->sc_work, NULL);
1822 }
1823 atomic_or_32(&sc->sc_work_flags, work_flags);
1824 rv = 1;
1825 }
1826
1827 if (soft_flags) {
1828 if (sc->sc_soft_flags == 0) {
1829 softint_schedule(sc->sc_soft_ih);
1830 }
1831 atomic_or_32(&sc->sc_soft_flags, soft_flags);
1832 rv = 1;
1833 }
1834
1835 mutex_exit(sc->sc_hwlock);
1836
1837 return rv;
1838 }
1839
1840 #ifdef BCMETH_MPSAFETX
1841 void
1842 bcmeth_soft_txintr(struct bcmeth_softc *sc)
1843 {
1844 mutex_enter(sc->sc_lock);
1845 /*
1846 * Let's do what we came here for. Consume transmitted
1847 * packets off the transmit ring.
1848 */
1849 if (!bcmeth_txq_consume(sc, &sc->sc_txq)
1850 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
1851 BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall);
1852 sc->sc_if.if_flags |= IFF_OACTIVE;
1853 } else {
1854 sc->sc_if.if_flags &= ~IFF_OACTIVE;
1855 }
1856 if (sc->sc_if.if_flags & IFF_RUNNING) {
1857 mutex_spin_enter(sc->sc_hwlock);
1858 sc->sc_intmask |= XMTINT_0;
1859 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1860 mutex_spin_exit(sc->sc_hwlock);
1861 }
1862 mutex_exit(sc->sc_lock);
1863 }
1864 #endif /* BCMETH_MPSAFETX */
1865
1866 void
1867 bcmeth_soft_intr(void *arg)
1868 {
1869 struct bcmeth_softc * const sc = arg;
1870 struct ifnet * const ifp = &sc->sc_if;
1871 uint32_t intmask = 0;
1872
1873 mutex_enter(sc->sc_lock);
1874
1875 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);
1876
1877 BCMETH_EVCNT_INCR(sc->sc_ev_soft_intr);
1878
1879 if ((soft_flags & SOFT_TXINTR)
1880 || bcmeth_txq_active_p(sc, &sc->sc_txq)) {
1881 /*
1882 * Let's do what we came here for. Consume transmitted
1883 * packets off the transmit ring.
1884 */
1885 if (!bcmeth_txq_consume(sc, &sc->sc_txq)
1886 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
1887 BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall);
1888 ifp->if_flags |= IFF_OACTIVE;
1889 } else {
1890 ifp->if_flags &= ~IFF_OACTIVE;
1891 }
1892 intmask |= XMTINT_0;
1893 }
1894
1895 if (soft_flags & SOFT_RXINTR) {
1896 /*
1897 * Let's consume
1898 */
1899 bcmeth_rxq_consume(sc, &sc->sc_rxq);
1900 intmask |= RCVINT;
1901 }
1902
1903 if (ifp->if_flags & IFF_RUNNING) {
1904 bcmeth_rxq_produce(sc, &sc->sc_rxq);
1905 mutex_spin_enter(sc->sc_hwlock);
1906 sc->sc_intmask |= intmask;
1907 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1908 mutex_spin_exit(sc->sc_hwlock);
1909 }
1910
1911 mutex_exit(sc->sc_lock);
1912 }
1913
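/*
 * Workqueue handler for the heavyweight events: reinitialize the
 * interface after a fatal error, grow the receive threshold after a
 * descriptor underflow, and consume received packets under heavy load.
 */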
1914 void
1915 bcmeth_worker(struct work *wk, void *arg)
1916 {
1917 struct bcmeth_softc * const sc = arg;
1918 struct ifnet * const ifp = &sc->sc_if;
1919 uint32_t intmask = 0;
1920
1921 mutex_enter(sc->sc_lock);
1922
1923 BCMETH_EVCNT_INCR(sc->sc_ev_work);
1924
1925 uint32_t work_flags = atomic_swap_32(&sc->sc_work_flags, 0);
1926 if (work_flags & WORK_REINIT) {
1927 int s = splnet();
1928 sc->sc_soft_flags = 0;
1929 bcmeth_ifinit(ifp);
1930 splx(s);
1931 work_flags &= ~WORK_RXUNDERFLOW;
1932 }
1933
1934 if (work_flags & WORK_RXUNDERFLOW) {
1935 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
1936 size_t threshold = 5 * rxq->rxq_threshold / 4;
1937 if (threshold >= rxq->rxq_last - rxq->rxq_first) {
1938 threshold = rxq->rxq_last - rxq->rxq_first - 1;
1939 } else {
1940 intmask |= RCVDESCUF;
1941 }
1942 aprint_normal_dev(sc->sc_dev,
1943 "increasing receive buffers from %zu to %zu\n",
1944 rxq->rxq_threshold, threshold);
1945 rxq->rxq_threshold = threshold;
1946 }
1947
1948 if (work_flags & WORK_RXINTR) {
1949 /*
1950 * Let's consume
1951 */
1952 bcmeth_rxq_consume(sc, &sc->sc_rxq);
1953 intmask |= RCVINT;
1954 }
1955
1956 if (ifp->if_flags & IFF_RUNNING) {
1957 bcmeth_rxq_produce(sc, &sc->sc_rxq);
1958 #if 0
1959 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
1960 if (intstatus & RCVINT) {
1961 bcmeth_write_4(sc, GMAC_INTSTATUS, RCVINT);
1962 work_flags |= WORK_RXINTR;
1963 continue;
1964 }
1965 #endif
1966 mutex_spin_enter(sc->sc_hwlock);
1967 sc->sc_intmask |= intmask;
1968 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1969 mutex_spin_exit(sc->sc_hwlock);
1970 }
1971
1972 mutex_exit(sc->sc_lock);
1973 }
1974