1 /*-
2 * Copyright (c) 2012 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas of 3am Software Foundry.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #define GMAC_PRIVATE
31
32 #include "locators.h"
33
34 #include <sys/cdefs.h>
35
36 __KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.5 2012/10/05 03:57:21 matt Exp $");
37
38 #include <sys/param.h>
39 #include <sys/atomic.h>
40 #include <sys/bus.h>
41 #include <sys/device.h>
42 #include <sys/ioctl.h>
43 #include <sys/intr.h>
44 #include <sys/kmem.h>
45 #include <sys/mutex.h>
46 #include <sys/socket.h>
47 #include <sys/systm.h>
48
49 #include <net/if.h>
50 #include <net/if_ether.h>
51 #include <net/if_media.h>
52
53 #include <net/if_dl.h>
54
55 #include <net/bpf.h>
56
57 #include <dev/mii/miivar.h>
58
59 #include <arm/broadcom/bcm53xx_reg.h>
60 #include <arm/broadcom/bcm53xx_var.h>
61
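/*
 * Tuning constants.  BCMETH_RCVOFFSET is the receive offset programmed into
 * RCVCTL_RCVOFFSET: the DMA engine deposits its receive status ahead of the
 * frame data, and the whole offset is stripped with m_adj() before input.
 * BCMETH_MAXTXMBUFS bounds the packets queued on the transmit ring and
 * BCMETH_NTXSEGS the DMA segments allowed per packet; the RXMBUFS/NRXSEGS
 * values size the receive buffer pool (one cluster per receive descriptor).
 */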
62 #define BCMETH_RCVOFFSET 6
63 #define BCMETH_MAXTXMBUFS 32
64 #define BCMETH_NTXSEGS 30
65 #define BCMETH_MAXRXMBUFS 255
66 #define BCMETH_MINRXMBUFS 32
67 #define BCMETH_NRXSEGS 1
68
69 static int bcmeth_ccb_match(device_t, cfdata_t, void *);
70 static void bcmeth_ccb_attach(device_t, device_t, void *);
71
72 struct bcmeth_txqueue {
73 bus_dmamap_t txq_descmap;
74 struct gmac_txdb *txq_consumer;
75 struct gmac_txdb *txq_producer;
76 struct gmac_txdb *txq_first;
77 struct gmac_txdb *txq_last;
78 struct ifqueue txq_mbufs;
79 struct mbuf *txq_next;
80 size_t txq_free;
81 size_t txq_threshold;
82 size_t txq_lastintr;
83 bus_size_t txq_reg_xmtaddrlo;
84 bus_size_t txq_reg_xmtptr;
85 bus_size_t txq_reg_xmtctl;
86 bus_size_t txq_reg_xmtsts0;
87 bus_dma_segment_t txq_descmap_seg;
88 };
89
90 struct bcmeth_rxqueue {
91 bus_dmamap_t rxq_descmap;
92 struct gmac_rxdb *rxq_consumer;
93 struct gmac_rxdb *rxq_producer;
94 struct gmac_rxdb *rxq_first;
95 struct gmac_rxdb *rxq_last;
96 struct mbuf *rxq_mhead;
97 struct mbuf **rxq_mtail;
98 struct mbuf *rxq_mconsumer;
99 size_t rxq_inuse;
100 size_t rxq_threshold;
101 bus_size_t rxq_reg_rcvaddrlo;
102 bus_size_t rxq_reg_rcvptr;
103 bus_size_t rxq_reg_rcvctl;
104 bus_size_t rxq_reg_rcvsts0;
105 bus_dma_segment_t rxq_descmap_seg;
106 };
107
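/*
 * A mapcache is a simple LIFO pool of preallocated bus_dmamap_t's so that
 * no map ever has to be created in the rx/tx hot paths.  dmc_maps[] is a
 * variable-length array of dmc_maxmaps entries; dmc_nmaps counts how many
 * are currently free (get pops from the end, put pushes back).
 */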
108 struct bcmeth_mapcache {
109 u_int dmc_nmaps;
110 u_int dmc_maxseg;
111 u_int dmc_maxmaps;
112 u_int dmc_maxmapsize;
113 bus_dmamap_t dmc_maps[0];
114 };
115
116 struct bcmeth_softc {
117 device_t sc_dev;
118 bus_space_tag_t sc_bst;
119 bus_space_handle_t sc_bsh;
120 bus_dma_tag_t sc_dmat;
121 kmutex_t *sc_lock;
122 kmutex_t *sc_hwlock;
123 struct ethercom sc_ec;
124 #define sc_if sc_ec.ec_if
125 struct ifmedia sc_media;
126 void *sc_soft_ih;
127 void *sc_ih;
128
129 struct bcmeth_rxqueue sc_rxq;
130 struct bcmeth_txqueue sc_txq;
131
132 uint32_t sc_maxfrm;
133 uint32_t sc_cmdcfg;
134 uint32_t sc_intmask;
135 volatile uint32_t sc_soft_flags;
136 #define SOFT_RXINTR 0x01
137 #define SOFT_RXUNDERFLOW 0x02
138 #define SOFT_TXINTR 0x04
139 #define SOFT_REINIT 0x08
140
141 struct evcnt sc_ev_intr;
142 struct evcnt sc_ev_soft_intr;
143 struct evcnt sc_ev_tx_stall;
144
145 struct ifqueue sc_rx_bufcache;
146 struct bcmeth_mapcache *sc_rx_mapcache;
147 struct bcmeth_mapcache *sc_tx_mapcache;
148
149 uint8_t sc_enaddr[ETHER_ADDR_LEN];
150 };
151
152 static void bcmeth_ifstart(struct ifnet *);
153 static void bcmeth_ifwatchdog(struct ifnet *);
154 static int bcmeth_ifinit(struct ifnet *);
155 static void bcmeth_ifstop(struct ifnet *, int);
156 static int bcmeth_ifioctl(struct ifnet *, u_long, void *);
157
158 static int bcmeth_mapcache_create(struct bcmeth_softc *,
159 struct bcmeth_mapcache **, size_t, size_t, size_t);
160 static void bcmeth_mapcache_destroy(struct bcmeth_softc *,
161 struct bcmeth_mapcache *);
162 static bus_dmamap_t bcmeth_mapcache_get(struct bcmeth_softc *,
163 struct bcmeth_mapcache *);
164 static void bcmeth_mapcache_put(struct bcmeth_softc *,
165 struct bcmeth_mapcache *, bus_dmamap_t);
166
167 static int bcmeth_txq_attach(struct bcmeth_softc *,
168 struct bcmeth_txqueue *, u_int);
169 static void bcmeth_txq_purge(struct bcmeth_softc *,
170 struct bcmeth_txqueue *);
171 static void bcmeth_txq_reset(struct bcmeth_softc *,
172 struct bcmeth_txqueue *);
173 static bool bcmeth_txq_consume(struct bcmeth_softc *,
174 struct bcmeth_txqueue *);
175 static bool bcmeth_txq_produce(struct bcmeth_softc *,
176 struct bcmeth_txqueue *, struct mbuf *m);
177 static bool bcmeth_txq_active_p(struct bcmeth_softc *,
178 struct bcmeth_txqueue *);
179
180 static int bcmeth_rxq_attach(struct bcmeth_softc *,
181 struct bcmeth_rxqueue *, u_int);
182 static bool bcmeth_rxq_produce(struct bcmeth_softc *,
183 struct bcmeth_rxqueue *);
184 static void bcmeth_rxq_purge(struct bcmeth_softc *,
185 struct bcmeth_rxqueue *, bool);
186 static void bcmeth_rxq_reset(struct bcmeth_softc *,
187 struct bcmeth_rxqueue *);
188
189 static int bcmeth_intr(void *);
190 static void bcmeth_soft_intr(void *);
191
192 static int bcmeth_mediachange(struct ifnet *);
193 static void bcmeth_mediastatus(struct ifnet *, struct ifmediareq *);
194
195 static inline uint32_t
196 bcmeth_read_4(struct bcmeth_softc *sc, bus_size_t o)
197 {
198 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, o);
199 }
200
201 static inline void
202 bcmeth_write_4(struct bcmeth_softc *sc, bus_size_t o, uint32_t v)
203 {
204 bus_space_write_4(sc->sc_bst, sc->sc_bsh, o, v);
205 }
206
207 CFATTACH_DECL_NEW(bcmeth_ccb, sizeof(struct bcmeth_softc),
208 bcmeth_ccb_match, bcmeth_ccb_attach, NULL, NULL);
209
210 static int
211 bcmeth_ccb_match(device_t parent, cfdata_t cf, void *aux)
212 {
213 struct bcmccb_attach_args * const ccbaa = aux;
214 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;
215
216 if (strcmp(cf->cf_name, loc->loc_name))
217 return 0;
218
219 #ifdef DIAGNOSTIC
220 const int port = cf->cf_loc[BCMCCBCF_PORT];
221 #endif
222 KASSERT(port == BCMCCBCF_PORT_DEFAULT || port == loc->loc_port);
223
224 return 1;
225 }
226
227 static void
228 bcmeth_ccb_attach(device_t parent, device_t self, void *aux)
229 {
230 struct bcmeth_softc * const sc = device_private(self);
231 struct ethercom * const ec = &sc->sc_ec;
232 struct ifnet * const ifp = &ec->ec_if;
233 struct bcmccb_attach_args * const ccbaa = aux;
234 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;
235 const char * const xname = device_xname(self);
236 prop_dictionary_t dict = device_properties(self);
237 int error;
238
239 sc->sc_bst = ccbaa->ccbaa_ccb_bst;
240 sc->sc_dmat = ccbaa->ccbaa_dmat;
241 bus_space_subregion(sc->sc_bst, ccbaa->ccbaa_ccb_bsh,
242 loc->loc_offset, loc->loc_size, &sc->sc_bsh);
243
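/*
 * Prefer the "mac-address" device property if one was provided; otherwise
 * fall back to whatever station address the firmware left programmed in
 * UNIMAC_MAC_0/UNIMAC_MAC_1, treating an all-zero value as "no address".
 */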
244 prop_data_t eaprop = prop_dictionary_get(dict, "mac-address");
245 if (eaprop == NULL) {
246 uint32_t mac0 = bcmeth_read_4(sc, UNIMAC_MAC_0);
247 uint32_t mac1 = bcmeth_read_4(sc, UNIMAC_MAC_1);
248 if ((mac0 == 0 && mac1 == 0) || (mac1 & 1)) {
249 aprint_error(": mac-address property is missing\n");
250 return;
251 }
252 sc->sc_enaddr[0] = (mac0 >> 0) & 0xff;
253 sc->sc_enaddr[1] = (mac0 >> 8) & 0xff;
254 sc->sc_enaddr[2] = (mac0 >> 16) & 0xff;
255 sc->sc_enaddr[3] = (mac0 >> 24) & 0xff;
256 sc->sc_enaddr[4] = (mac1 >> 0) & 0xff;
257 sc->sc_enaddr[5] = (mac1 >> 8) & 0xff;
258 } else {
259 KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
260 KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
261 memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop),
262 ETHER_ADDR_LEN);
263 }
264 sc->sc_dev = self;
265 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
266 sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);
267
268 bcmeth_write_4(sc, GMAC_INTMASK, 0); // disable interrupts
269
270 aprint_naive("\n");
271 aprint_normal(": Gigabit Ethernet Controller\n");
272
273 error = bcmeth_rxq_attach(sc, &sc->sc_rxq, 0);
274 if (error) {
275 aprint_error(": failed to init rxq: %d\n", error);
276 return;
277 }
278
279 error = bcmeth_txq_attach(sc, &sc->sc_txq, 0);
280 if (error) {
281 aprint_error(": failed to init txq: %d\n", error);
282 return;
283 }
284
285 error = bcmeth_mapcache_create(sc, &sc->sc_rx_mapcache,
286 BCMETH_MAXRXMBUFS, MCLBYTES, BCMETH_NRXSEGS);
287 if (error) {
288 aprint_error(": failed to allocate rx dmamaps: %d\n", error);
289 return;
290 }
291
292 error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
293 BCMETH_MAXTXMBUFS, MCLBYTES, BCMETH_NTXSEGS);
294 if (error) {
295 aprint_error(": failed to allocate tx dmamaps: %d\n", error);
296 return;
297 }
298
299 sc->sc_soft_ih = softint_establish(SOFTINT_MPSAFE | SOFTINT_NET,
300 bcmeth_soft_intr, sc);
301
302 sc->sc_ih = intr_establish(loc->loc_intrs[0], IPL_VM, IST_LEVEL,
303 bcmeth_intr, sc);
304
305 if (sc->sc_ih == NULL) {
306 aprint_error_dev(self, "failed to establish interrupt %d\n",
307 loc->loc_intrs[0]);
308 } else {
309 aprint_normal_dev(self, "interrupting on irq %d\n",
310 loc->loc_intrs[0]);
311 }
312
313 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
314 ether_sprintf(sc->sc_enaddr));
315
316 /*
317 * Since each port is plugged into the switch/flow-accelerator,
318 * we hard code it at GigE Full-Duplex with Flow Control enabled.
319 */
320 int ifmedia = IFM_ETHER|IFM_1000_T|IFM_FDX;
321 //ifmedia |= IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE;
322 ifmedia_init(&sc->sc_media, IFM_IMASK, bcmeth_mediachange,
323 bcmeth_mediastatus);
324 ifmedia_add(&sc->sc_media, ifmedia, 0, NULL);
325 ifmedia_set(&sc->sc_media, ifmedia);
326
327 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
328
329 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
330 ifp->if_softc = sc;
331 ifp->if_baudrate = IF_Mbps(1000);
332 ifp->if_capabilities = 0;
333 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
334 ifp->if_ioctl = bcmeth_ifioctl;
335 ifp->if_start = bcmeth_ifstart;
336 ifp->if_watchdog = bcmeth_ifwatchdog;
337 ifp->if_init = bcmeth_ifinit;
338 ifp->if_stop = bcmeth_ifstop;
339 IFQ_SET_READY(&ifp->if_snd);
340
341 bcmeth_ifstop(ifp, true);
342
343 /*
344 * Attach the interface.
345 */
346 if_attach(ifp);
347 ether_ifattach(ifp, sc->sc_enaddr);
348
349 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
350 NULL, xname, "intr");
351 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
352 NULL, xname, "soft intr");
353 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
354 NULL, xname, "tx stalls");
355 }
356
357 static int
358 bcmeth_mediachange(struct ifnet *ifp)
359 {
360 //struct bcmeth_softc * const sc = ifp->if_softc;
361 return 0;
362 }
363
364 static void
365 bcmeth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifm)
366 {
367 //struct bcmeth_softc * const sc = ifp->if_softc;
368
369 ifm->ifm_status = IFM_AVALID | IFM_ACTIVE;
370 ifm->ifm_active = IFM_ETHER | IFM_FDX | IFM_1000_T;
371 }
372
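/*
 * Pack a 48-bit station address into the layout written to UNIMAC_MAC_0 and
 * UNIMAC_MAC_1 by bcmeth_ifinit(): the first four octets go into MAC_0 with
 * enaddr[0] in the most significant byte, and the last two octets into the
 * low half of MAC_1.  For example, 00:11:22:33:44:55 yields
 * MAC_0 = 0x00112233 and MAC_1 = 0x00004455.
 */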
373 static uint64_t
374 bcmeth_macaddr_create(const uint8_t *enaddr)
375 {
376 return (enaddr[3] << 0) // UNIMAC_MAC_0
377 | (enaddr[2] << 8) // UNIMAC_MAC_0
378 | (enaddr[1] << 16) // UNIMAC_MAC_0
379 | (enaddr[0] << 24) // UNIMAC_MAC_0
380 | ((uint64_t)enaddr[5] << 32) // UNIMAC_MAC_1
381 | ((uint64_t)enaddr[4] << 40); // UNIMAC_MAC_1
382 }
383
384 static int
385 bcmeth_ifinit(struct ifnet *ifp)
386 {
387 struct bcmeth_softc * const sc = ifp->if_softc;
388 int error = 0;
389
390 sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
391 if (ifp->if_mtu > ETHERMTU_JUMBO)
392 return EINVAL;
393
394 KASSERT(ifp->if_flags & IFF_UP);
395
396 /*
397 * Stop the interface
398 */
399 bcmeth_ifstop(ifp, 0);
400
401 /*
402 * If our frame size has changed (or it's our first time through)
403 * destroy the existing transmit mapcache.
404 */
405 if (sc->sc_tx_mapcache != NULL
406 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
407 bcmeth_mapcache_destroy(sc, sc->sc_tx_mapcache);
408 sc->sc_tx_mapcache = NULL;
409 }
410
411 if (sc->sc_tx_mapcache == NULL) {
412 error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
413 BCMETH_MAXTXMBUFS, sc->sc_maxfrm, BCMETH_NTXSEGS);
414 if (error)
415 return error;
416 }
417
418 sc->sc_cmdcfg = NO_LENGTH_CHECK | PAUSE_IGNORE
419 | __SHIFTIN(ETH_SPEED_1000, ETH_SPEED)
420 | RX_ENA | TX_ENA;
421
422 if (ifp->if_flags & IFF_PROMISC) {
423 sc->sc_cmdcfg |= PROMISC_EN;
424 } else {
425 sc->sc_cmdcfg &= ~PROMISC_EN;
426 }
427
428 const uint64_t macstnaddr =
429 bcmeth_macaddr_create(CLLADDR(ifp->if_sadl));
430
431 sc->sc_intmask = DESCPROTOERR|DATAERR|DESCERR;
432
433 /* 5. Load RCVADDR_LO with new pointer */
434 bcmeth_rxq_reset(sc, &sc->sc_rxq);
435
436 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
437 __SHIFTIN(BCMETH_RCVOFFSET, RCVCTL_RCVOFFSET)
438 | RCVCTL_PARITY_DIS
439 | RCVCTL_OFLOW_CONTINUE
440 | __SHIFTIN(4, RCVCTL_BURSTLEN));
441
442 /* 6. Load XMTADDR_LO with new pointer */
443 bcmeth_txq_reset(sc, &sc->sc_txq);
444
445 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, XMTCTL_DMA_ACT_INDEX
446 | XMTCTL_PARITY_DIS
447 | __SHIFTIN(4, XMTCTL_BURSTLEN));
448
449 /* 7. Setup other UNIMAC registers */
450 bcmeth_write_4(sc, UNIMAC_FRAME_LEN, sc->sc_maxfrm);
451 bcmeth_write_4(sc, UNIMAC_MAC_0, (uint32_t)(macstnaddr >> 0));
452 bcmeth_write_4(sc, UNIMAC_MAC_1, (uint32_t)(macstnaddr >> 32));
453 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, sc->sc_cmdcfg);
454
455 uint32_t devctl = bcmeth_read_4(sc, GMAC_DEVCONTROL);
456 devctl |= RGMII_LINK_STATUS_SEL | NWAY_AUTO_POLL_EN | TXARB_STRICT_MODE;
457 devctl &= ~FLOW_CTRL_MODE;
458 devctl &= ~MIB_RD_RESET_EN;
459 devctl &= ~RXQ_OVERFLOW_CTRL_SEL;
460 devctl &= ~CPU_FLOW_CTRL_ON;
461 bcmeth_write_4(sc, GMAC_DEVCONTROL, devctl);
462
463 /* Setup lazy receive (at most 1ms). */
464 bcmeth_write_4(sc, GMAC_INTRCVLAZY, __SHIFTIN(10, INTRCVLAZY_FRAMECOUNT)
465 | __SHIFTIN(125000000 / 1000, INTRCVLAZY_TIMEOUT));
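/*
 * INTRCVLAZY_FRAMECOUNT = 10 and INTRCVLAZY_TIMEOUT = 125000000/1000
 * (125000 ticks) coalesce the receive interrupt until either 10 frames
 * have arrived or the timeout expires; the timeout works out to 1 ms
 * assuming the lazy-interrupt counter ticks at a 125 MHz core clock,
 * which matches the "at most 1ms" comment above.
 */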
466
467 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */
468 sc->sc_intmask |= XMTINT_0|XMTUF;
469 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl,
470 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl) | XMTCTL_ENABLE);
471
472
473 /* 12. Enable receive queues in RQUEUE, */
474 sc->sc_intmask |= RCVINT|RCVDESCUF|RCVFIFOOF;
475 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
476 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl) | RCVCTL_ENABLE);
477
478 bcmeth_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */
479
480 #if 0
481 aprint_normal_dev(sc->sc_dev,
482 "devctl=%#x ucmdcfg=%#x xmtctl=%#x rcvctl=%#x\n",
483 devctl, sc->sc_cmdcfg,
484 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl),
485 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl));
486 #endif
487
488 sc->sc_soft_flags = 0;
489
490 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
491
492 ifp->if_flags |= IFF_RUNNING;
493
494 return error;
495 }
496
497 static void
498 bcmeth_ifstop(struct ifnet *ifp, int disable)
499 {
500 struct bcmeth_softc * const sc = ifp->if_softc;
501 struct bcmeth_txqueue * const txq = &sc->sc_txq;
502 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
503
504 KASSERT(!cpu_intr_p());
505
506 sc->sc_soft_flags = 0;
507
508 /* Disable Rx processing */
509 bcmeth_write_4(sc, rxq->rxq_reg_rcvctl,
510 bcmeth_read_4(sc, rxq->rxq_reg_rcvctl) & ~RCVCTL_ENABLE);
511
512 /* Disable Tx processing */
513 bcmeth_write_4(sc, txq->txq_reg_xmtctl,
514 bcmeth_read_4(sc, txq->txq_reg_xmtctl) & ~XMTCTL_ENABLE);
515
516 /* Disable all interrupts */
517 bcmeth_write_4(sc, GMAC_INTMASK, 0);
518
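/*
 * Spin until both DMA engines report that they have actually reached
 * the disabled state before resetting the MAC.
 */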
519 for (;;) {
520 uint32_t tx0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
521 uint32_t rx0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
522 if (__SHIFTOUT(tx0, XMTSTATE) == XMTSTATE_DIS
523 && __SHIFTOUT(rx0, RCVSTATE) == RCVSTATE_DIS)
524 break;
525 delay(50);
526 }
527 /*
528 * Now reset the controller.
529 *
530 * 3. Set SW_RESET bit in UNIMAC_COMMAND_CONFIG register
531 * 4. Clear SW_RESET bit in UNIMAC_COMMAND_CONFIG register
532 */
533 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, SW_RESET);
534 bcmeth_write_4(sc, GMAC_INTSTATUS, ~0);
535 sc->sc_intmask = 0;
536 ifp->if_flags &= ~IFF_RUNNING;
537
538 /*
539 * Let's consume any remaining transmitted packets. And if we are
540 * disabling the interface, purge ourselves of any untransmitted
541 * packets. But don't consume any received packets, just drop them.
542 * If we aren't disabling the interface, save the mbufs in the
543 * receive queue for reuse.
544 */
545 bcmeth_rxq_purge(sc, &sc->sc_rxq, disable);
546 bcmeth_txq_consume(sc, &sc->sc_txq);
547 if (disable) {
548 bcmeth_txq_purge(sc, &sc->sc_txq);
549 IF_PURGE(&ifp->if_snd);
550 }
551
552 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, 0);
553 }
554
555 static void
556 bcmeth_ifwatchdog(struct ifnet *ifp)
557 {
558 }
559
560 static int
561 bcmeth_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
562 {
563 struct bcmeth_softc *sc = ifp->if_softc;
564 struct ifreq * const ifr = data;
565 const int s = splnet();
566 int error;
567
568 switch (cmd) {
569 case SIOCSIFMEDIA:
570 case SIOCGIFMEDIA:
571 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
572 break;
573
574 default:
575 error = ether_ioctl(ifp, cmd, data);
576 if (error != ENETRESET)
577 break;
578
579 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
580 error = 0;
581 break;
582 }
583 error = bcmeth_ifinit(ifp);
584 break;
585 }
586
587 splx(s);
588 return error;
589 }
590
591 static void
592 bcmeth_rxq_desc_presync(
593 struct bcmeth_softc *sc,
594 struct bcmeth_rxqueue *rxq,
595 struct gmac_rxdb *rxdb,
596 size_t count)
597 {
598 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
599 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
600 BUS_DMASYNC_PREWRITE);
601 }
602
603 static void
604 bcmeth_rxq_desc_postsync(
605 struct bcmeth_softc *sc,
606 struct bcmeth_rxqueue *rxq,
607 struct gmac_rxdb *rxdb,
608 size_t count)
609 {
610 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
611 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
612 BUS_DMASYNC_POSTWRITE);
613 }
614
615 static void
616 bcmeth_txq_desc_presync(
617 struct bcmeth_softc *sc,
618 struct bcmeth_txqueue *txq,
619 struct gmac_txdb *txdb,
620 size_t count)
621 {
622 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
623 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
624 BUS_DMASYNC_PREWRITE);
625 }
626
627 static void
628 bcmeth_txq_desc_postsync(
629 struct bcmeth_softc *sc,
630 struct bcmeth_txqueue *txq,
631 struct gmac_txdb *txdb,
632 size_t count)
633 {
634 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
635 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
636 BUS_DMASYNC_POSTWRITE);
637 }
638
639 static bus_dmamap_t
640 bcmeth_mapcache_get(
641 struct bcmeth_softc *sc,
642 struct bcmeth_mapcache *dmc)
643 {
644 KASSERT(dmc->dmc_nmaps > 0);
645 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
646 return dmc->dmc_maps[--dmc->dmc_nmaps];
647 }
648
649 static void
650 bcmeth_mapcache_put(
651 struct bcmeth_softc *sc,
652 struct bcmeth_mapcache *dmc,
653 bus_dmamap_t map)
654 {
655 KASSERT(map != NULL);
656 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
657 dmc->dmc_maps[dmc->dmc_nmaps++] = map;
658 }
659
660 static void
661 bcmeth_mapcache_destroy(
662 struct bcmeth_softc *sc,
663 struct bcmeth_mapcache *dmc)
664 {
665 const size_t dmc_size =
666 offsetof(struct bcmeth_mapcache, dmc_maps[dmc->dmc_maxmaps]);
667
668 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
669 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
670 }
671 kmem_intr_free(dmc, dmc_size);
672 }
673
674 static int
675 bcmeth_mapcache_create(
676 struct bcmeth_softc *sc,
677 struct bcmeth_mapcache **dmc_p,
678 size_t maxmaps,
679 size_t maxmapsize,
680 size_t maxseg)
681 {
682 const size_t dmc_size =
683 offsetof(struct bcmeth_mapcache, dmc_maps[maxmaps]);
684 struct bcmeth_mapcache * const dmc =
685 kmem_intr_zalloc(dmc_size, KM_NOSLEEP);
686
687 dmc->dmc_maxmaps = maxmaps;
688 dmc->dmc_nmaps = maxmaps;
689 dmc->dmc_maxmapsize = maxmapsize;
690 dmc->dmc_maxseg = maxseg;
691
692 for (u_int i = 0; i < maxmaps; i++) {
693 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
694 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
695 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
696 if (error) {
697 aprint_error_dev(sc->sc_dev,
698 "failed to create dma map cache "
699 "entry %u of %zu: %d\n",
700 i, maxmaps, error);
701 while (i-- > 0) {
702 bus_dmamap_destroy(sc->sc_dmat,
703 dmc->dmc_maps[i]);
704 }
705 kmem_intr_free(dmc, dmc_size);
706 return error;
707 }
708 KASSERT(dmc->dmc_maps[i] != NULL);
709 }
710
711 *dmc_p = dmc;
712
713 return 0;
714 }
715
716 #if 0
717 static void
718 bcmeth_dmamem_free(
719 bus_dma_tag_t dmat,
720 size_t map_size,
721 bus_dma_segment_t *seg,
722 bus_dmamap_t map,
723 void *kvap)
724 {
725 bus_dmamap_destroy(dmat, map);
726 bus_dmamem_unmap(dmat, kvap, map_size);
727 bus_dmamem_free(dmat, seg, 1);
728 }
729 #endif
730
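/*
 * Allocate a physically contiguous, cache-coherent DMA region (a single
 * page for the descriptor rings), map it into kernel virtual space and
 * load it into a DMA map.  On failure, everything allocated so far is
 * torn down again.
 */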
731 static int
732 bcmeth_dmamem_alloc(
733 bus_dma_tag_t dmat,
734 size_t map_size,
735 bus_dma_segment_t *seg,
736 bus_dmamap_t *map,
737 void **kvap)
738 {
739 int error;
740 int nseg;
741
742 *kvap = NULL;
743 *map = NULL;
744
745 error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0,
746 seg, 1, &nseg, 0);
747 if (error)
748 return error;
749
750 KASSERT(nseg == 1);
751
752 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap,
753 BUS_DMA_COHERENT);
754 if (error == 0) {
755 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
756 map);
757 if (error == 0) {
758 error = bus_dmamap_load(dmat, *map, *kvap, map_size,
759 NULL, 0);
760 if (error == 0)
761 return 0;
762 bus_dmamap_destroy(dmat, *map);
763 *map = NULL;
764 }
765 bus_dmamem_unmap(dmat, *kvap, map_size);
766 *kvap = NULL;
767 }
768 bus_dmamem_free(dmat, seg, nseg);
769 return error;
770 }
771
772 static struct mbuf *
773 bcmeth_rx_buf_alloc(
774 struct bcmeth_softc *sc)
775 {
776 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
777 if (m == NULL) {
778 printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
779 return NULL;
780 }
781 MCLGET(m, M_DONTWAIT);
782 if ((m->m_flags & M_EXT) == 0) {
783 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
784 m_freem(m);
785 return NULL;
786 }
787 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
788
789 bus_dmamap_t map = bcmeth_mapcache_get(sc, sc->sc_rx_mapcache);
790 if (map == NULL) {
791 printf("%s:%d: %s\n", __func__, __LINE__, "map get");
792 m_freem(m);
793 return NULL;
794 }
795 M_SETCTX(m, map);
796 m->m_len = m->m_pkthdr.len = MCLBYTES;
797 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
798 BUS_DMA_READ|BUS_DMA_NOWAIT);
799 if (error) {
800 aprint_error_dev(sc->sc_dev, "failed to load rx dmamap: %d\n",
801 error);
802 M_SETCTX(m, NULL);
803 m_freem(m);
804 bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
805 return NULL;
806 }
807 KASSERT(map->dm_mapsize == MCLBYTES);
808 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
809 BUS_DMASYNC_PREREAD);
810
811 return m;
812 }
813
814 static void
815 bcmeth_rx_map_unload(
816 struct bcmeth_softc *sc,
817 struct mbuf *m)
818 {
819 KASSERT(m);
820 for (; m != NULL; m = m->m_next) {
821 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
822 KASSERT(map);
823 KASSERT(map->dm_mapsize == MCLBYTES);
824 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
825 BUS_DMASYNC_POSTREAD);
826 bus_dmamap_unload(sc->sc_dmat, map);
827 bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
828 M_SETCTX(m, NULL);
829 }
830 }
831
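/*
 * Top up the receive ring: take buffers from the sc_rx_bufcache free list
 * (allocating fresh clusters when it is empty) until rxq_threshold
 * descriptors are in use, then sync the descriptors and advance RCVPTR so
 * the DMA engine knows how far it may fill.
 */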
832 static bool
833 bcmeth_rxq_produce(
834 struct bcmeth_softc *sc,
835 struct bcmeth_rxqueue *rxq)
836 {
837 struct gmac_rxdb *producer = rxq->rxq_producer;
838 #if 0
839 size_t inuse = rxq->rxq_inuse;
840 #endif
841 while (rxq->rxq_inuse < rxq->rxq_threshold) {
842 struct mbuf *m;
843 IF_DEQUEUE(&sc->sc_rx_bufcache, m);
844 if (m == NULL) {
845 m = bcmeth_rx_buf_alloc(sc);
846 if (m == NULL) {
847 printf("%s: bcmeth_rx_buf_alloc failed\n", __func__);
848 break;
849 }
850 }
851 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
852 KASSERT(map);
853
854 producer->rxdb_buflen = MCLBYTES;
855 producer->rxdb_addrlo = map->dm_segs[0].ds_addr;
856 producer->rxdb_flags &= RXDB_FLAG_ET;
857 producer->rxdb_flags |= RXDB_FLAG_IC;
858 *rxq->rxq_mtail = m;
859 rxq->rxq_mtail = &m->m_next;
860 m->m_len = MCLBYTES;
861 m->m_next = NULL;
862 rxq->rxq_inuse++;
863 if (++producer == rxq->rxq_last) {
864 membar_producer();
865 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
866 rxq->rxq_last - rxq->rxq_producer);
867 producer = rxq->rxq_producer = rxq->rxq_first;
868 }
869 }
870 if (producer != rxq->rxq_producer) {
871 membar_producer();
872 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
873 producer - rxq->rxq_producer);
874 rxq->rxq_producer = producer;
875 bcmeth_write_4(sc, rxq->rxq_reg_rcvptr,
876 rxq->rxq_descmap->dm_segs[0].ds_addr
877 + ((uintptr_t)rxq->rxq_producer & RCVPTR));
878 }
879 return true;
880 }
881
882 static void
883 bcmeth_rx_input(
884 struct bcmeth_softc *sc,
885 struct mbuf *m,
886 uint32_t rxdb_flags)
887 {
888 struct ifnet * const ifp = &sc->sc_if;
889
890 bcmeth_rx_map_unload(sc, m);
891
892 m_adj(m, BCMETH_RCVOFFSET);
893
894 switch (__SHIFTOUT(rxdb_flags, RXSTS_PKTTYPE)) {
895 case RXSTS_PKTTYPE_UC:
896 break;
897 case RXSTS_PKTTYPE_MC:
898 m->m_flags |= M_MCAST;
899 break;
900 case RXSTS_PKTTYPE_BC:
901 m->m_flags |= M_BCAST|M_MCAST;
902 break;
903 }
904 if (sc->sc_cmdcfg & PROMISC_EN)
905 m->m_flags |= M_PROMISC;
906 m->m_pkthdr.rcvif = ifp;
907
908 ifp->if_ipackets++;
909 ifp->if_ibytes += m->m_pkthdr.len;
910
911 /*
912 * Let's give it to the network subsystem to deal with.
913 */
914 int s = splnet();
915 bpf_mtap(ifp, m);
916 (*ifp->if_input)(ifp, m);
917 splx(s);
918 }
919
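/*
 * Harvest completed receive descriptors.  The DMA engine writes a status
 * word into the first 4 bytes of the first buffer of each frame; it carries
 * the frame length, the number of descriptors the frame spans, and the
 * error bits checked below.  Consumption stops when we catch up with either
 * our own producer index or the hardware's current descriptor.
 */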
920 static void
921 bcmeth_rxq_consume(
922 struct bcmeth_softc *sc,
923 struct bcmeth_rxqueue *rxq)
924 {
925 struct ifnet * const ifp = &sc->sc_if;
926 struct gmac_rxdb *consumer = rxq->rxq_consumer;
927 size_t rxconsumed = 0;
928
929 for (;;) {
930 if (consumer == rxq->rxq_producer) {
931 rxq->rxq_consumer = consumer;
932 rxq->rxq_inuse -= rxconsumed;
933 KASSERT(rxq->rxq_inuse == 0);
934 return;
935 }
936
937 uint32_t rcvsts0 = bcmeth_read_4(sc, GMAC_RCVSTATUS0);
938 uint32_t currdscr = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
939 if (consumer == rxq->rxq_first + currdscr) {
940 rxq->rxq_consumer = consumer;
941 rxq->rxq_inuse -= rxconsumed;
942 return;
943 }
944 bcmeth_rxq_desc_postsync(sc, rxq, consumer, 1);
945
946 /*
947 * We own this packet again. Copy the rxsts word from it.
948 */
949 rxconsumed++;
950 uint32_t rxsts;
951 KASSERT(rxq->rxq_mhead != NULL);
952 bus_dmamap_t map = M_GETCTX(rxq->rxq_mhead, bus_dmamap_t);
953 bus_dmamap_sync(sc->sc_dmat, map, 0, arm_dcache_align,
954 BUS_DMASYNC_POSTREAD);
955 memcpy(&rxsts, rxq->rxq_mhead->m_data, 4);
956
957 /*
958 * Get the count of descriptors. Fetch the correct number
959 * of mbufs.
960 */
961 size_t desc_count = __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1;
962 struct mbuf *m = rxq->rxq_mhead;
963 struct mbuf *m_last = m;
964 for (size_t i = 1; i < desc_count; i++) {
965 if (++consumer == rxq->rxq_last) {
966 consumer = rxq->rxq_first;
967 }
968 KASSERT(consumer != rxq->rxq_first + currdscr);
969 m_last = m_last->m_next;
970 }
971
972 /*
973 * Now remove it/them from the list of enqueued mbufs.
974 */
975 if ((rxq->rxq_mhead = m_last->m_next) == NULL)
976 rxq->rxq_mtail = &rxq->rxq_mhead;
977 m_last->m_next = NULL;
978
979 if (rxsts & (RXSTS_CRC_ERROR|RXSTS_OVERSIZED|RXSTS_PKT_OVERFLOW)) {
980 aprint_error_dev(sc->sc_dev, "[%zu]: count=%zu rxsts=%#x\n",
981 consumer - rxq->rxq_first, desc_count, rxsts);
982 /*
983 * We encountered an error, take the mbufs and add them
984 * to the rx bufcache so we can quickly reuse them.
985 */
986 ifp->if_ierrors++;
987 do {
988 struct mbuf *m0 = m->m_next;
989 m->m_next = NULL;
990 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
991 m = m0;
992 } while (m);
993 } else {
994 uint32_t framelen = __SHIFTOUT(rxsts, RXSTS_FRAMELEN);
995 framelen += BCMETH_RCVOFFSET;
996 m->m_pkthdr.len = framelen;
997 if (desc_count == 1) {
998 KASSERT(framelen <= MCLBYTES);
999 m->m_len = framelen;
1000 } else {
1001 m_last->m_len = framelen & (MCLBYTES - 1);
1002 }
1003 bcmeth_rx_input(sc, m, rxsts);
1004 }
1005
1006 /*
1007 * Wrap at the last entry!
1008 */
1009 if (++consumer == rxq->rxq_last) {
1010 KASSERT(consumer[-1].rxdb_flags & RXDB_FLAG_ET);
1011 consumer = rxq->rxq_first;
1012 }
1013 }
1014 }
1015
1016 static void
1017 bcmeth_rxq_purge(
1018 struct bcmeth_softc *sc,
1019 struct bcmeth_rxqueue *rxq,
1020 bool discard)
1021 {
1022 struct mbuf *m;
1023
1024 if ((m = rxq->rxq_mhead) != NULL) {
1025 if (discard) {
1026 bcmeth_rx_map_unload(sc, m);
1027 m_freem(m);
1028 } else {
1029 while (m != NULL) {
1030 struct mbuf *m0 = m->m_next;
1031 m->m_next = NULL;
1032 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1033 m = m0;
1034 }
1035 }
1036
1037 }
1038
1039 rxq->rxq_mhead = NULL;
1040 rxq->rxq_mtail = &rxq->rxq_mhead;
1041 rxq->rxq_inuse = 0;
1042 }
1043
1044 static void
1045 bcmeth_rxq_reset(
1046 struct bcmeth_softc *sc,
1047 struct bcmeth_rxqueue *rxq)
1048 {
1049 /*
1050 * sync all the descriptors
1051 */
1052 bcmeth_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
1053 rxq->rxq_last - rxq->rxq_first);
1054
1055 /*
1056 * Make sure we own all descriptors in the ring.
1057 */
1058 struct gmac_rxdb *rxdb;
1059 for (rxdb = rxq->rxq_first; rxdb < rxq->rxq_last - 1; rxdb++) {
1060 rxdb->rxdb_flags = 0;
1061 }
1062
1063 /*
1064 * Last descriptor has the wrap flag.
1065 */
1066 rxdb->rxdb_flags = RXDB_FLAG_ET;
1067
1068 /*
1069 * Reset the producer and consumer indexes.
1070 */
1071 rxq->rxq_consumer = rxq->rxq_first;
1072 rxq->rxq_producer = rxq->rxq_first;
1073 rxq->rxq_inuse = 0;
1074 if (rxq->rxq_threshold < BCMETH_MINRXMBUFS)
1075 rxq->rxq_threshold = BCMETH_MINRXMBUFS;
1076
1077 sc->sc_intmask |= RCVINT|RCVFIFOOF|RCVDESCUF;
1078
1079 /*
1080 * Restart the receiver at the first descriptor
1081 */
1082 bcmeth_write_4(sc, rxq->rxq_reg_rcvaddrlo,
1083 rxq->rxq_descmap->dm_segs[0].ds_addr);
1084 }
1085
1086 static int
1087 bcmeth_rxq_attach(
1088 struct bcmeth_softc *sc,
1089 struct bcmeth_rxqueue *rxq,
1090 u_int qno)
1091 {
1092 size_t map_size = PAGE_SIZE;
1093 size_t desc_count = map_size / sizeof(rxq->rxq_first[0]);
1094 int error;
1095 void *descs;
1096
1097 KASSERT(desc_count == 256 || desc_count == 512);
1098
1099 error = bcmeth_dmamem_alloc(sc->sc_dmat, map_size,
1100 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
1101 if (error)
1102 return error;
1103
1104 memset(descs, 0, map_size);
1105 rxq->rxq_first = descs;
1106 rxq->rxq_last = rxq->rxq_first + desc_count;
1107 rxq->rxq_consumer = descs;
1108 rxq->rxq_producer = descs;
1109
1110 bcmeth_rxq_purge(sc, rxq, true);
1111 bcmeth_rxq_reset(sc, rxq);
1112
1113 rxq->rxq_reg_rcvaddrlo = GMAC_RCVADDR_LOW;
1114 rxq->rxq_reg_rcvctl = GMAC_RCVCONTROL;
1115 rxq->rxq_reg_rcvptr = GMAC_RCVPTR;
1116 rxq->rxq_reg_rcvsts0 = GMAC_RCVSTATUS0;
1117
1118 return 0;
1119 }
1120
1121 static bool
1122 bcmeth_txq_active_p(
1123 struct bcmeth_softc * const sc,
1124 struct bcmeth_txqueue *txq)
1125 {
1126 return !IF_IS_EMPTY(&txq->txq_mbufs);
1127 }
1128
1129 static bool
1130 bcmeth_txq_fillable_p(
1131 struct bcmeth_softc * const sc,
1132 struct bcmeth_txqueue *txq)
1133 {
1134 return txq->txq_free >= txq->txq_threshold;
1135 }
1136
1137 static int
1138 bcmeth_txq_attach(
1139 struct bcmeth_softc *sc,
1140 struct bcmeth_txqueue *txq,
1141 u_int qno)
1142 {
1143 size_t map_size = PAGE_SIZE;
1144 size_t desc_count = map_size / sizeof(txq->txq_first[0]);
1145 int error;
1146 void *descs;
1147
1148 KASSERT(desc_count == 256 || desc_count == 512);
1149
1150 error = bcmeth_dmamem_alloc(sc->sc_dmat, map_size,
1151 &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
1152 if (error)
1153 return error;
1154
1155 memset(descs, 0, map_size);
1156 txq->txq_first = descs;
1157 txq->txq_last = txq->txq_first + desc_count;
1158 txq->txq_consumer = descs;
1159 txq->txq_producer = descs;
1160
1161 IFQ_SET_MAXLEN(&txq->txq_mbufs, BCMETH_MAXTXMBUFS);
1162
1163 txq->txq_reg_xmtaddrlo = GMAC_XMTADDR_LOW;
1164 txq->txq_reg_xmtctl = GMAC_XMTCONTROL;
1165 txq->txq_reg_xmtptr = GMAC_XMTPTR;
1166 txq->txq_reg_xmtsts0 = GMAC_XMTSTATUS0;
1167
1168 bcmeth_txq_reset(sc, txq);
1169
1170 return 0;
1171 }
1172
1173 static int
1174 bcmeth_txq_map_load(
1175 struct bcmeth_softc *sc,
1176 struct bcmeth_txqueue *txq,
1177 struct mbuf *m)
1178 {
1179 bus_dmamap_t map;
1180 int error;
1181
1182 map = M_GETCTX(m, bus_dmamap_t);
1183 if (map != NULL)
1184 return 0;
1185
1186 map = bcmeth_mapcache_get(sc, sc->sc_tx_mapcache);
1187 if (map == NULL)
1188 return ENOMEM;
1189
1190 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1191 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1192 if (error)
1193 return error;
1194
1195 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
1196 BUS_DMASYNC_PREWRITE);
1197 M_SETCTX(m, map);
1198 return 0;
1199 }
1200
1201 static void
1202 bcmeth_txq_map_unload(
1203 struct bcmeth_softc *sc,
1204 struct bcmeth_txqueue *txq,
1205 struct mbuf *m)
1206 {
1207 KASSERT(m);
1208 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1209 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1210 BUS_DMASYNC_POSTWRITE);
1211 bus_dmamap_unload(sc->sc_dmat, map);
1212 bcmeth_mapcache_put(sc, sc->sc_tx_mapcache, map);
1213 }
1214
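/*
 * Turn a DMA-mapped mbuf chain into transmit descriptors: the descriptor
 * for the first segment gets TXDB_FLAG_SF, the last gets TXDB_FLAG_EF, and
 * TXDB_FLAG_IC (interrupt on completion) is requested only every
 * txq_threshold descriptors or when the packet queue is about to fill, so
 * that reclamation interrupts stay infrequent.  Finally XMTPTR is advanced
 * to hand the new descriptors to the hardware.
 */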
1215 static bool
1216 bcmeth_txq_produce(
1217 struct bcmeth_softc *sc,
1218 struct bcmeth_txqueue *txq,
1219 struct mbuf *m)
1220 {
1221 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1222
1223 if (map->dm_nsegs > txq->txq_free)
1224 return false;
1225
1226 /*
1227 * The start-of-frame flag must be set in the first descriptor.
1228 */
1229 struct gmac_txdb *producer = txq->txq_producer;
1230 uint32_t first_flags = TXDB_FLAG_SF;
1231 uint32_t last_flags = TXDB_FLAG_EF;
1232
1233 /*
1234 * If we've produced enough descriptors without consuming any
1235 * we need to ask for an interrupt to reclaim some.
1236 */
1237 txq->txq_lastintr += map->dm_nsegs;
1238 if (txq->txq_lastintr >= txq->txq_threshold
1239 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
1240 txq->txq_lastintr = 0;
1241 last_flags |= TXDB_FLAG_IC;
1242 }
1243
1244 KASSERT(producer != txq->txq_last);
1245
1246 struct gmac_txdb *start = producer;
1247 size_t count = map->dm_nsegs;
1248 producer->txdb_flags |= first_flags;
1249 producer->txdb_addrlo = map->dm_segs[0].ds_addr;
1250 producer->txdb_buflen = map->dm_segs[0].ds_len;
1251 for (u_int i = 1; i < map->dm_nsegs; i++) {
1252 #if 0
1253 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
1254 producer->txdb_flags, producer->txdb_buflen,
1255 producer->txdb_addrlo, producer->txdb_addrhi);
1256 #endif
1257 if (__predict_false(++producer == txq->txq_last)) {
1258 bcmeth_txq_desc_presync(sc, txq, start,
1259 txq->txq_last - start);
1260 count -= txq->txq_last - start;
1261 producer = txq->txq_first;
1262 start = txq->txq_first;
1263 }
1264 producer->txdb_addrlo = map->dm_segs[i].ds_addr;
1265 producer->txdb_buflen = map->dm_segs[i].ds_len;
1266 }
1267 producer->txdb_flags |= last_flags;
1268 #if 0
1269 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
1270 producer->txdb_flags, producer->txdb_buflen,
1271 producer->txdb_addrlo, producer->txdb_addrhi);
1272 #endif
1273 bcmeth_txq_desc_presync(sc, txq, start, count);
1274
1275 /*
1276 * Reduce free count by the number of segments we consumed.
1277 */
1278 txq->txq_free -= map->dm_nsegs;
1279 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
1280 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txdb_flags & TXDB_FLAG_EF) == 0);
1281 KASSERT(producer->txdb_flags & TXDB_FLAG_EF);
1282
1283 #if 0
1284 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%zd..%zd)\n",
1285 __func__, m, m->m_pkthdr.len, map->dm_nsegs,
1286 txq->txq_producer - txq->txq_first, producer - txq->txq_first);
1287 #endif
1288
1289 if (++producer == txq->txq_last)
1290 txq->txq_producer = txq->txq_first;
1291 else
1292 txq->txq_producer = producer;
1293 IF_ENQUEUE(&txq->txq_mbufs, m);
1294 bpf_mtap(&sc->sc_if, m);
1295
1296 /*
1297 * Let the transmitter know there's more to do
1298 */
1299 bcmeth_write_4(sc, txq->txq_reg_xmtptr,
1300 txq->txq_descmap->dm_segs[0].ds_addr
1301 + ((uintptr_t)txq->txq_producer & XMT_LASTDSCR));
1302
1303 return true;
1304 }
1305
1306 static bool
1307 bcmeth_txq_enqueue(
1308 struct bcmeth_softc *sc,
1309 struct bcmeth_txqueue *txq)
1310 {
1311 for (;;) {
1312 if (IF_QFULL(&txq->txq_mbufs))
1313 return false;
1314 struct mbuf *m = txq->txq_next;
1315 if (m == NULL) {
1316 int s = splnet();
1317 IF_DEQUEUE(&sc->sc_if.if_snd, m);
1318 splx(s);
1319 if (m == NULL)
1320 return true;
1321 M_SETCTX(m, NULL);
1322 } else {
1323 txq->txq_next = NULL;
1324 }
1325 int error = bcmeth_txq_map_load(sc, txq, m);
1326 if (error) {
1327 aprint_error_dev(sc->sc_dev,
1328 "discarded packet due to "
1329 "dmamap load failure: %d\n", error);
1330 m_freem(m);
1331 continue;
1332 }
1333 KASSERT(txq->txq_next == NULL);
1334 if (!bcmeth_txq_produce(sc, txq, m)) {
1335 txq->txq_next = m;
1336 return false;
1337 }
1338 KASSERT(txq->txq_next == NULL);
1339 }
1340 }
1341
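/*
 * Reclaim transmit descriptors the hardware has finished with, freeing the
 * associated mbufs and dmamaps whenever an end-of-frame descriptor is seen.
 * Progress is judged against XMT_CURRDSCR in XMTSTATUS0; returns true when
 * the ring is completely empty or at least txq_threshold descriptors are
 * free again.
 */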
1342 static bool
1343 bcmeth_txq_consume(
1344 struct bcmeth_softc *sc,
1345 struct bcmeth_txqueue *txq)
1346 {
1347 struct ifnet * const ifp = &sc->sc_if;
1348 struct gmac_txdb *consumer = txq->txq_consumer;
1349 size_t txfree = 0;
1350
1351 #if 0
1352 printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
1353 #endif
1354
1355 for (;;) {
1356 if (consumer == txq->txq_producer) {
1357 txq->txq_consumer = consumer;
1358 txq->txq_free += txfree;
1359 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
1360 #if 0
1361 printf("%s: empty: freed %zu descriptors going from %zu to %zu\n",
1362 __func__, txfree, txq->txq_free - txfree, txq->txq_free);
1363 #endif
1364 KASSERT(txq->txq_lastintr == 0);
1365 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1);
1366 return true;
1367 }
1368 bcmeth_txq_desc_postsync(sc, txq, consumer, 1);
1369 uint32_t s0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
1370 if (consumer == txq->txq_first + __SHIFTOUT(s0, XMT_CURRDSCR)) {
1371 txq->txq_consumer = consumer;
1372 txq->txq_free += txfree;
1373 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
1374 #if 0
1375 printf("%s: freed %zu descriptors\n",
1376 __func__, txfree);
1377 #endif
1378 return bcmeth_txq_fillable_p(sc, txq);
1379 }
1380
1381 /*
1382 * If this is the last descriptor in the chain, get the
1383 * mbuf, free its dmamap, and free the mbuf chain itself.
1384 */
1385 const uint32_t txdb_flags = consumer->txdb_flags;
1386 if (txdb_flags & TXDB_FLAG_EF) {
1387 struct mbuf *m;
1388
1389 IF_DEQUEUE(&txq->txq_mbufs, m);
1390 KASSERT(m);
1391 bcmeth_txq_map_unload(sc, txq, m);
1392 #if 0
1393 printf("%s: mbuf %p: consumed a %u byte packet\n",
1394 __func__, m, m->m_pkthdr.len);
1395 #endif
1396 ifp->if_opackets++;
1397 ifp->if_obytes += m->m_pkthdr.len;
1398 if (m->m_flags & M_MCAST)
1399 ifp->if_omcasts++;
1400 m_freem(m);
1401 }
1402
1403 /*
1404 * We own this packet again. Clear all flags except wrap.
1405 */
1406 txfree++;
1407
1408 /*
1409 * Wrap at the last entry!
1410 */
1411 if (txdb_flags & TXDB_FLAG_ET) {
1412 consumer->txdb_flags = TXDB_FLAG_ET;
1413 KASSERT(consumer + 1 == txq->txq_last);
1414 consumer = txq->txq_first;
1415 } else {
1416 consumer->txdb_flags = 0;
1417 consumer++;
1418 KASSERT(consumer < txq->txq_last);
1419 }
1420 }
1421 }
1422
1423 static void
1424 bcmeth_txq_purge(
1425 struct bcmeth_softc *sc,
1426 struct bcmeth_txqueue *txq)
1427 {
1428 struct mbuf *m;
1429 KASSERT((bcmeth_read_4(sc, UNIMAC_COMMAND_CONFIG) & TX_ENA) == 0);
1430
1431 for (;;) {
1432 IF_DEQUEUE(&txq->txq_mbufs, m);
1433 if (m == NULL)
1434 break;
1435 bcmeth_txq_map_unload(sc, txq, m);
1436 m_freem(m);
1437 }
1438 if ((m = txq->txq_next) != NULL) {
1439 txq->txq_next = NULL;
1440 bcmeth_txq_map_unload(sc, txq, m);
1441 m_freem(m);
1442 }
1443 }
1444
1445 static void
1446 bcmeth_txq_reset(
1447 struct bcmeth_softc *sc,
1448 struct bcmeth_txqueue *txq)
1449 {
1450 /*
1451 * sync all the descriptors
1452 */
1453 bcmeth_txq_desc_postsync(sc, txq, txq->txq_first,
1454 txq->txq_last - txq->txq_first);
1455
1456 /*
1457 * Make sure we own all descriptors in the ring.
1458 */
1459 struct gmac_txdb *txdb;
1460 for (txdb = txq->txq_first; txdb < txq->txq_last - 1; txdb++) {
1461 txdb->txdb_flags = 0;
1462 }
1463
1464 /*
1465 * Last descriptor has the wrap flag.
1466 */
1467 txdb->txdb_flags = TXDB_FLAG_ET;
1468
1469 /*
1470 * Reset the producer and consumer indexes.
1471 */
1472 txq->txq_consumer = txq->txq_first;
1473 txq->txq_producer = txq->txq_first;
1474 txq->txq_free = txq->txq_last - txq->txq_first - 1;
1475 txq->txq_threshold = txq->txq_free / 2;
1476 txq->txq_lastintr = 0;
1477
1478 /*
1479 * What do we want to get interrupted on?
1480 */
1481 sc->sc_intmask |= XMTINT_0 | XMTUF;
1482
1483 /*
1484 * Restart the transmitter at the first descriptor
1485 */
1486 bcmeth_write_4(sc, txq->txq_reg_xmtaddrlo,
1487 txq->txq_descmap->dm_segs->ds_addr);
1488 }
1489
1490 static void
1491 bcmeth_ifstart(struct ifnet *ifp)
1492 {
1493 struct bcmeth_softc * const sc = ifp->if_softc;
1494
1495 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
1496 softint_schedule(sc->sc_soft_ih);
1497 }
1498
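/*
 * Hardware interrupt handler.  Runs at IPL_VM under sc_hwlock: it
 * acknowledges and masks the interrupt causes it understands (receive,
 * transmit, receive descriptor underflow), records them in sc_soft_flags,
 * and defers the real work to bcmeth_soft_intr() via softint_schedule().
 * Any unexpected cause masks the offending bits and requests a reinit.
 */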
1499 int
1500 bcmeth_intr(void *arg)
1501 {
1502 struct bcmeth_softc * const sc = arg;
1503 uint32_t soft_flags = 0;
1504 int rv = 0;
1505
1506 mutex_enter(sc->sc_hwlock);
1507
1508 sc->sc_ev_intr.ev_count++;
1509
1510 for (;;) {
1511 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
1512 intstatus &= sc->sc_intmask;
1513 bcmeth_write_4(sc, GMAC_INTSTATUS, intstatus); /* write 1 to clear */
1514 if (intstatus == 0) {
1515 break;
1516 }
1517 #if 0
1518 aprint_normal_dev(sc->sc_dev, "%s: intstatus=%#x intmask=%#x\n",
1519 __func__, intstatus, bcmeth_read_4(sc, GMAC_INTMASK));
1520 #endif
1521 if (intstatus & RCVINT) {
1522 intstatus &= ~RCVINT;
1523 sc->sc_intmask &= ~RCVINT;
1524 soft_flags |= SOFT_RXINTR;
1525 }
1526
1527 if (intstatus & XMTINT_0) {
1528 intstatus &= ~XMTINT_0;
1529 sc->sc_intmask &= ~XMTINT_0;
1530 soft_flags |= SOFT_TXINTR;
1531 }
1532
1533 if (intstatus & RCVDESCUF) {
1534 intstatus &= ~RCVDESCUF;
1535 sc->sc_intmask &= ~RCVDESCUF;
1536 soft_flags |= SOFT_RXUNDERFLOW;
1537 }
1538
1539 if (intstatus) {
1540 aprint_error_dev(sc->sc_dev, "intr: intstatus=%#x\n",
1541 intstatus);
1542 Debugger();
1543 sc->sc_intmask &= ~intstatus;
1544 soft_flags |= SOFT_REINIT;
1545 break;
1546 }
1547 }
1548
1549 if (soft_flags) {
1550 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1551 atomic_or_uint(&sc->sc_soft_flags, soft_flags);
1552 softint_schedule(sc->sc_soft_ih);
1553 rv = 1;
1554 }
1555
1556 mutex_exit(sc->sc_hwlock);
1557
1558 return rv;
1559 }
1560
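/*
 * Software interrupt handler.  Under sc_lock it handles, in order: a
 * requested reinit, receive descriptor underflow (by growing the receive
 * threshold), transmit completion and refill, and receive processing,
 * then re-enables the interrupt sources it serviced and refills the
 * receive ring if the interface is still running.
 */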
1561 void
1562 bcmeth_soft_intr(void *arg)
1563 {
1564 struct bcmeth_softc * const sc = arg;
1565 struct ifnet * const ifp = &sc->sc_if;
1566
1567 mutex_enter(sc->sc_lock);
1568
1569 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);
1570
1571 sc->sc_ev_soft_intr.ev_count++;
1572
1573 if (soft_flags & SOFT_REINIT) {
1574 int s = splnet();
1575 bcmeth_ifinit(ifp);
1576 splx(s);
1577 soft_flags = 0;
1578 }
1579
1580 if (soft_flags & SOFT_RXUNDERFLOW) {
1581 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
1582 size_t threshold = 5 * rxq->rxq_threshold / 4;
1583 if (threshold >= rxq->rxq_last - rxq->rxq_first) {
1584 threshold = rxq->rxq_last - rxq->rxq_first - 1;
1585 } else {
1586 sc->sc_intmask |= RCVDESCUF;
1587 }
1588 aprint_normal_dev(sc->sc_dev,
1589 "increasing receive buffers from %zu to %zu\n",
1590 rxq->rxq_threshold, threshold);
1591 rxq->rxq_threshold = threshold;
1592 }
1593
1594 if ((soft_flags & SOFT_TXINTR)
1595 || bcmeth_txq_active_p(sc, &sc->sc_txq)) {
1596 /*
1597 * Let's do what we came here for. Consume transmitted
1598 * packets off the transmit ring.
1599 */
1600 if (!bcmeth_txq_consume(sc, &sc->sc_txq)
1601 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
1602 sc->sc_ev_tx_stall.ev_count++;
1603 ifp->if_flags |= IFF_OACTIVE;
1604 } else {
1605 ifp->if_flags &= ~IFF_OACTIVE;
1606 }
1607 sc->sc_intmask |= XMTINT_0;
1608 }
1609
1610 if (soft_flags & (SOFT_RXINTR|SOFT_RXUNDERFLOW)) {
1611 /*
1612 * Let's consume whatever received packets are waiting on the ring.
1613 */
1614 bcmeth_rxq_consume(sc, &sc->sc_rxq);
1615 sc->sc_intmask |= RCVINT;
1616 }
1617
1618 if (ifp->if_flags & IFF_RUNNING) {
1619 bcmeth_rxq_produce(sc, &sc->sc_rxq);
1620 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1621 } else {
1622 KASSERT((soft_flags & SOFT_RXUNDERFLOW) == 0);
1623 }
1624
1625 mutex_exit(sc->sc_lock);
1626 }
1627