1 /*- 2 * Copyright (c) 2012 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas of 3am Software Foundry. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 * POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #define _ARM32_BUS_DMA_PRIVATE 31 #define GMAC_PRIVATE 32 33 #include "locators.h" 34 #include "opt_broadcom.h" 35 36 #include <sys/cdefs.h> 37 38 __KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.46 2025/10/04 04:44:19 thorpej Exp $"); 39 40 #include <sys/param.h> 41 #include <sys/atomic.h> 42 #include <sys/bus.h> 43 #include <sys/device.h> 44 #include <sys/ioctl.h> 45 #include <sys/intr.h> 46 #include <sys/kmem.h> 47 #include <sys/mutex.h> 48 #include <sys/socket.h> 49 #include <sys/systm.h> 50 #include <sys/workqueue.h> 51 52 #include <net/if.h> 53 #include <net/if_ether.h> 54 #include <net/if_media.h> 55 #include <net/if_dl.h> 56 #include <net/bpf.h> 57 58 #include <dev/mii/miivar.h> 59 60 #include <arm/locore.h> 61 62 #include <arm/broadcom/bcm53xx_reg.h> 63 #include <arm/broadcom/bcm53xx_var.h> 64 65 //#define BCMETH_MPSAFE 66 67 #ifdef BCMETH_COUNTERS 68 #define BCMETH_EVCNT_ADD(a, b) ((void)((a).ev_count += (b))) 69 #else 70 #define BCMETH_EVCNT_ADD(a, b) do { } while (/*CONSTCOND*/0) 71 #endif 72 #define BCMETH_EVCNT_INCR(a) BCMETH_EVCNT_ADD((a), 1) 73 74 #define BCMETH_MAXTXMBUFS 128 75 #define BCMETH_NTXSEGS 30 76 #define BCMETH_MAXRXMBUFS 255 77 #define BCMETH_MINRXMBUFS 64 78 #define BCMETH_NRXSEGS 1 79 #define BCMETH_RINGSIZE PAGE_SIZE 80 81 #if 1 82 #define BCMETH_RCVMAGIC 0xfeedface 83 #endif 84 85 static int bcmeth_ccb_match(device_t, cfdata_t, void *); 86 static void bcmeth_ccb_attach(device_t, device_t, void *); 87 88 struct bcmeth_txqueue { 89 bus_dmamap_t txq_descmap; 90 struct gmac_txdb *txq_consumer; 91 struct gmac_txdb *txq_producer; 92 struct gmac_txdb *txq_first; 93 struct gmac_txdb *txq_last; 94 struct ifqueue txq_mbufs; 95 struct mbuf *txq_next; 96 size_t txq_free; 97 size_t txq_threshold; 98 size_t txq_lastintr; 99 bus_size_t txq_reg_xmtaddrlo; 100 bus_size_t txq_reg_xmtptr; 101 bus_size_t txq_reg_xmtctl; 102 bus_size_t txq_reg_xmtsts0; 103 bus_size_t txq_reg_xmtsts1; 104 bus_dma_segment_t txq_descmap_seg; 105 }; 106 107 struct 
bcmeth_rxqueue { 108 bus_dmamap_t rxq_descmap; 109 struct gmac_rxdb *rxq_consumer; 110 struct gmac_rxdb *rxq_producer; 111 struct gmac_rxdb *rxq_first; 112 struct gmac_rxdb *rxq_last; 113 struct mbuf *rxq_mhead; 114 struct mbuf **rxq_mtail; 115 struct mbuf *rxq_mconsumer; 116 size_t rxq_inuse; 117 size_t rxq_threshold; 118 bus_size_t rxq_reg_rcvaddrlo; 119 bus_size_t rxq_reg_rcvptr; 120 bus_size_t rxq_reg_rcvctl; 121 bus_size_t rxq_reg_rcvsts0; 122 bus_size_t rxq_reg_rcvsts1; 123 bus_dma_segment_t rxq_descmap_seg; 124 }; 125 126 struct bcmeth_mapcache { 127 u_int dmc_nmaps; 128 u_int dmc_maxseg; 129 u_int dmc_maxmaps; 130 u_int dmc_maxmapsize; 131 bus_dmamap_t dmc_maps[0]; 132 }; 133 134 struct bcmeth_softc { 135 device_t sc_dev; 136 bus_space_tag_t sc_bst; 137 bus_space_handle_t sc_bsh; 138 bus_dma_tag_t sc_dmat; 139 kmutex_t *sc_lock; 140 kmutex_t *sc_hwlock; 141 struct ethercom sc_ec; 142 #define sc_if sc_ec.ec_if 143 struct ifmedia sc_media; 144 void *sc_soft_ih; 145 void *sc_ih; 146 147 struct bcmeth_rxqueue sc_rxq; 148 struct bcmeth_txqueue sc_txq; 149 150 size_t sc_rcvoffset; 151 uint32_t sc_macaddr[2]; 152 uint32_t sc_maxfrm; 153 uint32_t sc_cmdcfg; 154 uint32_t sc_intmask; 155 uint32_t sc_rcvlazy; 156 volatile uint32_t sc_soft_flags; 157 #define SOFT_RXINTR 0x01 158 #define SOFT_TXINTR 0x02 159 160 #ifdef BCMETH_COUNTERS 161 struct evcnt sc_ev_intr; 162 struct evcnt sc_ev_soft_intr; 163 struct evcnt sc_ev_work; 164 struct evcnt sc_ev_tx_stall; 165 struct evcnt sc_ev_rx_badmagic_lo; 166 struct evcnt sc_ev_rx_badmagic_hi; 167 #endif 168 169 struct ifqueue sc_rx_bufcache; 170 struct bcmeth_mapcache *sc_rx_mapcache; 171 struct bcmeth_mapcache *sc_tx_mapcache; 172 173 struct workqueue *sc_workq; 174 struct work sc_work; 175 176 volatile uint32_t sc_work_flags; 177 #define WORK_RXINTR 0x01 178 #define WORK_RXUNDERFLOW 0x02 179 #define WORK_REINIT 0x04 180 181 uint8_t sc_enaddr[ETHER_ADDR_LEN]; 182 }; 183 184 static void bcmeth_ifstart(struct ifnet *); 185 static void bcmeth_ifwatchdog(struct ifnet *); 186 static int bcmeth_ifinit(struct ifnet *); 187 static void bcmeth_ifstop(struct ifnet *, int); 188 static int bcmeth_ifioctl(struct ifnet *, u_long, void *); 189 190 static int bcmeth_mapcache_create(struct bcmeth_softc *, 191 struct bcmeth_mapcache **, size_t, size_t, size_t); 192 static void bcmeth_mapcache_destroy(struct bcmeth_softc *, 193 struct bcmeth_mapcache *); 194 static bus_dmamap_t bcmeth_mapcache_get(struct bcmeth_softc *, 195 struct bcmeth_mapcache *); 196 static void bcmeth_mapcache_put(struct bcmeth_softc *, 197 struct bcmeth_mapcache *, bus_dmamap_t); 198 199 static int bcmeth_txq_attach(struct bcmeth_softc *, 200 struct bcmeth_txqueue *, u_int); 201 static void bcmeth_txq_purge(struct bcmeth_softc *, 202 struct bcmeth_txqueue *); 203 static void bcmeth_txq_reset(struct bcmeth_softc *, 204 struct bcmeth_txqueue *); 205 static bool bcmeth_txq_consume(struct bcmeth_softc *, 206 struct bcmeth_txqueue *); 207 static bool bcmeth_txq_produce(struct bcmeth_softc *, 208 struct bcmeth_txqueue *, struct mbuf *m); 209 static bool bcmeth_txq_active_p(struct bcmeth_softc *, 210 struct bcmeth_txqueue *); 211 212 static int bcmeth_rxq_attach(struct bcmeth_softc *, 213 struct bcmeth_rxqueue *, u_int); 214 static bool bcmeth_rxq_produce(struct bcmeth_softc *, 215 struct bcmeth_rxqueue *); 216 static void bcmeth_rxq_purge(struct bcmeth_softc *, 217 struct bcmeth_rxqueue *, bool); 218 static void bcmeth_rxq_reset(struct bcmeth_softc *, 219 struct bcmeth_rxqueue *); 220 221 static int 
bcmeth_intr(void *); 222 #ifdef BCMETH_MPSAFETX 223 static void bcmeth_soft_txintr(struct bcmeth_softc *); 224 #endif 225 static void bcmeth_soft_intr(void *); 226 static void bcmeth_worker(struct work *, void *); 227 228 static int bcmeth_mediachange(struct ifnet *); 229 static void bcmeth_mediastatus(struct ifnet *, struct ifmediareq *); 230 231 static inline uint32_t 232 bcmeth_read_4(struct bcmeth_softc *sc, bus_size_t o) 233 { 234 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, o); 235 } 236 237 static inline void 238 bcmeth_write_4(struct bcmeth_softc *sc, bus_size_t o, uint32_t v) 239 { 240 bus_space_write_4(sc->sc_bst, sc->sc_bsh, o, v); 241 } 242 243 CFATTACH_DECL_NEW(bcmeth_ccb, sizeof(struct bcmeth_softc), 244 bcmeth_ccb_match, bcmeth_ccb_attach, NULL, NULL); 245 246 static int 247 bcmeth_ccb_match(device_t parent, cfdata_t cf, void *aux) 248 { 249 struct bcmccb_attach_args * const ccbaa = aux; 250 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc; 251 252 if (strcmp(cf->cf_name, loc->loc_name)) 253 return 0; 254 255 const int port __diagused = cf->cf_loc[BCMCCBCF_PORT]; 256 KASSERT(port == BCMCCBCF_PORT_DEFAULT || port == loc->loc_port); 257 258 return 1; 259 } 260 261 static void 262 bcmeth_ccb_attach(device_t parent, device_t self, void *aux) 263 { 264 struct bcmeth_softc * const sc = device_private(self); 265 struct ethercom * const ec = &sc->sc_ec; 266 struct ifnet * const ifp = &ec->ec_if; 267 struct bcmccb_attach_args * const ccbaa = aux; 268 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc; 269 const char * const xname = device_xname(self); 270 int error; 271 272 sc->sc_bst = ccbaa->ccbaa_ccb_bst; 273 sc->sc_dmat = ccbaa->ccbaa_dmat; 274 bus_space_subregion(sc->sc_bst, ccbaa->ccbaa_ccb_bsh, 275 loc->loc_offset, loc->loc_size, &sc->sc_bsh); 276 277 /* 278 * We need to use the coherent dma tag for the GMAC. 279 */ 280 sc->sc_dmat = &bcm53xx_coherent_dma_tag; 281 #if _ARM32_NEED_BUS_DMA_BOUNCE 282 if (device_cfdata(self)->cf_flags & 2) { 283 sc->sc_dmat = &bcm53xx_bounce_dma_tag; 284 } 285 #endif 286 287 if (! 
ether_getaddr(self, sc->sc_enaddr)) { 288 uint32_t mac0 = bcmeth_read_4(sc, UNIMAC_MAC_0); 289 uint32_t mac1 = bcmeth_read_4(sc, UNIMAC_MAC_1); 290 if ((mac0 == 0 && mac1 == 0) || (mac1 & 1)) { 291 aprint_error(": mac-address property is missing\n"); 292 return; 293 } 294 sc->sc_enaddr[0] = (mac0 >> 0) & 0xff; 295 sc->sc_enaddr[1] = (mac0 >> 8) & 0xff; 296 sc->sc_enaddr[2] = (mac0 >> 16) & 0xff; 297 sc->sc_enaddr[3] = (mac0 >> 24) & 0xff; 298 sc->sc_enaddr[4] = (mac1 >> 0) & 0xff; 299 sc->sc_enaddr[5] = (mac1 >> 8) & 0xff; 300 } 301 sc->sc_dev = self; 302 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 303 sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM); 304 305 bcmeth_write_4(sc, GMAC_INTMASK, 0); // disable interrupts 306 307 aprint_naive("\n"); 308 aprint_normal(": Gigabit Ethernet Controller\n"); 309 310 error = bcmeth_rxq_attach(sc, &sc->sc_rxq, 0); 311 if (error) { 312 aprint_error(": failed to init rxq: %d\n", error); 313 goto fail_1; 314 } 315 316 error = bcmeth_txq_attach(sc, &sc->sc_txq, 0); 317 if (error) { 318 aprint_error(": failed to init txq: %d\n", error); 319 goto fail_1; 320 } 321 322 error = bcmeth_mapcache_create(sc, &sc->sc_rx_mapcache, 323 BCMETH_MAXRXMBUFS, MCLBYTES, BCMETH_NRXSEGS); 324 if (error) { 325 aprint_error(": failed to allocate rx dmamaps: %d\n", error); 326 goto fail_1; 327 } 328 329 error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache, 330 BCMETH_MAXTXMBUFS, MCLBYTES, BCMETH_NTXSEGS); 331 if (error) { 332 aprint_error(": failed to allocate tx dmamaps: %d\n", error); 333 goto fail_1; 334 } 335 336 error = workqueue_create(&sc->sc_workq, xname, bcmeth_worker, sc, 337 (PRI_USER + MAXPRI_USER) / 2, IPL_NET, WQ_MPSAFE|WQ_PERCPU); 338 if (error) { 339 aprint_error(": failed to create workqueue: %d\n", error); 340 goto fail_1; 341 } 342 343 sc->sc_soft_ih = softint_establish(SOFTINT_MPSAFE | SOFTINT_NET, 344 bcmeth_soft_intr, sc); 345 346 if (sc->sc_soft_ih == NULL) { 347 aprint_error_dev(self, "failed to establish soft interrupt\n"); 348 goto fail_2; 349 } 350 351 sc->sc_ih = intr_establish(loc->loc_intrs[0], IPL_VM, IST_LEVEL, 352 bcmeth_intr, sc); 353 354 if (sc->sc_ih == NULL) { 355 aprint_error_dev(self, "failed to establish interrupt %d\n", 356 loc->loc_intrs[0]); 357 goto fail_3; 358 } else { 359 aprint_normal_dev(self, "interrupting on irq %d\n", 360 loc->loc_intrs[0]); 361 } 362 363 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 364 ether_sprintf(sc->sc_enaddr)); 365 366 /* 367 * Since each port is plugged into the switch/flow-accelerator, 368 * we hard code it at GigE Full-Duplex with Flow Control enabled.
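 * (No MII/PHY is attached in this driver; the code below just registers
 * a single fixed ifmedia entry, and bcmeth_mediastatus() always reports
 * 1000baseT full-duplex as the active media.)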
369 */ 370 int ifmedia = IFM_ETHER | IFM_1000_T | IFM_FDX; 371 //ifmedia |= IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 372 ec->ec_ifmedia = &sc->sc_media; 373 ifmedia_init(&sc->sc_media, IFM_IMASK, bcmeth_mediachange, 374 bcmeth_mediastatus); 375 ifmedia_add(&sc->sc_media, ifmedia, 0, NULL); 376 ifmedia_set(&sc->sc_media, ifmedia); 377 378 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU; 379 380 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 381 ifp->if_softc = sc; 382 ifp->if_baudrate = IF_Mbps(1000); 383 ifp->if_capabilities = 0; 384 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 385 #ifdef BCMETH_MPSAFE 386 ifp->if_flags2 = IFF2_MPSAFE; 387 #endif 388 ifp->if_ioctl = bcmeth_ifioctl; 389 ifp->if_start = bcmeth_ifstart; 390 ifp->if_watchdog = bcmeth_ifwatchdog; 391 ifp->if_init = bcmeth_ifinit; 392 ifp->if_stop = bcmeth_ifstop; 393 IFQ_SET_READY(&ifp->if_snd); 394 395 bcmeth_ifstop(ifp, true); 396 397 /* 398 * Attach the interface. 399 */ 400 if_initialize(ifp); 401 ether_ifattach(ifp, sc->sc_enaddr); 402 if_register(ifp); 403 404 #ifdef BCMETH_COUNTERS 405 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR, 406 NULL, xname, "intr"); 407 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR, 408 NULL, xname, "soft intr"); 409 evcnt_attach_dynamic(&sc->sc_ev_work, EVCNT_TYPE_MISC, 410 NULL, xname, "work items"); 411 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC, 412 NULL, xname, "tx stalls"); 413 evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_lo, EVCNT_TYPE_MISC, 414 NULL, xname, "rx badmagic lo"); 415 evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_hi, EVCNT_TYPE_MISC, 416 NULL, xname, "rx badmagic hi"); 417 #endif 418 419 return; 420 421 fail_3: 422 softint_disestablish(sc->sc_soft_ih); 423 fail_2: 424 workqueue_destroy(sc->sc_workq); 425 fail_1: 426 mutex_obj_free(sc->sc_lock); 427 mutex_obj_free(sc->sc_hwlock); 428 } 429 430 static int 431 bcmeth_mediachange(struct ifnet *ifp) 432 { 433 //struct bcmeth_softc * const sc = ifp->if_softc; 434 return 0; 435 } 436 437 static void 438 bcmeth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifm) 439 { 440 //struct bcmeth_softc * const sc = ifp->if_softc; 441 442 ifm->ifm_status = IFM_AVALID | IFM_ACTIVE; 443 ifm->ifm_active = IFM_ETHER | IFM_FDX | IFM_1000_T; 444 } 445 446 static uint64_t 447 bcmeth_macaddr_create(const uint8_t *enaddr) 448 { 449 return (enaddr[3] << 0) // UNIMAC_MAC_0 450 | (enaddr[2] << 8) // UNIMAC_MAC_0 451 | (enaddr[1] << 16) // UNIMAC_MAC_0 452 | ((uint64_t)enaddr[0] << 24) // UNIMAC_MAC_0 453 | ((uint64_t)enaddr[5] << 32) // UNIMAC_MAC_1 454 | ((uint64_t)enaddr[4] << 40); // UNIMAC_MAC_1 455 } 456 457 static int 458 bcmeth_ifinit(struct ifnet *ifp) 459 { 460 struct bcmeth_softc * const sc = ifp->if_softc; 461 int error = 0; 462 463 sc->sc_maxfrm = uimax(ifp->if_mtu + 32, MCLBYTES); 464 if (ifp->if_mtu > ETHERMTU_JUMBO) 465 return error; 466 467 KASSERT(ifp->if_flags & IFF_UP); 468 469 /* 470 * Stop the interface 471 */ 472 bcmeth_ifstop(ifp, 0); 473 474 /* 475 * Reserve enough space at the front so that we can insert a maxsized 476 * link header and a VLAN tag. Also make sure we have enough room for 477 * the rcvsts field as well. 
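 * As a concrete example (assuming the typical max_linkhdr of 16):
 *   sc_rcvoffset = 16 + 4 - sizeof(struct ether_header) = 20 - 14 = 6
 * which passes the assertions below ((6 & 3) == 2, >= 6, and within
 * RCVCTL_RCVOFFSET), leaves the first four bytes of the cluster for the
 * hardware's receive status word, and puts the 14-byte Ethernet header
 * on a half-word boundary so the payload behind it ends up word-aligned.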
478 */ 479 KASSERT(ALIGN(max_linkhdr) == max_linkhdr); 480 KASSERTMSG(max_linkhdr > sizeof(struct ether_header), "%u > %zu", 481 max_linkhdr, sizeof(struct ether_header)); 482 sc->sc_rcvoffset = max_linkhdr + 4 - sizeof(struct ether_header); 483 if (sc->sc_rcvoffset <= 4) 484 sc->sc_rcvoffset += 4; 485 KASSERT((sc->sc_rcvoffset & 3) == 2); 486 KASSERT(sc->sc_rcvoffset <= __SHIFTOUT(RCVCTL_RCVOFFSET, RCVCTL_RCVOFFSET)); 487 KASSERT(sc->sc_rcvoffset >= 6); 488 489 /* 490 * If our frame size has changed (or it's our first time through) 491 * destroy the existing transmit mapcache. 492 */ 493 if (sc->sc_tx_mapcache != NULL 494 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) { 495 bcmeth_mapcache_destroy(sc, sc->sc_tx_mapcache); 496 sc->sc_tx_mapcache = NULL; 497 } 498 499 if (sc->sc_tx_mapcache == NULL) { 500 error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache, 501 BCMETH_MAXTXMBUFS, sc->sc_maxfrm, BCMETH_NTXSEGS); 502 if (error) 503 return error; 504 } 505 506 sc->sc_cmdcfg = NO_LENGTH_CHECK | PAUSE_IGNORE 507 | __SHIFTIN(ETH_SPEED_1000, ETH_SPEED) 508 | RX_ENA | TX_ENA; 509 510 if (ifp->if_flags & IFF_PROMISC) { 511 sc->sc_cmdcfg |= PROMISC_EN; 512 } else { 513 sc->sc_cmdcfg &= ~PROMISC_EN; 514 } 515 516 const uint8_t * const lladdr = CLLADDR(ifp->if_sadl); 517 const uint64_t macstnaddr = bcmeth_macaddr_create(lladdr); 518 519 /* 520 * We make sure that a received Ethernet packet starts on a non-word 521 * boundary so that the packet payload will be on a word boundary. 522 * So to check the destination address we keep around two words to 523 * quickly compare with. 524 */ 525 #if __ARMEL__ 526 sc->sc_macaddr[0] = lladdr[0] | (lladdr[1] << 8); 527 sc->sc_macaddr[1] = lladdr[2] | (lladdr[3] << 8) 528 | (lladdr[4] << 16) | (lladdr[5] << 24); 529 #else 530 sc->sc_macaddr[0] = lladdr[1] | (lladdr[0] << 8); 531 sc->sc_macaddr[1] = lladdr[5] | (lladdr[4] << 8) 532 | (lladdr[3] << 16) | (lladdr[2] << 24); 533 #endif 534 535 sc->sc_intmask = DESCPROTOERR | DATAERR | DESCERR; 536 537 /* 5. Load RCVADDR_LO with new pointer */ 538 bcmeth_rxq_reset(sc, &sc->sc_rxq); 539 540 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl, 541 __SHIFTIN(sc->sc_rcvoffset, RCVCTL_RCVOFFSET) 542 | RCVCTL_PARITY_DIS 543 | RCVCTL_OFLOW_CONTINUE 544 | __SHIFTIN(3, RCVCTL_BURSTLEN)); 545 546 /* 6. Load XMTADDR_LO with new pointer */ 547 bcmeth_txq_reset(sc, &sc->sc_txq); 548 549 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, XMTCTL_DMA_ACT_INDEX 550 | XMTCTL_PARITY_DIS 551 | __SHIFTIN(3, XMTCTL_BURSTLEN)); 552 553 /* 7. Setup other UNIMAC registers */ 554 bcmeth_write_4(sc, UNIMAC_FRAME_LEN, sc->sc_maxfrm); 555 bcmeth_write_4(sc, UNIMAC_MAC_0, (uint32_t)(macstnaddr >> 0)); 556 bcmeth_write_4(sc, UNIMAC_MAC_1, (uint32_t)(macstnaddr >> 32)); 557 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, sc->sc_cmdcfg); 558 559 uint32_t devctl = bcmeth_read_4(sc, GMAC_DEVCONTROL); 560 devctl |= RGMII_LINK_STATUS_SEL | NWAY_AUTO_POLL_EN | TXARB_STRICT_MODE; 561 devctl &= ~FLOW_CTRL_MODE; 562 devctl &= ~MIB_RD_RESET_EN; 563 devctl &= ~RXQ_OVERFLOW_CTRL_SEL; 564 devctl &= ~CPU_FLOW_CTRL_ON; 565 bcmeth_write_4(sc, GMAC_DEVCONTROL, devctl); 566 567 /* Setup lazy receive (at most 1ms). */ 568 const struct cpu_softc * const cpu = curcpu()->ci_softc; 569 sc->sc_rcvlazy = __SHIFTIN(4, INTRCVLAZY_FRAMECOUNT) 570 | __SHIFTIN(cpu->cpu_clk.clk_apb / 1000, INTRCVLAZY_TIMEOUT); 571 bcmeth_write_4(sc, GMAC_INTRCVLAZY, sc->sc_rcvlazy); 572 573 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL.
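 * (The numbered comments (5, 6, 7, 11, 12) appear to track the GMAC
 * bring-up sequence from the Broadcom documentation; steps that do not
 * apply to this driver are skipped, hence the gaps in the numbering.)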
*/ 574 sc->sc_intmask |= XMTINT_0 | XMTUF; 575 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, 576 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl) | XMTCTL_ENABLE); 577 578 579 /* 12. Enable receive queues in RQUEUE, */ 580 sc->sc_intmask |= RCVINT | RCVDESCUF | RCVFIFOOF; 581 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl, 582 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl) | RCVCTL_ENABLE); 583 584 bcmeth_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */ 585 586 #if 0 587 aprint_normal_dev(sc->sc_dev, 588 "devctl=%#x ucmdcfg=%#x xmtctl=%#x rcvctl=%#x\n", 589 devctl, sc->sc_cmdcfg, 590 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl), 591 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl)); 592 #endif 593 594 sc->sc_soft_flags = 0; 595 596 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 597 598 ifp->if_flags |= IFF_RUNNING; 599 600 return error; 601 } 602 603 static void 604 bcmeth_ifstop(struct ifnet *ifp, int disable) 605 { 606 struct bcmeth_softc * const sc = ifp->if_softc; 607 struct bcmeth_txqueue * const txq = &sc->sc_txq; 608 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq; 609 610 KASSERT(!cpu_intr_p()); 611 612 sc->sc_soft_flags = 0; 613 sc->sc_work_flags = 0; 614 615 /* Disable Rx processing */ 616 bcmeth_write_4(sc, rxq->rxq_reg_rcvctl, 617 bcmeth_read_4(sc, rxq->rxq_reg_rcvctl) & ~RCVCTL_ENABLE); 618 619 /* Disable Tx processing */ 620 bcmeth_write_4(sc, txq->txq_reg_xmtctl, 621 bcmeth_read_4(sc, txq->txq_reg_xmtctl) & ~XMTCTL_ENABLE); 622 623 /* Disable all interrupts */ 624 bcmeth_write_4(sc, GMAC_INTMASK, 0); 625 626 for (;;) { 627 uint32_t tx0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0); 628 uint32_t rx0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0); 629 if (__SHIFTOUT(tx0, XMTSTATE) == XMTSTATE_DIS 630 && __SHIFTOUT(rx0, RCVSTATE) == RCVSTATE_DIS) 631 break; 632 delay(50); 633 } 634 /* 635 * Now reset the controller. 636 * 637 * 3. Set SW_RESET bit in UNIMAC_COMMAND_CONFIG register 638 * 4. Clear SW_RESET bit in UNIMAC_COMMAND_CONFIG register 639 */ 640 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, SW_RESET); 641 bcmeth_write_4(sc, GMAC_INTSTATUS, ~0); 642 sc->sc_intmask = 0; 643 ifp->if_flags &= ~IFF_RUNNING; 644 645 /* 646 * Let's consume any remaining transmitted packets. And if we are 647 * disabling the interface, purge ourselves of any untransmitted 648 * packets. But don't consume any received packets, just drop them. 649 * If we aren't disabling the interface, save the mbufs in the 650 * receive queue for reuse. 
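 * (bcmeth_rxq_purge() below handles both cases: when 'disable' is set
 * it unloads the DMA maps and frees the receive mbufs, otherwise it
 * moves them onto sc_rx_bufcache so bcmeth_rxq_produce() can reuse
 * them without reallocating clusters.)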
651 */ 652 bcmeth_rxq_purge(sc, &sc->sc_rxq, disable); 653 bcmeth_txq_consume(sc, &sc->sc_txq); 654 if (disable) { 655 bcmeth_txq_purge(sc, &sc->sc_txq); 656 IF_PURGE(&ifp->if_snd); 657 } 658 659 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, 0); 660 } 661 662 static void 663 bcmeth_ifwatchdog(struct ifnet *ifp) 664 { 665 } 666 667 static int 668 bcmeth_ifioctl(struct ifnet *ifp, u_long cmd, void *data) 669 { 670 const int s = splnet(); 671 int error; 672 673 switch (cmd) { 674 default: 675 error = ether_ioctl(ifp, cmd, data); 676 if (error != ENETRESET) 677 break; 678 679 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) { 680 error = 0; 681 break; 682 } 683 error = bcmeth_ifinit(ifp); 684 break; 685 } 686 687 splx(s); 688 return error; 689 } 690 691 static void 692 bcmeth_rxq_desc_presync( 693 struct bcmeth_softc *sc, 694 struct bcmeth_rxqueue *rxq, 695 struct gmac_rxdb *rxdb, 696 size_t count) 697 { 698 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 699 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb), 700 BUS_DMASYNC_PREWRITE); 701 } 702 703 static void 704 bcmeth_rxq_desc_postsync( 705 struct bcmeth_softc *sc, 706 struct bcmeth_rxqueue *rxq, 707 struct gmac_rxdb *rxdb, 708 size_t count) 709 { 710 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 711 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb), 712 BUS_DMASYNC_POSTWRITE); 713 } 714 715 static void 716 bcmeth_txq_desc_presync( 717 struct bcmeth_softc *sc, 718 struct bcmeth_txqueue *txq, 719 struct gmac_txdb *txdb, 720 size_t count) 721 { 722 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 723 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb), 724 BUS_DMASYNC_PREWRITE); 725 } 726 727 static void 728 bcmeth_txq_desc_postsync( 729 struct bcmeth_softc *sc, 730 struct bcmeth_txqueue *txq, 731 struct gmac_txdb *txdb, 732 size_t count) 733 { 734 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 735 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb), 736 BUS_DMASYNC_POSTWRITE); 737 } 738 739 static bus_dmamap_t 740 bcmeth_mapcache_get( 741 struct bcmeth_softc *sc, 742 struct bcmeth_mapcache *dmc) 743 { 744 KASSERT(dmc->dmc_nmaps > 0); 745 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL); 746 return dmc->dmc_maps[--dmc->dmc_nmaps]; 747 } 748 749 static void 750 bcmeth_mapcache_put( 751 struct bcmeth_softc *sc, 752 struct bcmeth_mapcache *dmc, 753 bus_dmamap_t map) 754 { 755 KASSERT(map != NULL); 756 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps); 757 dmc->dmc_maps[dmc->dmc_nmaps++] = map; 758 } 759 760 static void 761 bcmeth_mapcache_destroy( 762 struct bcmeth_softc *sc, 763 struct bcmeth_mapcache *dmc) 764 { 765 const size_t dmc_size = 766 offsetof(struct bcmeth_mapcache, dmc_maps[dmc->dmc_maxmaps]); 767 768 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) { 769 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]); 770 } 771 kmem_intr_free(dmc, dmc_size); 772 } 773 774 static int 775 bcmeth_mapcache_create( 776 struct bcmeth_softc *sc, 777 struct bcmeth_mapcache **dmc_p, 778 size_t maxmaps, 779 size_t maxmapsize, 780 size_t maxseg) 781 { 782 const size_t dmc_size = 783 offsetof(struct bcmeth_mapcache, dmc_maps[maxmaps]); 784 struct bcmeth_mapcache * const dmc = 785 kmem_intr_zalloc(dmc_size, KM_NOSLEEP); 786 787 dmc->dmc_maxmaps = maxmaps; 788 dmc->dmc_nmaps = maxmaps; 789 dmc->dmc_maxmapsize = maxmapsize; 790 dmc->dmc_maxseg = maxseg; 791 792 for (u_int i = 0; i < maxmaps; i++) { 793 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize, 794 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0, 795 BUS_DMA_WAITOK | 
BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]); 796 if (error) { 797 aprint_error_dev(sc->sc_dev, 798 "failed to create dma map cache " 799 "entry %u of %zu: %d\n", 800 i, maxmaps, error); 801 while (i-- > 0) { 802 bus_dmamap_destroy(sc->sc_dmat, 803 dmc->dmc_maps[i]); 804 } 805 kmem_intr_free(dmc, dmc_size); 806 return error; 807 } 808 KASSERT(dmc->dmc_maps[i] != NULL); 809 } 810 811 *dmc_p = dmc; 812 813 return 0; 814 } 815 816 #if 0 817 static void 818 bcmeth_dmamem_free( 819 bus_dma_tag_t dmat, 820 size_t map_size, 821 bus_dma_segment_t *seg, 822 bus_dmamap_t map, 823 void *kvap) 824 { 825 bus_dmamap_destroy(dmat, map); 826 bus_dmamem_unmap(dmat, kvap, map_size); 827 bus_dmamem_free(dmat, seg, 1); 828 } 829 #endif 830 831 static int 832 bcmeth_dmamem_alloc( 833 bus_dma_tag_t dmat, 834 size_t map_size, 835 bus_dma_segment_t *seg, 836 bus_dmamap_t *map, 837 void **kvap) 838 { 839 int error; 840 int nseg; 841 842 *kvap = NULL; 843 *map = NULL; 844 845 error = bus_dmamem_alloc(dmat, map_size, 2*PAGE_SIZE, 0, 846 seg, 1, &nseg, 0); 847 if (error) 848 return error; 849 850 KASSERT(nseg == 1); 851 852 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 0); 853 if (error == 0) { 854 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0, 855 map); 856 if (error == 0) { 857 error = bus_dmamap_load(dmat, *map, *kvap, map_size, 858 NULL, 0); 859 if (error == 0) 860 return 0; 861 bus_dmamap_destroy(dmat, *map); 862 *map = NULL; 863 } 864 bus_dmamem_unmap(dmat, *kvap, map_size); 865 *kvap = NULL; 866 } 867 bus_dmamem_free(dmat, seg, nseg); 868 return error; 869 } 870 871 static struct mbuf * 872 bcmeth_rx_buf_alloc( 873 struct bcmeth_softc *sc) 874 { 875 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA); 876 if (m == NULL) { 877 printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr"); 878 return NULL; 879 } 880 MCLGET(m, M_DONTWAIT); 881 if ((m->m_flags & M_EXT) == 0) { 882 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET"); 883 m_freem(m); 884 return NULL; 885 } 886 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 887 888 bus_dmamap_t map = bcmeth_mapcache_get(sc, sc->sc_rx_mapcache); 889 if (map == NULL) { 890 printf("%s:%d: %s\n", __func__, __LINE__, "map get"); 891 m_freem(m); 892 return NULL; 893 } 894 M_SETCTX(m, map); 895 m->m_len = m->m_pkthdr.len = MCLBYTES; 896 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 897 BUS_DMA_READ | BUS_DMA_NOWAIT); 898 if (error) { 899 aprint_error_dev(sc->sc_dev, "failed to load rx dmamap: %d\n", 900 error); 901 M_SETCTX(m, NULL); 902 m_freem(m); 903 bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map); 904 return NULL; 905 } 906 KASSERT(map->dm_mapsize == MCLBYTES); 907 #ifdef BCMETH_RCVMAGIC 908 *mtod(m, uint32_t *) = htole32(BCMETH_RCVMAGIC); 909 bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t), 910 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 911 bus_dmamap_sync(sc->sc_dmat, map, sizeof(uint32_t), 912 map->dm_mapsize - sizeof(uint32_t), BUS_DMASYNC_PREREAD); 913 #else 914 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 915 BUS_DMASYNC_PREREAD); 916 #endif 917 918 return m; 919 } 920 921 static void 922 bcmeth_rx_map_unload( 923 struct bcmeth_softc *sc, 924 struct mbuf *m) 925 { 926 KASSERT(m); 927 for (; m != NULL; m = m->m_next) { 928 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 929 KASSERT(map); 930 KASSERT(map->dm_mapsize == MCLBYTES); 931 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len, 932 BUS_DMASYNC_POSTREAD); 933 bus_dmamap_unload(sc->sc_dmat, map); 934 bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map); 935 M_SETCTX(m, NULL); 936 } 937 }
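/*
 * A brief summary of the receive-ring handshake implemented below
 * (drawn from the code itself, not from the hardware documentation):
 * bcmeth_rxq_produce() attaches one MCLBYTES cluster to each free
 * gmac_rxdb descriptor, keeps the mbufs on the rxq_mhead/rxq_mtail
 * list in ring order, and writes the updated producer position to
 * RCVPTR so the DMA engine knows how many descriptors it may fill.
 * bcmeth_rxq_consume() then walks the ring up to RCV_CURRDSCR (from
 * RCVSTATUS0), reads the receive status word the hardware deposited
 * in the first four bytes of each buffer, and passes completed mbuf
 * chains to bcmeth_rx_input().
 */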
938 939 static bool 940 bcmeth_rxq_produce( 941 struct bcmeth_softc *sc, 942 struct bcmeth_rxqueue *rxq) 943 { 944 struct gmac_rxdb *producer = rxq->rxq_producer; 945 bool produced = false; 946 947 while (rxq->rxq_inuse < rxq->rxq_threshold) { 948 struct mbuf *m; 949 IF_DEQUEUE(&sc->sc_rx_bufcache, m); 950 if (m == NULL) { 951 m = bcmeth_rx_buf_alloc(sc); 952 if (m == NULL) { 953 printf("%s: bcmeth_rx_buf_alloc failed\n", 954 __func__); 955 break; 956 } 957 } 958 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 959 KASSERT(map); 960 961 producer->rxdb_buflen = htole32(MCLBYTES); 962 producer->rxdb_addrlo = htole32(map->dm_segs[0].ds_addr); 963 producer->rxdb_flags &= htole32(RXDB_FLAG_ET); 964 *rxq->rxq_mtail = m; 965 rxq->rxq_mtail = &m->m_next; 966 m->m_len = MCLBYTES; 967 m->m_next = NULL; 968 rxq->rxq_inuse++; 969 if (++producer == rxq->rxq_last) { 970 membar_producer(); 971 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 972 rxq->rxq_last - rxq->rxq_producer); 973 producer = rxq->rxq_producer = rxq->rxq_first; 974 } 975 produced = true; 976 } 977 if (produced) { 978 membar_producer(); 979 if (producer != rxq->rxq_producer) { 980 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 981 producer - rxq->rxq_producer); 982 rxq->rxq_producer = producer; 983 } 984 bcmeth_write_4(sc, rxq->rxq_reg_rcvptr, 985 rxq->rxq_descmap->dm_segs[0].ds_addr 986 + ((uintptr_t)producer & RCVPTR)); 987 } 988 return true; 989 } 990 991 static void 992 bcmeth_rx_input( 993 struct bcmeth_softc *sc, 994 struct mbuf *m, 995 uint32_t rxdb_flags) 996 { 997 struct ifnet * const ifp = &sc->sc_if; 998 999 bcmeth_rx_map_unload(sc, m); 1000 1001 m_adj(m, sc->sc_rcvoffset); 1002 1003 /* 1004 * If we are in promiscuous mode and this isn't a multicast, check the 1005 * destination address to make sure it matches our own. If it doesn't, 1006 * mark the packet as being received promiscuously. 1007 */ 1008 if ((sc->sc_cmdcfg & PROMISC_EN) 1009 && (m->m_data[0] & 1) == 0 1010 && (*(uint16_t *)&m->m_data[0] != sc->sc_macaddr[0] 1011 || *(uint32_t *)&m->m_data[2] != sc->sc_macaddr[1])) { 1012 m->m_flags |= M_PROMISC; 1013 } 1014 m_set_rcvif(m, ifp); 1015 1016 /* 1017 * Let's give it to the network subsystm to deal with. 1018 */ 1019 #ifdef BCMETH_MPSAFE 1020 mutex_exit(sc->sc_lock); 1021 if_input(ifp, m); 1022 mutex_enter(sc->sc_lock); 1023 #else 1024 int s = splnet(); 1025 if_input(ifp, m); 1026 splx(s); 1027 #endif 1028 } 1029 1030 static bool 1031 bcmeth_rxq_consume( 1032 struct bcmeth_softc *sc, 1033 struct bcmeth_rxqueue *rxq, 1034 size_t atmost) 1035 { 1036 struct ifnet * const ifp = &sc->sc_if; 1037 struct gmac_rxdb *consumer = rxq->rxq_consumer; 1038 size_t rxconsumed = 0; 1039 bool didconsume = false; 1040 1041 while (atmost-- > 0) { 1042 if (consumer == rxq->rxq_producer) { 1043 KASSERT(rxq->rxq_inuse == 0); 1044 break; 1045 } 1046 1047 uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0); 1048 uint32_t currdscr = __SHIFTOUT(rcvsts0, RCV_CURRDSCR); 1049 if (consumer == rxq->rxq_first + currdscr) { 1050 break; 1051 } 1052 bcmeth_rxq_desc_postsync(sc, rxq, consumer, 1); 1053 1054 /* 1055 * We own this packet again. Copy the rxsts word from it. 
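 * (The receive status word is the first 32 bits the DMA engine writes
 * into each buffer, ahead of the frame data; under BCMETH_RCVMAGIC the
 * buffer was pre-seeded with a magic value in bcmeth_rx_buf_alloc(), so
 * a still-magic word below is treated as a buffer the hardware never
 * filled and is counted as an input error.)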
1056 */ 1057 rxconsumed++; 1058 didconsume = true; 1059 uint32_t rxsts; 1060 KASSERT(rxq->rxq_mhead != NULL); 1061 bus_dmamap_t map = M_GETCTX(rxq->rxq_mhead, bus_dmamap_t); 1062 bus_dmamap_sync(sc->sc_dmat, map, 0, arm_dcache_align, 1063 BUS_DMASYNC_POSTREAD); 1064 memcpy(&rxsts, rxq->rxq_mhead->m_data, 4); 1065 rxsts = le32toh(rxsts); 1066 #if 0 1067 KASSERTMSG(rxsts != BCMETH_RCVMAGIC, "currdscr=%u consumer=%zd", 1068 currdscr, consumer - rxq->rxq_first); 1069 #endif 1070 1071 /* 1072 * Get the count of descriptors. Fetch the correct number 1073 * of mbufs. 1074 */ 1075 #ifdef BCMETH_RCVMAGIC 1076 size_t desc_count = rxsts != BCMETH_RCVMAGIC 1077 ? __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1 : 1; 1078 #else 1079 size_t desc_count = __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1; 1080 #endif 1081 struct mbuf *m = rxq->rxq_mhead; 1082 struct mbuf *m_last = m; 1083 for (size_t i = 1; i < desc_count; i++) { 1084 if (++consumer == rxq->rxq_last) { 1085 consumer = rxq->rxq_first; 1086 } 1087 KASSERTMSG(consumer != rxq->rxq_first + currdscr, 1088 "i=%zu rxsts=%#x desc_count=%zu currdscr=%u " 1089 "consumer=%zd", i, rxsts, desc_count, currdscr, 1090 consumer - rxq->rxq_first); 1091 m_last = m_last->m_next; 1092 } 1093 1094 /* 1095 * Now remove it/them from the list of enqueued mbufs. 1096 */ 1097 if ((rxq->rxq_mhead = m_last->m_next) == NULL) 1098 rxq->rxq_mtail = &rxq->rxq_mhead; 1099 m_last->m_next = NULL; 1100 1101 #ifdef BCMETH_RCVMAGIC 1102 if (rxsts == BCMETH_RCVMAGIC) { 1103 if_statinc(ifp, if_ierrors); 1104 if ((m->m_ext.ext_paddr >> 28) == 8) { 1105 BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_lo); 1106 } else { 1107 BCMETH_EVCNT_INCR( sc->sc_ev_rx_badmagic_hi); 1108 } 1109 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1110 } else 1111 #endif /* BCMETH_RCVMAGIC */ 1112 if (rxsts 1113 & (RXSTS_CRC_ERROR |RXSTS_OVERSIZED |RXSTS_PKT_OVERFLOW)) { 1114 aprint_error_dev(sc->sc_dev, 1115 "[%zu]: count=%zu rxsts=%#x\n", 1116 consumer - rxq->rxq_first, desc_count, rxsts); 1117 /* 1118 * We encountered an error, take the mbufs and add them 1119 * to the rx bufcache so we can quickly reuse them. 1120 */ 1121 if_statinc(ifp, if_ierrors); 1122 do { 1123 struct mbuf *m0 = m->m_next; 1124 m->m_next = NULL; 1125 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1126 m = m0; 1127 } while (m); 1128 } else { 1129 uint32_t framelen = __SHIFTOUT(rxsts, RXSTS_FRAMELEN); 1130 framelen += sc->sc_rcvoffset; 1131 m->m_pkthdr.len = framelen; 1132 if (desc_count == 1) { 1133 KASSERT(framelen <= MCLBYTES); 1134 m->m_len = framelen; 1135 } else { 1136 m_last->m_len = framelen & (MCLBYTES - 1); 1137 } 1138 1139 #ifdef BCMETH_MPSAFE 1140 /* 1141 * Wrap at the last entry! 1142 */ 1143 if (++consumer == rxq->rxq_last) { 1144 KASSERT(consumer[-1].rxdb_flags 1145 & htole32(RXDB_FLAG_ET)); 1146 rxq->rxq_consumer = rxq->rxq_first; 1147 } else { 1148 rxq->rxq_consumer = consumer; 1149 } 1150 rxq->rxq_inuse -= rxconsumed; 1151 #endif /* BCMETH_MPSAFE */ 1152 1153 /* 1154 * Receive the packet (which releases our lock) 1155 */ 1156 bcmeth_rx_input(sc, m, rxsts); 1157 1158 #ifdef BCMETH_MPSAFE 1159 /* 1160 * Since we had to give up our lock, we need to 1161 * refresh these. 1162 */ 1163 consumer = rxq->rxq_consumer; 1164 rxconsumed = 0; 1165 continue; 1166 #endif /* BCMETH_MPSAFE */ 1167 } 1168 1169 /* 1170 * Wrap at the last entry! 1171 */ 1172 if (++consumer == rxq->rxq_last) { 1173 KASSERT(consumer[-1].rxdb_flags & htole32(RXDB_FLAG_ET)); 1174 consumer = rxq->rxq_first; 1175 } 1176 } 1177 1178 /* 1179 * Update queue info. 
1180 */ 1181 rxq->rxq_consumer = consumer; 1182 rxq->rxq_inuse -= rxconsumed; 1183 1184 /* 1185 * Did we consume anything? 1186 */ 1187 return didconsume; 1188 } 1189 1190 static void 1191 bcmeth_rxq_purge( 1192 struct bcmeth_softc *sc, 1193 struct bcmeth_rxqueue *rxq, 1194 bool discard) 1195 { 1196 struct mbuf *m; 1197 1198 if ((m = rxq->rxq_mhead) != NULL) { 1199 if (discard) { 1200 bcmeth_rx_map_unload(sc, m); 1201 m_freem(m); 1202 } else { 1203 while (m != NULL) { 1204 struct mbuf *m0 = m->m_next; 1205 m->m_next = NULL; 1206 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1207 m = m0; 1208 } 1209 } 1210 } 1211 1212 rxq->rxq_mhead = NULL; 1213 rxq->rxq_mtail = &rxq->rxq_mhead; 1214 rxq->rxq_inuse = 0; 1215 } 1216 1217 static void 1218 bcmeth_rxq_reset( 1219 struct bcmeth_softc *sc, 1220 struct bcmeth_rxqueue *rxq) 1221 { 1222 /* 1223 * sync all the descriptors 1224 */ 1225 bcmeth_rxq_desc_postsync(sc, rxq, rxq->rxq_first, 1226 rxq->rxq_last - rxq->rxq_first); 1227 1228 /* 1229 * Make sure we own all descriptors in the ring. 1230 */ 1231 struct gmac_rxdb *rxdb; 1232 for (rxdb = rxq->rxq_first; rxdb < rxq->rxq_last - 1; rxdb++) { 1233 rxdb->rxdb_flags = htole32(RXDB_FLAG_IC); 1234 } 1235 1236 /* 1237 * Last descriptor has the wrap flag. 1238 */ 1239 rxdb->rxdb_flags = htole32(RXDB_FLAG_ET | RXDB_FLAG_IC); 1240 1241 /* 1242 * Reset the producer consumer indexes. 1243 */ 1244 rxq->rxq_consumer = rxq->rxq_first; 1245 rxq->rxq_producer = rxq->rxq_first; 1246 rxq->rxq_inuse = 0; 1247 if (rxq->rxq_threshold < BCMETH_MINRXMBUFS) 1248 rxq->rxq_threshold = BCMETH_MINRXMBUFS; 1249 1250 sc->sc_intmask |= RCVINT | RCVFIFOOF | RCVDESCUF; 1251 1252 /* 1253 * Restart the receiver at the first descriptor 1254 */ 1255 bcmeth_write_4(sc, rxq->rxq_reg_rcvaddrlo, 1256 rxq->rxq_descmap->dm_segs[0].ds_addr); 1257 } 1258 1259 static int 1260 bcmeth_rxq_attach( 1261 struct bcmeth_softc *sc, 1262 struct bcmeth_rxqueue *rxq, 1263 u_int qno) 1264 { 1265 size_t desc_count = BCMETH_RINGSIZE / sizeof(rxq->rxq_first[0]); 1266 int error; 1267 void *descs; 1268 1269 KASSERT(desc_count == 256 || desc_count == 512); 1270 1271 error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE, 1272 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs); 1273 if (error) 1274 return error; 1275 1276 memset(descs, 0, BCMETH_RINGSIZE); 1277 rxq->rxq_first = descs; 1278 rxq->rxq_last = rxq->rxq_first + desc_count; 1279 rxq->rxq_consumer = descs; 1280 rxq->rxq_producer = descs; 1281 1282 bcmeth_rxq_purge(sc, rxq, true); 1283 bcmeth_rxq_reset(sc, rxq); 1284 1285 rxq->rxq_reg_rcvaddrlo = GMAC_RCVADDR_LOW; 1286 rxq->rxq_reg_rcvctl = GMAC_RCVCONTROL; 1287 rxq->rxq_reg_rcvptr = GMAC_RCVPTR; 1288 rxq->rxq_reg_rcvsts0 = GMAC_RCVSTATUS0; 1289 rxq->rxq_reg_rcvsts1 = GMAC_RCVSTATUS1; 1290 1291 return 0; 1292 } 1293 1294 static bool 1295 bcmeth_txq_active_p( 1296 struct bcmeth_softc * const sc, 1297 struct bcmeth_txqueue *txq) 1298 { 1299 return !IF_IS_EMPTY(&txq->txq_mbufs); 1300 } 1301 1302 static bool 1303 bcmeth_txq_fillable_p( 1304 struct bcmeth_softc * const sc, 1305 struct bcmeth_txqueue *txq) 1306 { 1307 return txq->txq_free >= txq->txq_threshold; 1308 } 1309 1310 static int 1311 bcmeth_txq_attach( 1312 struct bcmeth_softc *sc, 1313 struct bcmeth_txqueue *txq, 1314 u_int qno) 1315 { 1316 size_t desc_count = BCMETH_RINGSIZE / sizeof(txq->txq_first[0]); 1317 int error; 1318 void *descs; 1319 1320 KASSERT(desc_count == 256 || desc_count == 512); 1321 1322 error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE, 1323 &txq->txq_descmap_seg, &txq->txq_descmap, 
&descs); 1324 if (error) 1325 return error; 1326 1327 memset(descs, 0, BCMETH_RINGSIZE); 1328 txq->txq_first = descs; 1329 txq->txq_last = txq->txq_first + desc_count; 1330 txq->txq_consumer = descs; 1331 txq->txq_producer = descs; 1332 1333 IFQ_SET_MAXLEN(&txq->txq_mbufs, BCMETH_MAXTXMBUFS); 1334 1335 txq->txq_reg_xmtaddrlo = GMAC_XMTADDR_LOW; 1336 txq->txq_reg_xmtctl = GMAC_XMTCONTROL; 1337 txq->txq_reg_xmtptr = GMAC_XMTPTR; 1338 txq->txq_reg_xmtsts0 = GMAC_XMTSTATUS0; 1339 txq->txq_reg_xmtsts1 = GMAC_XMTSTATUS1; 1340 1341 bcmeth_txq_reset(sc, txq); 1342 1343 return 0; 1344 } 1345 1346 static int 1347 bcmeth_txq_map_load( 1348 struct bcmeth_softc *sc, 1349 struct bcmeth_txqueue *txq, 1350 struct mbuf *m) 1351 { 1352 bus_dmamap_t map; 1353 int error; 1354 1355 map = M_GETCTX(m, bus_dmamap_t); 1356 if (map != NULL) 1357 return 0; 1358 1359 map = bcmeth_mapcache_get(sc, sc->sc_tx_mapcache); 1360 if (map == NULL) 1361 return ENOMEM; 1362 1363 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1364 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1365 if (error) 1366 return error; 1367 1368 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len, 1369 BUS_DMASYNC_PREWRITE); 1370 M_SETCTX(m, map); 1371 return 0; 1372 } 1373 1374 static void 1375 bcmeth_txq_map_unload( 1376 struct bcmeth_softc *sc, 1377 struct bcmeth_txqueue *txq, 1378 struct mbuf *m) 1379 { 1380 KASSERT(m); 1381 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1382 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1383 BUS_DMASYNC_POSTWRITE); 1384 bus_dmamap_unload(sc->sc_dmat, map); 1385 bcmeth_mapcache_put(sc, sc->sc_tx_mapcache, map); 1386 } 1387 1388 static bool 1389 bcmeth_txq_produce( 1390 struct bcmeth_softc *sc, 1391 struct bcmeth_txqueue *txq, 1392 struct mbuf *m) 1393 { 1394 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1395 1396 if (map->dm_nsegs > txq->txq_free) 1397 return false; 1398 1399 /* 1400 * TCP Offload flag must be set in the first descriptor. 1401 */ 1402 struct gmac_txdb *producer = txq->txq_producer; 1403 uint32_t first_flags = TXDB_FLAG_SF; 1404 uint32_t last_flags = TXDB_FLAG_EF; 1405 1406 /* 1407 * If we've produced enough descriptors without consuming any 1408 * we need to ask for an interrupt to reclaim some. 
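 * (TXDB_FLAG_IC below requests a transmit-completion interrupt; it is
 * set roughly once every txq_threshold descriptors, or when txq_mbufs
 * is about to reach its limit, rather than on every packet.)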
1409 */ 1410 txq->txq_lastintr += map->dm_nsegs; 1411 if (txq->txq_lastintr >= txq->txq_threshold 1412 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) { 1413 txq->txq_lastintr = 0; 1414 last_flags |= TXDB_FLAG_IC; 1415 } 1416 1417 KASSERT(producer != txq->txq_last); 1418 1419 struct gmac_txdb *start = producer; 1420 size_t count = map->dm_nsegs; 1421 producer->txdb_flags |= htole32(first_flags); 1422 producer->txdb_addrlo = htole32(map->dm_segs[0].ds_addr); 1423 producer->txdb_buflen = htole32(map->dm_segs[0].ds_len); 1424 for (u_int i = 1; i < map->dm_nsegs; i++) { 1425 #if 0 1426 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first, 1427 le32toh(producer->txdb_flags), 1428 le32toh(producer->txdb_buflen), 1429 le32toh(producer->txdb_addrlo), 1430 le32toh(producer->txdb_addrhi)); 1431 #endif 1432 if (__predict_false(++producer == txq->txq_last)) { 1433 bcmeth_txq_desc_presync(sc, txq, start, 1434 txq->txq_last - start); 1435 count -= txq->txq_last - start; 1436 producer = txq->txq_first; 1437 start = txq->txq_first; 1438 } 1439 producer->txdb_addrlo = htole32(map->dm_segs[i].ds_addr); 1440 producer->txdb_buflen = htole32(map->dm_segs[i].ds_len); 1441 } 1442 producer->txdb_flags |= htole32(last_flags); 1443 #if 0 1444 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first, 1445 le32toh(producer->txdb_flags), le32toh(producer->txdb_buflen), 1446 le32toh(producer->txdb_addrlo), le32toh(producer->txdb_addrhi)); 1447 #endif 1448 if (count) 1449 bcmeth_txq_desc_presync(sc, txq, start, count); 1450 1451 /* 1452 * Reduce free count by the number of segments we consumed. 1453 */ 1454 txq->txq_free -= map->dm_nsegs; 1455 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer); 1456 KASSERT(map->dm_nsegs == 1 1457 || (txq->txq_producer->txdb_flags & htole32(TXDB_FLAG_EF)) == 0); 1458 KASSERT(producer->txdb_flags & htole32(TXDB_FLAG_EF)); 1459 1460 #if 0 1461 printf("%s: mbuf %p: produced a %u byte packet in %u segments " 1462 "(%zd..%zd)\n", __func__, m, m->m_pkthdr.len, map->dm_nsegs, 1463 txq->txq_producer - txq->txq_first, producer - txq->txq_first); 1464 #endif 1465 1466 if (producer + 1 == txq->txq_last) 1467 txq->txq_producer = txq->txq_first; 1468 else 1469 txq->txq_producer = producer + 1; 1470 IF_ENQUEUE(&txq->txq_mbufs, m); 1471 1472 /* 1473 * Let the transmitter know there's more to do 1474 */ 1475 bcmeth_write_4(sc, txq->txq_reg_xmtptr, 1476 txq->txq_descmap->dm_segs[0].ds_addr 1477 + ((uintptr_t)txq->txq_producer & XMT_LASTDSCR)); 1478 1479 return true; 1480 } 1481 1482 static struct mbuf * 1483 bcmeth_copy_packet(struct mbuf *m) 1484 { 1485 struct mbuf *mext = NULL; 1486 size_t misalignment = 0; 1487 size_t hlen = 0; 1488 1489 for (mext = m; mext != NULL; mext = mext->m_next) { 1490 if (mext->m_flags & M_EXT) { 1491 misalignment = mtod(mext, vaddr_t) & arm_dcache_align; 1492 break; 1493 } 1494 hlen += m->m_len; 1495 } 1496 1497 struct mbuf *n = m->m_next; 1498 if (m != mext && hlen + misalignment <= MHLEN && false) { 1499 KASSERT(m->m_pktdat <= m->m_data 1500 && m->m_data <= &m->m_pktdat[MHLEN - m->m_len]); 1501 size_t oldoff = m->m_data - m->m_pktdat; 1502 size_t off; 1503 if (mext == NULL) { 1504 off = (oldoff + hlen > MHLEN) ? 
0 : oldoff; 1505 } else { 1506 off = MHLEN - (hlen + misalignment); 1507 } 1508 KASSERT(off + hlen + misalignment <= MHLEN); 1509 if (((oldoff ^ off) & arm_dcache_align) != 0 || off < oldoff) { 1510 memmove(&m->m_pktdat[off], m->m_data, m->m_len); 1511 m->m_data = &m->m_pktdat[off]; 1512 } 1513 m_copydata(n, 0, hlen - m->m_len, &m->m_data[m->m_len]); 1514 m->m_len = hlen; 1515 m->m_next = mext; 1516 while (n != mext) { 1517 n = m_free(n); 1518 } 1519 return m; 1520 } 1521 1522 struct mbuf *m0 = m_gethdr(M_DONTWAIT, m->m_type); 1523 if (m0 == NULL) { 1524 return NULL; 1525 } 1526 m_copy_pkthdr(m0, m); 1527 MCLAIM(m0, m->m_owner); 1528 if (m0->m_pkthdr.len > MHLEN) { 1529 MCLGET(m0, M_DONTWAIT); 1530 if ((m0->m_flags & M_EXT) == 0) { 1531 m_freem(m0); 1532 return NULL; 1533 } 1534 } 1535 m0->m_len = m->m_pkthdr.len; 1536 m_copydata(m, 0, m0->m_len, mtod(m0, void *)); 1537 m_freem(m); 1538 return m0; 1539 } 1540 1541 static bool 1542 bcmeth_txq_enqueue( 1543 struct bcmeth_softc *sc, 1544 struct bcmeth_txqueue *txq) 1545 { 1546 for (;;) { 1547 if (IF_QFULL(&txq->txq_mbufs)) 1548 return false; 1549 struct mbuf *m = txq->txq_next; 1550 if (m == NULL) { 1551 int s = splnet(); 1552 IF_DEQUEUE(&sc->sc_if.if_snd, m); 1553 splx(s); 1554 if (m == NULL) 1555 return true; 1556 M_SETCTX(m, NULL); 1557 } else { 1558 txq->txq_next = NULL; 1559 } 1560 /* 1561 * If LINK2 is set and this packet uses multiple mbufs, 1562 * consolidate it into a single mbuf. 1563 */ 1564 if (m->m_next != NULL && (sc->sc_if.if_flags & IFF_LINK2)) { 1565 struct mbuf *m0 = bcmeth_copy_packet(m); 1566 if (m0 == NULL) { 1567 txq->txq_next = m; 1568 return true; 1569 } 1570 m = m0; 1571 } 1572 int error = bcmeth_txq_map_load(sc, txq, m); 1573 if (error) { 1574 aprint_error_dev(sc->sc_dev, 1575 "discarded packet due to " 1576 "dmamap load failure: %d\n", error); 1577 m_freem(m); 1578 continue; 1579 } 1580 KASSERT(txq->txq_next == NULL); 1581 if (!bcmeth_txq_produce(sc, txq, m)) { 1582 txq->txq_next = m; 1583 return false; 1584 } 1585 KASSERT(txq->txq_next == NULL); 1586 } 1587 } 1588 1589 static bool 1590 bcmeth_txq_consume( 1591 struct bcmeth_softc *sc, 1592 struct bcmeth_txqueue *txq) 1593 { 1594 struct ifnet * const ifp = &sc->sc_if; 1595 struct gmac_txdb *consumer = txq->txq_consumer; 1596 size_t txfree = 0; 1597 1598 #if 0 1599 printf("%s: entry: free=%zu\n", __func__, txq->txq_free); 1600 #endif 1601 1602 for (;;) { 1603 if (consumer == txq->txq_producer) { 1604 txq->txq_consumer = consumer; 1605 txq->txq_free += txfree; 1606 txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree); 1607 #if 0 1608 printf("%s: empty: freed %zu descriptors going from " 1609 "%zu to %zu\n", __func__, txfree, 1610 txq->txq_free - txfree, txq->txq_free); 1611 #endif 1612 KASSERT(txq->txq_lastintr == 0); 1613 KASSERT(txq->txq_free 1614 == txq->txq_last - txq->txq_first - 1); 1615 return true; 1616 } 1617 bcmeth_txq_desc_postsync(sc, txq, consumer, 1); 1618 uint32_t s0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0); 1619 if (consumer == txq->txq_first + __SHIFTOUT(s0, XMT_CURRDSCR)) { 1620 txq->txq_consumer = consumer; 1621 txq->txq_free += txfree; 1622 txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree); 1623 #if 0 1624 printf("%s: freed %zu descriptors\n", 1625 __func__, txfree); 1626 #endif 1627 return bcmeth_txq_fillable_p(sc, txq); 1628 } 1629 1630 /* 1631 * If this is the last descriptor in the chain, get the 1632 * mbuf, free its dmamap, and free the mbuf chain itself. 
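 * (TXDB_FLAG_EF was set on the final descriptor of each packet by
 * bcmeth_txq_produce(), so exactly one mbuf chain is dequeued from
 * txq_mbufs for every EF descriptor reclaimed here.)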
1633 */ 1634 const uint32_t txdb_flags = le32toh(consumer->txdb_flags); 1635 if (txdb_flags & TXDB_FLAG_EF) { 1636 struct mbuf *m; 1637 1638 IF_DEQUEUE(&txq->txq_mbufs, m); 1639 KASSERT(m); 1640 bcmeth_txq_map_unload(sc, txq, m); 1641 #if 0 1642 printf("%s: mbuf %p: consumed a %u byte packet\n", 1643 __func__, m, m->m_pkthdr.len); 1644 #endif 1645 bpf_mtap(ifp, m, BPF_D_OUT); 1646 if_statinc(ifp, if_opackets); 1647 if_statadd(ifp, if_obytes, m->m_pkthdr.len); 1648 if (m->m_flags & M_MCAST) 1649 if_statinc(ifp, if_omcasts); 1650 m_freem(m); 1651 } 1652 1653 /* 1654 * We own this packet again. Clear all flags except wrap. 1655 */ 1656 txfree++; 1657 1658 /* 1659 * Wrap at the last entry! 1660 */ 1661 if (txdb_flags & TXDB_FLAG_ET) { 1662 consumer->txdb_flags = htole32(TXDB_FLAG_ET); 1663 KASSERT(consumer + 1 == txq->txq_last); 1664 consumer = txq->txq_first; 1665 } else { 1666 consumer->txdb_flags = 0; 1667 consumer++; 1668 KASSERT(consumer < txq->txq_last); 1669 } 1670 } 1671 } 1672 1673 static void 1674 bcmeth_txq_purge( 1675 struct bcmeth_softc *sc, 1676 struct bcmeth_txqueue *txq) 1677 { 1678 struct mbuf *m; 1679 KASSERT((bcmeth_read_4(sc, UNIMAC_COMMAND_CONFIG) & TX_ENA) == 0); 1680 1681 for (;;) { 1682 IF_DEQUEUE(&txq->txq_mbufs, m); 1683 if (m == NULL) 1684 break; 1685 bcmeth_txq_map_unload(sc, txq, m); 1686 m_freem(m); 1687 } 1688 if ((m = txq->txq_next) != NULL) { 1689 txq->txq_next = NULL; 1690 bcmeth_txq_map_unload(sc, txq, m); 1691 m_freem(m); 1692 } 1693 } 1694 1695 static void 1696 bcmeth_txq_reset( 1697 struct bcmeth_softc *sc, 1698 struct bcmeth_txqueue *txq) 1699 { 1700 /* 1701 * sync all the descriptors 1702 */ 1703 bcmeth_txq_desc_postsync(sc, txq, txq->txq_first, 1704 txq->txq_last - txq->txq_first); 1705 1706 /* 1707 * Make sure we own all descriptors in the ring. 1708 */ 1709 struct gmac_txdb *txdb; 1710 for (txdb = txq->txq_first; txdb < txq->txq_last - 1; txdb++) { 1711 txdb->txdb_flags = 0; 1712 } 1713 1714 /* 1715 * Last descriptor has the wrap flag. 1716 */ 1717 txdb->txdb_flags = htole32(TXDB_FLAG_ET); 1718 1719 /* 1720 * Reset the producer consumer indexes. 1721 */ 1722 txq->txq_consumer = txq->txq_first; 1723 txq->txq_producer = txq->txq_first; 1724 txq->txq_free = txq->txq_last - txq->txq_first - 1; 1725 txq->txq_threshold = txq->txq_free / 2; 1726 txq->txq_lastintr = 0; 1727 1728 /* 1729 * What do we want to get interrupted on? 1730 */ 1731 sc->sc_intmask |= XMTINT_0 | XMTUF; 1732 1733 /* 1734 * Restart the transmitter at the first descriptor 1735 */ 1736 bcmeth_write_4(sc, txq->txq_reg_xmtaddrlo, 1737 txq->txq_descmap->dm_segs->ds_addr); 1738 } 1739 1740 static void 1741 bcmeth_ifstart(struct ifnet *ifp) 1742 { 1743 struct bcmeth_softc * const sc = ifp->if_softc; 1744 1745 if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) { 1746 return; 1747 } 1748 1749 #ifdef BCMETH_MPSAFETX 1750 if (cpu_intr_p()) { 1751 #endif 1752 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 1753 softint_schedule(sc->sc_soft_ih); 1754 #ifdef BCMETH_MPSAFETX 1755 } else { 1756 /* 1757 * Either we are in a softintr thread already or some other 1758 * thread so just borrow it to do the send and save ourselves 1759 * the overhead of a fast soft int. 
1760 */ 1761 bcmeth_soft_txintr(sc); 1762 } 1763 #endif 1764 } 1765 1766 int 1767 bcmeth_intr(void *arg) 1768 { 1769 struct bcmeth_softc * const sc = arg; 1770 uint32_t soft_flags = 0; 1771 uint32_t work_flags = 0; 1772 int rv = 0; 1773 1774 mutex_enter(sc->sc_hwlock); 1775 1776 uint32_t intmask = sc->sc_intmask; 1777 BCMETH_EVCNT_INCR(sc->sc_ev_intr); 1778 1779 for (;;) { 1780 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS); 1781 intstatus &= intmask; 1782 bcmeth_write_4(sc, GMAC_INTSTATUS, intstatus); /* write 1 to clear */ 1783 if (intstatus == 0) { 1784 break; 1785 } 1786 #if 0 1787 aprint_normal_dev(sc->sc_dev, "%s: intstatus=%#x intmask=%#x\n", 1788 __func__, intstatus, bcmeth_read_4(sc, GMAC_INTMASK)); 1789 #endif 1790 if (intstatus & RCVINT) { 1791 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq; 1792 intmask &= ~RCVINT; 1793 1794 uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0); 1795 uint32_t descs = __SHIFTOUT(rcvsts0, RCV_CURRDSCR); 1796 if (descs < rxq->rxq_consumer - rxq->rxq_first) { 1797 /* 1798 * We wrapped at the end so count how far 1799 * we are from the end. 1800 */ 1801 descs += rxq->rxq_last - rxq->rxq_consumer; 1802 } else { 1803 descs -= rxq->rxq_consumer - rxq->rxq_first; 1804 } 1805 /* 1806 * If we "timedout" we can't be hogging so use 1807 * softints. If we exceeded then we might hogging 1808 * so let the workqueue deal with them. 1809 */ 1810 const uint32_t framecount = __SHIFTOUT(sc->sc_rcvlazy, 1811 INTRCVLAZY_FRAMECOUNT); 1812 if (descs < framecount 1813 || (curcpu()->ci_curlwp->l_flag & LW_IDLE)) { 1814 soft_flags |= SOFT_RXINTR; 1815 } else { 1816 work_flags |= WORK_RXINTR; 1817 } 1818 } 1819 1820 if (intstatus & XMTINT_0) { 1821 intmask &= ~XMTINT_0; 1822 soft_flags |= SOFT_TXINTR; 1823 } 1824 1825 if (intstatus & RCVDESCUF) { 1826 intmask &= ~RCVDESCUF; 1827 work_flags |= WORK_RXUNDERFLOW; 1828 } 1829 1830 intstatus &= intmask; 1831 if (intstatus) { 1832 aprint_error_dev(sc->sc_dev, 1833 "intr: intstatus=%#x\n", intstatus); 1834 aprint_error_dev(sc->sc_dev, 1835 "rcvbase=%p/%#lx rcvptr=%#x rcvsts=%#x/%#x\n", 1836 sc->sc_rxq.rxq_first, 1837 sc->sc_rxq.rxq_descmap->dm_segs[0].ds_addr, 1838 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvptr), 1839 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts0), 1840 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts1)); 1841 aprint_error_dev(sc->sc_dev, 1842 "xmtbase=%p/%#lx xmtptr=%#x xmtsts=%#x/%#x\n", 1843 sc->sc_txq.txq_first, 1844 sc->sc_txq.txq_descmap->dm_segs[0].ds_addr, 1845 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtptr), 1846 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts0), 1847 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts1)); 1848 intmask &= ~intstatus; 1849 work_flags |= WORK_REINIT; 1850 break; 1851 } 1852 } 1853 1854 if (intmask != sc->sc_intmask) { 1855 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 1856 } 1857 1858 if (work_flags) { 1859 if (sc->sc_work_flags == 0) { 1860 workqueue_enqueue(sc->sc_workq, &sc->sc_work, NULL); 1861 } 1862 atomic_or_32(&sc->sc_work_flags, work_flags); 1863 rv = 1; 1864 } 1865 1866 if (soft_flags) { 1867 if (sc->sc_soft_flags == 0) { 1868 softint_schedule(sc->sc_soft_ih); 1869 } 1870 atomic_or_32(&sc->sc_soft_flags, soft_flags); 1871 rv = 1; 1872 } 1873 1874 mutex_exit(sc->sc_hwlock); 1875 1876 return rv; 1877 } 1878 1879 #ifdef BCMETH_MPSAFETX 1880 void 1881 bcmeth_soft_txintr(struct bcmeth_softc *sc) 1882 { 1883 mutex_enter(sc->sc_lock); 1884 /* 1885 * Let's do what we came here for. Consume transmitted 1886 * packets off the transmit ring. 
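 * (The same consume-then-refill logic also runs in bcmeth_soft_intr();
 * this MPSAFE variant exists so bcmeth_ifstart() can do the transmit
 * work directly in the calling thread and skip the softint.)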
1887 */ 1888 if (!bcmeth_txq_consume(sc, &sc->sc_txq) 1889 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) { 1890 BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall); 1891 } 1892 if (sc->sc_if.if_flags & IFF_RUNNING) { 1893 mutex_spin_enter(sc->sc_hwlock); 1894 sc->sc_intmask |= XMTINT_0; 1895 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 1896 mutex_spin_exit(sc->sc_hwlock); 1897 } 1898 mutex_exit(sc->sc_lock); 1899 } 1900 #endif /* BCMETH_MPSAFETX */ 1901 1902 void 1903 bcmeth_soft_intr(void *arg) 1904 { 1905 struct bcmeth_softc * const sc = arg; 1906 struct ifnet * const ifp = &sc->sc_if; 1907 uint32_t intmask = 0; 1908 1909 mutex_enter(sc->sc_lock); 1910 1911 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0); 1912 1913 BCMETH_EVCNT_INCR(sc->sc_ev_soft_intr); 1914 1915 if ((soft_flags & SOFT_TXINTR) 1916 || bcmeth_txq_active_p(sc, &sc->sc_txq)) { 1917 /* 1918 * Let's do what we came here for. Consume transmitted 1919 * packets off the transmit ring. 1920 */ 1921 if (!bcmeth_txq_consume(sc, &sc->sc_txq) 1922 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) { 1923 BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall); 1924 } 1925 intmask |= XMTINT_0; 1926 } 1927 1928 if (soft_flags & SOFT_RXINTR) { 1929 /* 1930 * Let's consume 1931 */ 1932 while (bcmeth_rxq_consume(sc, &sc->sc_rxq, 1933 sc->sc_rxq.rxq_threshold / 4)) { 1934 /* 1935 * We've consumed a quarter of the ring and still have 1936 * more to do. Refill the ring. 1937 */ 1938 bcmeth_rxq_produce(sc, &sc->sc_rxq); 1939 } 1940 intmask |= RCVINT; 1941 } 1942 1943 if (ifp->if_flags & IFF_RUNNING) { 1944 bcmeth_rxq_produce(sc, &sc->sc_rxq); 1945 mutex_spin_enter(sc->sc_hwlock); 1946 sc->sc_intmask |= intmask; 1947 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 1948 mutex_spin_exit(sc->sc_hwlock); 1949 } 1950 1951 mutex_exit(sc->sc_lock); 1952 } 1953 1954 void 1955 bcmeth_worker(struct work *wk, void *arg) 1956 { 1957 struct bcmeth_softc * const sc = arg; 1958 struct ifnet * const ifp = &sc->sc_if; 1959 uint32_t intmask = 0; 1960 1961 mutex_enter(sc->sc_lock); 1962 1963 BCMETH_EVCNT_INCR(sc->sc_ev_work); 1964 1965 uint32_t work_flags = atomic_swap_32(&sc->sc_work_flags, 0); 1966 if (work_flags & WORK_REINIT) { 1967 int s = splnet(); 1968 sc->sc_soft_flags = 0; 1969 bcmeth_ifinit(ifp); 1970 splx(s); 1971 work_flags &= ~WORK_RXUNDERFLOW; 1972 } 1973 1974 if (work_flags & WORK_RXUNDERFLOW) { 1975 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq; 1976 size_t threshold = 5 * rxq->rxq_threshold / 4; 1977 if (threshold >= rxq->rxq_last - rxq->rxq_first) { 1978 threshold = rxq->rxq_last - rxq->rxq_first - 1; 1979 } else { 1980 intmask |= RCVDESCUF; 1981 } 1982 aprint_normal_dev(sc->sc_dev, 1983 "increasing receive buffers from %zu to %zu\n", 1984 rxq->rxq_threshold, threshold); 1985 rxq->rxq_threshold = threshold; 1986 } 1987 1988 if (work_flags & WORK_RXINTR) { 1989 /* 1990 * Let's consume 1991 */ 1992 while (bcmeth_rxq_consume(sc, &sc->sc_rxq, 1993 sc->sc_rxq.rxq_threshold / 4)) { 1994 /* 1995 * We've consumed a quarter of the ring and still have 1996 * more to do. Refill the ring. 
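 * (This is the workqueue counterpart of the loop in bcmeth_soft_intr();
 * bcmeth_intr() routes receive work here when servicing it from a
 * softint looks likely to monopolize the CPU.)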
1997 */ 1998 bcmeth_rxq_produce(sc, &sc->sc_rxq); 1999 } 2000 intmask |= RCVINT; 2001 } 2002 2003 if (ifp->if_flags & IFF_RUNNING) { 2004 bcmeth_rxq_produce(sc, &sc->sc_rxq); 2005 #if 0 2006 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS); 2007 if (intstatus & RCVINT) { 2008 bcmeth_write_4(sc, GMAC_INTSTATUS, RCVINT); 2009 work_flags |= WORK_RXINTR; 2010 continue; 2011 } 2012 #endif 2013 mutex_spin_enter(sc->sc_hwlock); 2014 sc->sc_intmask |= intmask; 2015 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 2016 mutex_spin_exit(sc->sc_hwlock); 2017 } 2018 2019 mutex_exit(sc->sc_lock); 2020 } 2021