1 /* $NetBSD: if_ae.c,v 1.46 2025/10/04 04:44:20 thorpej Exp $ */ 2 /*- 3 * Copyright (c) 2006 Urbana-Champaign Independent Media Center. 4 * Copyright (c) 2006 Garrett D'Amore. 5 * All rights reserved. 6 * 7 * This code was written by Garrett D'Amore for the Champaign-Urbana 8 * Community Wireless Network Project. 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer in the documentation and/or other materials provided 18 * with the distribution. 19 * 3. All advertising materials mentioning features or use of this 20 * software must display the following acknowledgements: 21 * This product includes software developed by the Urbana-Champaign 22 * Independent Media Center. 23 * This product includes software developed by Garrett D'Amore. 24 * 4. Urbana-Champaign Independent Media Center's name and Garrett 25 * D'Amore's name may not be used to endorse or promote products 26 * derived from this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT 29 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR 30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 31 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT 33 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT, 34 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 35 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 36 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 37 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 38 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 40 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 41 */ 42 /*- 43 * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc. 44 * All rights reserved. 45 * 46 * This code is derived from software contributed to The NetBSD Foundation 47 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 48 * NASA Ames Research Center; and by Charles M. Hannum. 49 * 50 * Redistribution and use in source and binary forms, with or without 51 * modification, are permitted provided that the following conditions 52 * are met: 53 * 1. Redistributions of source code must retain the above copyright 54 * notice, this list of conditions and the following disclaimer. 55 * 2. Redistributions in binary form must reproduce the above copyright 56 * notice, this list of conditions and the following disclaimer in the 57 * documentation and/or other materials provided with the distribution. 58 * 59 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 60 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 61 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 62 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the onboard ethernet MAC found on the AR5312
 * chip's AHB bus.
 *
 * This device is very similar to the tulip in most regards, and
 * the code is directly derived from NetBSD's tulip.c.  However, it
 * is different enough that it did not seem to be a good idea to
 * add further complexity to the tulip driver, so we have our own.
 *
 * Also tulip has a lot of complexity in it for various parts/options
 * that we don't need, and on these little boxes with only ~8MB RAM, we
 * don't want any extra bloat.
 */

/*
 * TODO:
 *
 * 1) Find out about BUS_MODE_ALIGN16B.  This chip can apparently align
 *    inbound packets on a half-word boundary, which would make life easier
 *    for TCP/IP.  (Aligning IP headers on a word.)
 *
 * 2) There is stuff in original tulip to shut down the device when reacting
 *    to a change in link status.  Is that needed?
 *
 * 3) Test with variety of 10/100 HDX/FDX scenarios.
97 * 98 */ 99 100 #include <sys/cdefs.h> 101 __KERNEL_RCSID(0, "$NetBSD: if_ae.c,v 1.46 2025/10/04 04:44:20 thorpej Exp $"); 102 103 104 #include <sys/param.h> 105 #include <sys/bus.h> 106 #include <sys/callout.h> 107 #include <sys/device.h> 108 #include <sys/endian.h> 109 #include <sys/errno.h> 110 #include <sys/intr.h> 111 #include <sys/ioctl.h> 112 #include <sys/kernel.h> 113 #include <sys/mbuf.h> 114 #include <sys/socket.h> 115 116 #include <uvm/uvm_extern.h> 117 118 #include <net/if.h> 119 #include <net/if_dl.h> 120 #include <net/if_media.h> 121 #include <net/if_ether.h> 122 123 #include <net/bpf.h> 124 125 #include <dev/mii/mii.h> 126 #include <dev/mii/miivar.h> 127 #include <dev/mii/mii_bitbang.h> 128 129 #include <mips/atheros/include/arbusvar.h> 130 #include <mips/atheros/dev/aereg.h> 131 #include <mips/atheros/dev/aevar.h> 132 133 static const struct { 134 uint32_t txth_opmode; /* OPMODE bits */ 135 const char *txth_name; /* name of mode */ 136 } ae_txthresh[] = { 137 { OPMODE_TR_32, "32 words" }, 138 { OPMODE_TR_64, "64 words" }, 139 { OPMODE_TR_128, "128 words" }, 140 { OPMODE_TR_256, "256 words" }, 141 { OPMODE_SF, "store and forward mode" }, 142 { 0, NULL }, 143 }; 144 145 static int ae_match(device_t, struct cfdata *, void *); 146 static void ae_attach(device_t, device_t, void *); 147 static int ae_detach(device_t, int); 148 static int ae_activate(device_t, enum devact); 149 150 static int ae_ifflags_cb(struct ethercom *); 151 static void ae_reset(struct ae_softc *); 152 static void ae_idle(struct ae_softc *, uint32_t); 153 154 static void ae_start(struct ifnet *); 155 static void ae_watchdog(struct ifnet *); 156 static int ae_ioctl(struct ifnet *, u_long, void *); 157 static int ae_init(struct ifnet *); 158 static void ae_stop(struct ifnet *, int); 159 160 static void ae_shutdown(void *); 161 162 static void ae_rxdrain(struct ae_softc *); 163 static int ae_add_rxbuf(struct ae_softc *, int); 164 165 static int ae_enable(struct ae_softc *); 166 static 
void ae_disable(struct ae_softc *); 167 static void ae_power(int, void *); 168 169 static void ae_filter_setup(struct ae_softc *); 170 171 static int ae_intr(void *); 172 static void ae_rxintr(struct ae_softc *); 173 static void ae_txintr(struct ae_softc *); 174 175 static void ae_mii_tick(void *); 176 static void ae_mii_statchg(struct ifnet *); 177 178 static int ae_mii_readreg(device_t, int, int, uint16_t *); 179 static int ae_mii_writereg(device_t, int, int, uint16_t); 180 181 #ifdef AE_DEBUG 182 #define DPRINTF(sc, x) if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \ 183 printf x 184 #else 185 #define DPRINTF(sc, x) /* nothing */ 186 #endif 187 188 #ifdef AE_STATS 189 static void ae_print_stats(struct ae_softc *); 190 #endif 191 192 CFATTACH_DECL_NEW(ae, sizeof(struct ae_softc), 193 ae_match, ae_attach, ae_detach, ae_activate); 194 195 /* 196 * ae_match: 197 * 198 * Check for a device match. 199 */ 200 int 201 ae_match(device_t parent, struct cfdata *cf, void *aux) 202 { 203 struct arbus_attach_args *aa = aux; 204 205 if (strcmp(aa->aa_name, cf->cf_name) == 0) 206 return 1; 207 208 return 0; 209 210 } 211 212 /* 213 * ae_attach: 214 * 215 * Attach an ae interface to the system. 216 */ 217 void 218 ae_attach(device_t parent, device_t self, void *aux) 219 { 220 struct ae_softc *sc = device_private(self); 221 struct arbus_attach_args *aa = aux; 222 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 223 struct mii_data * const mii = &sc->sc_mii; 224 int i, error; 225 uint8_t enaddr[ETHER_ADDR_LEN]; 226 227 sc->sc_dev = self; 228 229 callout_init(&sc->sc_tick_callout, 0); 230 231 printf(": Atheros AR531X 10/100 Ethernet\n"); 232 233 /* 234 * Try to get MAC address. 235 */ 236 if (! ether_getaddr(self, enaddr)) { 237 printf("%s: unable to get mac-address\n", 238 device_xname(sc->sc_dev)); 239 return; 240 } 241 242 /* Announce ourselves. 
*/ 243 printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev), 244 ether_sprintf(enaddr)); 245 246 sc->sc_cirq = aa->aa_cirq; 247 sc->sc_mirq = aa->aa_mirq; 248 sc->sc_st = aa->aa_bst; 249 sc->sc_dmat = aa->aa_dmat; 250 251 SIMPLEQ_INIT(&sc->sc_txfreeq); 252 SIMPLEQ_INIT(&sc->sc_txdirtyq); 253 254 /* 255 * Map registers. 256 */ 257 sc->sc_size = aa->aa_size; 258 if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0, 259 &sc->sc_sh)) != 0) { 260 printf("%s: unable to map registers, error = %d\n", 261 device_xname(sc->sc_dev), error); 262 goto fail_0; 263 } 264 265 /* 266 * Allocate the control data structures, and create and load the 267 * DMA map for it. 268 */ 269 if ((error = bus_dmamem_alloc(sc->sc_dmat, 270 sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg, 271 1, &sc->sc_cdnseg, 0)) != 0) { 272 printf("%s: unable to allocate control data, error = %d\n", 273 device_xname(sc->sc_dev), error); 274 goto fail_1; 275 } 276 277 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg, 278 sizeof(struct ae_control_data), (void **)&sc->sc_control_data, 279 BUS_DMA_COHERENT)) != 0) { 280 printf("%s: unable to map control data, error = %d\n", 281 device_xname(sc->sc_dev), error); 282 goto fail_2; 283 } 284 285 if ((error = bus_dmamap_create(sc->sc_dmat, 286 sizeof(struct ae_control_data), 1, 287 sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) { 288 printf("%s: unable to create control data DMA map, " 289 "error = %d\n", device_xname(sc->sc_dev), error); 290 goto fail_3; 291 } 292 293 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 294 sc->sc_control_data, sizeof(struct ae_control_data), NULL, 295 0)) != 0) { 296 printf("%s: unable to load control data DMA map, error = %d\n", 297 device_xname(sc->sc_dev), error); 298 goto fail_4; 299 } 300 301 /* 302 * Create the transmit buffer DMA maps. 
303 */ 304 for (i = 0; i < AE_TXQUEUELEN; i++) { 305 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 306 AE_NTXSEGS, MCLBYTES, 0, 0, 307 &sc->sc_txsoft[i].txs_dmamap)) != 0) { 308 printf("%s: unable to create tx DMA map %d, " 309 "error = %d\n", device_xname(sc->sc_dev), i, error); 310 goto fail_5; 311 } 312 } 313 314 /* 315 * Create the receive buffer DMA maps. 316 */ 317 for (i = 0; i < AE_NRXDESC; i++) { 318 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 319 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 320 printf("%s: unable to create rx DMA map %d, " 321 "error = %d\n", device_xname(sc->sc_dev), i, error); 322 goto fail_6; 323 } 324 sc->sc_rxsoft[i].rxs_mbuf = NULL; 325 } 326 327 /* 328 * Reset the chip to a known state. 329 */ 330 ae_reset(sc); 331 332 /* 333 * From this point forward, the attachment cannot fail. A failure 334 * before this point releases all resources that may have been 335 * allocated. 336 */ 337 sc->sc_flags |= AE_ATTACHED; 338 339 /* 340 * Initialize our media structures. This may probe the MII, if 341 * present. 
342 */ 343 mii->mii_ifp = ifp; 344 mii->mii_readreg = ae_mii_readreg; 345 mii->mii_writereg = ae_mii_writereg; 346 mii->mii_statchg = ae_mii_statchg; 347 sc->sc_ethercom.ec_mii = mii; 348 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); 349 mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, 350 MII_OFFSET_ANY, 0); 351 352 if (LIST_FIRST(&mii->mii_phys) == NULL) { 353 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL); 354 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); 355 } else 356 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 357 358 sc->sc_tick = ae_mii_tick; 359 360 strcpy(ifp->if_xname, device_xname(sc->sc_dev)); 361 ifp->if_softc = sc; 362 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 363 sc->sc_if_flags = ifp->if_flags; 364 ifp->if_ioctl = ae_ioctl; 365 ifp->if_start = ae_start; 366 ifp->if_watchdog = ae_watchdog; 367 ifp->if_init = ae_init; 368 ifp->if_stop = ae_stop; 369 IFQ_SET_READY(&ifp->if_snd); 370 371 /* 372 * We can support 802.1Q VLAN-sized frames. 373 */ 374 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 375 376 /* 377 * Attach the interface. 378 */ 379 if_attach(ifp); 380 if_deferred_start_init(ifp, NULL); 381 ether_ifattach(ifp, enaddr); 382 ether_set_ifflags_cb(&sc->sc_ethercom, ae_ifflags_cb); 383 384 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), 385 RND_TYPE_NET, RND_FLAG_DEFAULT); 386 387 /* 388 * Make sure the interface is shutdown during reboot. 389 */ 390 sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc); 391 if (sc->sc_sdhook == NULL) 392 printf("%s: WARNING: unable to establish shutdown hook\n", 393 device_xname(sc->sc_dev)); 394 395 /* 396 * Add a suspend hook to make sure we come back up after a 397 * resume. 
398 */ 399 sc->sc_powerhook = powerhook_establish(device_xname(sc->sc_dev), 400 ae_power, sc); 401 if (sc->sc_powerhook == NULL) 402 printf("%s: WARNING: unable to establish power hook\n", 403 device_xname(sc->sc_dev)); 404 return; 405 406 /* 407 * Free any resources we've allocated during the failed attach 408 * attempt. Do this in reverse order and fall through. 409 */ 410 fail_6: 411 for (i = 0; i < AE_NRXDESC; i++) { 412 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 413 bus_dmamap_destroy(sc->sc_dmat, 414 sc->sc_rxsoft[i].rxs_dmamap); 415 } 416 fail_5: 417 for (i = 0; i < AE_TXQUEUELEN; i++) { 418 if (sc->sc_txsoft[i].txs_dmamap != NULL) 419 bus_dmamap_destroy(sc->sc_dmat, 420 sc->sc_txsoft[i].txs_dmamap); 421 } 422 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 423 fail_4: 424 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 425 fail_3: 426 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 427 sizeof(struct ae_control_data)); 428 fail_2: 429 bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg); 430 fail_1: 431 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size); 432 fail_0: 433 return; 434 } 435 436 /* 437 * ae_activate: 438 * 439 * Handle device activation/deactivation requests. 440 */ 441 int 442 ae_activate(device_t self, enum devact act) 443 { 444 struct ae_softc *sc = device_private(self); 445 446 switch (act) { 447 case DVACT_DEACTIVATE: 448 if_deactivate(&sc->sc_ethercom.ec_if); 449 return 0; 450 default: 451 return EOPNOTSUPP; 452 } 453 } 454 455 /* 456 * ae_detach: 457 * 458 * Detach a device interface. 459 */ 460 int 461 ae_detach(device_t self, int flags) 462 { 463 struct ae_softc *sc = device_private(self); 464 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 465 struct ae_rxsoft *rxs; 466 struct ae_txsoft *txs; 467 int i; 468 469 /* 470 * Succeed now if there isn't any work to do. 471 */ 472 if ((sc->sc_flags & AE_ATTACHED) == 0) 473 return (0); 474 475 /* Unhook our tick handler. 
*/ 476 if (sc->sc_tick) 477 callout_stop(&sc->sc_tick_callout); 478 479 /* Detach all PHYs */ 480 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 481 482 rnd_detach_source(&sc->sc_rnd_source); 483 ether_ifdetach(ifp); 484 if_detach(ifp); 485 486 /* Delete all remaining media. */ 487 ifmedia_fini(&sc->sc_mii.mii_media); 488 489 for (i = 0; i < AE_NRXDESC; i++) { 490 rxs = &sc->sc_rxsoft[i]; 491 if (rxs->rxs_mbuf != NULL) { 492 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 493 m_freem(rxs->rxs_mbuf); 494 rxs->rxs_mbuf = NULL; 495 } 496 bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap); 497 } 498 for (i = 0; i < AE_TXQUEUELEN; i++) { 499 txs = &sc->sc_txsoft[i]; 500 if (txs->txs_mbuf != NULL) { 501 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 502 m_freem(txs->txs_mbuf); 503 txs->txs_mbuf = NULL; 504 } 505 bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap); 506 } 507 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 508 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 509 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 510 sizeof(struct ae_control_data)); 511 bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg); 512 513 shutdownhook_disestablish(sc->sc_sdhook); 514 powerhook_disestablish(sc->sc_powerhook); 515 516 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size); 517 518 519 return (0); 520 } 521 522 /* 523 * ae_shutdown: 524 * 525 * Make sure the interface is stopped at reboot time. 526 */ 527 static void 528 ae_shutdown(void *arg) 529 { 530 struct ae_softc *sc = arg; 531 532 ae_stop(&sc->sc_ethercom.ec_if, 1); 533 } 534 535 /* 536 * ae_start: [ifnet interface function] 537 * 538 * Start packet transmission on the interface. 
539 */ 540 static void 541 ae_start(struct ifnet *ifp) 542 { 543 struct ae_softc *sc = ifp->if_softc; 544 struct mbuf *m0, *m; 545 struct ae_txsoft *txs; 546 bus_dmamap_t dmamap; 547 int error, firsttx, nexttx, lasttx = 1, ofree, seg; 548 549 DPRINTF(sc, ("%s: ae_start: sc_flags 0x%08x, if_flags 0x%08x\n", 550 device_xname(sc->sc_dev), sc->sc_flags, ifp->if_flags)); 551 552 553 if ((ifp->if_flags & IFF_RUNNING) == 0) 554 return; 555 556 /* 557 * Remember the previous number of free descriptors and 558 * the first descriptor we'll use. 559 */ 560 ofree = sc->sc_txfree; 561 firsttx = sc->sc_txnext; 562 563 DPRINTF(sc, ("%s: ae_start: txfree %d, txnext %d\n", 564 device_xname(sc->sc_dev), ofree, firsttx)); 565 566 /* 567 * Loop through the send queue, setting up transmit descriptors 568 * until we drain the queue, or use up all available transmit 569 * descriptors. 570 */ 571 while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL && 572 sc->sc_txfree != 0) { 573 /* 574 * Grab a packet off the queue. 575 */ 576 IFQ_POLL(&ifp->if_snd, m0); 577 if (m0 == NULL) 578 break; 579 m = NULL; 580 581 dmamap = txs->txs_dmamap; 582 583 /* 584 * Load the DMA map. If this fails, the packet either 585 * didn't fit in the allotted number of segments, or we were 586 * short on resources. In this case, we'll copy and try 587 * again. 
588 */ 589 if (((mtod(m0, uintptr_t) & 3) != 0) || 590 bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 591 BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) { 592 MGETHDR(m, M_DONTWAIT, MT_DATA); 593 if (m == NULL) { 594 printf("%s: unable to allocate Tx mbuf\n", 595 device_xname(sc->sc_dev)); 596 break; 597 } 598 MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner); 599 if (m0->m_pkthdr.len > MHLEN) { 600 MCLGET(m, M_DONTWAIT); 601 if ((m->m_flags & M_EXT) == 0) { 602 printf("%s: unable to allocate Tx " 603 "cluster\n", device_xname(sc->sc_dev)); 604 m_freem(m); 605 break; 606 } 607 } 608 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); 609 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; 610 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, 611 m, BUS_DMA_WRITE | BUS_DMA_NOWAIT); 612 if (error) { 613 printf("%s: unable to load Tx buffer, " 614 "error = %d\n", device_xname(sc->sc_dev), 615 error); 616 break; 617 } 618 } 619 620 /* 621 * Ensure we have enough descriptors free to describe 622 * the packet. 623 */ 624 if (dmamap->dm_nsegs > sc->sc_txfree) { 625 /* 626 * Not enough free descriptors to transmit this 627 * packet. We haven't committed to anything yet, 628 * so just unload the DMA map, put the packet 629 * back on the queue, and punt. Notify the upper 630 * layer that there are no more slots left. 631 * 632 * XXX We could allocate an mbuf and copy, but 633 * XXX it is worth it? 634 */ 635 bus_dmamap_unload(sc->sc_dmat, dmamap); 636 m_freem(m); 637 break; 638 } 639 640 IFQ_DEQUEUE(&ifp->if_snd, m0); 641 if (m != NULL) { 642 m_freem(m0); 643 m0 = m; 644 } 645 646 /* 647 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 648 */ 649 650 /* Sync the DMA map. */ 651 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 652 BUS_DMASYNC_PREWRITE); 653 654 /* 655 * Initialize the transmit descriptors. 
656 */ 657 for (nexttx = sc->sc_txnext, seg = 0; 658 seg < dmamap->dm_nsegs; 659 seg++, nexttx = AE_NEXTTX(nexttx)) { 660 /* 661 * If this is the first descriptor we're 662 * enqueueing, don't set the OWN bit just 663 * yet. That could cause a race condition. 664 * We'll do it below. 665 */ 666 sc->sc_txdescs[nexttx].ad_status = 667 (nexttx == firsttx) ? 0 : ADSTAT_OWN; 668 sc->sc_txdescs[nexttx].ad_bufaddr1 = 669 dmamap->dm_segs[seg].ds_addr; 670 sc->sc_txdescs[nexttx].ad_ctl = 671 (dmamap->dm_segs[seg].ds_len << 672 ADCTL_SIZE1_SHIFT) | 673 (nexttx == (AE_NTXDESC - 1) ? 674 ADCTL_ER : 0); 675 lasttx = nexttx; 676 } 677 678 KASSERT(lasttx != -1); 679 680 /* Set `first segment' and `last segment' appropriately. */ 681 sc->sc_txdescs[sc->sc_txnext].ad_ctl |= ADCTL_Tx_FS; 682 sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_LS; 683 684 #ifdef AE_DEBUG 685 if (ifp->if_flags & IFF_DEBUG) { 686 printf(" txsoft %p transmit chain:\n", txs); 687 for (seg = sc->sc_txnext;; seg = AE_NEXTTX(seg)) { 688 printf(" descriptor %d:\n", seg); 689 printf(" ad_status: 0x%08x\n", 690 sc->sc_txdescs[seg].ad_status); 691 printf(" ad_ctl: 0x%08x\n", 692 sc->sc_txdescs[seg].ad_ctl); 693 printf(" ad_bufaddr1: 0x%08x\n", 694 sc->sc_txdescs[seg].ad_bufaddr1); 695 printf(" ad_bufaddr2: 0x%08x\n", 696 sc->sc_txdescs[seg].ad_bufaddr2); 697 if (seg == lasttx) 698 break; 699 } 700 } 701 #endif 702 703 /* Sync the descriptors we're using. */ 704 AE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs, 705 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 706 707 /* 708 * Store a pointer to the packet so we can free it later, 709 * and remember what txdirty will be once the packet is 710 * done. 711 */ 712 txs->txs_mbuf = m0; 713 txs->txs_firstdesc = sc->sc_txnext; 714 txs->txs_lastdesc = lasttx; 715 txs->txs_ndescs = dmamap->dm_nsegs; 716 717 /* Advance the tx pointer. 
*/ 718 sc->sc_txfree -= dmamap->dm_nsegs; 719 sc->sc_txnext = nexttx; 720 721 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 722 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 723 724 /* 725 * Pass the packet to any BPF listeners. 726 */ 727 bpf_mtap(ifp, m0, BPF_D_OUT); 728 } 729 730 if (sc->sc_txfree != ofree) { 731 DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n", 732 device_xname(sc->sc_dev), lasttx, firsttx)); 733 /* 734 * Cause a transmit interrupt to happen on the 735 * last packet we enqueued. 736 */ 737 sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_IC; 738 AE_CDTXSYNC(sc, lasttx, 1, 739 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 740 741 /* 742 * The entire packet chain is set up. Give the 743 * first descriptor to the chip now. 744 */ 745 sc->sc_txdescs[firsttx].ad_status |= ADSTAT_OWN; 746 AE_CDTXSYNC(sc, firsttx, 1, 747 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 748 749 /* Wake up the transmitter. */ 750 /* XXX USE AUTOPOLLING? */ 751 AE_WRITE(sc, CSR_TXPOLL, TXPOLL_TPD); 752 AE_BARRIER(sc); 753 754 /* Set a watchdog timer in case the chip flakes out. */ 755 ifp->if_timer = 5; 756 } 757 } 758 759 /* 760 * ae_watchdog: [ifnet interface function] 761 * 762 * Watchdog timer handler. 763 */ 764 static void 765 ae_watchdog(struct ifnet *ifp) 766 { 767 struct ae_softc *sc = ifp->if_softc; 768 int doing_transmit; 769 770 doing_transmit = (! SIMPLEQ_EMPTY(&sc->sc_txdirtyq)); 771 772 if (doing_transmit) { 773 printf("%s: transmit timeout\n", device_xname(sc->sc_dev)); 774 if_statinc(ifp, if_oerrors); 775 } 776 else 777 printf("%s: spurious watchdog timeout\n", device_xname(sc->sc_dev)); 778 779 (void) ae_init(ifp); 780 781 /* Try to get more packets going. */ 782 ae_start(ifp); 783 } 784 785 /* If the interface is up and running, only modify the receive 786 * filter when changing to/from promiscuous mode. Otherwise return 787 * ENETRESET so that ether_ioctl will reset the chip. 
 */
static int
ae_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct ae_softc *sc = ifp->if_softc;
	/* Flag bits that changed since the last time we saw them. */
	u_short change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		ae_filter_setup(sc);
	return 0;
}

/*
 * ae_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
ae_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ae_softc *sc = ifp->if_softc;
	int s, error;

	/* Block network interrupts while we fiddle with the interface. */
	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed.  Set the
			 * hardware filter accordingly.
			 */
			ae_filter_setup(sc);
		}
		error = 0;
	}

	/* Try to get more packets going. */
	if (AE_IS_ENABLED(sc))
		ae_start(ifp);

	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

/*
 * ae_intr:
 *
 *	Interrupt service routine.
 */
int
ae_intr(void *arg)
{
	struct ae_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t status, rxstatus, txstatus;
	int handled = 0, txthresh;

	DPRINTF(sc, ("%s: ae_intr\n", device_xname(sc->sc_dev)));

#ifdef DEBUG
	if (AE_IS_ENABLED(sc) == 0)
		panic("%s: ae_intr: not enabled", device_xname(sc->sc_dev));
#endif

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(sc->sc_dev)) {
		printf("spurious?!?\n");
		return (0);
	}

	/* Loop until the chip stops asserting interrupt conditions. */
	for (;;) {
		status = AE_READ(sc, CSR_STATUS);
		if (status) {
			/* Ack everything we saw (write-1-to-clear). */
			AE_WRITE(sc, CSR_STATUS, status);
			AE_BARRIER(sc);
		}

		if ((status & sc->sc_inten) == 0)
			break;

		handled = 1;

		rxstatus = status & sc->sc_rxint_mask;
		txstatus = status & sc->sc_txint_mask;

		if (rxstatus) {
			/* Grab any new packets. */
			ae_rxintr(sc);

			if (rxstatus & STATUS_RU) {
				printf("%s: receive ring overrun\n",
				    device_xname(sc->sc_dev));
				/* Get the receive process going again. */
				AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
				AE_BARRIER(sc);
				break;
			}
		}

		if (txstatus) {
			/* Sweep up transmit descriptors. */
			ae_txintr(sc);

			if (txstatus & STATUS_TJT)
				printf("%s: transmit jabber timeout\n",
				    device_xname(sc->sc_dev));

			if (txstatus & STATUS_UNF) {
				/*
				 * Increase our transmit threshold if
				 * another is available.
				 */
				txthresh = sc->sc_txthresh + 1;
				if (ae_txthresh[txthresh].txth_name != NULL) {
					uint32_t opmode;
					/* Idle the transmit process. */
					opmode = AE_READ(sc, CSR_OPMODE);
					ae_idle(sc, OPMODE_ST);

					sc->sc_txthresh = txthresh;
					opmode &= ~(OPMODE_TR | OPMODE_SF);
					opmode |=
					    ae_txthresh[txthresh].txth_opmode;
					printf("%s: transmit underrun; new "
					    "threshold: %s\n",
					    device_xname(sc->sc_dev),
					    ae_txthresh[txthresh].txth_name);

					/*
					 * Set the new threshold and restart
					 * the transmit process.
					 */
					AE_WRITE(sc, CSR_OPMODE, opmode);
					AE_BARRIER(sc);
				}
				/*
				 * XXX Log every Nth underrun from
				 * XXX now on?
				 */
			}
		}

		if (status & (STATUS_TPS | STATUS_RPS)) {
			if (status & STATUS_TPS)
				printf("%s: transmit process stopped\n",
				    device_xname(sc->sc_dev));
			if (status & STATUS_RPS)
				printf("%s: receive process stopped\n",
				    device_xname(sc->sc_dev));
			/* Reinitialize to recover the DMA engines. */
			(void) ae_init(ifp);
			break;
		}

		if (status & STATUS_SE) {
			const char *str;

			if (status & STATUS_TX_ABORT)
				str = "tx abort";
			else if (status & STATUS_RX_ABORT)
				str = "rx abort";
			else
				str = "unknown error";

			printf("%s: fatal system error: %s\n",
			    device_xname(sc->sc_dev), str);
			(void) ae_init(ifp);
			break;
		}

		/*
		 * Not handled:
		 *
		 * Transmit buffer unavailable -- normal
		 * condition, nothing to do, really.
		 *
		 * General purpose timer expired -- we don't
		 * use the general purpose timer.
		 *
		 * Early receive interrupt -- not available on
		 * all chips, we just use RI.  We also only
		 * use single-segment receive DMA, so this
		 * is mostly useless.
		 */
	}

	/* Try to get more packets going. */
	if_schedule_deferred_start(ifp);

	if (handled)
		rnd_add_uint32(&sc->sc_rnd_source, status);
	return (handled);
}

/*
 * ae_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
ae_rxintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr;; i = AE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		AE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].ad_status;

		if (rxstat & ADSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If any collisions were seen on the wire, count one.
1021 */ 1022 if (rxstat & ADSTAT_Rx_CS) 1023 if_statinc(ifp, if_collisions); 1024 1025 /* 1026 * If an error occurred, update stats, clear the status 1027 * word, and leave the packet buffer in place. It will 1028 * simply be reused the next time the ring comes around. 1029 * If 802.1Q VLAN MTU is enabled, ignore the Frame Too Long 1030 * error. 1031 */ 1032 if (rxstat & ADSTAT_ES && 1033 ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) == 0 || 1034 (rxstat & (ADSTAT_Rx_DE | ADSTAT_Rx_RF | 1035 ADSTAT_Rx_DB | ADSTAT_Rx_CE)) != 0)) { 1036 #define PRINTERR(bit, str) \ 1037 if (rxstat & (bit)) \ 1038 printf("%s: receive error: %s\n", \ 1039 device_xname(sc->sc_dev), str) 1040 if_statinc(ifp, if_ierrors); 1041 PRINTERR(ADSTAT_Rx_DE, "descriptor error"); 1042 PRINTERR(ADSTAT_Rx_RF, "runt frame"); 1043 PRINTERR(ADSTAT_Rx_TL, "frame too long"); 1044 PRINTERR(ADSTAT_Rx_RE, "MII error"); 1045 PRINTERR(ADSTAT_Rx_DB, "dribbling bit"); 1046 PRINTERR(ADSTAT_Rx_CE, "CRC error"); 1047 #undef PRINTERR 1048 AE_INIT_RXDESC(sc, i); 1049 continue; 1050 } 1051 1052 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1053 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1054 1055 /* 1056 * No errors; receive the packet. Note the chip 1057 * includes the CRC with every packet. 1058 */ 1059 len = ADSTAT_Rx_LENGTH(rxstat) - ETHER_CRC_LEN; 1060 1061 /* 1062 * XXX: the Atheros part can align on half words. what 1063 * is the performance implication of this? Probably 1064 * minimal, and we should use it... 1065 */ 1066 #ifdef __NO_STRICT_ALIGNMENT 1067 /* 1068 * Allocate a new mbuf cluster. If that fails, we are 1069 * out of memory, and must drop the packet and recycle 1070 * the buffer that's already attached to this descriptor. 
1071 */ 1072 m = rxs->rxs_mbuf; 1073 if (ae_add_rxbuf(sc, i) != 0) { 1074 if_statinc(ifp, if_ierrors); 1075 AE_INIT_RXDESC(sc, i); 1076 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1077 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1078 continue; 1079 } 1080 #else 1081 /* 1082 * The chip's receive buffers must be 4-byte aligned. 1083 * But this means that the data after the Ethernet header 1084 * is misaligned. We must allocate a new buffer and 1085 * copy the data, shifted forward 2 bytes. 1086 */ 1087 MGETHDR(m, M_DONTWAIT, MT_DATA); 1088 if (m == NULL) { 1089 dropit: 1090 if_statinc(ifp, if_ierrors); 1091 AE_INIT_RXDESC(sc, i); 1092 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1093 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1094 continue; 1095 } 1096 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); 1097 if (len > (MHLEN - 2)) { 1098 MCLGET(m, M_DONTWAIT); 1099 if ((m->m_flags & M_EXT) == 0) { 1100 m_freem(m); 1101 goto dropit; 1102 } 1103 } 1104 m->m_data += 2; 1105 1106 /* 1107 * Note that we use clusters for incoming frames, so the 1108 * buffer is virtually contiguous. 1109 */ 1110 memcpy(mtod(m, void *), mtod(rxs->rxs_mbuf, void *), len); 1111 1112 /* Allow the receive descriptor to continue using its mbuf. */ 1113 AE_INIT_RXDESC(sc, i); 1114 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1115 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1116 #endif /* __NO_STRICT_ALIGNMENT */ 1117 1118 m_set_rcvif(m, ifp); 1119 m->m_pkthdr.len = m->m_len = len; 1120 1121 /* Pass it on. */ 1122 if_percpuq_enqueue(ifp->if_percpuq, m); 1123 } 1124 1125 /* Update the receive pointer. */ 1126 sc->sc_rxptr = i; 1127 } 1128 1129 /* 1130 * ae_txintr: 1131 * 1132 * Helper; handle transmit interrupts. 
 */
static void
ae_txintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_txsoft *txs;
	uint32_t txstat;

	DPRINTF(sc, ("%s: ae_txintr: sc_flags 0x%08x\n",
	    device_xname(sc->sc_dev), sc->sc_flags));

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		/* Pull the descriptor(s) for this job back from the chip. */
		AE_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

#ifdef AE_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = AE_NEXTTX(i)) {
				printf(" descriptor %d:\n", i);
				printf(" ad_status: 0x%08x\n",
				    sc->sc_txdescs[i].ad_status);
				printf(" ad_ctl: 0x%08x\n",
				    sc->sc_txdescs[i].ad_ctl);
				printf(" ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr1);
				printf(" ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr2);
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * If the chip still owns the last descriptor of this job,
		 * the frame is not done yet; stop here, the remaining jobs
		 * in the dirty queue are behind this one.
		 */
		txstat = sc->sc_txdescs[txs->txs_lastdesc].ad_status;
		if (txstat & ADSTAT_OWN)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		/* Give the descriptors back to the free pool. */
		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		/*
		 * Check for errors and collisions.
		 */
#ifdef AE_STATS
		if (txstat & ADSTAT_Tx_UF)
			sc->sc_stats.ts_tx_uf++;
		if (txstat & ADSTAT_Tx_TO)
			sc->sc_stats.ts_tx_to++;
		if (txstat & ADSTAT_Tx_EC)
			sc->sc_stats.ts_tx_ec++;
		if (txstat & ADSTAT_Tx_LC)
			sc->sc_stats.ts_tx_lc++;
#endif

		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		/* Underflow and timeout both count as output errors. */
		if (txstat & (ADSTAT_Tx_UF | ADSTAT_Tx_TO))
			if_statinc_ref(ifp, nsr, if_oerrors);

		/* Excessive collisions means the chip gave up after 16. */
		if (txstat & ADSTAT_Tx_EC)
			if_statadd_ref(ifp, nsr, if_collisions, 16);
		else if (ADSTAT_Tx_COLLISIONS(txstat))
			if_statadd_ref(ifp, nsr, if_collisions,
			    ADSTAT_Tx_COLLISIONS(txstat));
		if (txstat & ADSTAT_Tx_LC)
			if_statinc_ref(ifp, nsr, if_collisions);

		if_statinc_ref(ifp, nsr, if_opackets);
		IF_STAT_PUTREF(ifp);
	}

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (txs == NULL)
		ifp->if_timer = 0;
}

#ifdef AE_STATS
/* Dump the private transmit-error counters (debug aid). */
void
ae_print_stats(struct ae_softc *sc)
{

	printf("%s: tx_uf %lu, tx_to %lu, tx_ec %lu, tx_lc %lu\n",
	    device_xname(sc->sc_dev),
	    sc->sc_stats.ts_tx_uf, sc->sc_stats.ts_tx_to,
	    sc->sc_stats.ts_tx_ec, sc->sc_stats.ts_tx_lc);
}
#endif

/*
 * ae_reset:
 *
 *	Perform a soft reset on the chip.
 */
void
ae_reset(struct ae_softc *sc)
{
	int i;

	AE_WRITE(sc, CSR_BUSMODE, BUSMODE_SWR);
	AE_BARRIER(sc);

	/*
	 * The chip doesn't take itself out of reset automatically.
	 * We need to do so after 2us.
	 * NOTE(review): the comment says 2us but delay(10) waits 10us —
	 * harmless (longer than required), but the comment and code
	 * disagree; confirm which is intended.
	 */
	delay(10);
	AE_WRITE(sc, CSR_BUSMODE, 0);
	AE_BARRIER(sc);

	/* Poll up to 1000 * 10us for the self-clearing SWR bit to drop. */
	for (i = 0; i < 1000; i++) {
		/*
		 * Wait a bit for the reset to complete before peeking
		 * at the chip again.
		 */
		delay(10);
		if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR) == 0)
			break;
	}

	if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR))
		printf("%s: reset failed to complete\n", device_xname(sc->sc_dev));

	delay(1000);
}

/*
 * ae_init:		[ ifnet interface function ]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
ae_init(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct ae_txsoft *txs;
	struct ae_rxsoft *rxs;
	const uint8_t *enaddr;
	int i, error = 0;

	if ((error = ae_enable(sc)) != 0)
		goto out;

	/*
	 * Cancel any pending I/O.
	 */
	ae_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * Initialize the BUSMODE register.
	 */
	AE_WRITE(sc, CSR_BUSMODE,
	    /* XXX: not sure if this is a good thing or not... */
	    //BUSMODE_ALIGN_16B |
	    BUSMODE_BAR | BUSMODE_BLE | BUSMODE_PBL_4LW);
	AE_BARRIER(sc);

	/*
	 * Initialize the transmit descriptor ring.  Each descriptor's
	 * second buffer pointer chains to the next descriptor, and the
	 * last one is marked end-of-ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < AE_NTXDESC; i++) {
		sc->sc_txdescs[i].ad_ctl = 0;
		sc->sc_txdescs[i].ad_bufaddr2 =
		    AE_CDTXADDR(sc, AE_NEXTTX(i));
	}
	sc->sc_txdescs[AE_NTXDESC - 1].ad_ctl |= ADCTL_ER;
	AE_CDTXSYNC(sc, 0, AE_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = AE_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialize the transmit job descriptors.
	 */
	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = ae_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				ae_rxdrain(sc);
				goto out;
			}
		} else
			AE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	/* normal interrupts */
	sc->sc_inten = STATUS_TI | STATUS_TU | STATUS_RI | STATUS_NIS;

	/* abnormal interrupts */
	sc->sc_inten |= STATUS_TPS | STATUS_TJT | STATUS_UNF |
	    STATUS_RU | STATUS_RPS | STATUS_SE | STATUS_AIS;

	sc->sc_rxint_mask = STATUS_RI | STATUS_RU;
	sc->sc_txint_mask = STATUS_TI | STATUS_UNF | STATUS_TJT;

	sc->sc_rxint_mask &= sc->sc_inten;
	sc->sc_txint_mask &= sc->sc_inten;

	AE_WRITE(sc, CSR_INTEN, sc->sc_inten);
	/* Writing 1s acknowledges (clears) any stale status bits. */
	AE_WRITE(sc, CSR_STATUS, 0xffffffff);

	/*
	 * Give the transmit and receive rings to the chip.
	 */
	AE_WRITE(sc, CSR_TXLIST, AE_CDTXADDR(sc, sc->sc_txnext));
	AE_WRITE(sc, CSR_RXLIST, AE_CDRXADDR(sc, sc->sc_rxptr));
	AE_BARRIER(sc);

	/*
	 * Set the station address.
	 * NOTE(review): enaddr[5] << 16 leaves bits 8-15 of MACHI clear;
	 * verify against the MACHI register layout (a byte-packed layout
	 * would use << 8 here).
	 */
	enaddr = CLLADDR(ifp->if_sadl);
	AE_WRITE(sc, CSR_MACHI, enaddr[5] << 16 | enaddr[4]);
	AE_WRITE(sc, CSR_MACLO, enaddr[3] << 24 | enaddr[2] << 16 |
	    enaddr[1] << 8 | enaddr[0]);
	AE_BARRIER(sc);

	/*
	 * Set the receive filter.  This will start the transmit and
	 * receive processes.
	 */
	ae_filter_setup(sc);

	/*
	 * Set the current media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Start the mac.
	 */
	AE_SET(sc, CSR_MACCTL, MACCTL_RE | MACCTL_TE);
	AE_BARRIER(sc);

	/*
	 * Write out the opmode.
	 */
	AE_WRITE(sc, CSR_OPMODE, OPMODE_SR | OPMODE_ST |
	    ae_txthresh[sc->sc_txthresh].txth_opmode);
	/*
	 * Start the receive process.
	 */
	AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
	AE_BARRIER(sc);

	if (sc->sc_tick != NULL) {
		/*
		 * Start the one second clock.
		 * NOTE(review): hz >> 3 is 1/8 second, not one second —
		 * ae_mii_tick reschedules itself at hz, so only the first
		 * tick is early; confirm that is intentional.
		 */
		callout_reset(&sc->sc_tick_callout, hz >> 3, sc->sc_tick, sc);
	}

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	sc->sc_if_flags = ifp->if_flags;

 out:
	if (error) {
		ifp->if_flags &= ~IFF_RUNNING;
		ifp->if_timer = 0;
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	}
	return (error);
}

/*
 * ae_enable:
 *
 *	Enable the chip: establish the interrupt handler (idempotent;
 *	a no-op if AE_ENABLED is already set).
 */
static int
ae_enable(struct ae_softc *sc)
{

	if (AE_IS_ENABLED(sc) == 0) {
		sc->sc_ih = arbus_intr_establish(sc->sc_cirq, sc->sc_mirq,
		    ae_intr, sc);
		if (sc->sc_ih == NULL) {
			printf("%s: unable to establish interrupt\n",
			    device_xname(sc->sc_dev));
			return (EIO);
		}
		sc->sc_flags |= AE_ENABLED;
	}
	return (0);
}

/*
 * ae_disable:
 *
 *	Disable the chip: tear down the interrupt handler.
 */
static void
ae_disable(struct ae_softc *sc)
{

	if (AE_IS_ENABLED(sc)) {
		arbus_intr_disestablish(sc->sc_ih);
		sc->sc_flags &= ~AE_ENABLED;
	}
}

/*
 * ae_power:
 *
 *	Power management (suspend/resume) hook.
 */
static void
ae_power(int why, void *arg)
{
	struct ae_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	/*
	 * NOTE(review): debug leftover.  Casting a pointer to uint32_t
	 * truncates on LP64 and the matching format should be %p, not %x;
	 * consider removing this printf entirely.
	 */
	printf("power called: %d, %x\n", why, (uint32_t)arg);
	s = splnet();
	switch (why) {
	case PWR_STANDBY:
		/* do nothing! */
		break;
	case PWR_SUSPEND:
		ae_stop(ifp, 0);
		ae_disable(sc);
		break;
	case PWR_RESUME:
		if (ifp->if_flags & IFF_UP) {
			ae_enable(sc);
			ae_init(ifp);
		}
		break;
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
	case PWR_SOFTRESUME:
		break;
	}
	splx(s);
}

/*
 * ae_rxdrain:
 *
 *	Drain the receive queue: unload and free every posted rx mbuf.
 */
static void
ae_rxdrain(struct ae_softc *sc)
{
	struct ae_rxsoft *rxs;
	int i;

	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * ae_stop:		[ ifnet interface function ]
 *
 *	Stop transmission on the interface.
 */
static void
ae_stop(struct ifnet *ifp, int disable)
{
	struct ae_softc *sc = ifp->if_softc;
	struct ae_txsoft *txs;

	if (sc->sc_tick != NULL) {
		/* Stop the one second clock. */
		callout_stop(&sc->sc_tick_callout);
	}

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	AE_WRITE(sc, CSR_INTEN, 0);

	/* Stop the transmit and receive processes. */
	AE_WRITE(sc, CSR_OPMODE, 0);
	AE_WRITE(sc, CSR_RXLIST, 0);
	AE_WRITE(sc, CSR_TXLIST, 0);
	AE_CLR(sc, CSR_MACCTL, MACCTL_TE | MACCTL_RE);
	AE_BARRIER(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;

	if (disable) {
		ae_rxdrain(sc);
		ae_disable(sc);
	}

	/*
	 * Reset the chip (needed on some flavors to actually disable it).
	 */
	ae_reset(sc);
}

/*
 * ae_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *	Returns 0 on success or ENOBUFS if no mbuf/cluster is available;
 *	on mbuf shortage the descriptor keeps its old buffer (if any).
 */
static int
ae_add_rxbuf(struct ae_softc *sc, int idx)
{
	struct ae_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Map the whole cluster; the chip DMAs directly into it. */
	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("ae_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	AE_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * ae_filter_setup:
 *
 *	Set the chip's receive filter.
 */
static void
ae_filter_setup(struct ae_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hash, mchash[2];
	uint32_t macctl = 0;

	/*
	 * If the chip is running, we need to reset the interface,
	 * and will revisit here (with IFF_RUNNING) clear.  The
	 * chip seems to really not like to have its multicast
	 * filter programmed without a reset.
	 */
	if (ifp->if_flags & IFF_RUNNING) {
		(void) ae_init(ifp);
		return;
	}

	DPRINTF(sc, ("%s: ae_filter_setup: sc_flags 0x%08x\n",
	    device_xname(sc->sc_dev), sc->sc_flags));

	macctl = AE_READ(sc, CSR_MACCTL);
	macctl &= ~(MACCTL_PR | MACCTL_PM);
	macctl |= MACCTL_HASH;
	macctl |= MACCTL_HBD;
	/*
	 * NOTE(review): this unconditional MACCTL_PR forces promiscuous
	 * mode even when IFF_PROMISC is not set, defeating the hash
	 * filter programmed below (and the &= ~MACCTL_PR two lines up).
	 * It looks like a debugging leftover — confirm and remove.
	 */
	macctl |= MACCTL_PR;

	if (ifp->if_flags & IFF_PROMISC) {
		macctl |= MACCTL_PR;
		goto allmulti;
	}

	mchash[0] = mchash[1] = 0;

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ETHER_UNLOCK(ec);
			goto allmulti;
		}

		/* Verify whether we use big or little endian hashes */
		hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3f;
		mchash[hash >> 5] |= 1 << (hash & 0x1f);
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);
	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mchash[0] = mchash[1] = 0xffffffff;
	macctl |= MACCTL_PM;

 setit:
	/*
	 * NOTE(review): both halves of the hash table are written to
	 * CSR_HTHI; the second write almost certainly should target the
	 * low hash register (CSR_HTLO or equivalent), otherwise mchash[0]
	 * is lost and the low register is never programmed.  Verify
	 * against the register definitions before fixing.
	 */
	AE_WRITE(sc, CSR_HTHI, mchash[0]);
	AE_WRITE(sc, CSR_HTHI, mchash[1]);

	AE_WRITE(sc, CSR_MACCTL, macctl);
	AE_BARRIER(sc);

	DPRINTF(sc, ("%s: ae_filter_setup: returning %x\n",
	    device_xname(sc->sc_dev), macctl));
}

/*
 * ae_idle:
 *
 *	Cause the transmit and/or receive processes to go idle.
1732 */ 1733 void 1734 ae_idle(struct ae_softc *sc, uint32_t bits) 1735 { 1736 static const char * const txstate_names[] = { 1737 "STOPPED", 1738 "RUNNING - FETCH", 1739 "RUNNING - WAIT", 1740 "RUNNING - READING", 1741 "-- RESERVED --", 1742 "RUNNING - SETUP", 1743 "SUSPENDED", 1744 "RUNNING - CLOSE", 1745 }; 1746 static const char * const rxstate_names[] = { 1747 "STOPPED", 1748 "RUNNING - FETCH", 1749 "RUNNING - CHECK", 1750 "RUNNING - WAIT", 1751 "SUSPENDED", 1752 "RUNNING - CLOSE", 1753 "RUNNING - FLUSH", 1754 "RUNNING - QUEUE", 1755 }; 1756 1757 uint32_t csr, ackmask = 0; 1758 int i; 1759 1760 if (bits & OPMODE_ST) 1761 ackmask |= STATUS_TPS; 1762 1763 if (bits & OPMODE_SR) 1764 ackmask |= STATUS_RPS; 1765 1766 AE_CLR(sc, CSR_OPMODE, bits); 1767 1768 for (i = 0; i < 1000; i++) { 1769 if (AE_ISSET(sc, CSR_STATUS, ackmask) == ackmask) 1770 break; 1771 delay(10); 1772 } 1773 1774 csr = AE_READ(sc, CSR_STATUS); 1775 if ((csr & ackmask) != ackmask) { 1776 if ((bits & OPMODE_ST) != 0 && (csr & STATUS_TPS) == 0 && 1777 (csr & STATUS_TS) != STATUS_TS_STOPPED) { 1778 printf("%s: transmit process failed to idle: " 1779 "state %s\n", device_xname(sc->sc_dev), 1780 txstate_names[(csr & STATUS_TS) >> 20]); 1781 } 1782 if ((bits & OPMODE_SR) != 0 && (csr & STATUS_RPS) == 0 && 1783 (csr & STATUS_RS) != STATUS_RS_STOPPED) { 1784 printf("%s: receive process failed to idle: " 1785 "state %s\n", device_xname(sc->sc_dev), 1786 rxstate_names[(csr & STATUS_RS) >> 17]); 1787 } 1788 } 1789 } 1790 1791 /***************************************************************************** 1792 * Support functions for MII-attached media. 1793 *****************************************************************************/ 1794 1795 /* 1796 * ae_mii_tick: 1797 * 1798 * One second timer, used to tick the MII. 
 */
static void
ae_mii_tick(void *arg)
{
	struct ae_softc *sc = arg;
	int s;

	/* Device may have been detached/suspended since the callout fired. */
	if (!device_is_active(sc->sc_dev))
		return;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	/* Re-arm ourselves for one second from now. */
	callout_reset(&sc->sc_tick_callout, hz, sc->sc_tick, sc);
}

/*
 * ae_mii_statchg:	[mii interface function]
 *
 *	Callback from PHY when media changes.  Programs duplex and
 *	flow-control into the MAC to match the negotiated media.
 */
static void
ae_mii_statchg(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	uint32_t macctl, flowc;

	//opmode = AE_READ(sc, CSR_OPMODE);
	macctl = AE_READ(sc, CSR_MACCTL);

	/* XXX: do we need to do this? */
	/* Idle the transmit and receive processes. */
	//ae_idle(sc, OPMODE_ST | OPMODE_SR);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		/* Full duplex: enable flow control, no rx-while-tx drop. */
		flowc = FLOWC_FCE;
		macctl &= ~MACCTL_DRO;
		macctl |= MACCTL_FDX;
	} else {
		flowc = 0;	/* cannot do flow control in HDX */
		macctl |= MACCTL_DRO;
		macctl &= ~MACCTL_FDX;
	}

	AE_WRITE(sc, CSR_FLOWC, flowc);
	AE_WRITE(sc, CSR_MACCTL, macctl);

	/* restore operational mode */
	//AE_WRITE(sc, CSR_OPMODE, opmode);
	AE_BARRIER(sc);
}

/*
 * ae_mii_readreg:
 *
 *	Read a PHY register.  Returns 0 and stores the 16-bit value in
 *	*val on success, or ETIMEDOUT if the MII never cleared BUSY.
 */
static int
ae_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	struct ae_softc *sc = device_private(self);
	uint32_t addr;
	int i;

	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT);
	AE_WRITE(sc, CSR_MIIADDR, addr);
	AE_BARRIER(sc);
	/*
	 * NOTE(review): this spins up to 1e8 iterations with no delay()
	 * between register reads — a pure busy-wait bounded only by bus
	 * read latency.  Consider a delay per iteration with a much
	 * smaller count, as ae_reset does.
	 */
	for (i = 0; i < 100000000; i++) {
		if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
			break;
	}

	if (i >= 100000000)
		return ETIMEDOUT;

	*val = AE_READ(sc, CSR_MIIDATA) & 0xffff;
	return 0;
}

/*
 * ae_mii_writereg:
 *
 *	Write a PHY register.
1883 */ 1884 static int 1885 ae_mii_writereg(device_t self, int phy, int reg, uint16_t val) 1886 { 1887 struct ae_softc *sc = device_private(self); 1888 uint32_t addr; 1889 int i; 1890 1891 /* write the data register */ 1892 AE_WRITE(sc, CSR_MIIDATA, val); 1893 1894 /* write the address to latch it in */ 1895 addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT) | 1896 MIIADDR_WRITE; 1897 AE_WRITE(sc, CSR_MIIADDR, addr); 1898 AE_BARRIER(sc); 1899 1900 for (i = 0; i < 100000000; i++) { 1901 if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0) 1902 break; 1903 } 1904 1905 if (i >= 100000000) 1906 return ETIMEDOUT; 1907 1908 return 0; 1909 } 1910