1 /* $NetBSD: if_temac.c,v 1.20 2022/09/18 15:57:13 thorpej Exp $ */ 2 3 /* 4 * Copyright (c) 2006 Jachym Holecek 5 * All rights reserved. 6 * 7 * Written for DFC Design, s.r.o. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * Driver for Xilinx LocalLink TEMAC as wired on the GSRD platform. 34 * 35 * TODO: 36 * - Optimize 37 * - Checksum offload 38 * - Address filters 39 * - Support jumbo frames 40 */ 41 42 #include <sys/cdefs.h> 43 __KERNEL_RCSID(0, "$NetBSD: if_temac.c,v 1.20 2022/09/18 15:57:13 thorpej Exp $"); 44 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/mbuf.h> 49 #include <sys/kernel.h> 50 #include <sys/socket.h> 51 #include <sys/ioctl.h> 52 #include <sys/device.h> 53 #include <sys/bus.h> 54 #include <sys/cpu.h> 55 56 #include <uvm/uvm_extern.h> 57 58 #include <net/if.h> 59 #include <net/if_dl.h> 60 #include <net/if_media.h> 61 #include <net/if_ether.h> 62 63 #include <net/bpf.h> 64 65 #include <powerpc/ibm4xx/cpu.h> 66 67 #include <evbppc/virtex/idcr.h> 68 #include <evbppc/virtex/dev/xcvbusvar.h> 69 #include <evbppc/virtex/dev/cdmacreg.h> 70 #include <evbppc/virtex/dev/temacreg.h> 71 #include <evbppc/virtex/dev/temacvar.h> 72 73 #include <dev/mii/miivar.h> 74 75 76 /* This is outside of TEMAC's DCR window, we have to hardcode it... */ 77 #define DCR_ETH_BASE 0x0030 78 79 #define TEMAC_REGDEBUG 0 80 #define TEMAC_RXDEBUG 0 81 #define TEMAC_TXDEBUG 0 82 83 #if TEMAC_RXDEBUG > 0 || TEMAC_TXDEBUG > 0 84 #define TEMAC_DEBUG 1 85 #else 86 #define TEMAC_DEBUG 0 87 #endif 88 89 #if TEMAC_REGDEBUG > 0 90 #define TRACEREG(arg) printf arg 91 #else 92 #define TRACEREG(arg) /* nop */ 93 #endif 94 95 /* DMA control chains take up one (16KB) page. */ 96 #define TEMAC_NTXDESC 256 97 #define TEMAC_NRXDESC 256 98 99 #define TEMAC_TXQLEN 64 /* Software Tx queue length */ 100 #define TEMAC_NTXSEG 16 /* Maximum Tx segments per packet */ 101 102 #define TEMAC_NRXSEG 1 /* Maximum Rx segments per packet */ 103 #define TEMAC_RXPERIOD 1 /* Interrupt every N descriptors. */ 104 #define TEMAC_RXTIMO_HZ 100 /* Rx reaper frequency */ 105 106 /* Next Tx descriptor and descriptor's offset WRT sc_cdaddr. */ 107 #define TEMAC_TXSINC(n, i) (((n) + TEMAC_TXQLEN + (i)) % TEMAC_TXQLEN) 108 #define TEMAC_TXINC(n, i) (((n) + TEMAC_NTXDESC + (i)) % TEMAC_NTXDESC) 109 110 #define TEMAC_TXSNEXT(n) TEMAC_TXSINC((n), 1) 111 #define TEMAC_TXNEXT(n) TEMAC_TXINC((n), 1) 112 #define TEMAC_TXDOFF(n) (offsetof(struct temac_control, cd_txdesc) + \ 113 (n) * sizeof(struct cdmac_descr)) 114 115 /* Next Rx descriptor and descriptor's offset WRT sc_cdaddr. */ 116 #define TEMAC_RXINC(n, i) (((n) + TEMAC_NRXDESC + (i)) % TEMAC_NRXDESC) 117 #define TEMAC_RXNEXT(n) TEMAC_RXINC((n), 1) 118 #define TEMAC_RXDOFF(n) (offsetof(struct temac_control, cd_rxdesc) + \ 119 (n) * sizeof(struct cdmac_descr)) 120 #define TEMAC_ISINTR(i) (((i) % TEMAC_RXPERIOD) == 0) 121 #define TEMAC_ISLAST(i) ((i) == (TEMAC_NRXDESC - 1)) 122 123 124 struct temac_control { 125 struct cdmac_descr cd_txdesc[TEMAC_NTXDESC]; 126 struct cdmac_descr cd_rxdesc[TEMAC_NRXDESC]; 127 }; 128 129 struct temac_txsoft { 130 bus_dmamap_t txs_dmap; 131 struct mbuf *txs_mbuf; 132 int txs_last; 133 }; 134 135 struct temac_rxsoft { 136 bus_dmamap_t rxs_dmap; 137 struct mbuf *rxs_mbuf; 138 }; 139 140 struct temac_softc { 141 device_t sc_dev; 142 struct ethercom sc_ec; 143 #define sc_if sc_ec.ec_if 144 145 /* Peripheral registers */ 146 bus_space_tag_t sc_iot; 147 bus_space_handle_t sc_ioh; 148 149 /* CDMAC channel registers */ 150 bus_space_tag_t sc_dma_rxt; 151 bus_space_handle_t sc_dma_rxh; /* Rx channel */ 152 bus_space_handle_t sc_dma_rsh; /* Rx status */ 153 154 bus_space_tag_t sc_dma_txt; 155 bus_space_handle_t sc_dma_txh; /* Tx channel */ 156 bus_space_handle_t sc_dma_tsh; /* Tx status */ 157 158 struct temac_txsoft sc_txsoft[TEMAC_TXQLEN]; 159 struct temac_rxsoft sc_rxsoft[TEMAC_NRXDESC]; 160 161 struct callout sc_rx_timo; 162 struct callout sc_mii_tick; 163 struct mii_data sc_mii; 164 165 bus_dmamap_t sc_control_dmap; 166 #define sc_cdaddr sc_control_dmap->dm_segs[0].ds_addr 167 168 struct temac_control *sc_control_data; 169 #define sc_rxdescs sc_control_data->cd_rxdesc 170 #define sc_txdescs sc_control_data->cd_txdesc 171 172 int sc_txbusy; 173 174 int sc_txfree; 175 int sc_txcur; 176 int sc_txreap; 177 178 int sc_rxreap; 179 180 int sc_txsfree; 181 int sc_txscur; 182 int sc_txsreap; 183 184 int sc_dead; /* Rx/Tx DMA error (fatal) */ 185 int sc_rx_drained; 186 187 int sc_rx_chan; 188 int sc_tx_chan; 189 190 void *sc_sdhook; 191 void *sc_rx_ih; 192 void *sc_tx_ih; 193 194 bus_dma_tag_t sc_dmat; 195 }; 196 197 /* Device interface. */ 198 static void temac_attach(device_t, device_t, void *); 199 200 /* Ifnet interface. */ 201 static int temac_init(struct ifnet *); 202 static int temac_ioctl(struct ifnet *, u_long, void *); 203 static void temac_start(struct ifnet *); 204 static void temac_stop(struct ifnet *, int); 205 206 /* Media management. */ 207 static int temac_mii_readreg(device_t, int, int, uint16_t *); 208 static void temac_mii_statchg(struct ifnet *); 209 static void temac_mii_tick(void *); 210 static int temac_mii_writereg(device_t, int, int, uint16_t); 211 212 /* Indirect hooks. */ 213 static void temac_shutdown(void *); 214 static void temac_rx_intr(void *); 215 static void temac_tx_intr(void *); 216 217 /* Tools. */ 218 static inline void temac_rxcdsync(struct temac_softc *, int, int, int); 219 static inline void temac_txcdsync(struct temac_softc *, int, int, int); 220 static void temac_txreap(struct temac_softc *); 221 static void temac_rxreap(struct temac_softc *); 222 static int temac_rxalloc(struct temac_softc *, int, int); 223 static void temac_rxtimo(void *); 224 static void temac_rxdrain(struct temac_softc *); 225 static void temac_reset(struct temac_softc *); 226 static void temac_txkick(struct temac_softc *); 227 228 /* Register access. */ 229 static inline void gmi_write_8(uint32_t, uint32_t, uint32_t); 230 static inline void gmi_write_4(uint32_t, uint32_t); 231 static inline void gmi_read_8(uint32_t, uint32_t *, uint32_t *); 232 static inline uint32_t gmi_read_4(uint32_t); 233 static inline int hif_wait_stat(uint32_t); 234 235 #define cdmac_rx_stat(sc) \ 236 bus_space_read_4((sc)->sc_dma_rxt, (sc)->sc_dma_rsh, 0 /* XXX hack */) 237 238 #define cdmac_rx_reset(sc) \ 239 bus_space_write_4((sc)->sc_dma_rxt, (sc)->sc_dma_rsh, 0, CDMAC_STAT_RESET) 240 241 #define cdmac_rx_start(sc, val) \ 242 bus_space_write_4((sc)->sc_dma_rxt, (sc)->sc_dma_rxh, CDMAC_CURDESC, (val)) 243 244 #define cdmac_tx_stat(sc) \ 245 bus_space_read_4((sc)->sc_dma_txt, (sc)->sc_dma_tsh, 0 /* XXX hack */) 246 247 #define cdmac_tx_reset(sc) \ 248 bus_space_write_4((sc)->sc_dma_txt, (sc)->sc_dma_tsh, 0, CDMAC_STAT_RESET) 249 250 #define cdmac_tx_start(sc, val) \ 251 bus_space_write_4((sc)->sc_dma_txt, (sc)->sc_dma_txh, CDMAC_CURDESC, (val)) 252 253 254 CFATTACH_DECL_NEW(temac, sizeof(struct temac_softc), 255 xcvbus_child_match, temac_attach, NULL, NULL); 256 257 258 /* 259 * Private bus utilities. 260 */ 261 static inline int 262 hif_wait_stat(uint32_t mask) 263 { 264 int i = 0; 265 int rv = 0; 266 267 while (mask != (mfidcr(IDCR_HIF_STAT) & mask)) { 268 if (i++ > 100) { 269 printf("%s: timeout waiting for 0x%08x\n", 270 __func__, mask); 271 rv = ETIMEDOUT; 272 break; 273 } 274 delay(5); 275 } 276 277 TRACEREG(("%s: stat %#08x loops %d\n", __func__, mask, i)); 278 return rv; 279 } 280 281 static inline void 282 gmi_write_4(uint32_t addr, uint32_t lo) 283 { 284 mtidcr(IDCR_HIF_ARG0, lo); 285 mtidcr(IDCR_HIF_CTRL, (addr & HIF_CTRL_GMIADDR) | HIF_CTRL_WRITE); 286 hif_wait_stat(HIF_STAT_GMIWR); 287 288 TRACEREG(("%s: %#08x <- %#08x\n", __func__, addr, lo)); 289 } 290 291 static inline void __unused 292 gmi_write_8(uint32_t addr, uint32_t lo, uint32_t hi) 293 { 294 mtidcr(IDCR_HIF_ARG1, hi); 295 gmi_write_4(addr, lo); 296 } 297 298 static inline void __unused 299 gmi_read_8(uint32_t addr, uint32_t *lo, uint32_t *hi) 300 { 301 *lo = gmi_read_4(addr); 302 *hi = mfidcr(IDCR_HIF_ARG1); 303 } 304 305 static inline uint32_t 306 gmi_read_4(uint32_t addr) 307 { 308 uint32_t res; 309 310 mtidcr(IDCR_HIF_CTRL, addr & HIF_CTRL_GMIADDR); 311 hif_wait_stat(HIF_STAT_GMIRR); 312 313 res = mfidcr(IDCR_HIF_ARG0); 314 TRACEREG(("%s: %#08x -> %#08x\n", __func__, addr, res)); 315 return (res); 316 } 317 318 /* 319 * Generic device. 320 */ 321 static void 322 temac_attach(device_t parent, device_t self, void *aux) 323 { 324 struct xcvbus_attach_args *vaa = aux; 325 struct ll_dmac *rx = vaa->vaa_rx_dmac; 326 struct ll_dmac *tx = vaa->vaa_tx_dmac; 327 struct temac_softc *sc = device_private(self); 328 struct ifnet *ifp = &sc->sc_if; 329 struct mii_data *mii = &sc->sc_mii; 330 uint8_t enaddr[ETHER_ADDR_LEN]; 331 bus_dma_segment_t seg; 332 int error, nseg, i; 333 const char * const xname = device_xname(self); 334 335 aprint_normal(": TEMAC\n"); /* XXX will be LL_TEMAC, PLB_TEMAC */ 336 337 KASSERT(rx); 338 KASSERT(tx); 339 340 sc->sc_dev = self; 341 sc->sc_dmat = vaa->vaa_dmat; 342 sc->sc_dead = 0; 343 sc->sc_rx_drained = 1; 344 sc->sc_txbusy = 0; 345 sc->sc_iot = vaa->vaa_iot; 346 sc->sc_dma_rxt = rx->dmac_iot; 347 sc->sc_dma_txt = tx->dmac_iot; 348 349 /* 350 * Map HIF and receive/transmit dmac registers. 351 */ 352 if ((error = bus_space_map(vaa->vaa_iot, vaa->vaa_addr, TEMAC_SIZE, 0, 353 &sc->sc_ioh)) != 0) { 354 aprint_error_dev(self, "could not map registers\n"); 355 goto fail_0; 356 } 357 358 if ((error = bus_space_map(sc->sc_dma_rxt, rx->dmac_ctrl_addr, 359 CDMAC_CTRL_SIZE, 0, &sc->sc_dma_rxh)) != 0) { 360 aprint_error_dev(self, "could not map Rx control registers\n"); 361 goto fail_0; 362 } 363 if ((error = bus_space_map(sc->sc_dma_rxt, rx->dmac_stat_addr, 364 CDMAC_STAT_SIZE, 0, &sc->sc_dma_rsh)) != 0) { 365 aprint_error_dev(self, "could not map Rx status register\n"); 366 goto fail_0; 367 } 368 369 if ((error = bus_space_map(sc->sc_dma_txt, tx->dmac_ctrl_addr, 370 CDMAC_CTRL_SIZE, 0, &sc->sc_dma_txh)) != 0) { 371 aprint_error_dev(self, "could not map Tx control registers\n"); 372 goto fail_0; 373 } 374 if ((error = bus_space_map(sc->sc_dma_txt, tx->dmac_stat_addr, 375 CDMAC_STAT_SIZE, 0, &sc->sc_dma_tsh)) != 0) { 376 aprint_error_dev(self, "could not map Tx status register\n"); 377 goto fail_0; 378 } 379 380 /* 381 * Allocate and initialize DMA control chains. 382 */ 383 if ((error = bus_dmamem_alloc(sc->sc_dmat, 384 sizeof(struct temac_control), 8, 0, &seg, 1, &nseg, 0)) != 0) { 385 aprint_error_dev(self, "could not allocate control data\n"); 386 goto fail_0; 387 } 388 389 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg, 390 sizeof(struct temac_control), 391 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) { 392 aprint_error_dev(self, "could not map control data\n"); 393 goto fail_1; 394 } 395 396 if ((error = bus_dmamap_create(sc->sc_dmat, 397 sizeof(struct temac_control), 1, 398 sizeof(struct temac_control), 0, 0, &sc->sc_control_dmap)) != 0) { 399 aprint_error_dev(self, 400 "could not create control data DMA map\n"); 401 goto fail_2; 402 } 403 404 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_control_dmap, 405 sc->sc_control_data, sizeof(struct temac_control), NULL, 0)) != 0) { 406 aprint_error_dev(self, "could not load control data DMA map\n"); 407 goto fail_3; 408 } 409 410 /* 411 * Link descriptor chains. 412 */ 413 memset(sc->sc_control_data, 0, sizeof(struct temac_control)); 414 415 for (i = 0; i < TEMAC_NTXDESC; i++) { 416 sc->sc_txdescs[i].desc_next = sc->sc_cdaddr + 417 TEMAC_TXDOFF(TEMAC_TXNEXT(i)); 418 sc->sc_txdescs[i].desc_stat = CDMAC_STAT_DONE; 419 } 420 for (i = 0; i < TEMAC_NRXDESC; i++) { 421 sc->sc_rxdescs[i].desc_next = sc->sc_cdaddr + 422 TEMAC_RXDOFF(TEMAC_RXNEXT(i)); 423 sc->sc_txdescs[i].desc_stat = CDMAC_STAT_DONE; 424 } 425 426 bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap, 0, 427 sizeof(struct temac_control), 428 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 429 430 /* 431 * Initialize software state for transmit/receive jobs. 432 */ 433 for (i = 0; i < TEMAC_TXQLEN; i++) { 434 if ((error = bus_dmamap_create(sc->sc_dmat, 435 ETHER_MAX_LEN_JUMBO, TEMAC_NTXSEG, ETHER_MAX_LEN_JUMBO, 436 0, 0, &sc->sc_txsoft[i].txs_dmap)) != 0) { 437 aprint_error_dev(self, 438 "could not create Tx DMA map %d\n", 439 i); 440 goto fail_4; 441 } 442 sc->sc_txsoft[i].txs_mbuf = NULL; 443 sc->sc_txsoft[i].txs_last = 0; 444 } 445 446 for (i = 0; i < TEMAC_NRXDESC; i++) { 447 if ((error = bus_dmamap_create(sc->sc_dmat, 448 MCLBYTES, TEMAC_NRXSEG, MCLBYTES, 0, 0, 449 &sc->sc_rxsoft[i].rxs_dmap)) != 0) { 450 aprint_error_dev(self, 451 "could not create Rx DMA map %d\n", i); 452 goto fail_5; 453 } 454 sc->sc_rxsoft[i].rxs_mbuf = NULL; 455 } 456 457 /* 458 * Setup transfer interrupt handlers. 459 */ 460 error = ENOMEM; 461 462 sc->sc_rx_ih = ll_dmac_intr_establish(rx->dmac_chan, 463 temac_rx_intr, sc); 464 if (sc->sc_rx_ih == NULL) { 465 aprint_error_dev(self, "could not establish Rx interrupt\n"); 466 goto fail_5; 467 } 468 469 sc->sc_tx_ih = ll_dmac_intr_establish(tx->dmac_chan, 470 temac_tx_intr, sc); 471 if (sc->sc_tx_ih == NULL) { 472 aprint_error_dev(self, "could not establish Tx interrupt\n"); 473 goto fail_6; 474 } 475 476 /* XXXFreza: faked, should read unicast address filter. */ 477 enaddr[0] = 0x00; 478 enaddr[1] = 0x11; 479 enaddr[2] = 0x17; 480 enaddr[3] = 0xff; 481 enaddr[4] = 0xff; 482 enaddr[5] = 0x01; 483 484 /* 485 * Initialize the TEMAC. 486 */ 487 temac_reset(sc); 488 489 /* Configure MDIO link. */ 490 gmi_write_4(TEMAC_GMI_MGMTCF, GMI_MGMT_CLKDIV_100MHz | GMI_MGMT_MDIO); 491 492 /* Initialize PHY. */ 493 mii->mii_ifp = ifp; 494 mii->mii_readreg = temac_mii_readreg; 495 mii->mii_writereg = temac_mii_writereg; 496 mii->mii_statchg = temac_mii_statchg; 497 sc->sc_ec.ec_mii = mii; 498 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); 499 500 mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, 501 MII_OFFSET_ANY, 0); 502 if (LIST_FIRST(&mii->mii_phys) == NULL) { 503 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL); 504 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); 505 } else { 506 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 507 } 508 509 /* Hold PHY in reset. */ 510 bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET, TEMAC_RESET_PHY); 511 512 /* Reset EMAC. */ 513 bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET, 514 TEMAC_RESET_EMAC); 515 delay(10000); 516 517 /* Reset peripheral, awakes PHY and EMAC. */ 518 bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET, 519 TEMAC_RESET_PERIPH); 520 delay(40000); 521 522 /* (Re-)Configure MDIO link. */ 523 gmi_write_4(TEMAC_GMI_MGMTCF, GMI_MGMT_CLKDIV_100MHz | GMI_MGMT_MDIO); 524 525 /* 526 * Hook up with network stack. 527 */ 528 strcpy(ifp->if_xname, xname); 529 ifp->if_softc = sc; 530 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 531 ifp->if_ioctl = temac_ioctl; 532 ifp->if_start = temac_start; 533 ifp->if_init = temac_init; 534 ifp->if_stop = temac_stop; 535 ifp->if_watchdog = NULL; 536 IFQ_SET_READY(&ifp->if_snd); 537 IFQ_SET_MAXLEN(&ifp->if_snd, TEMAC_TXQLEN); 538 539 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU; 540 541 if_attach(ifp); 542 ether_ifattach(ifp, enaddr); 543 544 sc->sc_sdhook = shutdownhook_establish(temac_shutdown, sc); 545 if (sc->sc_sdhook == NULL) 546 aprint_error_dev(self, 547 "WARNING: unable to establish shutdown hook\n"); 548 549 callout_setfunc(&sc->sc_mii_tick, temac_mii_tick, sc); 550 callout_setfunc(&sc->sc_rx_timo, temac_rxtimo, sc); 551 552 return ; 553 554 fail_6: 555 ll_dmac_intr_disestablish(rx->dmac_chan, sc->sc_rx_ih); 556 i = TEMAC_NRXDESC; 557 fail_5: 558 for (--i; i >= 0; i--) 559 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxsoft[i].rxs_dmap); 560 i = TEMAC_TXQLEN; 561 fail_4: 562 for (--i; i >= 0; i--) 563 bus_dmamap_destroy(sc->sc_dmat, sc->sc_txsoft[i].txs_dmap); 564 fail_3: 565 bus_dmamap_destroy(sc->sc_dmat, sc->sc_control_dmap); 566 fail_2: 567 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 568 sizeof(struct temac_control)); 569 fail_1: 570 bus_dmamem_free(sc->sc_dmat, &seg, nseg); 571 fail_0: 572 aprint_error_dev(self, "error = %d\n", error); 573 } 574 575 /* 576 * Network device. 577 */ 578 static int 579 temac_init(struct ifnet *ifp) 580 { 581 struct temac_softc *sc = (struct temac_softc *)ifp->if_softc; 582 uint32_t rcr, tcr; 583 int i, error; 584 585 /* Reset DMA channels. */ 586 cdmac_tx_reset(sc); 587 cdmac_rx_reset(sc); 588 589 /* Set current media. */ 590 if ((error = ether_mediachange(ifp)) != 0) 591 return error; 592 593 callout_schedule(&sc->sc_mii_tick, hz); 594 595 /* Enable EMAC engine. */ 596 rcr = (gmi_read_4(TEMAC_GMI_RXCF1) | GMI_RX_ENABLE) & 597 ~(GMI_RX_JUMBO | GMI_RX_FCS); 598 gmi_write_4(TEMAC_GMI_RXCF1, rcr); 599 600 tcr = (gmi_read_4(TEMAC_GMI_TXCF) | GMI_TX_ENABLE) & 601 ~(GMI_TX_JUMBO | GMI_TX_FCS); 602 gmi_write_4(TEMAC_GMI_TXCF, tcr); 603 604 /* XXXFreza: Force promiscuous mode, for now. */ 605 gmi_write_4(TEMAC_GMI_AFM, GMI_AFM_PROMISC); 606 ifp->if_flags |= IFF_PROMISC; 607 608 /* Rx/Tx queues are drained -- either from attach() or stop(). */ 609 sc->sc_txsfree = TEMAC_TXQLEN; 610 sc->sc_txsreap = 0; 611 sc->sc_txscur = 0; 612 613 sc->sc_txfree = TEMAC_NTXDESC; 614 sc->sc_txreap = 0; 615 sc->sc_txcur = 0; 616 617 sc->sc_rxreap = 0; 618 619 /* Allocate and map receive buffers. */ 620 if (sc->sc_rx_drained) { 621 for (i = 0; i < TEMAC_NRXDESC; i++) { 622 if ((error = temac_rxalloc(sc, i, 1)) != 0) { 623 aprint_error_dev(sc->sc_dev, 624 "failed to allocate Rx descriptor %d\n", 625 i); 626 temac_rxdrain(sc); 627 return (error); 628 } 629 } 630 sc->sc_rx_drained = 0; 631 632 temac_rxcdsync(sc, 0, TEMAC_NRXDESC, 633 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 634 cdmac_rx_start(sc, sc->sc_cdaddr + TEMAC_RXDOFF(0)); 635 } 636 637 ifp->if_flags |= IFF_RUNNING; 638 639 return (0); 640 } 641 642 static int 643 temac_ioctl(struct ifnet *ifp, u_long cmd, void *data) 644 { 645 struct temac_softc *sc = (struct temac_softc *)ifp->if_softc; 646 int s, ret; 647 648 s = splnet(); 649 if (sc->sc_dead) 650 ret = EIO; 651 else 652 ret = ether_ioctl(ifp, cmd, data); 653 splx(s); 654 return (ret); 655 } 656 657 static void 658 temac_start(struct ifnet *ifp) 659 { 660 struct temac_softc *sc = (struct temac_softc *)ifp->if_softc; 661 struct temac_txsoft *txs; 662 struct mbuf *m; 663 bus_dmamap_t dmap; 664 int error, head, nsegs, i; 665 666 nsegs = 0; 667 head = sc->sc_txcur; 668 txs = NULL; /* gcc */ 669 670 if (sc->sc_dead) 671 return; 672 673 KASSERT(sc->sc_txfree >= 0); 674 KASSERT(sc->sc_txsfree >= 0); 675 676 /* 677 * Push mbufs into descriptor chain until we drain the interface 678 * queue or run out of descriptors. We'll mark the first segment 679 * as "done" in hope that we might put CDMAC interrupt above IPL_NET 680 * and have it start jobs & mark packets for GC preemtively for 681 * us -- creativity due to limitations in CDMAC transfer engine 682 * (it really consumes lists, not circular queues, AFAICS). 683 * 684 * We schedule one interrupt per Tx batch. 685 */ 686 while (sc->sc_txsfree) { 687 IFQ_POLL(&ifp->if_snd, m); 688 if (m == NULL) 689 break; 690 691 txs = &sc->sc_txsoft[sc->sc_txscur]; 692 dmap = txs->txs_dmap; 693 694 if (txs->txs_mbuf != NULL) 695 printf("FOO\n"); 696 if (txs->txs_last) 697 printf("BAR\n"); 698 699 if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, 700 BUS_DMA_WRITE | BUS_DMA_NOWAIT)) != 0) { 701 if (error == EFBIG) { 702 aprint_error_dev(sc->sc_dev, 703 "Tx consumes too many segments, dropped\n"); 704 IFQ_DEQUEUE(&ifp->if_snd, m); 705 m_freem(m); 706 continue; 707 } else { 708 aprint_debug_dev(sc->sc_dev, 709 "Tx stall due to resource shortage\n"); 710 break; 711 } 712 } 713 714 /* 715 * If we're short on DMA descriptors; leave this packet 716 * for later. 717 */ 718 if (dmap->dm_nsegs > sc->sc_txfree) { 719 bus_dmamap_unload(sc->sc_dmat, dmap); 720 break; 721 } 722 723 IFQ_DEQUEUE(&ifp->if_snd, m); 724 725 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize, 726 BUS_DMASYNC_PREWRITE); 727 txs->txs_mbuf = m; 728 729 /* 730 * Map the packet into descriptor chain. XXX We'll want 731 * to fill checksum offload commands here. 732 * 733 * We would be in a race if we weren't blocking CDMAC intr 734 * at this point -- we need to be locked against txreap() 735 * because of dmasync ops. 736 */ 737 738 temac_txcdsync(sc, sc->sc_txcur, dmap->dm_nsegs, 739 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 740 741 for (i = 0; i < dmap->dm_nsegs; i++) { 742 sc->sc_txdescs[sc->sc_txcur].desc_addr = 743 dmap->dm_segs[i].ds_addr; 744 sc->sc_txdescs[sc->sc_txcur].desc_size = 745 dmap->dm_segs[i].ds_len; 746 sc->sc_txdescs[sc->sc_txcur].desc_stat = 747 (i == 0 ? CDMAC_STAT_SOP : 0) | 748 (i == (dmap->dm_nsegs - 1) ? CDMAC_STAT_EOP : 0); 749 750 sc->sc_txcur = TEMAC_TXNEXT(sc->sc_txcur); 751 } 752 753 sc->sc_txfree -= dmap->dm_nsegs; 754 nsegs += dmap->dm_nsegs; 755 756 sc->sc_txscur = TEMAC_TXSNEXT(sc->sc_txscur); 757 sc->sc_txsfree--; 758 } 759 760 /* Get data running if we queued any. */ 761 if (nsegs > 0) { 762 int tail = TEMAC_TXINC(sc->sc_txcur, -1); 763 764 /* Mark the last packet in this job. */ 765 txs->txs_last = 1; 766 767 /* Mark the last descriptor in this job. */ 768 sc->sc_txdescs[tail].desc_stat |= CDMAC_STAT_STOP | 769 CDMAC_STAT_INTR; 770 temac_txcdsync(sc, head, nsegs, 771 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 772 773 temac_txkick(sc); 774 #if TEMAC_TXDEBUG > 0 775 aprint_debug_dev(sc->sc_dev, 776 "start: txcur %03d -> %03d, nseg %03d\n", 777 head, sc->sc_txcur, nsegs); 778 #endif 779 } 780 } 781 782 static void 783 temac_stop(struct ifnet *ifp, int disable) 784 { 785 struct temac_softc *sc = (struct temac_softc *)ifp->if_softc; 786 struct temac_txsoft *txs; 787 int i; 788 789 #if TEMAC_DEBUG > 0 790 aprint_debug_dev(sc->sc_dev, "stop\n"); 791 #endif 792 793 /* Down the MII. */ 794 callout_stop(&sc->sc_mii_tick); 795 mii_down(&sc->sc_mii); 796 797 /* Stop the engine. */ 798 temac_reset(sc); 799 800 /* Drain buffers queues (unconditionally). */ 801 temac_rxdrain(sc); 802 803 for (i = 0; i < TEMAC_TXQLEN; i++) { 804 txs = &sc->sc_txsoft[i]; 805 806 if (txs->txs_mbuf != NULL) { 807 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmap); 808 m_freem(txs->txs_mbuf); 809 txs->txs_mbuf = NULL; 810 txs->txs_last = 0; 811 } 812 } 813 sc->sc_txbusy = 0; 814 815 /* Acknowledge we're down. */ 816 ifp->if_flags &= ~IFF_RUNNING; 817 } 818 819 static int 820 temac_mii_readreg(device_t self, int phy, int reg, uint16_t *val) 821 { 822 int rv; 823 824 mtidcr(IDCR_HIF_ARG0, (phy << 5) | reg); 825 mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_ADDR); 826 827 if ((rv = hif_wait_stat(HIF_STAT_MIIRR)) != 0) 828 return rv; 829 830 *val = mfidcr(IDCR_HIF_ARG0) & 0xffff; 831 return 0; 832 } 833 834 static int 835 temac_mii_writereg(device_t self, int phy, int reg, uint16_t val) 836 { 837 mtidcr(IDCR_HIF_ARG0, val); 838 mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_WRVAL | HIF_CTRL_WRITE); 839 mtidcr(IDCR_HIF_ARG0, (phy << 5) | reg); 840 mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_ADDR | HIF_CTRL_WRITE); 841 return hif_wait_stat(HIF_STAT_MIIWR); 842 } 843 844 static void 845 temac_mii_statchg(struct ifnet *ifp) 846 { 847 struct temac_softc *sc = ifp->if_softc; 848 uint32_t rcf, tcf, mmc; 849 850 /* Full/half duplex link. */ 851 rcf = gmi_read_4(TEMAC_GMI_RXCF1); 852 tcf = gmi_read_4(TEMAC_GMI_TXCF); 853 854 if (sc->sc_mii.mii_media_active & IFM_FDX) { 855 gmi_write_4(TEMAC_GMI_RXCF1, rcf & ~GMI_RX_HDX); 856 gmi_write_4(TEMAC_GMI_TXCF, tcf & ~GMI_TX_HDX); 857 } else { 858 gmi_write_4(TEMAC_GMI_RXCF1, rcf | GMI_RX_HDX); 859 gmi_write_4(TEMAC_GMI_TXCF, tcf | GMI_TX_HDX); 860 } 861 862 /* Link speed. */ 863 mmc = gmi_read_4(TEMAC_GMI_MMC) & ~GMI_MMC_SPEED_MASK; 864 865 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) { 866 case IFM_10_T: 867 /* 868 * XXXFreza: the GMAC is not happy with 10Mbit ethernet, 869 * although the documentation claims it's supported. Maybe 870 * it's just my equipment... 871 */ 872 mmc |= GMI_MMC_SPEED_10; 873 break; 874 case IFM_100_TX: 875 mmc |= GMI_MMC_SPEED_100; 876 break; 877 case IFM_1000_T: 878 mmc |= GMI_MMC_SPEED_1000; 879 break; 880 } 881 882 gmi_write_4(TEMAC_GMI_MMC, mmc); 883 } 884 885 static void 886 temac_mii_tick(void *arg) 887 { 888 struct temac_softc *sc = (struct temac_softc *)arg; 889 int s; 890 891 if (!device_is_active(sc->sc_dev)) 892 return; 893 894 s = splnet(); 895 mii_tick(&sc->sc_mii); 896 splx(s); 897 898 callout_schedule(&sc->sc_mii_tick, hz); 899 } 900 901 /* 902 * External hooks. 903 */ 904 static void 905 temac_shutdown(void *arg) 906 { 907 struct temac_softc *sc = (struct temac_softc *)arg; 908 909 temac_reset(sc); 910 } 911 912 static void 913 temac_tx_intr(void *arg) 914 { 915 struct temac_softc *sc = (struct temac_softc *)arg; 916 uint32_t stat; 917 918 /* XXX: We may need to splnet() here if cdmac(4) changes. */ 919 920 if ((stat = cdmac_tx_stat(sc)) & CDMAC_STAT_ERROR) { 921 aprint_error_dev(sc->sc_dev, 922 "transmit DMA is toast (%#08x), halted!\n", 923 stat); 924 925 /* XXXFreza: how to signal this upstream? */ 926 temac_stop(&sc->sc_if, 1); 927 sc->sc_dead = 1; 928 } 929 930 #if TEMAC_DEBUG > 0 931 aprint_debug_dev(sc->sc_dev, "tx intr 0x%08x\n", stat); 932 #endif 933 temac_txreap(sc); 934 } 935 936 static void 937 temac_rx_intr(void *arg) 938 { 939 struct temac_softc *sc = (struct temac_softc *)arg; 940 uint32_t stat; 941 942 /* XXX: We may need to splnet() here if cdmac(4) changes. */ 943 944 if ((stat = cdmac_rx_stat(sc)) & CDMAC_STAT_ERROR) { 945 aprint_error_dev(sc->sc_dev, 946 "receive DMA is toast (%#08x), halted!\n", 947 stat); 948 949 /* XXXFreza: how to signal this upstream? */ 950 temac_stop(&sc->sc_if, 1); 951 sc->sc_dead = 1; 952 } 953 954 #if TEMAC_DEBUG > 0 955 aprint_debug_dev(sc->sc_dev, "rx intr 0x%08x\n", stat); 956 #endif 957 temac_rxreap(sc); 958 } 959 960 /* 961 * Utils. 962 */ 963 static inline void 964 temac_txcdsync(struct temac_softc *sc, int first, int cnt, int flag) 965 { 966 if ((first + cnt) > TEMAC_NTXDESC) { 967 bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap, 968 TEMAC_TXDOFF(first), 969 sizeof(struct cdmac_descr) * (TEMAC_NTXDESC - first), 970 flag); 971 cnt = (first + cnt) % TEMAC_NTXDESC; 972 first = 0; 973 } 974 975 bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap, 976 TEMAC_TXDOFF(first), 977 sizeof(struct cdmac_descr) * cnt, 978 flag); 979 } 980 981 static inline void 982 temac_rxcdsync(struct temac_softc *sc, int first, int cnt, int flag) 983 { 984 if ((first + cnt) > TEMAC_NRXDESC) { 985 bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap, 986 TEMAC_RXDOFF(first), 987 sizeof(struct cdmac_descr) * (TEMAC_NRXDESC - first), 988 flag); 989 cnt = (first + cnt) % TEMAC_NRXDESC; 990 first = 0; 991 } 992 993 bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap, 994 TEMAC_RXDOFF(first), 995 sizeof(struct cdmac_descr) * cnt, 996 flag); 997 } 998 999 static void 1000 temac_txreap(struct temac_softc *sc) 1001 { 1002 struct temac_txsoft *txs; 1003 bus_dmamap_t dmap; 1004 1005 /* 1006 * Transmit interrupts happen on the last descriptor of Tx jobs. 1007 * Hence, every time we're called (and we assume txintr is our 1008 * only caller!), we reap packets upto and including the one 1009 * marked as last-in-batch. 1010 * 1011 * XXX we rely on that we make EXACTLY one batch per intr, no more 1012 */ 1013 while (sc->sc_txsfree != TEMAC_TXQLEN) { 1014 txs = &sc->sc_txsoft[sc->sc_txsreap]; 1015 dmap = txs->txs_dmap; 1016 1017 sc->sc_txreap = TEMAC_TXINC(sc->sc_txreap, dmap->dm_nsegs); 1018 sc->sc_txfree += dmap->dm_nsegs; 1019 1020 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmap); 1021 m_freem(txs->txs_mbuf); 1022 txs->txs_mbuf = NULL; 1023 1024 if_statinc(&sc->sc_if, if_opackets); 1025 1026 sc->sc_txsreap = TEMAC_TXSNEXT(sc->sc_txsreap); 1027 sc->sc_txsfree++; 1028 1029 if (txs->txs_last) { 1030 txs->txs_last = 0; 1031 sc->sc_txbusy = 0; /* channel stopped now */ 1032 1033 temac_txkick(sc); 1034 break; 1035 } 1036 } 1037 } 1038 1039 static int 1040 temac_rxalloc(struct temac_softc *sc, int which, int verbose) 1041 { 1042 struct temac_rxsoft *rxs; 1043 struct mbuf *m; 1044 uint32_t stat; 1045 int error; 1046 1047 rxs = &sc->sc_rxsoft[which]; 1048 1049 /* The mbuf itself is not our problem, just clear DMA related stuff. */ 1050 if (rxs->rxs_mbuf != NULL) { 1051 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmap); 1052 rxs->rxs_mbuf = NULL; 1053 } 1054 1055 /* 1056 * We would like to store mbuf and dmap in application specific 1057 * fields of the descriptor, but that doesn't work for Rx. Shame 1058 * on Xilinx for this (and for the useless timer architecture). 1059 * 1060 * Hence each descriptor needs its own soft state. We may want 1061 * to merge multiple rxs's into a monster mbuf when we support 1062 * jumbo frames though. Also, we use single set of indexing 1063 * variables for both sc_rxdescs[] and sc_rxsoft[]. 1064 */ 1065 MGETHDR(m, M_DONTWAIT, MT_DATA); 1066 if (m == NULL) { 1067 if (verbose) 1068 aprint_debug_dev(sc->sc_dev, 1069 "out of Rx header mbufs\n"); 1070 return (ENOBUFS); 1071 } 1072 MCLAIM(m, &sc->sc_ec.ec_rx_mowner); 1073 1074 MCLGET(m, M_DONTWAIT); 1075 if ((m->m_flags & M_EXT) == 0) { 1076 if (verbose) 1077 aprint_debug_dev(sc->sc_dev, 1078 "out of Rx cluster mbufs\n"); 1079 m_freem(m); 1080 return (ENOBUFS); 1081 } 1082 1083 rxs->rxs_mbuf = m; 1084 m->m_pkthdr.len = m->m_len = MCLBYTES; 1085 1086 /* Make sure the payload after ethernet header is 4-aligned. */ 1087 m_adj(m, 2); 1088 1089 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmap, m, 1090 BUS_DMA_NOWAIT); 1091 if (error) { 1092 if (verbose) 1093 aprint_debug_dev(sc->sc_dev, 1094 "could not map Rx descriptor %d, error = %d\n", 1095 which, error); 1096 1097 rxs->rxs_mbuf = NULL; 1098 m_freem(m); 1099 1100 return (error); 1101 } 1102 1103 stat = 1104 (TEMAC_ISINTR(which) ? CDMAC_STAT_INTR : 0) | 1105 (TEMAC_ISLAST(which) ? CDMAC_STAT_STOP : 0); 1106 1107 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmap, 0, 1108 rxs->rxs_dmap->dm_mapsize, BUS_DMASYNC_PREREAD); 1109 1110 /* Descriptor post-sync, if needed, left to the caller. */ 1111 1112 sc->sc_rxdescs[which].desc_addr = rxs->rxs_dmap->dm_segs[0].ds_addr; 1113 sc->sc_rxdescs[which].desc_size = rxs->rxs_dmap->dm_segs[0].ds_len; 1114 sc->sc_rxdescs[which].desc_stat = stat; 1115 1116 /* Descriptor pre-sync, if needed, left to the caller. */ 1117 1118 return (0); 1119 } 1120 1121 static void 1122 temac_rxreap(struct temac_softc *sc) 1123 { 1124 struct ifnet *ifp = &sc->sc_if; 1125 uint32_t stat, rxstat, rxsize; 1126 struct mbuf *m; 1127 int nseg, head, tail; 1128 1129 head = sc->sc_rxreap; 1130 tail = 0; /* gcc */ 1131 nseg = 0; 1132 1133 /* 1134 * Collect finished entries on the Rx list, kick DMA if we hit 1135 * the end. DMA will always stop on the last descriptor in chain, 1136 * so it will never hit a reap-in-progress descriptor. 1137 */ 1138 while (1) { 1139 /* Maybe we previously failed to refresh this one? */ 1140 if (sc->sc_rxsoft[sc->sc_rxreap].rxs_mbuf == NULL) { 1141 if (temac_rxalloc(sc, sc->sc_rxreap, 0) != 0) 1142 break; 1143 1144 sc->sc_rxreap = TEMAC_RXNEXT(sc->sc_rxreap); 1145 continue; 1146 } 1147 temac_rxcdsync(sc, sc->sc_rxreap, 1, 1148 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1149 1150 stat = sc->sc_rxdescs[sc->sc_rxreap].desc_stat; 1151 m = NULL; 1152 1153 if ((stat & CDMAC_STAT_DONE) == 0) 1154 break; 1155 1156 /* 1157 * Count any descriptor we've collected, regardless of status. 1158 */ 1159 nseg ++; 1160 1161 /* XXXFreza: This won't work for jumbo frames. */ 1162 1163 if ((stat & (CDMAC_STAT_EOP | CDMAC_STAT_SOP)) != 1164 (CDMAC_STAT_EOP | CDMAC_STAT_SOP)) { 1165 aprint_error_dev(sc->sc_dev, 1166 "Rx packet doesn't fit in one descriptor, " 1167 "stat = %#08x\n", stat); 1168 goto badframe; 1169 } 1170 1171 /* Dissect TEMAC footer if this is end of packet. */ 1172 rxstat = sc->sc_rxdescs[sc->sc_rxreap].desc_rxstat; 1173 rxsize = sc->sc_rxdescs[sc->sc_rxreap].desc_rxsize & 1174 RXSIZE_MASK; 1175 1176 if ((rxstat & RXSTAT_GOOD) == 0 || 1177 (rxstat & RXSTAT_SICK) != 0) { 1178 aprint_error_dev(sc->sc_dev, 1179 "corrupt Rx packet, rxstat = %#08x\n", 1180 rxstat); 1181 goto badframe; 1182 } 1183 1184 /* We are now bound to succeed. */ 1185 bus_dmamap_sync(sc->sc_dmat, 1186 sc->sc_rxsoft[sc->sc_rxreap].rxs_dmap, 0, 1187 sc->sc_rxsoft[sc->sc_rxreap].rxs_dmap->dm_mapsize, 1188 BUS_DMASYNC_POSTREAD); 1189 1190 m = sc->sc_rxsoft[sc->sc_rxreap].rxs_mbuf; 1191 m_set_rcvif(m, ifp); 1192 m->m_pkthdr.len = m->m_len = rxsize; 1193 1194 badframe: 1195 /* Get ready for more work. */ 1196 tail = sc->sc_rxreap; 1197 sc->sc_rxreap = TEMAC_RXNEXT(sc->sc_rxreap); 1198 1199 /* On failures we reuse the descriptor and go ahead. */ 1200 if (m == NULL) { 1201 sc->sc_rxdescs[tail].desc_stat = 1202 (TEMAC_ISINTR(tail) ? CDMAC_STAT_INTR : 0) | 1203 (TEMAC_ISLAST(tail) ? CDMAC_STAT_STOP : 0); 1204 1205 if_statinc(ifp, if_ierrors); 1206 continue; 1207 } 1208 1209 if_percpuq_enqueue(ifp->if_percpuq, m); 1210 1211 /* Refresh descriptor, bail out if we're out of buffers. */ 1212 if (temac_rxalloc(sc, tail, 1) != 0) { 1213 sc->sc_rxreap = TEMAC_RXINC(sc->sc_rxreap, -1); 1214 aprint_error_dev(sc->sc_dev, "Rx give up for now\n"); 1215 break; 1216 } 1217 } 1218 1219 /* We may now have a contiguous ready-to-go chunk of descriptors. */ 1220 if (nseg > 0) { 1221 #if TEMAC_RXDEBUG > 0 1222 aprint_debug_dev(sc->sc_dev, 1223 "rxreap: rxreap %03d -> %03d, nseg %03d\n", 1224 head, sc->sc_rxreap, nseg); 1225 #endif 1226 temac_rxcdsync(sc, head, nseg, 1227 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1228 1229 if (TEMAC_ISLAST(tail)) 1230 cdmac_rx_start(sc, sc->sc_cdaddr + TEMAC_RXDOFF(0)); 1231 } 1232 1233 /* Ensure maximum Rx latency is kept under control. */ 1234 callout_schedule(&sc->sc_rx_timo, hz / TEMAC_RXTIMO_HZ); 1235 } 1236 1237 static void 1238 temac_rxtimo(void *arg) 1239 { 1240 struct temac_softc *sc = (struct temac_softc *)arg; 1241 int s; 1242 1243 /* We run TEMAC_RXTIMO_HZ times/sec to ensure Rx doesn't stall. */ 1244 s = splnet(); 1245 temac_rxreap(sc); 1246 splx(s); 1247 } 1248 1249 static void 1250 temac_reset(struct temac_softc *sc) 1251 { 1252 uint32_t rcr, tcr; 1253 1254 /* Kill CDMAC channels. */ 1255 cdmac_tx_reset(sc); 1256 cdmac_rx_reset(sc); 1257 1258 /* Disable receiver. */ 1259 rcr = gmi_read_4(TEMAC_GMI_RXCF1) & ~GMI_RX_ENABLE; 1260 gmi_write_4(TEMAC_GMI_RXCF1, rcr); 1261 1262 /* Disable transmitter. */ 1263 tcr = gmi_read_4(TEMAC_GMI_TXCF) & ~GMI_TX_ENABLE; 1264 gmi_write_4(TEMAC_GMI_TXCF, tcr); 1265 } 1266 1267 static void 1268 temac_rxdrain(struct temac_softc *sc) 1269 { 1270 struct temac_rxsoft *rxs; 1271 int i; 1272 1273 for (i = 0; i < TEMAC_NRXDESC; i++) { 1274 rxs = &sc->sc_rxsoft[i]; 1275 1276 if (rxs->rxs_mbuf != NULL) { 1277 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmap); 1278 m_freem(rxs->rxs_mbuf); 1279 rxs->rxs_mbuf = NULL; 1280 } 1281 } 1282 1283 sc->sc_rx_drained = 1; 1284 } 1285 1286 static void 1287 temac_txkick(struct temac_softc *sc) 1288 { 1289 if (sc->sc_txsoft[sc->sc_txsreap].txs_mbuf != NULL && 1290 sc->sc_txbusy == 0) { 1291 cdmac_tx_start(sc, sc->sc_cdaddr + TEMAC_TXDOFF(sc->sc_txreap)); 1292 sc->sc_txbusy = 1; 1293 } 1294 } 1295