1 /* $NetBSD: if_emac.c,v 1.60 2025/10/04 04:44:20 thorpej Exp $ */ 2 3 /* 4 * Copyright 2001, 2002 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Simon Burge and Jason Thorpe for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 /* 39 * emac(4) supports following ibm4xx's EMACs. 40 * XXXX: ZMII and 'TCP Accelaration Hardware' not support yet... 41 * 42 * tested 43 * ------ 44 * 405EP - 10/100 x2 45 * 405EX/EXr o 10/100/1000 x2 (EXr x1), STA v2, 256bit hash-Table, RGMII 46 * 405GP/GPr o 10/100 47 * 440EP - 10/100 x2, ZMII 48 * 440GP - 10/100 x2, ZMII 49 * 440GX - 10/100/1000 x4, ZMII/RGMII(ch 2, 3), TAH(ch 2, 3) 50 * 440SP - 10/100/1000 51 * 440SPe - 10/100/1000, STA v2 52 */ 53 54 #include <sys/cdefs.h> 55 __KERNEL_RCSID(0, "$NetBSD: if_emac.c,v 1.60 2025/10/04 04:44:20 thorpej Exp $"); 56 57 #ifdef _KERNEL_OPT 58 #include "opt_emac.h" 59 #endif 60 61 #include <sys/param.h> 62 #include <sys/systm.h> 63 #include <sys/mbuf.h> 64 #include <sys/kernel.h> 65 #include <sys/socket.h> 66 #include <sys/ioctl.h> 67 #include <sys/cpu.h> 68 #include <sys/device.h> 69 70 #include <sys/rndsource.h> 71 72 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */ 73 74 #include <net/if.h> 75 #include <net/if_dl.h> 76 #include <net/if_media.h> 77 #include <net/if_ether.h> 78 79 #include <net/bpf.h> 80 81 #include <powerpc/ibm4xx/cpu.h> 82 #include <powerpc/ibm4xx/dcr4xx.h> 83 #include <powerpc/ibm4xx/mal405gp.h> 84 #include <powerpc/ibm4xx/dev/emacreg.h> 85 #include <powerpc/ibm4xx/dev/if_emacreg.h> 86 #include <powerpc/ibm4xx/dev/if_emacvar.h> 87 #include <powerpc/ibm4xx/dev/malvar.h> 88 #include <powerpc/ibm4xx/dev/opbreg.h> 89 #include <powerpc/ibm4xx/dev/opbvar.h> 90 #include <powerpc/ibm4xx/dev/plbvar.h> 91 #if defined(EMAC_ZMII_PHY) || defined(EMAC_RGMII_PHY) 92 #include <powerpc/ibm4xx/dev/rmiivar.h> 93 #endif 94 95 #include <dev/mii/miivar.h> 96 97 #include "locators.h" 98 99 100 /* 101 * Transmit descriptor list size. There are two Tx channels, each with 102 * up to 256 hardware descriptors available. We currently use one Tx 103 * channel. We tell the upper layers that they can queue a lot of 104 * packets, and we go ahead and manage up to 64 of them at a time. We 105 * allow up to 16 DMA segments per packet. 106 */ 107 #define EMAC_NTXSEGS 16 108 #define EMAC_TXQUEUELEN 64 109 #define EMAC_TXQUEUELEN_MASK (EMAC_TXQUEUELEN - 1) 110 #define EMAC_TXQUEUE_GC (EMAC_TXQUEUELEN / 4) 111 #define EMAC_NTXDESC 256 112 #define EMAC_NTXDESC_MASK (EMAC_NTXDESC - 1) 113 #define EMAC_NEXTTX(x) (((x) + 1) & EMAC_NTXDESC_MASK) 114 #define EMAC_NEXTTXS(x) (((x) + 1) & EMAC_TXQUEUELEN_MASK) 115 116 /* 117 * Receive descriptor list size. There is one Rx channel with up to 256 118 * hardware descriptors available. We allocate 64 receive descriptors, 119 * each with a 2k buffer (MCLBYTES). 120 */ 121 #define EMAC_NRXDESC 64 122 #define EMAC_NRXDESC_MASK (EMAC_NRXDESC - 1) 123 #define EMAC_NEXTRX(x) (((x) + 1) & EMAC_NRXDESC_MASK) 124 #define EMAC_PREVRX(x) (((x) - 1) & EMAC_NRXDESC_MASK) 125 126 /* 127 * Transmit/receive descriptors that are DMA'd to the EMAC. 128 */ 129 struct emac_control_data { 130 struct mal_descriptor ecd_txdesc[EMAC_NTXDESC]; 131 struct mal_descriptor ecd_rxdesc[EMAC_NRXDESC]; 132 }; 133 134 #define EMAC_CDOFF(x) offsetof(struct emac_control_data, x) 135 #define EMAC_CDTXOFF(x) EMAC_CDOFF(ecd_txdesc[(x)]) 136 #define EMAC_CDRXOFF(x) EMAC_CDOFF(ecd_rxdesc[(x)]) 137 138 /* 139 * Software state for transmit jobs. 140 */ 141 struct emac_txsoft { 142 struct mbuf *txs_mbuf; /* head of mbuf chain */ 143 bus_dmamap_t txs_dmamap; /* our DMA map */ 144 int txs_firstdesc; /* first descriptor in packet */ 145 int txs_lastdesc; /* last descriptor in packet */ 146 int txs_ndesc; /* # of descriptors used */ 147 }; 148 149 /* 150 * Software state for receive descriptors. 151 */ 152 struct emac_rxsoft { 153 struct mbuf *rxs_mbuf; /* head of mbuf chain */ 154 bus_dmamap_t rxs_dmamap; /* our DMA map */ 155 }; 156 157 /* 158 * Software state per device. 159 */ 160 struct emac_softc { 161 device_t sc_dev; /* generic device information */ 162 int sc_instance; /* instance no. */ 163 bus_space_tag_t sc_st; /* bus space tag */ 164 bus_space_handle_t sc_sh; /* bus space handle */ 165 bus_dma_tag_t sc_dmat; /* bus DMA tag */ 166 struct ethercom sc_ethercom; /* ethernet common data */ 167 void *sc_sdhook; /* shutdown hook */ 168 void *sc_powerhook; /* power management hook */ 169 170 struct mii_data sc_mii; /* MII/media information */ 171 struct callout sc_callout; /* tick callout */ 172 173 uint32_t sc_mr1; /* copy of Mode Register 1 */ 174 uint32_t sc_stacr_read; /* Read opcode of STAOPC of STACR */ 175 uint32_t sc_stacr_write; /* Write opcode of STAOPC of STACR */ 176 uint32_t sc_stacr_bits; /* misc bits of STACR */ 177 bool sc_stacr_completed; /* Operation completed of STACR */ 178 int sc_htsize; /* Hash Table size */ 179 180 bus_dmamap_t sc_cddmamap; /* control data dma map */ 181 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr 182 183 /* Software state for transmit/receive descriptors. */ 184 struct emac_txsoft sc_txsoft[EMAC_TXQUEUELEN]; 185 struct emac_rxsoft sc_rxsoft[EMAC_NRXDESC]; 186 187 /* Control data structures. */ 188 struct emac_control_data *sc_control_data; 189 #define sc_txdescs sc_control_data->ecd_txdesc 190 #define sc_rxdescs sc_control_data->ecd_rxdesc 191 192 #ifdef EMAC_EVENT_COUNTERS 193 struct evcnt sc_ev_rxintr; /* Rx interrupts */ 194 struct evcnt sc_ev_txintr; /* Tx interrupts */ 195 struct evcnt sc_ev_rxde; /* Rx descriptor interrupts */ 196 struct evcnt sc_ev_txde; /* Tx descriptor interrupts */ 197 struct evcnt sc_ev_intr; /* General EMAC interrupts */ 198 199 struct evcnt sc_ev_txreap; /* Calls to Tx descriptor reaper */ 200 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */ 201 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */ 202 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */ 203 struct evcnt sc_ev_tu; /* Tx underrun */ 204 #endif /* EMAC_EVENT_COUNTERS */ 205 206 int sc_txfree; /* number of free Tx descriptors */ 207 int sc_txnext; /* next ready Tx descriptor */ 208 209 int sc_txsfree; /* number of free Tx jobs */ 210 int sc_txsnext; /* next ready Tx job */ 211 int sc_txsdirty; /* dirty Tx jobs */ 212 213 int sc_rxptr; /* next ready RX descriptor/descsoft */ 214 215 krndsource_t rnd_source; /* random source */ 216 217 void (*sc_rmii_enable)(device_t, int); /* reduced MII enable */ 218 void (*sc_rmii_disable)(device_t, int); /* reduced MII disable*/ 219 void (*sc_rmii_speed)(device_t, int, int); /* reduced MII speed */ 220 }; 221 222 #ifdef EMAC_EVENT_COUNTERS 223 #define EMAC_EVCNT_INCR(ev) (ev)->ev_count++ 224 #else 225 #define EMAC_EVCNT_INCR(ev) /* nothing */ 226 #endif 227 228 #define EMAC_CDTXADDR(sc, x) ((sc)->sc_cddma + EMAC_CDTXOFF((x))) 229 #define EMAC_CDRXADDR(sc, x) ((sc)->sc_cddma + EMAC_CDRXOFF((x))) 230 231 #define EMAC_CDTXSYNC(sc, x, n, ops) \ 232 do { \ 233 int __x, __n; \ 234 \ 235 __x = (x); \ 236 __n = (n); \ 237 \ 238 /* If it will wrap around, sync to the end of the ring. */ \ 239 if ((__x + __n) > EMAC_NTXDESC) { \ 240 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 241 EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) * \ 242 (EMAC_NTXDESC - __x), (ops)); \ 243 __n -= (EMAC_NTXDESC - __x); \ 244 __x = 0; \ 245 } \ 246 \ 247 /* Now sync whatever is left. */ \ 248 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 249 EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) * __n, (ops)); \ 250 } while (/*CONSTCOND*/0) 251 252 #define EMAC_CDRXSYNC(sc, x, ops) \ 253 do { \ 254 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 255 EMAC_CDRXOFF((x)), sizeof(struct mal_descriptor), (ops)); \ 256 } while (/*CONSTCOND*/0) 257 258 #define EMAC_INIT_RXDESC(sc, x) \ 259 do { \ 260 struct emac_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \ 261 struct mal_descriptor *__rxd = &(sc)->sc_rxdescs[(x)]; \ 262 struct mbuf *__m = __rxs->rxs_mbuf; \ 263 \ 264 /* \ 265 * Note: We scoot the packet forward 2 bytes in the buffer \ 266 * so that the payload after the Ethernet header is aligned \ 267 * to a 4-byte boundary. \ 268 */ \ 269 __m->m_data = __m->m_ext.ext_buf + 2; \ 270 \ 271 __rxd->md_data = __rxs->rxs_dmamap->dm_segs[0].ds_addr + 2; \ 272 __rxd->md_data_len = __m->m_ext.ext_size - 2; \ 273 __rxd->md_stat_ctrl = MAL_RX_EMPTY | MAL_RX_INTERRUPT | \ 274 /* Set wrap on last descriptor. */ \ 275 (((x) == EMAC_NRXDESC - 1) ? MAL_RX_WRAP : 0); \ 276 EMAC_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \ 277 } while (/*CONSTCOND*/0) 278 279 #define EMAC_WRITE(sc, reg, val) \ 280 bus_space_write_stream_4((sc)->sc_st, (sc)->sc_sh, (reg), (val)) 281 #define EMAC_READ(sc, reg) \ 282 bus_space_read_stream_4((sc)->sc_st, (sc)->sc_sh, (reg)) 283 284 #define EMAC_SET_FILTER(aht, crc) \ 285 do { \ 286 (aht)[3 - (((crc) >> 26) >> 4)] |= 1 << (((crc) >> 26) & 0xf); \ 287 } while (/*CONSTCOND*/0) 288 #define EMAC_SET_FILTER256(aht, crc) \ 289 do { \ 290 (aht)[7 - (((crc) >> 24) >> 5)] |= 1 << (((crc) >> 24) & 0x1f); \ 291 } while (/*CONSTCOND*/0) 292 293 static int emac_match(device_t, cfdata_t, void *); 294 static void emac_attach(device_t, device_t, void *); 295 296 static int emac_intr(void *); 297 static void emac_shutdown(void *); 298 299 static void emac_start(struct ifnet *); 300 static int emac_ioctl(struct ifnet *, u_long, void *); 301 static int emac_init(struct ifnet *); 302 static void emac_stop(struct ifnet *, int); 303 static void emac_watchdog(struct ifnet *); 304 305 static int emac_add_rxbuf(struct emac_softc *, int); 306 static void emac_rxdrain(struct emac_softc *); 307 static int emac_set_filter(struct emac_softc *); 308 static int emac_txreap(struct emac_softc *); 309 310 static void emac_soft_reset(struct emac_softc *); 311 static void emac_smart_reset(struct emac_softc *); 312 313 static int emac_mii_readreg(device_t, int, int, uint16_t *); 314 static int emac_mii_writereg(device_t, int, int, uint16_t); 315 static void emac_mii_statchg(struct ifnet *); 316 static uint32_t emac_mii_wait(struct emac_softc *); 317 static void emac_mii_tick(void *); 318 319 int emac_copy_small = 0; 320 321 CFATTACH_DECL_NEW(emac, sizeof(struct emac_softc), 322 emac_match, emac_attach, NULL, NULL); 323 324 325 static int 326 emac_match(device_t parent, cfdata_t cf, void *aux) 327 { 328 struct opb_attach_args *oaa = aux; 329 330 /* match only on-chip ethernet devices */ 331 if (strcmp(oaa->opb_name, cf->cf_name) == 0) 332 return 1; 333 334 return 0; 335 } 336 337 static void 338 emac_attach(device_t parent, device_t self, void *aux) 339 { 340 struct opb_attach_args *oaa = aux; 341 struct emac_softc *sc = device_private(self); 342 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 343 struct mii_data *mii = &sc->sc_mii; 344 const char * xname = device_xname(self); 345 bus_dma_segment_t seg; 346 int error, i, nseg, opb_freq, opbc, mii_phy = MII_PHY_ANY; 347 uint8_t enaddr[ETHER_ADDR_LEN]; 348 349 bus_space_map(oaa->opb_bt, oaa->opb_addr, EMAC_NREG, 0, &sc->sc_sh); 350 351 sc->sc_dev = self; 352 sc->sc_instance = oaa->opb_instance; 353 sc->sc_st = oaa->opb_bt; 354 sc->sc_dmat = oaa->opb_dmat; 355 356 callout_init(&sc->sc_callout, 0); 357 358 aprint_naive("\n"); 359 aprint_normal(": Ethernet Media Access Controller\n"); 360 361 /* Fetch the Ethernet address. */ 362 if (! ether_getaddr(self, enaddr)) { 363 aprint_error_dev(self, "unable to get mac-address\n"); 364 return; 365 } 366 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr)); 367 368 #if defined(EMAC_ZMII_PHY) || defined(EMAC_RGMII_PHY) 369 /* Fetch the MII offset. */ 370 prop_dictionary_get_uint32(device_properties(self), 371 "mii-phy", &mii_phy); 372 373 #ifdef EMAC_ZMII_PHY 374 if (oaa->opb_flags & OPB_FLAGS_EMAC_RMII_ZMII) 375 zmii_attach(parent, sc->sc_instance, &sc->sc_rmii_enable, 376 &sc->sc_rmii_disable, &sc->sc_rmii_speed); 377 #endif 378 #ifdef EMAC_RGMII_PHY 379 if (oaa->opb_flags & OPB_FLAGS_EMAC_RMII_RGMII) 380 rgmii_attach(parent, sc->sc_instance, &sc->sc_rmii_enable, 381 &sc->sc_rmii_disable, &sc->sc_rmii_speed); 382 #endif 383 #endif 384 385 /* 386 * Allocate the control data structures, and create and load the 387 * DMA map for it. 388 */ 389 if ((error = bus_dmamem_alloc(sc->sc_dmat, 390 sizeof(struct emac_control_data), 0, 0, &seg, 1, &nseg, 0)) != 0) { 391 aprint_error_dev(self, 392 "unable to allocate control data, error = %d\n", error); 393 goto fail_0; 394 } 395 396 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg, 397 sizeof(struct emac_control_data), (void **)&sc->sc_control_data, 398 BUS_DMA_COHERENT)) != 0) { 399 aprint_error_dev(self, 400 "unable to map control data, error = %d\n", error); 401 goto fail_1; 402 } 403 404 if ((error = bus_dmamap_create(sc->sc_dmat, 405 sizeof(struct emac_control_data), 1, 406 sizeof(struct emac_control_data), 0, 0, &sc->sc_cddmamap)) != 0) { 407 aprint_error_dev(self, 408 "unable to create control data DMA map, error = %d\n", 409 error); 410 goto fail_2; 411 } 412 413 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 414 sc->sc_control_data, sizeof(struct emac_control_data), NULL, 415 0)) != 0) { 416 aprint_error_dev(self, 417 "unable to load control data DMA map, error = %d\n", error); 418 goto fail_3; 419 } 420 421 /* 422 * Create the transmit buffer DMA maps. 423 */ 424 for (i = 0; i < EMAC_TXQUEUELEN; i++) { 425 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 426 EMAC_NTXSEGS, MCLBYTES, 0, 0, 427 &sc->sc_txsoft[i].txs_dmamap)) != 0) { 428 aprint_error_dev(self, 429 "unable to create tx DMA map %d, error = %d\n", 430 i, error); 431 goto fail_4; 432 } 433 } 434 435 /* 436 * Create the receive buffer DMA maps. 437 */ 438 for (i = 0; i < EMAC_NRXDESC; i++) { 439 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 440 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 441 aprint_error_dev(self, 442 "unable to create rx DMA map %d, error = %d\n", 443 i, error); 444 goto fail_5; 445 } 446 sc->sc_rxsoft[i].rxs_mbuf = NULL; 447 } 448 449 /* Soft Reset the EMAC. The chip to a known state. */ 450 emac_soft_reset(sc); 451 452 opb_freq = opb_get_frequency(); 453 switch (opb_freq) { 454 case 33333333: opbc = STACR_OPBC_33MHZ; break; 455 case 50000000: opbc = STACR_OPBC_50MHZ; break; 456 case 66666666: opbc = STACR_OPBC_66MHZ; break; 457 case 83333333: opbc = STACR_OPBC_83MHZ; break; 458 case 100000000: opbc = STACR_OPBC_100MHZ; break; 459 460 default: 461 if (opb_freq > 100000000) { 462 opbc = STACR_OPBC_A100MHZ; 463 break; 464 } 465 aprint_error_dev(self, "unsupported OPB frequency %dMHz\n", 466 opb_freq / 1000 / 1000); 467 goto fail_5; 468 } 469 if (oaa->opb_flags & OPB_FLAGS_EMAC_GBE) { 470 sc->sc_mr1 = 471 MR1_RFS_GBE(MR1__FS_16KB) | 472 MR1_TFS_GBE(MR1__FS_16KB) | 473 MR1_TR0_MULTIPLE | 474 MR1_OBCI(opbc); 475 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 476 477 if (oaa->opb_flags & OPB_FLAGS_EMAC_STACV2) { 478 sc->sc_stacr_read = STACR_STAOPC_READ; 479 sc->sc_stacr_write = STACR_STAOPC_WRITE; 480 sc->sc_stacr_bits = STACR_OC; 481 sc->sc_stacr_completed = false; 482 } else { 483 sc->sc_stacr_read = STACR_READ; 484 sc->sc_stacr_write = STACR_WRITE; 485 sc->sc_stacr_completed = true; 486 } 487 } else { 488 /* 489 * Set up Mode Register 1 - set receive and transmit FIFOs to 490 * maximum size, allow transmit of multiple packets (only 491 * channel 0 is used). 492 * 493 * XXX: Allow pause packets?? 494 */ 495 sc->sc_mr1 = 496 MR1_RFS(MR1__FS_4KB) | 497 MR1_TFS(MR1__FS_2KB) | 498 MR1_TR0_MULTIPLE; 499 500 sc->sc_stacr_read = STACR_READ; 501 sc->sc_stacr_write = STACR_WRITE; 502 sc->sc_stacr_bits = STACR_OPBC(opbc); 503 sc->sc_stacr_completed = true; 504 } 505 506 intr_establish_xname(oaa->opb_irq, IST_LEVEL, IPL_NET, emac_intr, sc, 507 device_xname(self)); 508 mal_intr_establish(sc->sc_instance, sc); 509 510 if (oaa->opb_flags & OPB_FLAGS_EMAC_HT256) 511 sc->sc_htsize = 256; 512 else 513 sc->sc_htsize = 64; 514 515 /* Clear all interrupts */ 516 EMAC_WRITE(sc, EMAC_ISR, ISR_ALL); 517 518 /* 519 * Initialise the media structures. 520 */ 521 mii->mii_ifp = ifp; 522 mii->mii_readreg = emac_mii_readreg; 523 mii->mii_writereg = emac_mii_writereg; 524 mii->mii_statchg = emac_mii_statchg; 525 526 sc->sc_ethercom.ec_mii = mii; 527 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); 528 mii_attach(self, mii, 0xffffffff, mii_phy, MII_OFFSET_ANY, 529 MIIF_DOPAUSE); 530 if (LIST_FIRST(&mii->mii_phys) == NULL) { 531 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL); 532 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); 533 } else 534 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 535 536 ifp = &sc->sc_ethercom.ec_if; 537 strcpy(ifp->if_xname, xname); 538 ifp->if_softc = sc; 539 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 540 ifp->if_start = emac_start; 541 ifp->if_ioctl = emac_ioctl; 542 ifp->if_init = emac_init; 543 ifp->if_stop = emac_stop; 544 ifp->if_watchdog = emac_watchdog; 545 IFQ_SET_READY(&ifp->if_snd); 546 547 /* 548 * We can support 802.1Q VLAN-sized frames. 549 */ 550 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 551 552 /* 553 * Attach the interface. 554 */ 555 if_attach(ifp); 556 if_deferred_start_init(ifp, NULL); 557 ether_ifattach(ifp, enaddr); 558 559 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 560 RND_FLAG_DEFAULT); 561 562 #ifdef EMAC_EVENT_COUNTERS 563 /* 564 * Attach the event counters. 565 */ 566 evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR, 567 NULL, xname, "txintr"); 568 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR, 569 NULL, xname, "rxintr"); 570 evcnt_attach_dynamic(&sc->sc_ev_txde, EVCNT_TYPE_INTR, 571 NULL, xname, "txde"); 572 evcnt_attach_dynamic(&sc->sc_ev_rxde, EVCNT_TYPE_INTR, 573 NULL, xname, "rxde"); 574 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR, 575 NULL, xname, "intr"); 576 577 evcnt_attach_dynamic(&sc->sc_ev_txreap, EVCNT_TYPE_MISC, 578 NULL, xname, "txreap"); 579 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC, 580 NULL, xname, "txsstall"); 581 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC, 582 NULL, xname, "txdstall"); 583 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC, 584 NULL, xname, "txdrop"); 585 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC, 586 NULL, xname, "tu"); 587 #endif /* EMAC_EVENT_COUNTERS */ 588 589 /* 590 * Make sure the interface is shutdown during reboot. 591 */ 592 sc->sc_sdhook = shutdownhook_establish(emac_shutdown, sc); 593 if (sc->sc_sdhook == NULL) 594 aprint_error_dev(self, 595 "WARNING: unable to establish shutdown hook\n"); 596 597 return; 598 599 /* 600 * Free any resources we've allocated during the failed attach 601 * attempt. Do this in reverse order and fall through. 602 */ 603 fail_5: 604 for (i = 0; i < EMAC_NRXDESC; i++) { 605 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 606 bus_dmamap_destroy(sc->sc_dmat, 607 sc->sc_rxsoft[i].rxs_dmamap); 608 } 609 fail_4: 610 for (i = 0; i < EMAC_TXQUEUELEN; i++) { 611 if (sc->sc_txsoft[i].txs_dmamap != NULL) 612 bus_dmamap_destroy(sc->sc_dmat, 613 sc->sc_txsoft[i].txs_dmamap); 614 } 615 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 616 fail_3: 617 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 618 fail_2: 619 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 620 sizeof(struct emac_control_data)); 621 fail_1: 622 bus_dmamem_free(sc->sc_dmat, &seg, nseg); 623 fail_0: 624 return; 625 } 626 627 /* 628 * EMAC General interrupt handler 629 */ 630 static int 631 emac_intr(void *arg) 632 { 633 struct emac_softc *sc = arg; 634 uint32_t status; 635 636 EMAC_EVCNT_INCR(&sc->sc_ev_intr); 637 status = EMAC_READ(sc, EMAC_ISR); 638 639 /* Clear the interrupt status bits. */ 640 EMAC_WRITE(sc, EMAC_ISR, status); 641 642 return 1; 643 } 644 645 static void 646 emac_shutdown(void *arg) 647 { 648 struct emac_softc *sc = arg; 649 650 emac_stop(&sc->sc_ethercom.ec_if, 0); 651 } 652 653 654 /* 655 * ifnet interface functions 656 */ 657 658 static void 659 emac_start(struct ifnet *ifp) 660 { 661 struct emac_softc *sc = ifp->if_softc; 662 struct mbuf *m0; 663 struct emac_txsoft *txs; 664 bus_dmamap_t dmamap; 665 int error, firsttx, nexttx, lasttx, ofree, seg; 666 667 lasttx = 0; /* XXX gcc */ 668 669 if ((ifp->if_flags & IFF_RUNNING) == 0) 670 return; 671 672 /* 673 * Remember the previous number of free descriptors. 674 */ 675 ofree = sc->sc_txfree; 676 677 /* 678 * Loop through the send queue, setting up transmit descriptors 679 * until we drain the queue, or use up all available transmit 680 * descriptors. 681 */ 682 for (;;) { 683 /* Grab a packet off the queue. */ 684 IFQ_POLL(&ifp->if_snd, m0); 685 if (m0 == NULL) 686 break; 687 688 /* 689 * Get a work queue entry. Reclaim used Tx descriptors if 690 * we are running low. 691 */ 692 if (sc->sc_txsfree < EMAC_TXQUEUE_GC) { 693 emac_txreap(sc); 694 if (sc->sc_txsfree == 0) { 695 EMAC_EVCNT_INCR(&sc->sc_ev_txsstall); 696 break; 697 } 698 } 699 700 txs = &sc->sc_txsoft[sc->sc_txsnext]; 701 dmamap = txs->txs_dmamap; 702 703 /* 704 * Load the DMA map. If this fails, the packet either 705 * didn't fit in the allotted number of segments, or we 706 * were short on resources. In this case, we'll copy 707 * and try again. 708 */ 709 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 710 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 711 if (error) { 712 if (error == EFBIG) { 713 EMAC_EVCNT_INCR(&sc->sc_ev_txdrop); 714 aprint_error_ifnet(ifp, 715 "Tx packet consumes too many " 716 "DMA segments, dropping...\n"); 717 IFQ_DEQUEUE(&ifp->if_snd, m0); 718 m_freem(m0); 719 continue; 720 } 721 /* Short on resources, just stop for now. */ 722 break; 723 } 724 725 /* 726 * Ensure we have enough descriptors free to describe 727 * the packet. 728 */ 729 if (dmamap->dm_nsegs > sc->sc_txfree) { 730 /* 731 * Not enough free descriptors to transmit this 732 * packet. We haven't committed anything yet, 733 * so just unload the DMA map, put the packet 734 * back on the queue, and punt. Notify the upper 735 * layer that there are not more slots left. 736 * 737 */ 738 bus_dmamap_unload(sc->sc_dmat, dmamap); 739 EMAC_EVCNT_INCR(&sc->sc_ev_txdstall); 740 break; 741 } 742 743 IFQ_DEQUEUE(&ifp->if_snd, m0); 744 745 /* 746 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 747 */ 748 749 /* Sync the DMA map. */ 750 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 751 BUS_DMASYNC_PREWRITE); 752 753 /* 754 * Store a pointer to the packet so that we can free it 755 * later. 756 */ 757 txs->txs_mbuf = m0; 758 txs->txs_firstdesc = sc->sc_txnext; 759 txs->txs_ndesc = dmamap->dm_nsegs; 760 761 /* 762 * Initialize the transmit descriptor. 763 */ 764 firsttx = sc->sc_txnext; 765 for (nexttx = sc->sc_txnext, seg = 0; 766 seg < dmamap->dm_nsegs; 767 seg++, nexttx = EMAC_NEXTTX(nexttx)) { 768 struct mal_descriptor *txdesc = 769 &sc->sc_txdescs[nexttx]; 770 771 /* 772 * If this is the first descriptor we're 773 * enqueueing, don't set the TX_READY bit just 774 * yet. That could cause a race condition. 775 * We'll do it below. 776 */ 777 txdesc->md_data = dmamap->dm_segs[seg].ds_addr; 778 txdesc->md_data_len = dmamap->dm_segs[seg].ds_len; 779 txdesc->md_stat_ctrl = 780 (txdesc->md_stat_ctrl & MAL_TX_WRAP) | 781 (nexttx == firsttx ? 0 : MAL_TX_READY) | 782 EMAC_TXC_GFCS | EMAC_TXC_GPAD; 783 lasttx = nexttx; 784 } 785 786 /* Set the LAST bit on the last segment. */ 787 sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_LAST; 788 789 /* 790 * Set up last segment descriptor to send an interrupt after 791 * that descriptor is transmitted, and bypass existing Tx 792 * descriptor reaping method (for now...). 793 */ 794 sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_INTERRUPT; 795 796 797 txs->txs_lastdesc = lasttx; 798 799 /* Sync the descriptors we're using. */ 800 EMAC_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs, 801 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 802 803 /* 804 * The entire packet chain is set up. Give the 805 * first descriptor to the chip now. 806 */ 807 sc->sc_txdescs[firsttx].md_stat_ctrl |= MAL_TX_READY; 808 EMAC_CDTXSYNC(sc, firsttx, 1, 809 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 810 /* 811 * Tell the EMAC that a new packet is available. 812 */ 813 EMAC_WRITE(sc, EMAC_TMR0, TMR0_GNP0 | TMR0_TFAE_2); 814 815 /* Advance the tx pointer. */ 816 sc->sc_txfree -= txs->txs_ndesc; 817 sc->sc_txnext = nexttx; 818 819 sc->sc_txsfree--; 820 sc->sc_txsnext = EMAC_NEXTTXS(sc->sc_txsnext); 821 822 /* 823 * Pass the packet to any BPF listeners. 824 */ 825 bpf_mtap(ifp, m0, BPF_D_OUT); 826 } 827 828 if (sc->sc_txfree != ofree) 829 /* Set a watchdog timer in case the chip flakes out. */ 830 ifp->if_timer = 5; 831 } 832 833 static int 834 emac_ioctl(struct ifnet *ifp, u_long cmd, void *data) 835 { 836 struct emac_softc *sc = ifp->if_softc; 837 int s, error; 838 839 s = splnet(); 840 841 switch (cmd) { 842 case SIOCSIFMTU: 843 { 844 struct ifreq *ifr = (struct ifreq *)data; 845 int maxmtu; 846 847 if (sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) 848 maxmtu = EMAC_MAX_MTU; 849 else 850 maxmtu = ETHERMTU; 851 852 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > maxmtu) 853 error = EINVAL; 854 else if ((error = ifioctl_common(ifp, cmd, data)) != ENETRESET) 855 break; 856 else if (ifp->if_flags & IFF_UP) 857 error = emac_init(ifp); 858 else 859 error = 0; 860 break; 861 } 862 863 default: 864 error = ether_ioctl(ifp, cmd, data); 865 if (error == ENETRESET) { 866 /* 867 * Multicast list has changed; set the hardware filter 868 * accordingly. 869 */ 870 if (ifp->if_flags & IFF_RUNNING) 871 error = emac_set_filter(sc); 872 else 873 error = 0; 874 } 875 } 876 877 /* try to get more packets going */ 878 emac_start(ifp); 879 880 splx(s); 881 return error; 882 } 883 884 static int 885 emac_init(struct ifnet *ifp) 886 { 887 struct emac_softc *sc = ifp->if_softc; 888 struct emac_rxsoft *rxs; 889 const uint8_t *enaddr = CLLADDR(ifp->if_sadl); 890 int error, i; 891 892 error = 0; 893 894 /* Cancel any pending I/O. */ 895 emac_stop(ifp, 0); 896 897 /* Reset the chip to a known state. */ 898 emac_soft_reset(sc); 899 900 /* 901 * Initialise the transmit descriptor ring. 902 */ 903 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 904 /* set wrap on last descriptor */ 905 sc->sc_txdescs[EMAC_NTXDESC - 1].md_stat_ctrl |= MAL_TX_WRAP; 906 EMAC_CDTXSYNC(sc, 0, EMAC_NTXDESC, 907 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 908 sc->sc_txfree = EMAC_NTXDESC; 909 sc->sc_txnext = 0; 910 911 /* 912 * Initialise the transmit job descriptors. 913 */ 914 for (i = 0; i < EMAC_TXQUEUELEN; i++) 915 sc->sc_txsoft[i].txs_mbuf = NULL; 916 sc->sc_txsfree = EMAC_TXQUEUELEN; 917 sc->sc_txsnext = 0; 918 sc->sc_txsdirty = 0; 919 920 /* 921 * Initialise the receiver descriptor and receive job 922 * descriptor rings. 923 */ 924 for (i = 0; i < EMAC_NRXDESC; i++) { 925 rxs = &sc->sc_rxsoft[i]; 926 if (rxs->rxs_mbuf == NULL) { 927 if ((error = emac_add_rxbuf(sc, i)) != 0) { 928 aprint_error_ifnet(ifp, 929 "unable to allocate or map rx buffer %d," 930 " error = %d\n", 931 i, error); 932 /* 933 * XXX Should attempt to run with fewer receive 934 * XXX buffers instead of just failing. 935 */ 936 emac_rxdrain(sc); 937 goto out; 938 } 939 } else 940 EMAC_INIT_RXDESC(sc, i); 941 } 942 sc->sc_rxptr = 0; 943 944 /* 945 * Set the current media. 946 */ 947 if ((error = ether_mediachange(ifp)) != 0) 948 goto out; 949 950 /* 951 * Load the MAC address. 952 */ 953 EMAC_WRITE(sc, EMAC_IAHR, enaddr[0] << 8 | enaddr[1]); 954 EMAC_WRITE(sc, EMAC_IALR, 955 enaddr[2] << 24 | enaddr[3] << 16 | enaddr[4] << 8 | enaddr[5]); 956 957 /* Enable the transmit and receive channel on the MAL. */ 958 error = mal_start(sc->sc_instance, 959 EMAC_CDTXADDR(sc, 0), EMAC_CDRXADDR(sc, 0)); 960 if (error) 961 goto out; 962 963 sc->sc_mr1 &= ~MR1_JPSM; 964 if (ifp->if_mtu > ETHERMTU) 965 /* Enable Jumbo Packet Support Mode */ 966 sc->sc_mr1 |= MR1_JPSM; 967 968 /* Set fifos, media modes. */ 969 EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1); 970 971 /* 972 * Enable Individual and (possibly) Broadcast Address modes, 973 * runt packets, and strip padding. 974 */ 975 EMAC_WRITE(sc, EMAC_RMR, RMR_IAE | RMR_RRP | RMR_SP | RMR_TFAE_2 | 976 (ifp->if_flags & IFF_PROMISC ? RMR_PME : 0) | 977 (ifp->if_flags & IFF_BROADCAST ? RMR_BAE : 0)); 978 979 /* 980 * Set multicast filter. 981 */ 982 emac_set_filter(sc); 983 984 /* 985 * Set low- and urgent-priority request thresholds. 986 */ 987 EMAC_WRITE(sc, EMAC_TMR1, 988 ((7 << TMR1_TLR_SHIFT) & TMR1_TLR_MASK) | /* 16 word burst */ 989 ((15 << TMR1_TUR_SHIFT) & TMR1_TUR_MASK)); 990 /* 991 * Set Transmit Request Threshold Register. 992 */ 993 EMAC_WRITE(sc, EMAC_TRTR, TRTR_256); 994 995 /* 996 * Set high and low receive watermarks. 997 */ 998 EMAC_WRITE(sc, EMAC_RWMR, 999 30 << RWMR_RLWM_SHIFT | 64 << RWMR_RLWM_SHIFT); 1000 1001 /* 1002 * Set frame gap. 1003 */ 1004 EMAC_WRITE(sc, EMAC_IPGVR, 8); 1005 1006 /* 1007 * Set interrupt status enable bits for EMAC. 1008 */ 1009 EMAC_WRITE(sc, EMAC_ISER, 1010 ISR_TXPE | /* TX Parity Error */ 1011 ISR_RXPE | /* RX Parity Error */ 1012 ISR_TXUE | /* TX Underrun Event */ 1013 ISR_RXOE | /* RX Overrun Event */ 1014 ISR_OVR | /* Overrun Error */ 1015 ISR_PP | /* Pause Packet */ 1016 ISR_BP | /* Bad Packet */ 1017 ISR_RP | /* Runt Packet */ 1018 ISR_SE | /* Short Event */ 1019 ISR_ALE | /* Alignment Error */ 1020 ISR_BFCS | /* Bad FCS */ 1021 ISR_PTLE | /* Packet Too Long Error */ 1022 ISR_ORE | /* Out of Range Error */ 1023 ISR_IRE | /* In Range Error */ 1024 ISR_SE0 | /* Signal Quality Error 0 (SQE) */ 1025 ISR_TE0 | /* Transmit Error 0 */ 1026 ISR_MOS | /* MMA Operation Succeeded */ 1027 ISR_MOF); /* MMA Operation Failed */ 1028 1029 /* 1030 * Enable the transmit and receive channel on the EMAC. 1031 */ 1032 EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE); 1033 1034 /* 1035 * Start the one second MII clock. 1036 */ 1037 callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc); 1038 1039 /* 1040 * ... all done! 1041 */ 1042 ifp->if_flags |= IFF_RUNNING; 1043 1044 out: 1045 if (error) { 1046 ifp->if_flags &= ~IFF_RUNNING; 1047 ifp->if_timer = 0; 1048 aprint_error_ifnet(ifp, "interface not running\n"); 1049 } 1050 return error; 1051 } 1052 1053 static void 1054 emac_stop(struct ifnet *ifp, int disable) 1055 { 1056 struct emac_softc *sc = ifp->if_softc; 1057 struct emac_txsoft *txs; 1058 int i; 1059 1060 /* Stop the one second clock. */ 1061 callout_stop(&sc->sc_callout); 1062 1063 /* Down the MII */ 1064 mii_down(&sc->sc_mii); 1065 1066 /* Disable interrupts. */ 1067 EMAC_WRITE(sc, EMAC_ISER, 0); 1068 1069 /* Disable the receive and transmit channels. */ 1070 mal_stop(sc->sc_instance); 1071 1072 /* Disable the transmit enable and receive MACs. */ 1073 EMAC_WRITE(sc, EMAC_MR0, 1074 EMAC_READ(sc, EMAC_MR0) & ~(MR0_TXE | MR0_RXE)); 1075 1076 /* Release any queued transmit buffers. */ 1077 for (i = 0; i < EMAC_TXQUEUELEN; i++) { 1078 txs = &sc->sc_txsoft[i]; 1079 if (txs->txs_mbuf != NULL) { 1080 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1081 m_freem(txs->txs_mbuf); 1082 txs->txs_mbuf = NULL; 1083 } 1084 } 1085 1086 if (disable) 1087 emac_rxdrain(sc); 1088 1089 /* 1090 * Mark the interface down and cancel the watchdog timer. 1091 */ 1092 ifp->if_flags &= ~IFF_RUNNING; 1093 ifp->if_timer = 0; 1094 } 1095 1096 static void 1097 emac_watchdog(struct ifnet *ifp) 1098 { 1099 struct emac_softc *sc = ifp->if_softc; 1100 1101 /* 1102 * Since we're not interrupting every packet, sweep 1103 * up before we report an error. 1104 */ 1105 emac_txreap(sc); 1106 1107 if (sc->sc_txfree != EMAC_NTXDESC) { 1108 aprint_error_ifnet(ifp, 1109 "device timeout (txfree %d txsfree %d txnext %d)\n", 1110 sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext); 1111 if_statinc(ifp, if_oerrors); 1112 1113 /* Reset the interface. */ 1114 (void)emac_init(ifp); 1115 } else if (ifp->if_flags & IFF_DEBUG) 1116 aprint_error_ifnet(ifp, "recovered from device timeout\n"); 1117 1118 /* try to get more packets going */ 1119 emac_start(ifp); 1120 } 1121 1122 static int 1123 emac_add_rxbuf(struct emac_softc *sc, int idx) 1124 { 1125 struct emac_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1126 struct mbuf *m; 1127 int error; 1128 1129 MGETHDR(m, M_DONTWAIT, MT_DATA); 1130 if (m == NULL) 1131 return ENOBUFS; 1132 1133 MCLGET(m, M_DONTWAIT); 1134 if ((m->m_flags & M_EXT) == 0) { 1135 m_freem(m); 1136 return ENOBUFS; 1137 } 1138 1139 if (rxs->rxs_mbuf != NULL) 1140 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1141 1142 rxs->rxs_mbuf = m; 1143 1144 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, 1145 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT); 1146 if (error) { 1147 aprint_error_dev(sc->sc_dev, 1148 "can't load rx DMA map %d, error = %d\n", idx, error); 1149 panic("emac_add_rxbuf"); /* XXX */ 1150 } 1151 1152 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1153 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1154 1155 EMAC_INIT_RXDESC(sc, idx); 1156 1157 return 0; 1158 } 1159 1160 static void 1161 emac_rxdrain(struct emac_softc *sc) 1162 { 1163 struct emac_rxsoft *rxs; 1164 int i; 1165 1166 for (i = 0; i < EMAC_NRXDESC; i++) { 1167 rxs = &sc->sc_rxsoft[i]; 1168 if (rxs->rxs_mbuf != NULL) { 1169 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1170 m_freem(rxs->rxs_mbuf); 1171 rxs->rxs_mbuf = NULL; 1172 } 1173 } 1174 } 1175 1176 static int 1177 emac_set_filter(struct emac_softc *sc) 1178 { 1179 struct ethercom *ec = &sc->sc_ethercom; 1180 struct ether_multistep step; 1181 struct ether_multi *enm; 1182 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1183 uint32_t rmr, crc, mask, tmp, reg, gaht[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 1184 int regs, cnt = 0, i; 1185 1186 if (sc->sc_htsize == 256) { 1187 reg = EMAC_GAHT256(0); 1188 regs = 8; 1189 } else { 1190 reg = EMAC_GAHT64(0); 1191 regs = 4; 1192 } 1193 mask = (1ULL << (sc->sc_htsize / regs)) - 1; 1194 1195 rmr = EMAC_READ(sc, EMAC_RMR); 1196 rmr &= ~(RMR_PMME | RMR_MAE); 1197 ifp->if_flags &= ~IFF_ALLMULTI; 1198 1199 ETHER_LOCK(ec); 1200 ETHER_FIRST_MULTI(step, ec, enm); 1201 while (enm != NULL) { 1202 if (memcmp(enm->enm_addrlo, 1203 enm->enm_addrhi, ETHER_ADDR_LEN) != 0) { 1204 /* 1205 * We must listen to a range of multicast addresses. 1206 * For now, just accept all multicasts, rather than 1207 * trying to set only those filter bits needed to match 1208 * the range. (At this time, the only use of address 1209 * ranges is for IP multicast routing, for which the 1210 * range is big enough to require all bits set.) 1211 */ 1212 gaht[0] = gaht[1] = gaht[2] = gaht[3] = 1213 gaht[4] = gaht[5] = gaht[6] = gaht[7] = mask; 1214 break; 1215 } 1216 1217 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); 1218 1219 if (sc->sc_htsize == 256) 1220 EMAC_SET_FILTER256(gaht, crc); 1221 else 1222 EMAC_SET_FILTER(gaht, crc); 1223 1224 ETHER_NEXT_MULTI(step, enm); 1225 cnt++; 1226 } 1227 ETHER_UNLOCK(ec); 1228 1229 for (i = 1, tmp = gaht[0]; i < regs; i++) 1230 tmp &= gaht[i]; 1231 if (tmp == mask) { 1232 /* All categories are true. */ 1233 ifp->if_flags |= IFF_ALLMULTI; 1234 rmr |= RMR_PMME; 1235 } else if (cnt != 0) { 1236 /* Some categories are true. */ 1237 for (i = 0; i < regs; i++) 1238 EMAC_WRITE(sc, reg + (i << 2), gaht[i]); 1239 rmr |= RMR_MAE; 1240 } 1241 EMAC_WRITE(sc, EMAC_RMR, rmr); 1242 1243 return 0; 1244 } 1245 1246 /* 1247 * Reap completed Tx descriptors. 1248 */ 1249 static int 1250 emac_txreap(struct emac_softc *sc) 1251 { 1252 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1253 struct emac_txsoft *txs; 1254 int handled, i; 1255 uint32_t txstat, count; 1256 1257 EMAC_EVCNT_INCR(&sc->sc_ev_txreap); 1258 handled = 0; 1259 1260 count = 0; 1261 /* 1262 * Go through our Tx list and free mbufs for those 1263 * frames that have been transmitted. 1264 */ 1265 for (i = sc->sc_txsdirty; sc->sc_txsfree != EMAC_TXQUEUELEN; 1266 i = EMAC_NEXTTXS(i), sc->sc_txsfree++) { 1267 txs = &sc->sc_txsoft[i]; 1268 1269 EMAC_CDTXSYNC(sc, txs->txs_lastdesc, 1270 txs->txs_dmamap->dm_nsegs, 1271 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1272 1273 txstat = sc->sc_txdescs[txs->txs_lastdesc].md_stat_ctrl; 1274 if (txstat & MAL_TX_READY) 1275 break; 1276 1277 handled = 1; 1278 1279 /* 1280 * Check for errors and collisions. 1281 */ 1282 if (txstat & (EMAC_TXS_UR | EMAC_TXS_ED)) 1283 if_statinc(ifp, if_oerrors); 1284 1285 #ifdef EMAC_EVENT_COUNTERS 1286 if (txstat & EMAC_TXS_UR) 1287 EMAC_EVCNT_INCR(&sc->sc_ev_tu); 1288 #endif /* EMAC_EVENT_COUNTERS */ 1289 1290 if (txstat & 1291 (EMAC_TXS_EC | EMAC_TXS_MC | EMAC_TXS_SC | EMAC_TXS_LC)) { 1292 if (txstat & EMAC_TXS_EC) 1293 if_statadd(ifp, if_collisions, 16); 1294 else if (txstat & EMAC_TXS_MC) 1295 if_statadd(ifp, if_collisions, 2); /* XXX? */ 1296 else if (txstat & EMAC_TXS_SC) 1297 if_statinc(ifp, if_collisions); 1298 if (txstat & EMAC_TXS_LC) 1299 if_statinc(ifp, if_collisions); 1300 } else 1301 if_statinc(ifp, if_opackets); 1302 1303 if (ifp->if_flags & IFF_DEBUG) { 1304 if (txstat & EMAC_TXS_ED) 1305 aprint_error_ifnet(ifp, "excessive deferral\n"); 1306 if (txstat & EMAC_TXS_EC) 1307 aprint_error_ifnet(ifp, 1308 "excessive collisions\n"); 1309 } 1310 1311 sc->sc_txfree += txs->txs_ndesc; 1312 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 1313 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1314 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1315 m_freem(txs->txs_mbuf); 1316 txs->txs_mbuf = NULL; 1317 1318 count++; 1319 } 1320 1321 /* Update the dirty transmit buffer pointer. */ 1322 sc->sc_txsdirty = i; 1323 1324 /* 1325 * If there are no more pending transmissions, cancel the watchdog 1326 * timer. 1327 */ 1328 if (sc->sc_txsfree == EMAC_TXQUEUELEN) 1329 ifp->if_timer = 0; 1330 1331 if (count != 0) 1332 rnd_add_uint32(&sc->rnd_source, count); 1333 1334 return handled; 1335 } 1336 1337 1338 /* 1339 * Reset functions 1340 */ 1341 1342 static void 1343 emac_soft_reset(struct emac_softc *sc) 1344 { 1345 uint32_t sdr; 1346 int t = 0; 1347 1348 /* 1349 * The PHY must provide a TX Clk in order perform a soft reset the 1350 * EMAC. If none is present, select the internal clock, 1351 * SDR0_MFR[E0CS, E1CS]. After the soft reset, select the external 1352 * clock. 1353 */ 1354 1355 sdr = mfsdr(DCR_SDR0_MFR); 1356 sdr |= SDR0_MFR_ECS(sc->sc_instance); 1357 mtsdr(DCR_SDR0_MFR, sdr); 1358 1359 EMAC_WRITE(sc, EMAC_MR0, MR0_SRST); 1360 1361 sdr = mfsdr(DCR_SDR0_MFR); 1362 sdr &= ~SDR0_MFR_ECS(sc->sc_instance); 1363 mtsdr(DCR_SDR0_MFR, sdr); 1364 1365 delay(5); 1366 1367 /* wait finish */ 1368 while (EMAC_READ(sc, EMAC_MR0) & MR0_SRST) { 1369 if (++t == 1000000 /* 1sec XXXXX */) { 1370 aprint_error_dev(sc->sc_dev, "Soft Reset failed\n"); 1371 return; 1372 } 1373 delay(1); 1374 } 1375 } 1376 1377 static void 1378 emac_smart_reset(struct emac_softc *sc) 1379 { 1380 uint32_t mr0; 1381 int t = 0; 1382 1383 mr0 = EMAC_READ(sc, EMAC_MR0); 1384 if (mr0 & (MR0_TXE | MR0_RXE)) { 1385 mr0 &= ~(MR0_TXE | MR0_RXE); 1386 EMAC_WRITE(sc, EMAC_MR0, mr0); 1387 1388 /* wait idel state */ 1389 while ((EMAC_READ(sc, EMAC_MR0) & (MR0_TXI | MR0_RXI)) != 1390 (MR0_TXI | MR0_RXI)) { 1391 if (++t == 1000000 /* 1sec XXXXX */) { 1392 aprint_error_dev(sc->sc_dev, 1393 "Smart Reset failed\n"); 1394 return; 1395 } 1396 delay(1); 1397 } 1398 } 1399 } 1400 1401 1402 /* 1403 * MII related functions 1404 */ 1405 1406 static int 1407 emac_mii_readreg(device_t self, int phy, int reg, uint16_t *val) 1408 { 1409 struct emac_softc *sc = device_private(self); 1410 uint32_t sta_reg; 1411 int rv; 1412 1413 if (sc->sc_rmii_enable) 1414 sc->sc_rmii_enable(device_parent(self), sc->sc_instance); 1415 1416 /* wait for PHY data transfer to complete */ 1417 if ((rv = emac_mii_wait(sc)) != 0) 1418 goto fail; 1419 1420 sta_reg = 1421 sc->sc_stacr_read | 1422 (reg << STACR_PRA_SHIFT) | 1423 (phy << STACR_PCDA_SHIFT) | 1424 sc->sc_stacr_bits; 1425 EMAC_WRITE(sc, EMAC_STACR, sta_reg); 1426 1427 if ((rv = emac_mii_wait(sc)) != 0) 1428 goto fail; 1429 sta_reg = EMAC_READ(sc, EMAC_STACR); 1430 1431 if (sta_reg & STACR_PHYE) { 1432 rv = -1; 1433 goto fail; 1434 } 1435 *val = sta_reg >> STACR_PHYD_SHIFT; 1436 1437 fail: 1438 if (sc->sc_rmii_disable) 1439 sc->sc_rmii_disable(device_parent(self), sc->sc_instance); 1440 return rv; 1441 } 1442 1443 static int 1444 emac_mii_writereg(device_t self, int phy, int reg, uint16_t val) 1445 { 1446 struct emac_softc *sc = device_private(self); 1447 uint32_t sta_reg; 1448 int rv; 1449 1450 if (sc->sc_rmii_enable) 1451 sc->sc_rmii_enable(device_parent(self), sc->sc_instance); 1452 1453 /* wait for PHY data transfer to complete */ 1454 if ((rv = emac_mii_wait(sc)) != 0) 1455 goto out; 1456 1457 sta_reg = 1458 (val << STACR_PHYD_SHIFT) | 1459 sc->sc_stacr_write | 1460 (reg << STACR_PRA_SHIFT) | 1461 (phy << STACR_PCDA_SHIFT) | 1462 sc->sc_stacr_bits; 1463 EMAC_WRITE(sc, EMAC_STACR, sta_reg); 1464 1465 if ((rv = emac_mii_wait(sc)) != 0) 1466 goto out; 1467 if (EMAC_READ(sc, EMAC_STACR) & STACR_PHYE) { 1468 aprint_error_dev(sc->sc_dev, "MII PHY Error\n"); 1469 rv = -1; 1470 } 1471 1472 out: 1473 if (sc->sc_rmii_disable) 1474 sc->sc_rmii_disable(device_parent(self), sc->sc_instance); 1475 1476 return rv; 1477 } 1478 1479 static void 1480 emac_mii_statchg(struct ifnet *ifp) 1481 { 1482 struct emac_softc *sc = ifp->if_softc; 1483 struct mii_data *mii = &sc->sc_mii; 1484 1485 /* 1486 * MR1 can only be written immediately after a reset... 1487 */ 1488 emac_smart_reset(sc); 1489 1490 sc->sc_mr1 &= ~(MR1_FDE | MR1_ILE | MR1_EIFC | MR1_MF_MASK | MR1_IST); 1491 if (mii->mii_media_active & IFM_FDX) 1492 sc->sc_mr1 |= (MR1_FDE | MR1_EIFC | MR1_IST); 1493 if (mii->mii_media_active & IFM_FLOW) 1494 sc->sc_mr1 |= MR1_EIFC; 1495 if (mii->mii_media_active & IFM_LOOP) 1496 sc->sc_mr1 |= MR1_ILE; 1497 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1498 case IFM_1000_T: 1499 sc->sc_mr1 |= (MR1_MF_1000MBS | MR1_IST); 1500 break; 1501 1502 case IFM_100_TX: 1503 sc->sc_mr1 |= (MR1_MF_100MBS | MR1_IST); 1504 break; 1505 1506 case IFM_10_T: 1507 sc->sc_mr1 |= MR1_MF_10MBS; 1508 break; 1509 1510 case IFM_NONE: 1511 break; 1512 1513 default: 1514 aprint_error_dev(sc->sc_dev, "unknown sub-type %d\n", 1515 IFM_SUBTYPE(mii->mii_media_active)); 1516 break; 1517 } 1518 if (sc->sc_rmii_speed) 1519 sc->sc_rmii_speed(device_parent(sc->sc_dev), sc->sc_instance, 1520 IFM_SUBTYPE(mii->mii_media_active)); 1521 1522 EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1); 1523 1524 /* Enable TX and RX if already RUNNING */ 1525 if (ifp->if_flags & IFF_RUNNING) 1526 EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE); 1527 } 1528 1529 static uint32_t 1530 emac_mii_wait(struct emac_softc *sc) 1531 { 1532 int i; 1533 uint32_t oc; 1534 1535 /* wait for PHY data transfer to complete */ 1536 i = 0; 1537 oc = EMAC_READ(sc, EMAC_STACR) & STACR_OC; 1538 while ((oc == STACR_OC) != sc->sc_stacr_completed) { 1539 delay(7); 1540 if (i++ > 5) { 1541 aprint_error_dev(sc->sc_dev, "MII timed out\n"); 1542 return ETIMEDOUT; 1543 } 1544 oc = EMAC_READ(sc, EMAC_STACR) & STACR_OC; 1545 } 1546 return 0; 1547 } 1548 1549 static void 1550 emac_mii_tick(void *arg) 1551 { 1552 struct emac_softc *sc = arg; 1553 int s; 1554 1555 if (!device_is_active(sc->sc_dev)) 1556 return; 1557 1558 s = splnet(); 1559 mii_tick(&sc->sc_mii); 1560 splx(s); 1561 1562 callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc); 1563 } 1564 1565 int 1566 emac_txeob_intr(void *arg) 1567 { 1568 struct emac_softc *sc = arg; 1569 int handled = 0; 1570 1571 EMAC_EVCNT_INCR(&sc->sc_ev_txintr); 1572 handled |= emac_txreap(sc); 1573 1574 /* try to get more packets going */ 1575 if_schedule_deferred_start(&sc->sc_ethercom.ec_if); 1576 1577 return handled; 1578 } 1579 1580 int 1581 emac_rxeob_intr(void *arg) 1582 { 1583 struct emac_softc *sc = arg; 1584 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1585 struct emac_rxsoft *rxs; 1586 struct mbuf *m; 1587 uint32_t rxstat, count; 1588 int i, len; 1589 1590 EMAC_EVCNT_INCR(&sc->sc_ev_rxintr); 1591 1592 count = 0; 1593 for (i = sc->sc_rxptr; ; i = EMAC_NEXTRX(i)) { 1594 rxs = &sc->sc_rxsoft[i]; 1595 1596 EMAC_CDRXSYNC(sc, i, 1597 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1598 1599 rxstat = sc->sc_rxdescs[i].md_stat_ctrl; 1600 1601 if (rxstat & MAL_RX_EMPTY) { 1602 /* 1603 * We have processed all of the receive buffers. 1604 */ 1605 /* Flush current empty descriptor */ 1606 EMAC_CDRXSYNC(sc, i, 1607 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1608 break; 1609 } 1610 1611 /* 1612 * If an error occurred, update stats, clear the status 1613 * word, and leave the packet buffer in place. It will 1614 * simply be reused the next time the ring comes around. 1615 */ 1616 if (rxstat & (EMAC_RXS_OE | EMAC_RXS_BP | EMAC_RXS_SE | 1617 EMAC_RXS_AE | EMAC_RXS_BFCS | EMAC_RXS_PTL | EMAC_RXS_ORE | 1618 EMAC_RXS_IRE)) { 1619 #define PRINTERR(bit, str) \ 1620 if (rxstat & (bit)) \ 1621 aprint_error_ifnet(ifp, \ 1622 "receive error: %s\n", str) 1623 if_statinc(ifp, if_ierrors); 1624 PRINTERR(EMAC_RXS_OE, "overrun error"); 1625 PRINTERR(EMAC_RXS_BP, "bad packet"); 1626 PRINTERR(EMAC_RXS_RP, "runt packet"); 1627 PRINTERR(EMAC_RXS_SE, "short event"); 1628 PRINTERR(EMAC_RXS_AE, "alignment error"); 1629 PRINTERR(EMAC_RXS_BFCS, "bad FCS"); 1630 PRINTERR(EMAC_RXS_PTL, "packet too long"); 1631 PRINTERR(EMAC_RXS_ORE, "out of range error"); 1632 PRINTERR(EMAC_RXS_IRE, "in range error"); 1633 #undef PRINTERR 1634 EMAC_INIT_RXDESC(sc, i); 1635 continue; 1636 } 1637 1638 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1639 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1640 1641 /* 1642 * No errors; receive the packet. Note, the 405GP emac 1643 * includes the CRC with every packet. 1644 */ 1645 len = sc->sc_rxdescs[i].md_data_len - ETHER_CRC_LEN; 1646 1647 /* 1648 * If the packet is small enough to fit in a 1649 * single header mbuf, allocate one and copy 1650 * the data into it. This greatly reduces 1651 * memory consumption when we receive lots 1652 * of small packets. 1653 * 1654 * Otherwise, we add a new buffer to the receive 1655 * chain. If this fails, we drop the packet and 1656 * recycle the old buffer. 1657 */ 1658 if (emac_copy_small != 0 && len <= MHLEN) { 1659 MGETHDR(m, M_DONTWAIT, MT_DATA); 1660 if (m == NULL) 1661 goto dropit; 1662 memcpy(mtod(m, void *), 1663 mtod(rxs->rxs_mbuf, void *), len); 1664 EMAC_INIT_RXDESC(sc, i); 1665 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1666 rxs->rxs_dmamap->dm_mapsize, 1667 BUS_DMASYNC_PREREAD); 1668 } else { 1669 m = rxs->rxs_mbuf; 1670 if (emac_add_rxbuf(sc, i) != 0) { 1671 dropit: 1672 if_statinc(ifp, if_ierrors); 1673 EMAC_INIT_RXDESC(sc, i); 1674 bus_dmamap_sync(sc->sc_dmat, 1675 rxs->rxs_dmamap, 0, 1676 rxs->rxs_dmamap->dm_mapsize, 1677 BUS_DMASYNC_PREREAD); 1678 continue; 1679 } 1680 } 1681 1682 m_set_rcvif(m, ifp); 1683 m->m_pkthdr.len = m->m_len = len; 1684 1685 /* Pass it on. */ 1686 if_percpuq_enqueue(ifp->if_percpuq, m); 1687 1688 count++; 1689 } 1690 1691 /* Update the receive pointer. */ 1692 sc->sc_rxptr = i; 1693 1694 if (count != 0) 1695 rnd_add_uint32(&sc->rnd_source, count); 1696 1697 return 1; 1698 } 1699 1700 int 1701 emac_txde_intr(void *arg) 1702 { 1703 struct emac_softc *sc = arg; 1704 1705 EMAC_EVCNT_INCR(&sc->sc_ev_txde); 1706 aprint_error_dev(sc->sc_dev, "emac_txde_intr\n"); 1707 return 1; 1708 } 1709 1710 int 1711 emac_rxde_intr(void *arg) 1712 { 1713 struct emac_softc *sc = arg; 1714 int i; 1715 1716 EMAC_EVCNT_INCR(&sc->sc_ev_rxde); 1717 aprint_error_dev(sc->sc_dev, "emac_rxde_intr\n"); 1718 /* 1719 * XXX! 1720 * This is a bit drastic; we just drop all descriptors that aren't 1721 * "clean". We should probably send any that are up the stack. 1722 */ 1723 for (i = 0; i < EMAC_NRXDESC; i++) { 1724 EMAC_CDRXSYNC(sc, i, 1725 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1726 1727 if (sc->sc_rxdescs[i].md_data_len != MCLBYTES) 1728 EMAC_INIT_RXDESC(sc, i); 1729 } 1730 1731 return 1; 1732 } 1733