/* $NetBSD: if_admsw.c,v 1.32 2025/10/15 01:31:27 thorpej Exp $ */

/*-
 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for Alchemy Semiconductor Au1x00 Ethernet Media
 * Access Controller.
 *
 * TODO:
 *
 *	Better Rx buffer management; we want to get new Rx buffers
 *	to the chip more quickly than we currently do.
76 */ 77 78 #include <sys/cdefs.h> 79 __KERNEL_RCSID(0, "$NetBSD: if_admsw.c,v 1.32 2025/10/15 01:31:27 thorpej Exp $"); 80 81 82 #include <sys/param.h> 83 #include <sys/bus.h> 84 #include <sys/callout.h> 85 #include <sys/device.h> 86 #include <sys/endian.h> 87 #include <sys/errno.h> 88 #include <sys/intr.h> 89 #include <sys/ioctl.h> 90 #include <sys/kernel.h> 91 #include <sys/mbuf.h> 92 #include <sys/socket.h> 93 #include <sys/systm.h> 94 95 #include <prop/proplib.h> 96 97 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */ 98 99 #include <net/if.h> 100 #include <net/if_dl.h> 101 #include <net/if_media.h> 102 #include <net/if_ether.h> 103 #include <net/bpf.h> 104 105 #include <sys/gpio.h> 106 #include <dev/gpio/gpiovar.h> 107 108 #include <mips/adm5120/include/adm5120reg.h> 109 #include <mips/adm5120/include/adm5120var.h> 110 #include <mips/adm5120/include/adm5120_obiovar.h> 111 #include <mips/adm5120/dev/if_admswreg.h> 112 #include <mips/adm5120/dev/if_admswvar.h> 113 114 static uint8_t vlan_matrix[SW_DEVS] = { 115 (1 << 6) | (1 << 0), /* CPU + port0 */ 116 (1 << 6) | (1 << 1), /* CPU + port1 */ 117 (1 << 6) | (1 << 2), /* CPU + port2 */ 118 (1 << 6) | (1 << 3), /* CPU + port3 */ 119 (1 << 6) | (1 << 4), /* CPU + port4 */ 120 (1 << 6) | (1 << 5), /* CPU + port5 */ 121 }; 122 123 #ifdef ADMSW_EVENT_COUNTERS 124 #define ADMSW_EVCNT_INCR(ev) (ev)->ev_count++ 125 #else 126 #define ADMSW_EVCNT_INCR(ev) /* nothing */ 127 #endif 128 129 static void admsw_start(struct ifnet *); 130 static void admsw_watchdog(struct ifnet *); 131 static int admsw_ioctl(struct ifnet *, u_long, void *); 132 static int admsw_init(struct ifnet *); 133 static void admsw_stop(struct ifnet *, int); 134 135 static void admsw_shutdown(void *); 136 137 static void admsw_reset(struct admsw_softc *); 138 static void admsw_set_filter(struct admsw_softc *); 139 140 static int admsw_intr(void *); 141 static void admsw_txintr(struct admsw_softc *, int); 142 static void admsw_rxintr(struct admsw_softc *, 
int); 143 static int admsw_add_rxbuf(struct admsw_softc *, int, int); 144 #define admsw_add_rxhbuf(sc, idx) admsw_add_rxbuf(sc, idx, 1) 145 #define admsw_add_rxlbuf(sc, idx) admsw_add_rxbuf(sc, idx, 0) 146 147 static int admsw_mediachange(struct ifnet *); 148 static void admsw_mediastatus(struct ifnet *, struct ifmediareq *); 149 150 static int admsw_match(device_t, cfdata_t, void *); 151 static void admsw_attach(device_t, device_t, void *); 152 153 CFATTACH_DECL_NEW(admsw, sizeof(struct admsw_softc), 154 admsw_match, admsw_attach, NULL, NULL); 155 156 static int 157 admsw_match(device_t parent, cfdata_t cf, void *aux) 158 { 159 struct obio_attach_args *aa = aux; 160 161 return strcmp(aa->oba_name, cf->cf_name) == 0; 162 } 163 164 #define REG_READ(o) bus_space_read_4(sc->sc_st, sc->sc_ioh, (o)) 165 #define REG_WRITE(o, v) bus_space_write_4(sc->sc_st, sc->sc_ioh, (o),(v)) 166 167 168 static void 169 admsw_init_bufs(struct admsw_softc *sc) 170 { 171 int i; 172 struct admsw_desc *desc; 173 174 for (i = 0; i < ADMSW_NTXHDESC; i++) { 175 if (sc->sc_txhsoft[i].ds_mbuf != NULL) { 176 m_freem(sc->sc_txhsoft[i].ds_mbuf); 177 sc->sc_txhsoft[i].ds_mbuf = NULL; 178 } 179 desc = &sc->sc_txhdescs[i]; 180 desc->data = 0; 181 desc->cntl = 0; 182 desc->len = MAC_BUFLEN; 183 desc->status = 0; 184 ADMSW_CDTXHSYNC(sc, i, 185 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 186 } 187 sc->sc_txhdescs[ADMSW_NTXHDESC - 1].data |= ADM5120_DMA_RINGEND; 188 ADMSW_CDTXHSYNC(sc, ADMSW_NTXHDESC - 1, 189 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 190 191 for (i = 0; i < ADMSW_NRXHDESC; i++) { 192 if (sc->sc_rxhsoft[i].ds_mbuf == NULL) { 193 if (admsw_add_rxhbuf(sc, i) != 0) 194 panic("admsw_init_bufs\n"); 195 } else 196 ADMSW_INIT_RXHDESC(sc, i); 197 } 198 199 for (i = 0; i < ADMSW_NTXLDESC; i++) { 200 if (sc->sc_txlsoft[i].ds_mbuf != NULL) { 201 m_freem(sc->sc_txlsoft[i].ds_mbuf); 202 sc->sc_txlsoft[i].ds_mbuf = NULL; 203 } 204 desc = &sc->sc_txldescs[i]; 205 desc->data = 0; 206 desc->cntl = 0; 
207 desc->len = MAC_BUFLEN; 208 desc->status = 0; 209 ADMSW_CDTXLSYNC(sc, i, 210 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 211 } 212 sc->sc_txldescs[ADMSW_NTXLDESC - 1].data |= ADM5120_DMA_RINGEND; 213 ADMSW_CDTXLSYNC(sc, ADMSW_NTXLDESC - 1, 214 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 215 216 for (i = 0; i < ADMSW_NRXLDESC; i++) { 217 if (sc->sc_rxlsoft[i].ds_mbuf == NULL) { 218 if (admsw_add_rxlbuf(sc, i) != 0) 219 panic("admsw_init_bufs\n"); 220 } else 221 ADMSW_INIT_RXLDESC(sc, i); 222 } 223 224 REG_WRITE(SEND_HBADDR_REG, ADMSW_CDTXHADDR(sc, 0)); 225 REG_WRITE(SEND_LBADDR_REG, ADMSW_CDTXLADDR(sc, 0)); 226 REG_WRITE(RECV_HBADDR_REG, ADMSW_CDRXHADDR(sc, 0)); 227 REG_WRITE(RECV_LBADDR_REG, ADMSW_CDRXLADDR(sc, 0)); 228 229 sc->sc_txfree = ADMSW_NTXLDESC; 230 sc->sc_txnext = 0; 231 sc->sc_txdirty = 0; 232 sc->sc_rxptr = 0; 233 } 234 235 static void 236 admsw_setvlan(struct admsw_softc *sc, char matrix[6]) 237 { 238 uint32_t i; 239 240 i = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16) 241 + (matrix[3] << 24); 242 REG_WRITE(VLAN_G1_REG, i); 243 i = matrix[4] + (matrix[5] << 8); 244 REG_WRITE(VLAN_G2_REG, i); 245 } 246 247 static void 248 admsw_reset(struct admsw_softc *sc) 249 { 250 uint32_t wdog1; 251 int i; 252 253 REG_WRITE(PORT_CONF0_REG, 254 REG_READ(PORT_CONF0_REG) | PORT_CONF0_DP_MASK); 255 REG_WRITE(CPUP_CONF_REG, 256 REG_READ(CPUP_CONF_REG) | CPUP_CONF_DCPUP); 257 258 /* Wait for DMA to complete. Overkill. In 3ms, we can 259 * send at least two entire 1500-byte packets at 10 Mb/s. 260 */ 261 DELAY(3000); 262 263 /* The datasheet recommends that we move all PHYs to reset 264 * state prior to software reset. 265 */ 266 REG_WRITE(PHY_CNTL2_REG, 267 REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK); 268 269 /* Reset the switch. 
*/ 270 REG_WRITE(ADMSW_SW_RES, 0x1); 271 272 DELAY(100 * 1000); 273 274 REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO); 275 276 /* begin old code */ 277 REG_WRITE(CPUP_CONF_REG, 278 CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK | 279 CPUP_CONF_DMCP_MASK); 280 281 REG_WRITE(PORT_CONF0_REG, PORT_CONF0_EMCP_MASK | PORT_CONF0_EMBP_MASK); 282 283 REG_WRITE(PHY_CNTL2_REG, 284 REG_READ(PHY_CNTL2_REG) | PHY_CNTL2_ANE_MASK | 285 PHY_CNTL2_PHYR_MASK | PHY_CNTL2_AMDIX_MASK); 286 287 REG_WRITE(PHY_CNTL3_REG, REG_READ(PHY_CNTL3_REG) | PHY_CNTL3_RNT); 288 289 REG_WRITE(ADMSW_INT_MASK, INT_MASK); 290 REG_WRITE(ADMSW_INT_ST, INT_MASK); 291 292 /* 293 * While in DDB, we stop servicing interrupts, RX ring 294 * fills up and when free block counter falls behind FC 295 * threshold, the switch starts to emit 802.3x PAUSE 296 * frames. This can upset peer switches. 297 * 298 * Stop this from happening by disabling FC and D2 299 * thresholds. 300 */ 301 REG_WRITE(FC_TH_REG, 302 REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK)); 303 304 admsw_setvlan(sc, vlan_matrix); 305 306 for (i = 0; i < SW_DEVS; i++) { 307 REG_WRITE(MAC_WT1_REG, 308 sc->sc_enaddr[2] | 309 (sc->sc_enaddr[3]<<8) | 310 (sc->sc_enaddr[4]<<16) | 311 ((sc->sc_enaddr[5]+i)<<24)); 312 REG_WRITE(MAC_WT0_REG, (i<<MAC_WT0_VLANID_SHIFT) | 313 (sc->sc_enaddr[0]<<16) | (sc->sc_enaddr[1]<<24) | 314 MAC_WT0_WRITE | MAC_WT0_VLANID_EN); 315 316 while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE)) 317 ; 318 } 319 wdog1 = REG_READ(ADM5120_WDOG1); 320 REG_WRITE(ADM5120_WDOG1, wdog1 & ~ADM5120_WDOG1_WDE); 321 } 322 323 static void 324 admsw_attach(device_t parent, device_t self, void *aux) 325 { 326 struct admsw_softc *sc = device_private(self); 327 struct obio_attach_args *aa = aux; 328 struct ifnet *ifp; 329 bus_dma_segment_t seg; 330 int error, i, rseg; 331 uint8_t enaddr[ETHER_ADDR_LEN]; 332 333 printf(": ADM5120 Switch Engine, %d ports\n", SW_DEVS); 334 335 sc->sc_dev = self; 336 sc->sc_dmat = aa->oba_dt; 337 
sc->sc_st = aa->oba_st; 338 339 if (! ether_getaddr(self, enaddr)) { 340 enaddr[0] = 0x02; 341 enaddr[1] = 0xaa; 342 enaddr[2] = 0xbb; 343 enaddr[3] = 0xcc; 344 enaddr[4] = 0xdd; 345 enaddr[5] = 0xee; 346 } 347 memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr)); 348 349 printf("%s: base Ethernet address %s\n", device_xname(sc->sc_dev), 350 ether_sprintf(enaddr)); 351 352 /* Map the device. */ 353 if (bus_space_map(sc->sc_st, aa->oba_addr, 512, 0, &sc->sc_ioh) != 0) { 354 printf("%s: unable to map device\n", device_xname(sc->sc_dev)); 355 return; 356 } 357 358 /* Hook up the interrupt handler. */ 359 sc->sc_ih = adm5120_intr_establish(aa->oba_irq, INTR_IRQ, admsw_intr, sc); 360 361 if (sc->sc_ih == NULL) { 362 printf("%s: unable to register interrupt handler\n", 363 device_xname(sc->sc_dev)); 364 return; 365 } 366 367 /* 368 * Allocate the control data structures, and create and load the 369 * DMA map for it. 370 */ 371 if ((error = bus_dmamem_alloc(sc->sc_dmat, 372 sizeof(struct admsw_control_data), PAGE_SIZE, 0, &seg, 1, &rseg, 373 0)) != 0) { 374 printf("%s: unable to allocate control data, error = %d\n", 375 device_xname(sc->sc_dev), error); 376 return; 377 } 378 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 379 sizeof(struct admsw_control_data), (void *)&sc->sc_control_data, 380 0)) != 0) { 381 printf("%s: unable to map control data, error = %d\n", 382 device_xname(sc->sc_dev), error); 383 return; 384 } 385 if ((error = bus_dmamap_create(sc->sc_dmat, 386 sizeof(struct admsw_control_data), 1, 387 sizeof(struct admsw_control_data), 0, 0, &sc->sc_cddmamap)) != 0) { 388 printf("%s: unable to create control data DMA map, " 389 "error = %d\n", device_xname(sc->sc_dev), error); 390 return; 391 } 392 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 393 sc->sc_control_data, sizeof(struct admsw_control_data), NULL, 394 0)) != 0) { 395 printf("%s: unable to load control data DMA map, error = %d\n", 396 device_xname(sc->sc_dev), error); 397 return; 398 } 
399 400 /* 401 * Create the transmit buffer DMA maps. 402 */ 403 for (i = 0; i < ADMSW_NTXHDESC; i++) { 404 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 405 2, MCLBYTES, 0, 0, 406 &sc->sc_txhsoft[i].ds_dmamap)) != 0) { 407 printf("%s: unable to create txh DMA map %d, " 408 "error = %d\n", device_xname(sc->sc_dev), i, error); 409 return; 410 } 411 sc->sc_txhsoft[i].ds_mbuf = NULL; 412 } 413 for (i = 0; i < ADMSW_NTXLDESC; i++) { 414 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 415 2, MCLBYTES, 0, 0, 416 &sc->sc_txlsoft[i].ds_dmamap)) != 0) { 417 printf("%s: unable to create txl DMA map %d, " 418 "error = %d\n", device_xname(sc->sc_dev), i, error); 419 return; 420 } 421 sc->sc_txlsoft[i].ds_mbuf = NULL; 422 } 423 424 /* 425 * Create the receive buffer DMA maps. 426 */ 427 for (i = 0; i < ADMSW_NRXHDESC; i++) { 428 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 429 MCLBYTES, 0, 0, &sc->sc_rxhsoft[i].ds_dmamap)) != 0) { 430 printf("%s: unable to create rxh DMA map %d, " 431 "error = %d\n", device_xname(sc->sc_dev), i, error); 432 return; 433 } 434 sc->sc_rxhsoft[i].ds_mbuf = NULL; 435 } 436 for (i = 0; i < ADMSW_NRXLDESC; i++) { 437 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 438 MCLBYTES, 0, 0, &sc->sc_rxlsoft[i].ds_dmamap)) != 0) { 439 printf("%s: unable to create rxl DMA map %d, " 440 "error = %d\n", device_xname(sc->sc_dev), i, error); 441 return; 442 } 443 sc->sc_rxlsoft[i].ds_mbuf = NULL; 444 } 445 446 admsw_init_bufs(sc); 447 448 admsw_reset(sc); 449 450 for (i = 0; i < SW_DEVS; i++) { 451 sc->sc_ethercom[i].ec_ifmedia = &sc->sc_ifmedia[i]; 452 ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange, admsw_mediastatus); 453 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL); 454 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 455 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL); 456 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); 457 
ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL); 458 ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO); 459 460 ifp = &sc->sc_ethercom[i].ec_if; 461 strcpy(ifp->if_xname, device_xname(sc->sc_dev)); 462 ifp->if_xname[5] += i; 463 ifp->if_softc = sc; 464 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 465 ifp->if_ioctl = admsw_ioctl; 466 ifp->if_start = admsw_start; 467 ifp->if_watchdog = admsw_watchdog; 468 ifp->if_init = admsw_init; 469 ifp->if_stop = admsw_stop; 470 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx; 471 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(ADMSW_NTXLDESC, IFQ_MAXLEN)); 472 IFQ_SET_READY(&ifp->if_snd); 473 474 /* Attach the interface. */ 475 if_attach(ifp); 476 if_deferred_start_init(ifp, NULL); 477 ether_ifattach(ifp, enaddr); 478 enaddr[5]++; 479 } 480 481 #ifdef ADMSW_EVENT_COUNTERS 482 evcnt_attach_dynamic(&sc->sc_ev_txstall, EVCNT_TYPE_MISC, 483 NULL, device_xname(sc->sc_dev), "txstall"); 484 evcnt_attach_dynamic(&sc->sc_ev_rxstall, EVCNT_TYPE_MISC, 485 NULL, device_xname(sc->sc_dev), "rxstall"); 486 evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_MISC, 487 NULL, device_xname(sc->sc_dev), "txintr"); 488 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_MISC, 489 NULL, device_xname(sc->sc_dev), "rxintr"); 490 #if 1 491 evcnt_attach_dynamic(&sc->sc_ev_rxsync, EVCNT_TYPE_MISC, 492 NULL, device_xname(sc->sc_dev), "rxsync"); 493 #endif 494 #endif 495 496 admwdog_attach(sc); 497 498 /* Make sure the interface is shutdown during reboot. */ 499 sc->sc_sdhook = shutdownhook_establish(admsw_shutdown, sc); 500 if (sc->sc_sdhook == NULL) 501 printf("%s: WARNING: unable to establish shutdown hook\n", 502 device_xname(sc->sc_dev)); 503 504 /* leave interrupts and cpu port disabled */ 505 return; 506 } 507 508 509 /* 510 * admsw_shutdown: 511 * 512 * Make sure the interface is stopped at reboot time. 
513 */ 514 static void 515 admsw_shutdown(void *arg) 516 { 517 struct admsw_softc *sc = arg; 518 int i; 519 520 for (i = 0; i < SW_DEVS; i++) 521 admsw_stop(&sc->sc_ethercom[i].ec_if, 1); 522 } 523 524 /* 525 * admsw_start: [ifnet interface function] 526 * 527 * Start packet transmission on the interface. 528 */ 529 static void 530 admsw_start(struct ifnet *ifp) 531 { 532 struct admsw_softc *sc = ifp->if_softc; 533 struct mbuf *m0, *m; 534 struct admsw_descsoft *ds; 535 struct admsw_desc *desc; 536 bus_dmamap_t dmamap; 537 struct ether_header *eh; 538 int error, nexttx, len, i; 539 static int vlan = 0; 540 541 /* 542 * Loop through the send queues, setting up transmit descriptors 543 * unitl we drain the queues, or use up all available transmit 544 * descriptors. 545 */ 546 for (;;) { 547 vlan++; 548 if (vlan == SW_DEVS) 549 vlan = 0; 550 i = vlan; 551 for (;;) { 552 ifp = &sc->sc_ethercom[i].ec_if; 553 if ((ifp->if_flags & IFF_RUNNING) == 0) 554 continue; 555 /* Grab a packet off the queue. */ 556 IFQ_POLL(&ifp->if_snd, m0); 557 if (m0 != NULL) 558 break; 559 i++; 560 if (i == SW_DEVS) 561 i = 0; 562 if (i == vlan) 563 return; 564 } 565 vlan = i; 566 m = NULL; 567 568 /* Get a spare descriptor. */ 569 if (sc->sc_txfree == 0) { 570 /* No more slots left. */ 571 ADMSW_EVCNT_INCR(&sc->sc_ev_txstall); 572 break; 573 } 574 nexttx = sc->sc_txnext; 575 desc = &sc->sc_txldescs[nexttx]; 576 ds = &sc->sc_txlsoft[nexttx]; 577 dmamap = ds->ds_dmamap; 578 579 /* 580 * Load the DMA map. If this fails, the packet either 581 * didn't fit in the allotted number of segments, or we 582 * were short on resources. In this case, we'll copy 583 * and try again. 
584 */ 585 if (m0->m_pkthdr.len < ETHER_MIN_LEN || 586 bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 587 BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) { 588 MGETHDR(m, M_DONTWAIT, MT_DATA); 589 if (m == NULL) { 590 printf("%s: unable to allocate Tx mbuf\n", 591 device_xname(sc->sc_dev)); 592 break; 593 } 594 if (m0->m_pkthdr.len > MHLEN) { 595 MCLGET(m, M_DONTWAIT); 596 if ((m->m_flags & M_EXT) == 0) { 597 printf("%s: unable to allocate Tx " 598 "cluster\n", device_xname(sc->sc_dev)); 599 m_freem(m); 600 break; 601 } 602 } 603 m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags; 604 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); 605 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; 606 if (m->m_pkthdr.len < ETHER_MIN_LEN) { 607 if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len) 608 panic("admsw_start: M_TRAILINGSPACE\n"); 609 memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0, 610 ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len); 611 m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN; 612 } 613 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, 614 m, BUS_DMA_WRITE | BUS_DMA_NOWAIT); 615 if (error) { 616 printf("%s: unable to load Tx buffer, error = " 617 "%d\n", device_xname(sc->sc_dev), error); 618 break; 619 } 620 } 621 622 IFQ_DEQUEUE(&ifp->if_snd, m0); 623 if (m != NULL) { 624 m_freem(m0); 625 m0 = m; 626 } 627 628 /* 629 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 630 */ 631 632 /* Sync the DMA map. 
*/ 633 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 634 BUS_DMASYNC_PREWRITE); 635 636 if (dmamap->dm_nsegs != 1 && dmamap->dm_nsegs != 2) 637 panic("admsw_start: dm_nsegs == %d\n", 638 dmamap->dm_nsegs); 639 desc->data = dmamap->dm_segs[0].ds_addr; 640 desc->len = len = dmamap->dm_segs[0].ds_len; 641 if (dmamap->dm_nsegs > 1) { 642 len += dmamap->dm_segs[1].ds_len; 643 desc->cntl = dmamap->dm_segs[1].ds_addr 644 | ADM5120_DMA_BUF2ENABLE; 645 } else 646 desc->cntl = 0; 647 desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan); 648 eh = mtod(m0, struct ether_header *); 649 if (ntohs(eh->ether_type) == ETHERTYPE_IP && 650 m0->m_pkthdr.csum_flags & M_CSUM_IPv4) 651 desc->status |= ADM5120_DMA_CSUM; 652 if (nexttx == ADMSW_NTXLDESC - 1) 653 desc->data |= ADM5120_DMA_RINGEND; 654 desc->data |= ADM5120_DMA_OWN; 655 656 /* Sync the descriptor. */ 657 ADMSW_CDTXLSYNC(sc, nexttx, 658 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 659 660 REG_WRITE(SEND_TRIG_REG, 1); 661 /* printf("send slot %d\n", nexttx); */ 662 663 /* 664 * Store a pointer to the packet so we can free it later. 665 */ 666 ds->ds_mbuf = m0; 667 668 /* Advance the Tx pointer. */ 669 sc->sc_txfree--; 670 sc->sc_txnext = ADMSW_NEXTTXL(nexttx); 671 672 /* Pass the packet to any BPF listeners. */ 673 bpf_mtap(ifp, m0, BPF_D_OUT); 674 675 /* Set a watchdog timer in case the chip flakes out. */ 676 sc->sc_ethercom[0].ec_if.if_timer = 5; 677 } 678 } 679 680 /* 681 * admsw_watchdog: [ifnet interface function] 682 * 683 * Watchdog timer handler. 684 */ 685 static void 686 admsw_watchdog(struct ifnet *ifp) 687 { 688 struct admsw_softc *sc = ifp->if_softc; 689 int vlan; 690 691 #if 1 692 /* Check if an interrupt was lost. 
*/ 693 if (sc->sc_txfree == ADMSW_NTXLDESC) { 694 printf("%s: watchdog false alarm\n", device_xname(sc->sc_dev)); 695 return; 696 } 697 if (sc->sc_ethercom[0].ec_if.if_timer != 0) 698 printf("%s: watchdog timer is %d!\n", device_xname(sc->sc_dev), 699 sc->sc_ethercom[0].ec_if.if_timer); 700 admsw_txintr(sc, 0); 701 if (sc->sc_txfree == ADMSW_NTXLDESC) { 702 printf("%s: tx IRQ lost (queue empty)\n", 703 device_xname(sc->sc_dev)); 704 return; 705 } 706 if (sc->sc_ethercom[0].ec_if.if_timer != 0) { 707 printf("%s: tx IRQ lost (timer recharged)\n", 708 device_xname(sc->sc_dev)); 709 return; 710 } 711 #endif 712 713 printf("%s: device timeout, txfree = %d\n", 714 device_xname(sc->sc_dev), sc->sc_txfree); 715 for (vlan = 0; vlan < SW_DEVS; vlan++) 716 admsw_stop(&sc->sc_ethercom[vlan].ec_if, 0); 717 for (vlan = 0; vlan < SW_DEVS; vlan++) 718 (void)admsw_init(&sc->sc_ethercom[vlan].ec_if); 719 720 /* Try to get more packets going. */ 721 admsw_start(ifp); 722 } 723 724 /* 725 * admsw_ioctl: [ifnet interface function] 726 * 727 * Handle control requests from the operator. 
728 */ 729 static int 730 admsw_ioctl(struct ifnet *ifp, u_long cmd, void *data) 731 { 732 struct admsw_softc *sc = ifp->if_softc; 733 struct ifdrv *ifd; 734 int s, error, port; 735 736 port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 737 if (port >= SW_DEVS) 738 return EOPNOTSUPP; 739 740 s = splnet(); 741 742 switch (cmd) { 743 case SIOCSIFCAP: 744 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) 745 error = 0; 746 break; 747 case SIOCGDRVSPEC: 748 case SIOCSDRVSPEC: 749 ifd = (struct ifdrv *) data; 750 if (ifd->ifd_cmd != 0 || ifd->ifd_len != sizeof(vlan_matrix)) { 751 error = EINVAL; 752 break; 753 } 754 if (cmd == SIOCGDRVSPEC) { 755 error = copyout(vlan_matrix, ifd->ifd_data, 756 sizeof(vlan_matrix)); 757 } else { 758 error = copyin(ifd->ifd_data, vlan_matrix, 759 sizeof(vlan_matrix)); 760 admsw_setvlan(sc, vlan_matrix); 761 } 762 break; 763 764 default: 765 error = ether_ioctl(ifp, cmd, data); 766 if (error == ENETRESET) { 767 /* 768 * Multicast list has changed; set the hardware filter 769 * accordingly. 770 */ 771 admsw_set_filter(sc); 772 error = 0; 773 } 774 break; 775 } 776 777 /* Try to get more packets going. */ 778 admsw_start(ifp); 779 780 splx(s); 781 return error; 782 } 783 784 785 /* 786 * admsw_intr: 787 * 788 * Interrupt service routine. 
789 */ 790 static int 791 admsw_intr(void *arg) 792 { 793 struct admsw_softc *sc = arg; 794 uint32_t pending; 795 char buf[64]; 796 797 pending = REG_READ(ADMSW_INT_ST); 798 799 if ((pending & ~(ADMSW_INTR_RHD | ADMSW_INTR_RLD | ADMSW_INTR_SHD | 800 ADMSW_INTR_SLD | ADMSW_INTR_W1TE | ADMSW_INTR_W0TE)) != 0) { 801 snprintb(buf, sizeof(buf), ADMSW_INT_FMT, pending); 802 printf("%s: pending=%s\n", __func__, buf); 803 } 804 REG_WRITE(ADMSW_INT_ST, pending); 805 806 if (sc->ndevs == 0) 807 return 0; 808 809 if ((pending & ADMSW_INTR_RHD) != 0) 810 admsw_rxintr(sc, 1); 811 812 if ((pending & ADMSW_INTR_RLD) != 0) 813 admsw_rxintr(sc, 0); 814 815 if ((pending & ADMSW_INTR_SHD) != 0) 816 admsw_txintr(sc, 1); 817 818 if ((pending & ADMSW_INTR_SLD) != 0) 819 admsw_txintr(sc, 0); 820 821 return 1; 822 } 823 824 /* 825 * admsw_txintr: 826 * 827 * Helper; handle transmit interrupts. 828 */ 829 static void 830 admsw_txintr(struct admsw_softc *sc, int prio) 831 { 832 struct ifnet *ifp; 833 struct admsw_desc *desc; 834 struct admsw_descsoft *ds; 835 int i, vlan; 836 int gotone = 0; 837 838 /* printf("txintr: txdirty: %d, txfree: %d\n", sc->sc_txdirty, sc->sc_txfree); */ 839 for (i = sc->sc_txdirty; sc->sc_txfree != ADMSW_NTXLDESC; 840 i = ADMSW_NEXTTXL(i)) { 841 842 ADMSW_CDTXLSYNC(sc, i, 843 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 844 845 desc = &sc->sc_txldescs[i]; 846 ds = &sc->sc_txlsoft[i]; 847 if (desc->data & ADM5120_DMA_OWN) { 848 ADMSW_CDTXLSYNC(sc, i, 849 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 850 break; 851 } 852 853 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 854 0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 855 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); 856 m_freem(ds->ds_mbuf); 857 ds->ds_mbuf = NULL; 858 859 vlan = ffs(desc->status & 0x3f) - 1; 860 if (vlan < 0 || vlan >= SW_DEVS) 861 panic("admsw_txintr: bad vlan\n"); 862 ifp = &sc->sc_ethercom[vlan].ec_if; 863 gotone = 1; 864 /* printf("clear tx slot %d\n", i); */ 865 866 if_statinc(ifp, 
if_opackets); 867 868 sc->sc_txfree++; 869 } 870 871 if (gotone) { 872 sc->sc_txdirty = i; 873 #ifdef ADMSW_EVENT_COUNTERS 874 ADMSW_EVCNT_INCR(&sc->sc_ev_txintr); 875 #endif 876 ifp = &sc->sc_ethercom[0].ec_if; 877 878 /* Try to queue more packets. */ 879 if_schedule_deferred_start(ifp); 880 881 /* 882 * If there are no more pending transmissions, 883 * cancel the watchdog timer. 884 */ 885 if (sc->sc_txfree == ADMSW_NTXLDESC) 886 ifp->if_timer = 0; 887 888 } 889 890 /* printf("txintr end: txdirty: %d, txfree: %d\n", sc->sc_txdirty, sc->sc_txfree); */ 891 } 892 893 /* 894 * admsw_rxintr: 895 * 896 * Helper; handle receive interrupts. 897 */ 898 static void 899 admsw_rxintr(struct admsw_softc *sc, int high) 900 { 901 struct ifnet *ifp; 902 struct admsw_descsoft *ds; 903 struct mbuf *m; 904 uint32_t stat; 905 int i, len, port, vlan; 906 907 /* printf("rxintr\n"); */ 908 if (high) 909 panic("admsw_rxintr: high priority packet\n"); 910 911 #ifdef ADMSW_EVENT_COUNTERS 912 int pkts = 0; 913 #endif 914 915 #if 1 916 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 917 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 918 if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0) 919 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 920 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 921 else { 922 i = sc->sc_rxptr; 923 do { 924 ADMSW_CDRXLSYNC(sc, i, 925 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 926 i = ADMSW_NEXTRXL(i); 927 /* the ring is empty, just return. 
*/ 928 if (i == sc->sc_rxptr) 929 return; 930 ADMSW_CDRXLSYNC(sc, i, 931 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 932 } while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN); 933 ADMSW_CDRXLSYNC(sc, i, 934 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 935 936 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 937 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 938 if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0) 939 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 940 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 941 else { 942 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 943 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 944 /* We've fallen behind the chip: catch it. */ 945 printf("%s: RX ring resync, base=%x, work=%x, %d -> %d\n", 946 device_xname(sc->sc_dev), REG_READ(RECV_LBADDR_REG), 947 REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i); 948 sc->sc_rxptr = i; 949 ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync); 950 } 951 } 952 #endif 953 for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) { 954 ds = &sc->sc_rxlsoft[i]; 955 956 ADMSW_CDRXLSYNC(sc, i, 957 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 958 959 if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) { 960 ADMSW_CDRXLSYNC(sc, i, 961 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 962 break; 963 } 964 965 /* printf("process slot %d\n", i); */ 966 967 #ifdef ADMSW_EVENT_COUNTERS 968 pkts++; 969 #endif 970 971 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, 972 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 973 974 stat = sc->sc_rxldescs[i].status; 975 len = (stat & ADM5120_DMA_LEN) >> ADM5120_DMA_LENSHIFT; 976 len -= ETHER_CRC_LEN; 977 port = (stat & ADM5120_DMA_PORTID) >> ADM5120_DMA_PORTSHIFT; 978 for (vlan = 0; vlan < SW_DEVS; vlan++) 979 if ((1 << port) & vlan_matrix[vlan]) 980 break; 981 if (vlan == SW_DEVS) 982 vlan = 0; 983 ifp = &sc->sc_ethercom[vlan].ec_if; 984 985 m = ds->ds_mbuf; 986 if (admsw_add_rxlbuf(sc, i) != 0) { 987 if_statinc(ifp, if_ierrors); 988 ADMSW_INIT_RXLDESC(sc, i); 989 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, 990 
ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 991 continue; 992 } 993 994 m_set_rcvif(m, ifp); 995 m->m_pkthdr.len = m->m_len = len; 996 if ((stat & ADM5120_DMA_TYPE) == ADM5120_DMA_TYPE_IP) { 997 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 998 if (stat & ADM5120_DMA_CSUMFAIL) 999 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 1000 } 1001 1002 /* Pass it on. */ 1003 if_percpuq_enqueue(ifp->if_percpuq, m); 1004 } 1005 #ifdef ADMSW_EVENT_COUNTERS 1006 if (pkts) 1007 ADMSW_EVCNT_INCR(&sc->sc_ev_rxintr); 1008 1009 if (pkts == ADMSW_NRXLDESC) 1010 ADMSW_EVCNT_INCR(&sc->sc_ev_rxstall); 1011 #endif 1012 1013 /* Update the receive pointer. */ 1014 sc->sc_rxptr = i; 1015 } 1016 1017 /* 1018 * admsw_init: [ifnet interface function] 1019 * 1020 * Initialize the interface. Must be called at splnet(). 1021 */ 1022 static int 1023 admsw_init(struct ifnet *ifp) 1024 { 1025 struct admsw_softc *sc = ifp->if_softc; 1026 1027 /* printf("admsw_init called\n"); */ 1028 1029 if ((ifp->if_flags & IFF_RUNNING) == 0) { 1030 if (sc->ndevs == 0) { 1031 admsw_init_bufs(sc); 1032 admsw_reset(sc); 1033 REG_WRITE(CPUP_CONF_REG, 1034 CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK | 1035 CPUP_CONF_DMCP_MASK); 1036 /* Clear all pending interrupts */ 1037 REG_WRITE(ADMSW_INT_ST, INT_MASK); 1038 1039 /* Enable needed interrupts */ 1040 REG_WRITE(ADMSW_INT_MASK, REG_READ(ADMSW_INT_MASK) & 1041 ~(ADMSW_INTR_SHD | ADMSW_INTR_SLD | 1042 ADMSW_INTR_RHD | ADMSW_INTR_RLD | 1043 ADMSW_INTR_HDF | ADMSW_INTR_LDF)); 1044 } 1045 sc->ndevs++; 1046 } 1047 1048 /* Set the receive filter. */ 1049 admsw_set_filter(sc); 1050 1051 /* Mark iface as running */ 1052 ifp->if_flags |= IFF_RUNNING; 1053 1054 return 0; 1055 } 1056 1057 /* 1058 * admsw_stop: [ifnet interface function] 1059 * 1060 * Stop transmission on the interface. 
1061 */ 1062 static void 1063 admsw_stop(struct ifnet *ifp, int disable) 1064 { 1065 struct admsw_softc *sc = ifp->if_softc; 1066 1067 /* printf("admsw_stop: %d\n", disable); */ 1068 1069 if (!(ifp->if_flags & IFF_RUNNING)) 1070 return; 1071 1072 if (--sc->ndevs == 0) { 1073 /* printf("debug: de-initializing hardware\n"); */ 1074 1075 /* Disable cpu port */ 1076 REG_WRITE(CPUP_CONF_REG, 1077 CPUP_CONF_DCPUP | CPUP_CONF_CRCP | 1078 CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK); 1079 1080 /* XXX We should disable, then clear? --dyoung */ 1081 /* Clear all pending interrupts */ 1082 REG_WRITE(ADMSW_INT_ST, INT_MASK); 1083 1084 /* Disable interrupts */ 1085 REG_WRITE(ADMSW_INT_MASK, INT_MASK); 1086 } 1087 1088 /* Mark the interface as down and cancel the watchdog timer. */ 1089 ifp->if_flags &= ~IFF_RUNNING; 1090 ifp->if_timer = 0; 1091 1092 return; 1093 } 1094 1095 /* 1096 * admsw_set_filter: 1097 * 1098 * Set up the receive filter. 1099 */ 1100 static void 1101 admsw_set_filter(struct admsw_softc *sc) 1102 { 1103 int i; 1104 uint32_t allmc, anymc, conf, promisc; 1105 struct ether_multi *enm; 1106 struct ethercom *ec; 1107 struct ifnet *ifp; 1108 struct ether_multistep step; 1109 1110 /* Find which ports should be operated in promisc mode. 
*/ 1111 allmc = anymc = promisc = 0; 1112 for (i = 0; i < SW_DEVS; i++) { 1113 ec = &sc->sc_ethercom[i]; 1114 ifp = &ec->ec_if; 1115 if (ifp->if_flags & IFF_PROMISC) 1116 promisc |= vlan_matrix[i]; 1117 1118 ifp->if_flags &= ~IFF_ALLMULTI; 1119 1120 ETHER_LOCK(ec); 1121 ETHER_FIRST_MULTI(step, ec, enm); 1122 while (enm != NULL) { 1123 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1124 ETHER_ADDR_LEN) != 0) { 1125 printf("%s: punting on mcast range\n", 1126 __func__); 1127 ifp->if_flags |= IFF_ALLMULTI; 1128 allmc |= vlan_matrix[i]; 1129 break; 1130 } 1131 1132 anymc |= vlan_matrix[i]; 1133 1134 #if 0 1135 /* XXX extract subroutine --dyoung */ 1136 REG_WRITE(MAC_WT1_REG, 1137 enm->enm_addrlo[2] | 1138 (enm->enm_addrlo[3] << 8) | 1139 (enm->enm_addrlo[4] << 16) | 1140 (enm->enm_addrlo[5] << 24)); 1141 REG_WRITE(MAC_WT0_REG, 1142 (i << MAC_WT0_VLANID_SHIFT) | 1143 (enm->enm_addrlo[0] << 16) | 1144 (enm->enm_addrlo[1] << 24) | 1145 MAC_WT0_WRITE | MAC_WT0_VLANID_EN); 1146 /* Timeout? */ 1147 while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE)) 1148 ; 1149 #endif 1150 1151 /* Load h/w with mcast address, port = CPU */ 1152 ETHER_NEXT_MULTI(step, enm); 1153 } 1154 ETHER_UNLOCK(ec); 1155 } 1156 1157 conf = REG_READ(CPUP_CONF_REG); 1158 /* 1 Disable forwarding of unknown & multicast packets to 1159 * CPU on all ports. 1160 * 2 Enable forwarding of unknown & multicast packets to 1161 * CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set. 1162 */ 1163 conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK; 1164 /* Enable forwarding of unknown packets to CPU on selected ports. */ 1165 conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK); 1166 conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK); 1167 conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK); 1168 REG_WRITE(CPUP_CONF_REG, conf); 1169 } 1170 1171 /* 1172 * admsw_add_rxbuf: 1173 * 1174 * Add a receive buffer to the indicated descriptor. 
 */
int
admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high)
{
	struct admsw_descsoft *ds;
	struct mbuf *m;
	int error;

	/* "high" selects the high-priority Rx ring, else the low one. */
	if (high)
		ds = &sc->sc_rxhsoft[idx];
	else
		ds = &sc->sc_rxlsoft[idx];

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/*
	 * Unload the old mapping if the descriptor was in use; the
	 * caller owns (and has already taken) the previous mbuf.
	 */
	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("admsw_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Hand the descriptor (back) to the hardware. */
	if (high)
		ADMSW_INIT_RXHDESC(sc, idx);
	else
		ADMSW_INIT_RXLDESC(sc, idx);

	return 0;
}

/*
 * admsw_mediachange:	[ifmedia interface function]
 *
 *	Program the PHY control register for the port's selected media.
 */
int
admsw_mediachange(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	/* Port number = position of this ifnet in sc_ethercom[]. */
	int port = (struct ethercom *)ifp - sc->sc_ethercom;	/* XXX */
	struct ifmedia *ifm = &sc->sc_ifmedia[port];
	int old, new, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	/* Translate the media word into PHY_CNTL2 speed/duplex bits. */
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		val = PHY_CNTL2_AUTONEG | PHY_CNTL2_100M | PHY_CNTL2_FDX;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
		if ((ifm->ifm_media & IFM_FDX) != 0)
			val = PHY_CNTL2_100M | PHY_CNTL2_FDX;
		else
			val = PHY_CNTL2_100M;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
		if ((ifm->ifm_media & IFM_FDX) != 0)
			val = PHY_CNTL2_FDX;
		else
			val = 0;
	} else
		return EINVAL;

	/*
	 * Each control bit has a per-port copy, with port 0 in the base
	 * position; shifting by the port number addresses this port.
	 * Replace only this port's bits and skip the register write if
	 * nothing changed.
	 */
	old = REG_READ(PHY_CNTL2_REG);
	new = old & ~((PHY_CNTL2_AUTONEG | PHY_CNTL2_100M | PHY_CNTL2_FDX)
	    << port);
	new |= (val << port);

	if (new != old)
		REG_WRITE(PHY_CNTL2_REG, new);

	return 0;
}

/*
 * admsw_mediastatus:	[ifmedia interface function]
 *
 *	Report the port's current link state and media.
 */
void
admsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct admsw_softc *sc = ifp->if_softc;
	/* Port number = position of this ifnet in sc_ethercom[]. */
	int port = (struct ethercom *)ifp - sc->sc_ethercom;	/* XXX */
	int status;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Per-port status bits, port 0 in the base position. */
	status = REG_READ(PHY_ST_REG) >> port;

	if ((status & PHY_ST_LINKUP) == 0) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= (status & PHY_ST_100M) ? IFM_100_TX : IFM_10_T;
	if (status & PHY_ST_FDX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}