      1 /*	$NetBSD: smc83c170.c,v 1.33 2000/10/01 23:32:42 thorpej Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *	This product includes software developed by the NetBSD
     22  *	Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 /*
     41  * Device driver for the Standard Microsystems Corp. 83C170
     42  * Ethernet PCI Integrated Controller (EPIC/100).
     43  */
     44 
     45 #include "opt_inet.h"
     46 #include "opt_ns.h"
     47 #include "bpfilter.h"
     48 
     49 #include <sys/param.h>
     50 #include <sys/systm.h>
     51 #include <sys/callout.h>
     52 #include <sys/mbuf.h>
     53 #include <sys/malloc.h>
     54 #include <sys/kernel.h>
     55 #include <sys/socket.h>
     56 #include <sys/ioctl.h>
     57 #include <sys/errno.h>
     58 #include <sys/device.h>
     59 
     60 #include <net/if.h>
     61 #include <net/if_dl.h>
     62 #include <net/if_media.h>
     63 #include <net/if_ether.h>
     64 
     65 #if NBPFILTER > 0
     66 #include <net/bpf.h>
     67 #endif
     68 
     69 #ifdef INET
     70 #include <netinet/in.h>
     71 #include <netinet/if_inarp.h>
     72 #endif
     73 
     74 #ifdef NS
     75 #include <netns/ns.h>
     76 #include <netns/ns_if.h>
     77 #endif
     78 
     79 #include <machine/bus.h>
     80 #include <machine/intr.h>
     81 
     82 #include <dev/mii/miivar.h>
     83 
     84 #include <dev/ic/smc83c170reg.h>
     85 #include <dev/ic/smc83c170var.h>
     86 
     87 void	epic_start __P((struct ifnet *));
     88 void	epic_watchdog __P((struct ifnet *));
     89 int	epic_ioctl __P((struct ifnet *, u_long, caddr_t));
     90 
     91 void	epic_shutdown __P((void *));
     92 
     93 void	epic_reset __P((struct epic_softc *));
     94 int	epic_init __P((struct epic_softc *));
     95 void	epic_rxdrain __P((struct epic_softc *));
     96 void	epic_stop __P((struct epic_softc *, int));
     97 int	epic_add_rxbuf __P((struct epic_softc *, int));
     98 void	epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
     99 void	epic_set_mchash __P((struct epic_softc *));
    100 void	epic_fixup_clock_source __P((struct epic_softc *));
    101 int	epic_mii_read __P((struct device *, int, int));
    102 void	epic_mii_write __P((struct device *, int, int, int));
    103 int	epic_mii_wait __P((struct epic_softc *, u_int32_t));
    104 void	epic_tick __P((void *));
    105 
    106 void	epic_statchg __P((struct device *));
    107 int	epic_mediachange __P((struct ifnet *));
    108 void	epic_mediastatus __P((struct ifnet *, struct ifmediareq *));
    109 
    110 #define	INTMASK	(INTSTAT_FATAL_INT | INTSTAT_TXU | \
    111 	    INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)
    112 
    113 int	epic_copy_small = 0;
    114 
    115 /*
    116  * Attach an EPIC interface to the system.
    117  */
    118 void
    119 epic_attach(sc)
    120 	struct epic_softc *sc;
    121 {
    122 	bus_space_tag_t st = sc->sc_st;
    123 	bus_space_handle_t sh = sc->sc_sh;
    124 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    125 	int i, rseg, error;
    126 	bus_dma_segment_t seg;
    127 	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
    128 	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
    129 
    130 	callout_init(&sc->sc_mii_callout);
    131 
    132 	/*
    133 	 * Allocate the control data structures, and create and load the
    134 	 * DMA map for it.
    135 	 */
    136 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
    137 	    sizeof(struct epic_control_data), NBPG, 0, &seg, 1, &rseg,
    138 	    BUS_DMA_NOWAIT)) != 0) {
    139 		printf("%s: unable to allocate control data, error = %d\n",
    140 		    sc->sc_dev.dv_xname, error);
    141 		goto fail_0;
    142 	}
    143 
    144 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
    145 	    sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
    146 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
    147 		printf("%s: unable to map control data, error = %d\n",
    148 		    sc->sc_dev.dv_xname, error);
    149 		goto fail_1;
    150 	}
    151 
    152 	if ((error = bus_dmamap_create(sc->sc_dmat,
    153 	    sizeof(struct epic_control_data), 1,
    154 	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
    155 	    &sc->sc_cddmamap)) != 0) {
    156 		printf("%s: unable to create control data DMA map, "
    157 		    "error = %d\n", sc->sc_dev.dv_xname, error);
    158 		goto fail_2;
    159 	}
    160 
    161 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
    162 	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
    163 	    BUS_DMA_NOWAIT)) != 0) {
    164 		printf("%s: unable to load control data DMA map, error = %d\n",
    165 		    sc->sc_dev.dv_xname, error);
    166 		goto fail_3;
    167 	}
    168 
    169 	/*
    170 	 * Create the transmit buffer DMA maps.
    171 	 */
    172 	for (i = 0; i < EPIC_NTXDESC; i++) {
    173 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
    174 		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
    175 		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
    176 			printf("%s: unable to create tx DMA map %d, "
    177 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
    178 			goto fail_4;
    179 		}
    180 	}
    181 
    182 	/*
     183 	 * Create the receive buffer DMA maps.
    184 	 */
    185 	for (i = 0; i < EPIC_NRXDESC; i++) {
    186 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
    187 		    MCLBYTES, 0, BUS_DMA_NOWAIT,
    188 		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
    189 			printf("%s: unable to create rx DMA map %d, "
    190 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
    191 			goto fail_5;
    192 		}
    193 		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
    194 	}
    195 
    196 
    197 	/*
    198 	 * Bring the chip out of low-power mode and reset it to a known state.
    199 	 */
    200 	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
    201 	epic_reset(sc);
    202 
    203 	/*
    204 	 * Read the Ethernet address from the EEPROM.
    205 	 */
    206 	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
     207 	for (i = 0; i < sizeof(myea) / sizeof(myea[0]); i++) {
    208 		enaddr[i * 2]     = myea[i] & 0xff;
    209 		enaddr[i * 2 + 1] = myea[i] >> 8;
    210 	}
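	/*
	 * Each 16-bit EEPROM word supplies two address bytes, low byte
	 * first: a word read as 0x3412, for example, becomes the bytes
	 * 0x12 and then 0x34 in enaddr[].
	 */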
    211 
    212 	/*
    213 	 * ...and the device name.
    214 	 */
    215 	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
    216 	    mydevname);
    217 	for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
    218 		devname[i * 2]     = mydevname[i] & 0xff;
    219 		devname[i * 2 + 1] = mydevname[i] >> 8;
    220 	}
    221 
    222 	devname[sizeof(mydevname)] = '\0';
    223 	for (i = sizeof(mydevname) - 1; i >= 0; i--) {
    224 		if (devname[i] == ' ')
    225 			devname[i] = '\0';
    226 		else
    227 			break;
    228 	}
    229 
    230 	printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
    231 	    devname, ether_sprintf(enaddr));
    232 
    233 	/*
    234 	 * Initialize our media structures and probe the MII.
    235 	 */
    236 	sc->sc_mii.mii_ifp = ifp;
    237 	sc->sc_mii.mii_readreg = epic_mii_read;
    238 	sc->sc_mii.mii_writereg = epic_mii_write;
    239 	sc->sc_mii.mii_statchg = epic_statchg;
    240 	ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
    241 	    epic_mediastatus);
    242 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
    243 	    MII_OFFSET_ANY, 0);
    244 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
    245 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
    246 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
    247 	} else
    248 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
    249 
    250 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
    251 	ifp->if_softc = sc;
    252 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    253 	ifp->if_ioctl = epic_ioctl;
    254 	ifp->if_start = epic_start;
    255 	ifp->if_watchdog = epic_watchdog;
    256 
    257 	/*
    258 	 * Attach the interface.
    259 	 */
    260 	if_attach(ifp);
    261 	ether_ifattach(ifp, enaddr);
    262 #if NBPFILTER > 0
    263 	bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
    264 	    sizeof(struct ether_header));
    265 #endif
    266 
    267 	/*
    268 	 * Make sure the interface is shutdown during reboot.
    269 	 */
    270 	sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
    271 	if (sc->sc_sdhook == NULL)
    272 		printf("%s: WARNING: unable to establish shutdown hook\n",
    273 		    sc->sc_dev.dv_xname);
    274 	return;
    275 
    276 	/*
    277 	 * Free any resources we've allocated during the failed attach
    278 	 * attempt.  Do this in reverse order and fall through.
    279 	 */
    280  fail_5:
    281 	for (i = 0; i < EPIC_NRXDESC; i++) {
    282 		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
    283 			bus_dmamap_destroy(sc->sc_dmat,
    284 			    EPIC_DSRX(sc, i)->ds_dmamap);
    285 	}
    286  fail_4:
    287 	for (i = 0; i < EPIC_NTXDESC; i++) {
    288 		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
    289 			bus_dmamap_destroy(sc->sc_dmat,
    290 			    EPIC_DSTX(sc, i)->ds_dmamap);
    291 	}
    292 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
    293  fail_3:
    294 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
    295  fail_2:
    296 	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
    297 	    sizeof(struct epic_control_data));
    298  fail_1:
    299 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
    300  fail_0:
    301 	return;
    302 }
    303 
    304 /*
    305  * Shutdown hook.  Make sure the interface is stopped at reboot.
    306  */
    307 void
    308 epic_shutdown(arg)
    309 	void *arg;
    310 {
    311 	struct epic_softc *sc = arg;
    312 
    313 	epic_stop(sc, 1);
    314 }
    315 
    316 /*
    317  * Start packet transmission on the interface.
    318  * [ifnet interface function]
    319  */
    320 void
    321 epic_start(ifp)
    322 	struct ifnet *ifp;
    323 {
    324 	struct epic_softc *sc = ifp->if_softc;
    325 	struct mbuf *m0, *m;
    326 	struct epic_txdesc *txd;
    327 	struct epic_descsoft *ds;
    328 	struct epic_fraglist *fr;
    329 	bus_dmamap_t dmamap;
    330 	int error, firsttx, nexttx, opending, seg;
    331 
    332 	/*
    333 	 * Remember the previous txpending and the first transmit
    334 	 * descriptor we use.
    335 	 */
    336 	opending = sc->sc_txpending;
    337 	firsttx = EPIC_NEXTTX(sc->sc_txlast);
    338 
    339 	/*
    340 	 * Loop through the send queue, setting up transmit descriptors
    341 	 * until we drain the queue, or use up all available transmit
    342 	 * descriptors.
    343 	 */
    344 	while (sc->sc_txpending < EPIC_NTXDESC) {
    345 		/*
    346 		 * Grab a packet off the queue.
    347 		 */
    348 		IF_DEQUEUE(&ifp->if_snd, m0);
    349 		if (m0 == NULL)
    350 			break;
    351 
    352 		/*
    353 		 * Get the last and next available transmit descriptor.
    354 		 */
    355 		nexttx = EPIC_NEXTTX(sc->sc_txlast);
    356 		txd = EPIC_CDTX(sc, nexttx);
    357 		fr = EPIC_CDFL(sc, nexttx);
    358 		ds = EPIC_DSTX(sc, nexttx);
    359 		dmamap = ds->ds_dmamap;
    360 
    361 		/*
    362 		 * Load the DMA map.  If this fails, the packet either
     363 		 * didn't fit in the allotted number of frags, or we were
    364 		 * short on resources.  In this case, we'll copy and try
    365 		 * again.
    366 		 */
    367 		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
    368 		    BUS_DMA_NOWAIT) != 0) {
    369 			MGETHDR(m, M_DONTWAIT, MT_DATA);
    370 			if (m == NULL) {
    371 				printf("%s: unable to allocate Tx mbuf\n",
    372 				    sc->sc_dev.dv_xname);
    373 				IF_PREPEND(&ifp->if_snd, m0);
    374 				break;
    375 			}
    376 			if (m0->m_pkthdr.len > MHLEN) {
    377 				MCLGET(m, M_DONTWAIT);
    378 				if ((m->m_flags & M_EXT) == 0) {
    379 					printf("%s: unable to allocate Tx "
    380 					    "cluster\n", sc->sc_dev.dv_xname);
    381 					m_freem(m);
    382 					IF_PREPEND(&ifp->if_snd, m0);
    383 					break;
    384 				}
    385 			}
    386 			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
    387 			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
    388 			m_freem(m0);
    389 			m0 = m;
    390 			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
    391 			    m0, BUS_DMA_NOWAIT);
    392 			if (error) {
    393 				printf("%s: unable to load Tx buffer, "
    394 				    "error = %d\n", sc->sc_dev.dv_xname, error);
    395 				IF_PREPEND(&ifp->if_snd, m0);
    396 				break;
    397 			}
    398 		}
    399 
    400 		/* Initialize the fraglist. */
    401 		fr->ef_nfrags = dmamap->dm_nsegs;
    402 		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
    403 			fr->ef_frags[seg].ef_addr =
    404 			    dmamap->dm_segs[seg].ds_addr;
    405 			fr->ef_frags[seg].ef_length =
    406 			    dmamap->dm_segs[seg].ds_len;
    407 		}
    408 
    409 		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);
    410 
    411 		/* Sync the DMA map. */
    412 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
    413 		    BUS_DMASYNC_PREWRITE);
    414 
    415 		/*
    416 		 * Store a pointer to the packet so we can free it later.
    417 		 */
    418 		ds->ds_mbuf = m0;
    419 
    420 		/*
    421 		 * Fill in the transmit descriptor.  The EPIC doesn't
    422 		 * auto-pad, so we have to do this ourselves.
    423 		 */
    424 		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
    425 		txd->et_txlength = max(m0->m_pkthdr.len,
    426 		    ETHER_MIN_LEN - ETHER_CRC_LEN);
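		/*
		 * ETHER_MIN_LEN (64) minus ETHER_CRC_LEN (4) is 60, the
		 * shortest data length we ever hand to the chip for a frame.
		 */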
    427 
    428 		/*
     429 		 * If this is the first descriptor we're enqueueing,
     430 		 * don't give it to the EPIC yet; the chip could start
     431 		 * processing an incomplete chain.  We'll do it below.
    432 		 */
    433 		if (nexttx == firsttx)
    434 			txd->et_txstatus = 0;
    435 		else
    436 			txd->et_txstatus = ET_TXSTAT_OWNER;
    437 
    438 		EPIC_CDTXSYNC(sc, nexttx,
    439 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
    440 
    441 		/* Advance the tx pointer. */
    442 		sc->sc_txpending++;
    443 		sc->sc_txlast = nexttx;
    444 
    445 #if NBPFILTER > 0
    446 		/*
    447 		 * Pass the packet to any BPF listeners.
    448 		 */
    449 		if (ifp->if_bpf)
    450 			bpf_mtap(ifp->if_bpf, m0);
    451 #endif
    452 	}
    453 
    454 	if (sc->sc_txpending == EPIC_NTXDESC) {
    455 		/* No more slots left; notify upper layer. */
    456 		ifp->if_flags |= IFF_OACTIVE;
    457 	}
    458 
    459 	if (sc->sc_txpending != opending) {
    460 		/*
    461 		 * We enqueued packets.  If the transmitter was idle,
    462 		 * reset the txdirty pointer.
    463 		 */
    464 		if (opending == 0)
    465 			sc->sc_txdirty = firsttx;
    466 
    467 		/*
    468 		 * Cause a transmit interrupt to happen on the
    469 		 * last packet we enqueued.
    470 		 */
    471 		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
    472 		EPIC_CDTXSYNC(sc, sc->sc_txlast,
    473 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
    474 
    475 		/*
    476 		 * The entire packet chain is set up.  Give the
    477 		 * first descriptor to the EPIC now.
    478 		 */
    479 		EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
    480 		EPIC_CDTXSYNC(sc, firsttx,
    481 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
    482 
    483 		/* Start the transmitter. */
    484 		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
    485 		    COMMAND_TXQUEUED);
    486 
    487 		/* Set a watchdog timer in case the chip flakes out. */
    488 		ifp->if_timer = 5;
    489 	}
    490 }
    491 
    492 /*
    493  * Watchdog timer handler.
    494  * [ifnet interface function]
    495  */
    496 void
    497 epic_watchdog(ifp)
    498 	struct ifnet *ifp;
    499 {
    500 	struct epic_softc *sc = ifp->if_softc;
    501 
    502 	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
    503 	ifp->if_oerrors++;
    504 
    505 	(void) epic_init(sc);
    506 }
    507 
    508 /*
    509  * Handle control requests from the operator.
    510  * [ifnet interface function]
    511  */
    512 int
    513 epic_ioctl(ifp, cmd, data)
    514 	struct ifnet *ifp;
    515 	u_long cmd;
    516 	caddr_t data;
    517 {
    518 	struct epic_softc *sc = ifp->if_softc;
    519 	struct ifreq *ifr = (struct ifreq *)data;
    520 	struct ifaddr *ifa = (struct ifaddr *)data;
    521 	int s, error = 0;
    522 
    523 	s = splnet();
    524 
    525 	switch (cmd) {
    526 	case SIOCSIFADDR:
    527 		ifp->if_flags |= IFF_UP;
    528 
    529 		switch (ifa->ifa_addr->sa_family) {
    530 #ifdef INET
    531 		case AF_INET:
    532 			if ((error = epic_init(sc)) != 0)
    533 				break;
    534 			arp_ifinit(ifp, ifa);
    535 			break;
    536 #endif /* INET */
    537 #ifdef NS
    538 		case AF_NS:
    539 		    {
    540 			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
    541 
    542 			if (ns_nullhost(*ina))
    543 				ina->x_host = *(union ns_host *)
    544 				    LLADDR(ifp->if_sadl);
    545 			else
    546 				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
    547 				    ifp->if_addrlen);
    548 			/* Set new address. */
    549 			error = epic_init(sc);
    550 			break;
    551 		    }
    552 #endif /* NS */
    553 		default:
    554 			error = epic_init(sc);
    555 			break;
    556 		}
    557 		break;
    558 
    559 	case SIOCSIFMTU:
    560 		if (ifr->ifr_mtu > ETHERMTU)
    561 			error = EINVAL;
    562 		else
    563 			ifp->if_mtu = ifr->ifr_mtu;
    564 		break;
    565 
    566 	case SIOCSIFFLAGS:
    567 		if ((ifp->if_flags & IFF_UP) == 0 &&
    568 		    (ifp->if_flags & IFF_RUNNING) != 0) {
    569 			/*
    570 			 * If interface is marked down and it is running, then
    571 			 * stop it.
    572 			 */
    573 			epic_stop(sc, 1);
    574 		} else if ((ifp->if_flags & IFF_UP) != 0 &&
    575 			   (ifp->if_flags & IFF_RUNNING) == 0) {
    576 			/*
     577 			 * If interface is marked up and it is stopped, then
    578 			 * start it.
    579 			 */
    580 			error = epic_init(sc);
    581 		} else if ((ifp->if_flags & IFF_UP) != 0) {
    582 			/*
    583 			 * Reset the interface to pick up changes in any other
    584 			 * flags that affect the hardware state.
    585 			 */
    586 			error = epic_init(sc);
    587 		}
    588 		break;
    589 
    590 	case SIOCADDMULTI:
    591 	case SIOCDELMULTI:
    592 		error = (cmd == SIOCADDMULTI) ?
    593 		    ether_addmulti(ifr, &sc->sc_ethercom) :
    594 		    ether_delmulti(ifr, &sc->sc_ethercom);
    595 
    596 		if (error == ENETRESET) {
    597 			/*
    598 			 * Multicast list has changed; set the hardware filter
    599 			 * accordingly.  Update our idea of the current media;
    600 			 * epic_set_mchash() needs to know what it is.
    601 			 */
    602 			mii_pollstat(&sc->sc_mii);
    603 			epic_set_mchash(sc);
    604 			error = 0;
    605 		}
    606 		break;
    607 
    608 	case SIOCSIFMEDIA:
    609 	case SIOCGIFMEDIA:
    610 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
    611 		break;
    612 
    613 	default:
    614 		error = EINVAL;
    615 		break;
    616 	}
    617 
    618 	splx(s);
    619 	return (error);
    620 }
    621 
    622 /*
    623  * Interrupt handler.
    624  */
    625 int
    626 epic_intr(arg)
    627 	void *arg;
    628 {
    629 	struct epic_softc *sc = arg;
    630 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    631 	struct epic_rxdesc *rxd;
    632 	struct epic_txdesc *txd;
    633 	struct epic_descsoft *ds;
    634 	struct mbuf *m;
    635 	u_int32_t intstat;
    636 	int i, len, claimed = 0;
    637 
    638  top:
    639 	/*
    640 	 * Get the interrupt status from the EPIC.
    641 	 */
    642 	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
    643 	if ((intstat & INTSTAT_INT_ACTV) == 0)
    644 		return (claimed);
    645 
    646 	claimed = 1;
    647 
    648 	/*
    649 	 * Acknowledge the interrupt.
    650 	 */
    651 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
    652 	    intstat & INTMASK);
    653 
    654 	/*
    655 	 * Check for receive interrupts.
    656 	 */
    657 	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
    658 		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
    659 			rxd = EPIC_CDRX(sc, i);
    660 			ds = EPIC_DSRX(sc, i);
    661 
    662 			EPIC_CDRXSYNC(sc, i,
    663 			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
    664 
    665 			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
    666 				/*
    667 				 * We have processed all of the
    668 				 * receive buffers.
    669 				 */
    670 				break;
    671 			}
    672 
    673 			/*
    674 			 * Make sure the packet arrived intact.  If an error
    675 			 * occurred, update stats and reset the descriptor.
    676 			 * The buffer will be reused the next time the
    677 			 * descriptor comes up in the ring.
    678 			 */
    679 			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
    680 				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
    681 					printf("%s: CRC error\n",
    682 					    sc->sc_dev.dv_xname);
    683 				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
    684 					printf("%s: alignment error\n",
    685 					    sc->sc_dev.dv_xname);
    686 				ifp->if_ierrors++;
    687 				EPIC_INIT_RXDESC(sc, i);
    688 				continue;
    689 			}
    690 
    691 			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
    692 			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
    693 
    694 			/*
    695 			 * The EPIC includes the CRC with every packet;
    696 			 * trim it.
    697 			 */
    698 			len = rxd->er_rxlength - ETHER_CRC_LEN;
    699 
    700 			if (len < sizeof(struct ether_header)) {
    701 				/*
    702 				 * Runt packet; drop it now.
    703 				 */
    704 				ifp->if_ierrors++;
    705 				EPIC_INIT_RXDESC(sc, i);
    706 				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
    707 				    ds->ds_dmamap->dm_mapsize,
    708 				    BUS_DMASYNC_PREREAD);
    709 				continue;
    710 			}
    711 
    712 			/*
    713 			 * If the packet is small enough to fit in a
    714 			 * single header mbuf, allocate one and copy
    715 			 * the data into it.  This greatly reduces
    716 			 * memory consumption when we receive lots
    717 			 * of small packets.
    718 			 *
    719 			 * Otherwise, we add a new buffer to the receive
    720 			 * chain.  If this fails, we drop the packet and
    721 			 * recycle the old buffer.
    722 			 */
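			/*
			 * Note that epic_copy_small defaults to 0 above, so
			 * the copy path is taken only when that tunable has
			 * been turned on (e.g. patched by the administrator).
			 */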
    723 			if (epic_copy_small != 0 && len <= MHLEN) {
    724 				MGETHDR(m, M_DONTWAIT, MT_DATA);
    725 				if (m == NULL)
    726 					goto dropit;
    727 				memcpy(mtod(m, caddr_t),
    728 				    mtod(ds->ds_mbuf, caddr_t), len);
    729 				EPIC_INIT_RXDESC(sc, i);
    730 				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
    731 				    ds->ds_dmamap->dm_mapsize,
    732 				    BUS_DMASYNC_PREREAD);
    733 			} else {
    734 				m = ds->ds_mbuf;
    735 				if (epic_add_rxbuf(sc, i) != 0) {
    736  dropit:
    737 					ifp->if_ierrors++;
    738 					EPIC_INIT_RXDESC(sc, i);
    739 					bus_dmamap_sync(sc->sc_dmat,
    740 					    ds->ds_dmamap, 0,
    741 					    ds->ds_dmamap->dm_mapsize,
    742 					    BUS_DMASYNC_PREREAD);
    743 					continue;
    744 				}
    745 			}
    746 
    747 			m->m_pkthdr.rcvif = ifp;
    748 			m->m_pkthdr.len = m->m_len = len;
    749 
    750 #if NBPFILTER > 0
    751 			/*
    752 			 * Pass this up to any BPF listeners, but only
     753 			 * pass it up the stack if it's for us.
    754 			 */
    755 			if (ifp->if_bpf)
    756 				bpf_mtap(ifp->if_bpf, m);
    757 #endif
    758 
    759 			/* Pass it on. */
    760 			(*ifp->if_input)(ifp, m);
    761 			ifp->if_ipackets++;
    762 		}
    763 
     764 		/* Update the receive pointer. */
    765 		sc->sc_rxptr = i;
    766 
    767 		/*
    768 		 * Check for receive queue underflow.
    769 		 */
    770 		if (intstat & INTSTAT_RQE) {
    771 			printf("%s: receiver queue empty\n",
    772 			    sc->sc_dev.dv_xname);
    773 			/*
    774 			 * Ring is already built; just restart the
    775 			 * receiver.
    776 			 */
    777 			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
    778 			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
    779 			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
    780 			    COMMAND_RXQUEUED | COMMAND_START_RX);
    781 		}
    782 	}
    783 
    784 	/*
    785 	 * Check for transmission complete interrupts.
    786 	 */
    787 	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
    788 		ifp->if_flags &= ~IFF_OACTIVE;
    789 		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
    790 		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
    791 			txd = EPIC_CDTX(sc, i);
    792 			ds = EPIC_DSTX(sc, i);
    793 
    794 			EPIC_CDTXSYNC(sc, i,
    795 			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
    796 
    797 			if (txd->et_txstatus & ET_TXSTAT_OWNER)
    798 				break;
    799 
    800 			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);
    801 
    802 			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
    803 			    0, ds->ds_dmamap->dm_mapsize,
    804 			    BUS_DMASYNC_POSTWRITE);
    805 			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
    806 			m_freem(ds->ds_mbuf);
    807 			ds->ds_mbuf = NULL;
    808 
    809 			/*
    810 			 * Check for errors and collisions.
    811 			 */
    812 			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
    813 				ifp->if_oerrors++;
    814 			else
    815 				ifp->if_opackets++;
    816 			ifp->if_collisions +=
    817 			    TXSTAT_COLLISIONS(txd->et_txstatus);
    818 			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
    819 				printf("%s: lost carrier\n",
    820 				    sc->sc_dev.dv_xname);
    821 		}
    822 
    823 		/* Update the dirty transmit buffer pointer. */
    824 		sc->sc_txdirty = i;
    825 
    826 		/*
    827 		 * Cancel the watchdog timer if there are no pending
    828 		 * transmissions.
    829 		 */
    830 		if (sc->sc_txpending == 0)
    831 			ifp->if_timer = 0;
    832 
    833 		/*
    834 		 * Kick the transmitter after a DMA underrun.
    835 		 */
    836 		if (intstat & INTSTAT_TXU) {
    837 			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
    838 			bus_space_write_4(sc->sc_st, sc->sc_sh,
    839 			    EPIC_COMMAND, COMMAND_TXUGO);
    840 			if (sc->sc_txpending)
    841 				bus_space_write_4(sc->sc_st, sc->sc_sh,
    842 				    EPIC_COMMAND, COMMAND_TXQUEUED);
    843 		}
    844 
    845 		/*
    846 		 * Try to get more packets going.
    847 		 */
    848 		epic_start(ifp);
    849 	}
    850 
    851 	/*
    852 	 * Check for fatal interrupts.
    853 	 */
    854 	if (intstat & INTSTAT_FATAL_INT) {
    855 		if (intstat & INTSTAT_PTA)
    856 			printf("%s: PCI target abort error\n",
    857 			    sc->sc_dev.dv_xname);
    858 		else if (intstat & INTSTAT_PMA)
    859 			printf("%s: PCI master abort error\n",
    860 			    sc->sc_dev.dv_xname);
    861 		else if (intstat & INTSTAT_APE)
    862 			printf("%s: PCI address parity error\n",
    863 			    sc->sc_dev.dv_xname);
    864 		else if (intstat & INTSTAT_DPE)
    865 			printf("%s: PCI data parity error\n",
    866 			    sc->sc_dev.dv_xname);
    867 		else
    868 			printf("%s: unknown fatal error\n",
    869 			    sc->sc_dev.dv_xname);
    870 		(void) epic_init(sc);
    871 	}
    872 
    873 	/*
    874 	 * Check for more interrupts.
    875 	 */
    876 	goto top;
    877 }
    878 
    879 /*
    880  * One second timer, used to tick the MII.
    881  */
    882 void
    883 epic_tick(arg)
    884 	void *arg;
    885 {
    886 	struct epic_softc *sc = arg;
    887 	int s;
    888 
    889 	s = splnet();
    890 	mii_tick(&sc->sc_mii);
    891 	splx(s);
    892 
    893 	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
    894 }
    895 
    896 /*
    897  * Fixup the clock source on the EPIC.
    898  */
    899 void
    900 epic_fixup_clock_source(sc)
    901 	struct epic_softc *sc;
    902 {
    903 	int i;
    904 
    905 	/*
    906 	 * According to SMC Application Note 7-15, the EPIC's clock
    907 	 * source is incorrect following a reset.  This manifests itself
    908 	 * as failure to recognize when host software has written to
    909 	 * a register on the EPIC.  The appnote recommends issuing at
    910 	 * least 16 consecutive writes to the CLOCK TEST bit to correctly
    911 	 * configure the clock source.
    912 	 */
    913 	for (i = 0; i < 16; i++)
    914 		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
    915 		    TEST_CLOCKTEST);
    916 }
    917 
    918 /*
    919  * Perform a soft reset on the EPIC.
    920  */
    921 void
    922 epic_reset(sc)
    923 	struct epic_softc *sc;
    924 {
    925 
    926 	epic_fixup_clock_source(sc);
    927 
    928 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
    929 	delay(100);
    930 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
    931 	delay(100);
    932 
    933 	epic_fixup_clock_source(sc);
    934 }
    935 
    936 /*
    937  * Initialize the interface.  Must be called at splnet().
    938  */
    939 int
    940 epic_init(sc)
    941 	struct epic_softc *sc;
    942 {
    943 	bus_space_tag_t st = sc->sc_st;
    944 	bus_space_handle_t sh = sc->sc_sh;
    945 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    946 	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
    947 	struct epic_txdesc *txd;
    948 	struct epic_descsoft *ds;
    949 	u_int32_t genctl, reg0;
    950 	int i, error = 0;
    951 
    952 	/*
    953 	 * Cancel any pending I/O.
    954 	 */
    955 	epic_stop(sc, 0);
    956 
    957 	/*
    958 	 * Reset the EPIC to a known state.
    959 	 */
    960 	epic_reset(sc);
    961 
    962 	/*
    963 	 * Magical mystery initialization.
    964 	 */
    965 	bus_space_write_4(st, sh, EPIC_TXTEST, 0);
    966 
    967 	/*
    968 	 * Initialize the EPIC genctl register:
    969 	 *
    970 	 *	- 64 byte receive FIFO threshold
    971 	 *	- automatic advance to next receive frame
    972 	 */
    973 	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
    974 #if BYTE_ORDER == BIG_ENDIAN
    975 	genctl |= GENCTL_BIG_ENDIAN;
    976 #endif
    977 	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
    978 
    979 	/*
    980 	 * Reset the MII bus and PHY.
    981 	 */
    982 	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
    983 	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
    984 	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
    985 	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
    986 	delay(100);
    987 	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
    988 	delay(100);
    989 	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);
    990 
    991 	/*
    992 	 * Initialize Ethernet address.
    993 	 */
    994 	reg0 = enaddr[1] << 8 | enaddr[0];
    995 	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
    996 	reg0 = enaddr[3] << 8 | enaddr[2];
    997 	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
    998 	reg0 = enaddr[5] << 8 | enaddr[4];
    999 	bus_space_write_4(st, sh, EPIC_LAN2, reg0);
   1000 
   1001 	/*
   1002 	 * Initialize receive control.  Remember the external buffer
   1003 	 * size setting.
   1004 	 */
   1005 	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
   1006 	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
   1007 	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
   1008 	if (ifp->if_flags & IFF_PROMISC)
   1009 		reg0 |= RXCON_PROMISCMODE;
   1010 	bus_space_write_4(st, sh, EPIC_RXCON, reg0);
   1011 
   1012 	/* Set the current media. */
   1013 	mii_mediachg(&sc->sc_mii);
   1014 
   1015 	/* Set up the multicast hash table. */
   1016 	epic_set_mchash(sc);
   1017 
   1018 	/*
   1019 	 * Initialize the transmit descriptor ring.  txlast is initialized
   1020 	 * to the end of the list so that it will wrap around to the first
   1021 	 * descriptor when the first packet is transmitted.
   1022 	 */
   1023 	for (i = 0; i < EPIC_NTXDESC; i++) {
   1024 		txd = EPIC_CDTX(sc, i);
   1025 		memset(txd, 0, sizeof(struct epic_txdesc));
   1026 		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
   1027 		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
   1028 		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1029 	}
   1030 	sc->sc_txpending = 0;
   1031 	sc->sc_txdirty = 0;
   1032 	sc->sc_txlast = EPIC_NTXDESC - 1;
   1033 
   1034 	/*
   1035 	 * Initialize the receive descriptor ring.
   1036 	 */
   1037 	for (i = 0; i < EPIC_NRXDESC; i++) {
   1038 		ds = EPIC_DSRX(sc, i);
   1039 		if (ds->ds_mbuf == NULL) {
   1040 			if ((error = epic_add_rxbuf(sc, i)) != 0) {
   1041 				printf("%s: unable to allocate or map rx "
    1042 				    "buffer %d, error = %d\n",
   1043 				    sc->sc_dev.dv_xname, i, error);
   1044 				/*
   1045 				 * XXX Should attempt to run with fewer receive
   1046 				 * XXX buffers instead of just failing.
   1047 				 */
   1048 				epic_rxdrain(sc);
   1049 				goto out;
   1050 			}
   1051 		}
   1052 	}
   1053 	sc->sc_rxptr = 0;
   1054 
   1055 	/*
   1056 	 * Initialize the interrupt mask and enable interrupts.
   1057 	 */
   1058 	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
   1059 	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);
   1060 
   1061 	/*
   1062 	 * Give the transmit and receive rings to the EPIC.
   1063 	 */
   1064 	bus_space_write_4(st, sh, EPIC_PTCDAR,
   1065 	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
   1066 	bus_space_write_4(st, sh, EPIC_PRCDAR,
   1067 	    EPIC_CDRXADDR(sc, sc->sc_rxptr));
   1068 
   1069 	/*
   1070 	 * Set the EPIC in motion.
   1071 	 */
   1072 	bus_space_write_4(st, sh, EPIC_COMMAND,
   1073 	    COMMAND_RXQUEUED | COMMAND_START_RX);
   1074 
   1075 	/*
   1076 	 * ...all done!
   1077 	 */
   1078 	ifp->if_flags |= IFF_RUNNING;
   1079 	ifp->if_flags &= ~IFF_OACTIVE;
   1080 
   1081 	/*
   1082 	 * Start the one second clock.
   1083 	 */
   1084 	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
   1085 
   1086 	/*
   1087 	 * Attempt to start output on the interface.
   1088 	 */
   1089 	epic_start(ifp);
   1090 
   1091  out:
   1092 	if (error)
   1093 		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
   1094 	return (error);
   1095 }
   1096 
   1097 /*
   1098  * Drain the receive queue.
   1099  */
   1100 void
   1101 epic_rxdrain(sc)
   1102 	struct epic_softc *sc;
   1103 {
   1104 	struct epic_descsoft *ds;
   1105 	int i;
   1106 
   1107 	for (i = 0; i < EPIC_NRXDESC; i++) {
   1108 		ds = EPIC_DSRX(sc, i);
   1109 		if (ds->ds_mbuf != NULL) {
   1110 			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
   1111 			m_freem(ds->ds_mbuf);
   1112 			ds->ds_mbuf = NULL;
   1113 		}
   1114 	}
   1115 }
   1116 
   1117 /*
   1118  * Stop transmission on the interface.
   1119  */
   1120 void
   1121 epic_stop(sc, drain)
   1122 	struct epic_softc *sc;
   1123 	int drain;
   1124 {
   1125 	bus_space_tag_t st = sc->sc_st;
   1126 	bus_space_handle_t sh = sc->sc_sh;
   1127 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1128 	struct epic_descsoft *ds;
   1129 	u_int32_t reg;
   1130 	int i;
   1131 
   1132 	/*
   1133 	 * Stop the one second clock.
   1134 	 */
   1135 	callout_stop(&sc->sc_mii_callout);
   1136 
   1137 	/* Down the MII. */
   1138 	mii_down(&sc->sc_mii);
   1139 
   1140 	/* Paranoia... */
   1141 	epic_fixup_clock_source(sc);
   1142 
   1143 	/*
   1144 	 * Disable interrupts.
   1145 	 */
   1146 	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
   1147 	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
   1148 	bus_space_write_4(st, sh, EPIC_INTMASK, 0);
   1149 
   1150 	/*
   1151 	 * Stop the DMA engine and take the receiver off-line.
   1152 	 */
   1153 	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
   1154 	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);
   1155 
   1156 	/*
   1157 	 * Release any queued transmit buffers.
   1158 	 */
   1159 	for (i = 0; i < EPIC_NTXDESC; i++) {
   1160 		ds = EPIC_DSTX(sc, i);
   1161 		if (ds->ds_mbuf != NULL) {
   1162 			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
   1163 			m_freem(ds->ds_mbuf);
   1164 			ds->ds_mbuf = NULL;
   1165 		}
   1166 	}
   1167 
   1168 	if (drain) {
   1169 		/*
   1170 		 * Release the receive buffers.
   1171 		 */
   1172 		epic_rxdrain(sc);
   1173 	}
   1174 
   1175 	/*
   1176 	 * Mark the interface down and cancel the watchdog timer.
   1177 	 */
   1178 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1179 	ifp->if_timer = 0;
   1180 }
   1181 
   1182 /*
   1183  * Read the EPIC Serial EEPROM.
   1184  */
   1185 void
   1186 epic_read_eeprom(sc, word, wordcnt, data)
   1187 	struct epic_softc *sc;
   1188 	int word, wordcnt;
   1189 	u_int16_t *data;
   1190 {
   1191 	bus_space_tag_t st = sc->sc_st;
   1192 	bus_space_handle_t sh = sc->sc_sh;
   1193 	u_int16_t reg;
   1194 	int i, x;
   1195 
   1196 #define	EEPROM_WAIT_READY(st, sh) \
   1197 	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
   1198 		/* nothing */
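	/*
	 * Bits are transferred serially: each bit is presented on EEDI
	 * (or sampled from EEDO) while EESK is pulsed high and then low,
	 * and EERDY is polled after every access to the EECTL register.
	 */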
   1199 
   1200 	/*
   1201 	 * Enable the EEPROM.
   1202 	 */
   1203 	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
   1204 	EEPROM_WAIT_READY(st, sh);
   1205 
   1206 	for (i = 0; i < wordcnt; i++) {
   1207 		/* Send CHIP SELECT for one clock tick. */
   1208 		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
   1209 		EEPROM_WAIT_READY(st, sh);
   1210 
   1211 		/* Shift in the READ opcode. */
   1212 		for (x = 3; x > 0; x--) {
   1213 			reg = EECTL_ENABLE|EECTL_EECS;
   1214 			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
   1215 				reg |= EECTL_EEDI;
   1216 			bus_space_write_4(st, sh, EPIC_EECTL, reg);
   1217 			EEPROM_WAIT_READY(st, sh);
   1218 			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
   1219 			EEPROM_WAIT_READY(st, sh);
   1220 			bus_space_write_4(st, sh, EPIC_EECTL, reg);
   1221 			EEPROM_WAIT_READY(st, sh);
   1222 		}
   1223 
   1224 		/* Shift in address. */
   1225 		for (x = 6; x > 0; x--) {
   1226 			reg = EECTL_ENABLE|EECTL_EECS;
   1227 			if ((word + i) & (1 << (x - 1)))
   1228 				reg |= EECTL_EEDI;
   1229 			bus_space_write_4(st, sh, EPIC_EECTL, reg);
   1230 			EEPROM_WAIT_READY(st, sh);
   1231 			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
   1232 			EEPROM_WAIT_READY(st, sh);
   1233 			bus_space_write_4(st, sh, EPIC_EECTL, reg);
   1234 			EEPROM_WAIT_READY(st, sh);
   1235 		}
   1236 
   1237 		/* Shift out data. */
   1238 		reg = EECTL_ENABLE|EECTL_EECS;
   1239 		data[i] = 0;
   1240 		for (x = 16; x > 0; x--) {
   1241 			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
   1242 			EEPROM_WAIT_READY(st, sh);
   1243 			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
   1244 				data[i] |= (1 << (x - 1));
   1245 			bus_space_write_4(st, sh, EPIC_EECTL, reg);
   1246 			EEPROM_WAIT_READY(st, sh);
   1247 		}
   1248 
   1249 		/* Clear CHIP SELECT. */
   1250 		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
   1251 		EEPROM_WAIT_READY(st, sh);
   1252 	}
   1253 
   1254 	/*
   1255 	 * Disable the EEPROM.
   1256 	 */
   1257 	bus_space_write_4(st, sh, EPIC_EECTL, 0);
   1258 
   1259 #undef EEPROM_WAIT_READY
   1260 }
   1261 
   1262 /*
   1263  * Add a receive buffer to the indicated descriptor.
   1264  */
   1265 int
   1266 epic_add_rxbuf(sc, idx)
   1267 	struct epic_softc *sc;
   1268 	int idx;
   1269 {
   1270 	struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
   1271 	struct mbuf *m;
   1272 	int error;
   1273 
   1274 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   1275 	if (m == NULL)
   1276 		return (ENOBUFS);
   1277 
   1278 	MCLGET(m, M_DONTWAIT);
   1279 	if ((m->m_flags & M_EXT) == 0) {
   1280 		m_freem(m);
   1281 		return (ENOBUFS);
   1282 	}
   1283 
   1284 	if (ds->ds_mbuf != NULL)
   1285 		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
   1286 
   1287 	ds->ds_mbuf = m;
   1288 
   1289 	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
   1290 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
   1291 	if (error) {
   1292 		printf("%s: can't load rx DMA map %d, error = %d\n",
   1293 		    sc->sc_dev.dv_xname, idx, error);
   1294 		panic("epic_add_rxbuf");	/* XXX */
   1295 	}
   1296 
   1297 	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
   1298 	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   1299 
   1300 	EPIC_INIT_RXDESC(sc, idx);
   1301 
   1302 	return (0);
   1303 }
   1304 
   1305 /*
   1306  * Set the EPIC multicast hash table.
   1307  *
   1308  * NOTE: We rely on a recently-updated mii_media_active here!
   1309  */
   1310 void
   1311 epic_set_mchash(sc)
   1312 	struct epic_softc *sc;
   1313 {
   1314 	struct ethercom *ec = &sc->sc_ethercom;
   1315 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1316 	struct ether_multi *enm;
   1317 	struct ether_multistep step;
   1318 	u_int32_t hash, mchash[4];
   1319 
   1320 	/*
   1321 	 * Set up the multicast address filter by passing all multicast
   1322 	 * addresses through a CRC generator, and then using the low-order
   1323 	 * 6 bits as an index into the 64 bit multicast hash table (only
   1324 	 * the lower 16 bits of each 32 bit multicast hash register are
   1325 	 * valid).  The high order bits select the register, while the
   1326 	 * rest of the bits select the bit within the register.
   1327 	 */
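	/*
	 * For example, a CRC whose low 6 bits are 0x2a selects register
	 * mchash[0x2a >> 4] == mchash[2] and bit (0x2a & 0xf) == bit 10.
	 */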
   1328 
   1329 	if (ifp->if_flags & IFF_PROMISC)
   1330 		goto allmulti;
   1331 
   1332 	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
   1333 		/* XXX hardware bug in 10Mbps mode. */
   1334 		goto allmulti;
   1335 	}
   1336 
   1337 	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
   1338 
   1339 	ETHER_FIRST_MULTI(step, ec, enm);
   1340 	while (enm != NULL) {
   1341 		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   1342 			/*
   1343 			 * We must listen to a range of multicast addresses.
   1344 			 * For now, just accept all multicasts, rather than
   1345 			 * trying to set only those filter bits needed to match
   1346 			 * the range.  (At this time, the only use of address
   1347 			 * ranges is for IP multicast routing, for which the
   1348 			 * range is big enough to require all bits set.)
   1349 			 */
   1350 			goto allmulti;
   1351 		}
   1352 
   1353 		hash = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3f;
   1354 
   1355 		/* Set the corresponding bit in the hash table. */
   1356 		mchash[hash >> 4] |= 1 << (hash & 0xf);
   1357 
   1358 		ETHER_NEXT_MULTI(step, enm);
   1359 	}
   1360 
   1361 	ifp->if_flags &= ~IFF_ALLMULTI;
   1362 	goto sethash;
   1363 
   1364  allmulti:
   1365 	ifp->if_flags |= IFF_ALLMULTI;
   1366 	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
   1367 
   1368  sethash:
   1369 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
   1370 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
   1371 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
   1372 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
   1373 }
   1374 
   1375 /*
   1376  * Wait for the MII to become ready.
   1377  */
   1378 int
   1379 epic_mii_wait(sc, rw)
   1380 	struct epic_softc *sc;
   1381 	u_int32_t rw;
   1382 {
   1383 	int i;
   1384 
   1385 	for (i = 0; i < 50; i++) {
   1386 		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
   1387 		    == 0)
   1388 			break;
   1389 		delay(2);
   1390 	}
   1391 	if (i == 50) {
   1392 		printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
   1393 		return (1);
   1394 	}
   1395 
   1396 	return (0);
   1397 }
   1398 
   1399 /*
   1400  * Read from the MII.
   1401  */
   1402 int
   1403 epic_mii_read(self, phy, reg)
   1404 	struct device *self;
   1405 	int phy, reg;
   1406 {
   1407 	struct epic_softc *sc = (struct epic_softc *)self;
   1408 
   1409 	if (epic_mii_wait(sc, MMCTL_WRITE))
   1410 		return (0);
   1411 
   1412 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
   1413 	    MMCTL_ARG(phy, reg, MMCTL_READ));
   1414 
   1415 	if (epic_mii_wait(sc, MMCTL_READ))
   1416 		return (0);
   1417 
   1418 	return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
   1419 	    MMDATA_MASK);
   1420 }
   1421 
   1422 /*
   1423  * Write to the MII.
   1424  */
   1425 void
   1426 epic_mii_write(self, phy, reg, val)
   1427 	struct device *self;
   1428 	int phy, reg, val;
   1429 {
   1430 	struct epic_softc *sc = (struct epic_softc *)self;
   1431 
   1432 	if (epic_mii_wait(sc, MMCTL_WRITE))
   1433 		return;
   1434 
   1435 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
   1436 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
   1437 	    MMCTL_ARG(phy, reg, MMCTL_WRITE));
   1438 }
   1439 
   1440 /*
   1441  * Callback from PHY when media changes.
   1442  */
   1443 void
   1444 epic_statchg(self)
   1445 	struct device *self;
   1446 {
   1447 	struct epic_softc *sc = (struct epic_softc *)self;
   1448 	u_int32_t txcon;
   1449 
   1450 	/*
   1451 	 * Update loopback bits in TXCON to reflect duplex mode.
   1452 	 */
   1453 	txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
   1454 	if (sc->sc_mii.mii_media_active & IFM_FDX)
   1455 		txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
   1456 	else
   1457 		txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
   1458 	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);
   1459 
   1460 	/*
   1461 	 * There is a multicast filter bug in 10Mbps mode.  Kick the
   1462 	 * multicast filter in case the speed changed.
   1463 	 */
   1464 	epic_set_mchash(sc);
   1465 }
   1466 
   1467 /*
   1468  * Callback from ifmedia to request current media status.
   1469  */
   1470 void
   1471 epic_mediastatus(ifp, ifmr)
   1472 	struct ifnet *ifp;
   1473 	struct ifmediareq *ifmr;
   1474 {
   1475 	struct epic_softc *sc = ifp->if_softc;
   1476 
   1477 	mii_pollstat(&sc->sc_mii);
   1478 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
   1479 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
   1480 }
   1481 
   1482 /*
   1483  * Callback from ifmedia to request new media setting.
   1484  */
   1485 int
   1486 epic_mediachange(ifp)
   1487 	struct ifnet *ifp;
   1488 {
   1489 	struct epic_softc *sc = ifp->if_softc;
   1490 
   1491 	if (ifp->if_flags & IFF_UP)
   1492 		mii_mediachg(&sc->sc_mii);
   1493 	return (0);
   1494 }
   1495