      1 /* $Id: if_ae.c,v 1.2 2006/05/05 18:04:41 thorpej Exp $ */
      2 /*-
      3  * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
      4  * Copyright (c) 2006 Garrett D'Amore.
      5  * All rights reserved.
      6  *
      7  * This code was written by Garrett D'Amore for the Champaign-Urbana
      8  * Community Wireless Network Project.
      9  *
     10  * Redistribution and use in source and binary forms, with or
     11  * without modification, are permitted provided that the following
     12  * conditions are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above
     16  *    copyright notice, this list of conditions and the following
     17  *    disclaimer in the documentation and/or other materials provided
     18  *    with the distribution.
     19  * 3. All advertising materials mentioning features or use of this
     20  *    software must display the following acknowledgements:
     21  *      This product includes software developed by the Urbana-Champaign
     22  *      Independent Media Center.
     23  *	This product includes software developed by Garrett D'Amore.
     24  * 4. Urbana-Champaign Independent Media Center's name and Garrett
     25  *    D'Amore's name may not be used to endorse or promote products
     26  *    derived from this software without specific prior written permission.
     27  *
     28  * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
     29  * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
     30  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     31  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
     33  * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
     34  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     35  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     36  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
     37  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     38  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     39  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
     40  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     41  */
     42 /*-
     43  * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc.
     44  * All rights reserved.
     45  *
     46  * This code is derived from software contributed to The NetBSD Foundation
     47  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
     48  * NASA Ames Research Center; and by Charles M. Hannum.
     49  *
     50  * Redistribution and use in source and binary forms, with or without
     51  * modification, are permitted provided that the following conditions
     52  * are met:
     53  * 1. Redistributions of source code must retain the above copyright
     54  *    notice, this list of conditions and the following disclaimer.
     55  * 2. Redistributions in binary form must reproduce the above copyright
     56  *    notice, this list of conditions and the following disclaimer in the
     57  *    documentation and/or other materials provided with the distribution.
     58  * 3. All advertising materials mentioning features or use of this software
     59  *    must display the following acknowledgement:
     60  *	This product includes software developed by the NetBSD
     61  *	Foundation, Inc. and its contributors.
     62  * 4. Neither the name of The NetBSD Foundation nor the names of its
     63  *    contributors may be used to endorse or promote products derived
     64  *    from this software without specific prior written permission.
     65  *
     66  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     67  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     68  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     69  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     70  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     71  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     72  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     73  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     74  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     75  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     76  * POSSIBILITY OF SUCH DAMAGE.
     77  */
     78 
     79 /*
     80  * Device driver for the onboard ethernet MAC found on the AR5312
     81  * chip's AHB bus.
     82  *
      83  * This device is very similar to the tulip in most regards, and
     84  * the code is directly derived from NetBSD's tulip.c.  However, it
     85  * is different enough that it did not seem to be a good idea to
     86  * add further complexity to the tulip driver, so we have our own.
     87  *
     88  * Also tulip has a lot of complexity in it for various parts/options
     89  * that we don't need, and on these little boxes with only ~8MB RAM, we
     90  * don't want any extra bloat.
     91  */
     92 
     93 /*
     94  * TODO:
     95  *
     96  * 1) Find out about BUS_MODE_ALIGN16B.  This chip can apparently align
     97  *    inbound packets on a half-word boundary, which would make life easier
     98  *    for TCP/IP.  (Aligning IP headers on a word.)
     99  *
     100  * 2) There is stuff in the original tulip driver to shut down the device
     101  *    when reacting to a change in link status.  Is that needed?
    102  *
    103  * 3) Test with variety of 10/100 HDX/FDX scenarios.
    104  *
    105  */
    106 
    107 #include <sys/cdefs.h>
    108 __KERNEL_RCSID(0, "$NetBSD: if_ae.c,v 1.2 2006/05/05 18:04:41 thorpej Exp $");
    109 
    110 #include "bpfilter.h"
    111 
    112 #include <sys/param.h>
    113 #include <sys/systm.h>
    114 #include <sys/callout.h>
    115 #include <sys/mbuf.h>
    116 #include <sys/malloc.h>
    117 #include <sys/kernel.h>
    118 #include <sys/socket.h>
    119 #include <sys/ioctl.h>
    120 #include <sys/errno.h>
    121 #include <sys/device.h>
    122 
    123 #include <machine/endian.h>
    124 
    125 #include <uvm/uvm_extern.h>
    126 
    127 #include <net/if.h>
    128 #include <net/if_dl.h>
    129 #include <net/if_media.h>
    130 #include <net/if_ether.h>
    131 
    132 #if NBPFILTER > 0
    133 #include <net/bpf.h>
    134 #endif
    135 
    136 #include <machine/bus.h>
    137 #include <machine/intr.h>
    138 
    139 #include <dev/mii/mii.h>
    140 #include <dev/mii/miivar.h>
    141 #include <dev/mii/mii_bitbang.h>
    142 
    143 #include <mips/atheros/include/arbusvar.h>
    144 #include <mips/atheros/dev/aereg.h>
    145 #include <mips/atheros/dev/aevar.h>
    146 
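         /*
          * Transmit threshold table: sc_txthresh indexes it, and on a
          * transmit underrun ae_intr advances to the next (larger) entry,
          * ending with store-and-forward mode.
          */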
    147 static const struct {
    148 	u_int32_t txth_opmode;		/* OPMODE bits */
    149 	const char *txth_name;		/* name of mode */
    150 } ae_txthresh[] = {
    151 	{ OPMODE_TR_32,		"32 words" },
    152 	{ OPMODE_TR_64,		"64 words" },
    153 	{ OPMODE_TR_128,	"128 words" },
    154 	{ OPMODE_TR_256,	"256 words" },
    155 	{ OPMODE_SF,		"store and forward mode" },
    156 	{ 0,			NULL },
    157 };
    158 
    159 static int 	ae_match(struct device *, struct cfdata *, void *);
    160 static void	ae_attach(struct device *, struct device *, void *);
    161 static int	ae_detach(struct device *, int);
    162 static int	ae_activate(struct device *, enum devact);
    163 
    164 static void	ae_reset(struct ae_softc *);
    165 static void	ae_idle(struct ae_softc *, u_int32_t);
    166 
    167 static int	ae_mediachange(struct ifnet *);
    168 static void	ae_mediastatus(struct ifnet *, struct ifmediareq *);
    169 
    170 static void	ae_start(struct ifnet *);
    171 static void	ae_watchdog(struct ifnet *);
    172 static int	ae_ioctl(struct ifnet *, u_long, caddr_t);
    173 static int	ae_init(struct ifnet *);
    174 static void	ae_stop(struct ifnet *, int);
    175 
    176 static void	ae_shutdown(void *);
    177 
    178 static void	ae_rxdrain(struct ae_softc *);
    179 static int	ae_add_rxbuf(struct ae_softc *, int);
    180 
    181 static int	ae_enable(struct ae_softc *);
    182 static void	ae_disable(struct ae_softc *);
    183 static void	ae_power(int, void *);
    184 
    185 static void	ae_filter_setup(struct ae_softc *);
    186 
    187 static int	ae_intr(void *);
    188 static void	ae_rxintr(struct ae_softc *);
    189 static void	ae_txintr(struct ae_softc *);
    190 
    191 static void	ae_mii_tick(void *);
    192 static void	ae_mii_statchg(struct device *);
    193 
    194 static int	ae_mii_readreg(struct device *, int, int);
    195 static void	ae_mii_writereg(struct device *, int, int, int);
    196 
    197 #ifdef AE_DEBUG
    198 #define	DPRINTF(sc, x)	if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
    199 				printf x
    200 #else
    201 #define	DPRINTF(sc, x)	/* nothing */
    202 #endif
    203 
    204 #ifdef AE_STATS
    205 static void	ae_print_stats(struct ae_softc *);
    206 #endif
    207 
    208 CFATTACH_DECL(ae, sizeof(struct ae_softc),
    209     ae_match, ae_attach, ae_detach, ae_activate);
    210 
    211 /*
    212  * ae_match:
    213  *
    214  *	Check for a device match.
    215  */
    216 int
    217 ae_match(struct device *parent, struct cfdata *cf, void *aux)
    218 {
    219 	struct arbus_attach_args *aa = aux;
    220 
    221 	if (strcmp(aa->aa_name, cf->cf_name) == 0)
    222 		return 1;
    223 
    224 	return 0;
    225 
    226 }
    227 
    228 /*
    229  * ae_attach:
    230  *
    231  *	Attach an ae interface to the system.
    232  */
    233 void
    234 ae_attach(struct device *parent, struct device *self, void *aux)
    235 {
    236 	const uint8_t *enaddr;
    237 	prop_data_t ea;
    238 	struct ae_softc *sc = (void *)self;
    239 	struct arbus_attach_args *aa = aux;
    240 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    241 	int i, error;
    242 
    243 	callout_init(&sc->sc_tick_callout);
    244 
    245 	printf(": Atheros AR531X 10/100 Ethernet\n");
    246 
    247 	/*
    248 	 * Try to get MAC address.
    249 	 */
    250 	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
    251 	if (ea == NULL) {
    252 		printf("%s: unable to get mac-addr property\n",
    253 		    sc->sc_dev.dv_xname);
    254 		return;
    255 	}
    256 	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
    257 	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
    258 	enaddr = prop_data_data_nocopy(ea);
    259 
    260 	/* Announce ourselves. */
    261 	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
    262 	    ether_sprintf(enaddr));
    263 
    264 	sc->sc_irq = aa->aa_irq;
    265 	sc->sc_st = aa->aa_bst;
    266 	sc->sc_dmat = aa->aa_dmat;
    267 
    268 	SIMPLEQ_INIT(&sc->sc_txfreeq);
    269 	SIMPLEQ_INIT(&sc->sc_txdirtyq);
    270 
    271 	/*
    272 	 * Map registers.
    273 	 */
    274 	sc->sc_size = aa->aa_size;
    275 	if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
    276 	    &sc->sc_sh)) != 0) {
    277 		printf("%s: unable to map registers, error = %d\n",
    278 		    sc->sc_dev.dv_xname, error);
    279 		goto fail_0;
    280 	}
    281 
    282 	/*
    283 	 * Allocate the control data structures, and create and load the
    284 	 * DMA map for it.
    285 	 */
    286 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
    287 	    sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
    288 	    1, &sc->sc_cdnseg, 0)) != 0) {
    289 		printf("%s: unable to allocate control data, error = %d\n",
    290 		    sc->sc_dev.dv_xname, error);
    291 		goto fail_1;
    292 	}
    293 
    294 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
    295 	    sizeof(struct ae_control_data), (caddr_t *)&sc->sc_control_data,
    296 	    BUS_DMA_COHERENT)) != 0) {
    297 		printf("%s: unable to map control data, error = %d\n",
    298 		    sc->sc_dev.dv_xname, error);
    299 		goto fail_2;
    300 	}
    301 
    302 	if ((error = bus_dmamap_create(sc->sc_dmat,
    303 	    sizeof(struct ae_control_data), 1,
    304 	    sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
    305 		printf("%s: unable to create control data DMA map, "
    306 		    "error = %d\n", sc->sc_dev.dv_xname, error);
    307 		goto fail_3;
    308 	}
    309 
    310 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
    311 	    sc->sc_control_data, sizeof(struct ae_control_data), NULL,
    312 	    0)) != 0) {
    313 		printf("%s: unable to load control data DMA map, error = %d\n",
    314 		    sc->sc_dev.dv_xname, error);
    315 		goto fail_4;
    316 	}
    317 
    318 	/*
    319 	 * Create the transmit buffer DMA maps.
    320 	 */
    321 	for (i = 0; i < AE_TXQUEUELEN; i++) {
    322 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
    323 		    AE_NTXSEGS, MCLBYTES, 0, 0,
    324 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
    325 			printf("%s: unable to create tx DMA map %d, "
    326 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
    327 			goto fail_5;
    328 		}
    329 	}
    330 
    331 	/*
    332 	 * Create the receive buffer DMA maps.
    333 	 */
    334 	for (i = 0; i < AE_NRXDESC; i++) {
    335 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
    336 		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
    337 			printf("%s: unable to create rx DMA map %d, "
    338 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
    339 			goto fail_6;
    340 		}
    341 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
    342 	}
    343 
    344 	/*
    345 	 * Reset the chip to a known state.
    346 	 */
    347 	ae_reset(sc);
    348 
    349 	/*
    350 	 * From this point forward, the attachment cannot fail.  A failure
    351 	 * before this point releases all resources that may have been
    352 	 * allocated.
    353 	 */
    354 	sc->sc_flags |= AE_ATTACHED;
    355 
    356 	/*
    357 	 * Initialize our media structures.  This may probe the MII, if
    358 	 * present.
    359 	 */
    360 	sc->sc_mii.mii_ifp = ifp;
    361 	sc->sc_mii.mii_readreg = ae_mii_readreg;
    362 	sc->sc_mii.mii_writereg = ae_mii_writereg;
    363 	sc->sc_mii.mii_statchg = ae_mii_statchg;
    364 	ifmedia_init(&sc->sc_mii.mii_media, 0, ae_mediachange,
    365 	    ae_mediastatus);
    366 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
    367 	    MII_OFFSET_ANY, 0);
    368 
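                 /*
                  * If no PHY was found, fall back to a fixed "none" medium;
                  * otherwise default to autonegotiation.
                  */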
    369 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
    370 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
    371 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
    372 	} else
    373 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
    374 
    375 	sc->sc_tick = ae_mii_tick;
    376 
    377 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
    378 	ifp->if_softc = sc;
    379 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    380 	sc->sc_if_flags = ifp->if_flags;
    381 	ifp->if_ioctl = ae_ioctl;
    382 	ifp->if_start = ae_start;
    383 	ifp->if_watchdog = ae_watchdog;
    384 	ifp->if_init = ae_init;
    385 	ifp->if_stop = ae_stop;
    386 	IFQ_SET_READY(&ifp->if_snd);
    387 
    388 	/*
    389 	 * We can support 802.1Q VLAN-sized frames.
    390 	 */
    391 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
    392 
    393 	/*
    394 	 * Attach the interface.
    395 	 */
    396 	if_attach(ifp);
    397 	ether_ifattach(ifp, enaddr);
    398 
    399 #if NRND > 0
    400 	rnd_attach_source(&sc->sc_rnd_source, sc->sc_dev.dv_xname,
    401 	    RND_TYPE_NET, 0);
    402 #endif
    403 
    404 	/*
    405 	 * Make sure the interface is shutdown during reboot.
    406 	 */
    407 	sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc);
    408 	if (sc->sc_sdhook == NULL)
    409 		printf("%s: WARNING: unable to establish shutdown hook\n",
    410 		    sc->sc_dev.dv_xname);
    411 
    412 	/*
    413 	 * Add a suspend hook to make sure we come back up after a
    414 	 * resume.
    415 	 */
    416 	sc->sc_powerhook = powerhook_establish(ae_power, sc);
    417 	if (sc->sc_powerhook == NULL)
    418 		printf("%s: WARNING: unable to establish power hook\n",
    419 		    sc->sc_dev.dv_xname);
    420 	return;
    421 
    422 	/*
    423 	 * Free any resources we've allocated during the failed attach
    424 	 * attempt.  Do this in reverse order and fall through.
    425 	 */
    426  fail_6:
    427 	for (i = 0; i < AE_NRXDESC; i++) {
    428 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
    429 			bus_dmamap_destroy(sc->sc_dmat,
    430 			    sc->sc_rxsoft[i].rxs_dmamap);
    431 	}
    432  fail_5:
    433 	for (i = 0; i < AE_TXQUEUELEN; i++) {
    434 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
    435 			bus_dmamap_destroy(sc->sc_dmat,
    436 			    sc->sc_txsoft[i].txs_dmamap);
    437 	}
    438 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
    439  fail_4:
    440 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
    441  fail_3:
    442 	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
    443 	    sizeof(struct ae_control_data));
    444  fail_2:
    445 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
    446  fail_1:
    447 	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
    448  fail_0:
    449 	return;
    450 }
    451 
    452 /*
    453  * ae_activate:
    454  *
    455  *	Handle device activation/deactivation requests.
    456  */
    457 int
    458 ae_activate(struct device *self, enum devact act)
    459 {
    460 	struct ae_softc *sc = (void *) self;
    461 	int s, error = 0;
    462 
    463 	s = splnet();
    464 	switch (act) {
    465 	case DVACT_ACTIVATE:
    466 		error = EOPNOTSUPP;
    467 		break;
    468 
    469 	case DVACT_DEACTIVATE:
    470 		mii_activate(&sc->sc_mii, act, MII_PHY_ANY, MII_OFFSET_ANY);
    471 		if_deactivate(&sc->sc_ethercom.ec_if);
    472 		break;
    473 	}
    474 	splx(s);
    475 
    476 	return (error);
    477 }
    478 
    479 /*
    480  * ae_detach:
    481  *
    482  *	Detach a device interface.
    483  */
    484 int
    485 ae_detach(struct device *self, int flags)
    486 {
    487 	struct ae_softc *sc = (void *)self;
    488 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    489 	struct ae_rxsoft *rxs;
    490 	struct ae_txsoft *txs;
    491 	int i;
    492 
    493 	/*
    494 	 * Succeed now if there isn't any work to do.
    495 	 */
    496 	if ((sc->sc_flags & AE_ATTACHED) == 0)
    497 		return (0);
    498 
    499 	/* Unhook our tick handler. */
    500 	if (sc->sc_tick)
    501 		callout_stop(&sc->sc_tick_callout);
    502 
    503 	/* Detach all PHYs */
    504 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
    505 
    506 	/* Delete all remaining media. */
    507 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
    508 
    509 #if NRND > 0
    510 	rnd_detach_source(&sc->sc_rnd_source);
    511 #endif
    512 	ether_ifdetach(ifp);
    513 	if_detach(ifp);
    514 
    515 	for (i = 0; i < AE_NRXDESC; i++) {
    516 		rxs = &sc->sc_rxsoft[i];
    517 		if (rxs->rxs_mbuf != NULL) {
    518 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
    519 			m_freem(rxs->rxs_mbuf);
    520 			rxs->rxs_mbuf = NULL;
    521 		}
    522 		bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap);
    523 	}
    524 	for (i = 0; i < AE_TXQUEUELEN; i++) {
    525 		txs = &sc->sc_txsoft[i];
    526 		if (txs->txs_mbuf != NULL) {
    527 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
    528 			m_freem(txs->txs_mbuf);
    529 			txs->txs_mbuf = NULL;
    530 		}
    531 		bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap);
    532 	}
    533 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
    534 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
    535 	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
    536 	    sizeof(struct ae_control_data));
    537 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
    538 
    539 	shutdownhook_disestablish(sc->sc_sdhook);
    540 	powerhook_disestablish(sc->sc_powerhook);
    541 
    542 	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
    543 
    544 
    545 	return (0);
    546 }
    547 
    548 /*
    549  * ae_shutdown:
    550  *
    551  *	Make sure the interface is stopped at reboot time.
    552  */
    553 static void
    554 ae_shutdown(void *arg)
    555 {
    556 	struct ae_softc *sc = arg;
    557 
    558 	ae_stop(&sc->sc_ethercom.ec_if, 1);
    559 }
    560 
    561 /*
    562  * ae_start:		[ifnet interface function]
    563  *
    564  *	Start packet transmission on the interface.
    565  */
    566 static void
    567 ae_start(struct ifnet *ifp)
    568 {
    569 	struct ae_softc *sc = ifp->if_softc;
    570 	struct mbuf *m0, *m;
    571 	struct ae_txsoft *txs, *last_txs = NULL;
    572 	bus_dmamap_t dmamap;
    573 	int error, firsttx, nexttx, lasttx = 1, ofree, seg;
    574 
    575 	DPRINTF(sc, ("%s: ae_start: sc_flags 0x%08x, if_flags 0x%08x\n",
    576 	    sc->sc_dev.dv_xname, sc->sc_flags, ifp->if_flags));
    577 
    578 
    579 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
    580 		return;
    581 
    582 	/*
    583 	 * Remember the previous number of free descriptors and
    584 	 * the first descriptor we'll use.
    585 	 */
    586 	ofree = sc->sc_txfree;
    587 	firsttx = sc->sc_txnext;
    588 
    589 	DPRINTF(sc, ("%s: ae_start: txfree %d, txnext %d\n",
    590 	    sc->sc_dev.dv_xname, ofree, firsttx));
    591 
    592 	/*
    593 	 * Loop through the send queue, setting up transmit descriptors
    594 	 * until we drain the queue, or use up all available transmit
    595 	 * descriptors.
    596 	 */
    597 	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
    598 	       sc->sc_txfree != 0) {
    599 		/*
    600 		 * Grab a packet off the queue.
    601 		 */
    602 		IFQ_POLL(&ifp->if_snd, m0);
    603 		if (m0 == NULL)
    604 			break;
    605 		m = NULL;
    606 
    607 		dmamap = txs->txs_dmamap;
    608 
    609 		/*
    610 		 * Load the DMA map.  If this fails, the packet either
     611 		 * didn't fit in the allotted number of segments, or we were
    612 		 * short on resources.  In this case, we'll copy and try
    613 		 * again.
    614 		 */
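                         /*
                          * The check below also forces a copy when the mbuf data
                          * is not 4-byte aligned, which this DMA engine appears
                          * to require for transmit buffers.
                          */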
    615 		if (((mtod(m0, uintptr_t) & 3) != 0) ||
    616 		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
    617 		      BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
    618 			MGETHDR(m, M_DONTWAIT, MT_DATA);
    619 			if (m == NULL) {
    620 				printf("%s: unable to allocate Tx mbuf\n",
    621 				    sc->sc_dev.dv_xname);
    622 				break;
    623 			}
    624 			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
    625 			if (m0->m_pkthdr.len > MHLEN) {
    626 				MCLGET(m, M_DONTWAIT);
    627 				if ((m->m_flags & M_EXT) == 0) {
    628 					printf("%s: unable to allocate Tx "
    629 					    "cluster\n", sc->sc_dev.dv_xname);
    630 					m_freem(m);
    631 					break;
    632 				}
    633 			}
    634 			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
    635 			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
    636 			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
    637 			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
    638 			if (error) {
    639 				printf("%s: unable to load Tx buffer, "
    640 				    "error = %d\n", sc->sc_dev.dv_xname,
    641 				    error);
    642 				break;
    643 			}
    644 		}
    645 
    646 		/*
    647 		 * Ensure we have enough descriptors free to describe
    648 		 * the packet.
    649 		 */
    650 		if (dmamap->dm_nsegs > sc->sc_txfree) {
    651 			/*
    652 			 * Not enough free descriptors to transmit this
    653 			 * packet.  We haven't committed to anything yet,
    654 			 * so just unload the DMA map, put the packet
    655 			 * back on the queue, and punt.  Notify the upper
    656 			 * layer that there are no more slots left.
    657 			 *
    658 			 * XXX We could allocate an mbuf and copy, but
     659 			 * XXX is it worth it?
    660 			 */
    661 			ifp->if_flags |= IFF_OACTIVE;
    662 			bus_dmamap_unload(sc->sc_dmat, dmamap);
    663 			if (m != NULL)
    664 				m_freem(m);
    665 			break;
    666 		}
    667 
    668 		IFQ_DEQUEUE(&ifp->if_snd, m0);
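                         /*
                          * If we copied the packet into a fresh mbuf above, free
                          * the original and transmit the copy instead.
                          */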
    669 		if (m != NULL) {
    670 			m_freem(m0);
    671 			m0 = m;
    672 		}
    673 
    674 		/*
    675 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
    676 		 */
    677 
    678 		/* Sync the DMA map. */
    679 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
    680 		    BUS_DMASYNC_PREWRITE);
    681 
    682 		/*
    683 		 * Initialize the transmit descriptors.
    684 		 */
    685 		for (nexttx = sc->sc_txnext, seg = 0;
    686 		     seg < dmamap->dm_nsegs;
    687 		     seg++, nexttx = AE_NEXTTX(nexttx)) {
    688 			/*
    689 			 * If this is the first descriptor we're
    690 			 * enqueueing, don't set the OWN bit just
    691 			 * yet.  That could cause a race condition.
    692 			 * We'll do it below.
    693 			 */
    694 			sc->sc_txdescs[nexttx].ad_status =
    695 			    (nexttx == firsttx) ? 0 : ADSTAT_OWN;
    696 			sc->sc_txdescs[nexttx].ad_bufaddr1 =
    697 			    dmamap->dm_segs[seg].ds_addr;
    698 			sc->sc_txdescs[nexttx].ad_ctl =
    699 			    (dmamap->dm_segs[seg].ds_len <<
    700 				ADCTL_SIZE1_SHIFT) |
    701 				(nexttx == (AE_NTXDESC - 1) ?
    702 				    ADCTL_ER : 0);
    703 			lasttx = nexttx;
    704 		}
    705 
    706 		KASSERT(lasttx != -1);
    707 
    708 		/* Set `first segment' and `last segment' appropriately. */
    709 		sc->sc_txdescs[sc->sc_txnext].ad_ctl |= ADCTL_Tx_FS;
    710 		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_LS;
    711 
    712 #ifdef AE_DEBUG
    713 		if (ifp->if_flags & IFF_DEBUG) {
    714 			printf("     txsoft %p transmit chain:\n", txs);
    715 			for (seg = sc->sc_txnext;; seg = AE_NEXTTX(seg)) {
    716 				printf("     descriptor %d:\n", seg);
    717 				printf("       ad_status:   0x%08x\n",
    718 				    sc->sc_txdescs[seg].ad_status);
    719 				printf("       ad_ctl:      0x%08x\n",
    720 				    sc->sc_txdescs[seg].ad_ctl);
    721 				printf("       ad_bufaddr1: 0x%08x\n",
    722 				    sc->sc_txdescs[seg].ad_bufaddr1);
    723 				printf("       ad_bufaddr2: 0x%08x\n",
    724 				    sc->sc_txdescs[seg].ad_bufaddr2);
    725 				if (seg == lasttx)
    726 					break;
    727 			}
    728 		}
    729 #endif
    730 
    731 		/* Sync the descriptors we're using. */
    732 		AE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
    733 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
    734 
    735 		/*
    736 		 * Store a pointer to the packet so we can free it later,
    737 		 * and remember what txdirty will be once the packet is
    738 		 * done.
    739 		 */
    740 		txs->txs_mbuf = m0;
    741 		txs->txs_firstdesc = sc->sc_txnext;
    742 		txs->txs_lastdesc = lasttx;
    743 		txs->txs_ndescs = dmamap->dm_nsegs;
    744 
    745 		/* Advance the tx pointer. */
    746 		sc->sc_txfree -= dmamap->dm_nsegs;
    747 		sc->sc_txnext = nexttx;
    748 
    749 		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
    750 		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
    751 
    752 		last_txs = txs;
    753 
    754 #if NBPFILTER > 0
    755 		/*
    756 		 * Pass the packet to any BPF listeners.
    757 		 */
    758 		if (ifp->if_bpf)
    759 			bpf_mtap(ifp->if_bpf, m0);
    760 #endif /* NBPFILTER > 0 */
    761 	}
    762 
    763 	if (txs == NULL || sc->sc_txfree == 0) {
    764 		/* No more slots left; notify upper layer. */
    765 		ifp->if_flags |= IFF_OACTIVE;
    766 	}
    767 
    768 	if (sc->sc_txfree != ofree) {
    769 		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
    770 		    sc->sc_dev.dv_xname, lasttx, firsttx));
    771 		/*
    772 		 * Cause a transmit interrupt to happen on the
    773 		 * last packet we enqueued.
    774 		 */
    775 		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_IC;
    776 		AE_CDTXSYNC(sc, lasttx, 1,
    777 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
    778 
    779 		/*
    780 		 * The entire packet chain is set up.  Give the
    781 		 * first descriptor to the chip now.
    782 		 */
    783 		sc->sc_txdescs[firsttx].ad_status |= ADSTAT_OWN;
    784 		AE_CDTXSYNC(sc, firsttx, 1,
    785 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
    786 
    787 		/* Wake up the transmitter. */
    788 		/* XXX USE AUTOPOLLING? */
    789 		AE_WRITE(sc, CSR_TXPOLL, TXPOLL_TPD);
    790 		AE_BARRIER(sc);
    791 
    792 		/* Set a watchdog timer in case the chip flakes out. */
    793 		ifp->if_timer = 5;
    794 	}
    795 }
    796 
    797 /*
    798  * ae_watchdog:	[ifnet interface function]
    799  *
    800  *	Watchdog timer handler.
    801  */
    802 static void
    803 ae_watchdog(struct ifnet *ifp)
    804 {
    805 	struct ae_softc *sc = ifp->if_softc;
    806 	int doing_transmit;
    807 
    808 	doing_transmit = (! SIMPLEQ_EMPTY(&sc->sc_txdirtyq));
    809 
    810 	if (doing_transmit) {
    811 		printf("%s: transmit timeout\n", sc->sc_dev.dv_xname);
    812 		ifp->if_oerrors++;
    813 	}
    814 	else
    815 		printf("%s: spurious watchdog timeout\n", sc->sc_dev.dv_xname);
    816 
    817 	(void) ae_init(ifp);
    818 
    819 	/* Try to get more packets going. */
    820 	ae_start(ifp);
    821 }
    822 
    823 /*
    824  * ae_ioctl:		[ifnet interface function]
    825  *
    826  *	Handle control requests from the operator.
    827  */
    828 static int
    829 ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
    830 {
    831 	struct ae_softc *sc = ifp->if_softc;
    832 	struct ifreq *ifr = (struct ifreq *)data;
    833 	int s, error;
    834 
    835 	s = splnet();
    836 
    837 	switch (cmd) {
    838 	case SIOCSIFMEDIA:
    839 	case SIOCGIFMEDIA:
    840 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
    841 		break;
    842 	case SIOCSIFFLAGS:
    843 		/* If the interface is up and running, only modify the receive
    844 		 * filter when setting promiscuous or debug mode.  Otherwise
    845 		 * fall through to ether_ioctl, which will reset the chip.
    846 		 */
    847 #define RESETIGN (IFF_CANTCHANGE|IFF_DEBUG)
    848 		if (((ifp->if_flags & (IFF_UP|IFF_RUNNING))
    849 		    == (IFF_UP|IFF_RUNNING))
    850 		    && ((ifp->if_flags & (~RESETIGN))
    851 		    == (sc->sc_if_flags & (~RESETIGN)))) {
    852 			/* Set up the receive filter. */
    853 			ae_filter_setup(sc);
    854 			error = 0;
    855 			break;
    856 #undef RESETIGN
    857 		}
    858 		/* FALLTHROUGH */
    859 	default:
    860 		error = ether_ioctl(ifp, cmd, data);
    861 		if (error == ENETRESET) {
    862 			if (ifp->if_flags & IFF_RUNNING) {
    863 				/*
    864 				 * Multicast list has changed.  Set the
    865 				 * hardware filter accordingly.
    866 				 */
    867 				ae_filter_setup(sc);
    868 			}
    869 			error = 0;
    870 		}
    871 		break;
    872 	}
    873 
    874 	/* Try to get more packets going. */
    875 	if (AE_IS_ENABLED(sc))
    876 		ae_start(ifp);
    877 
    878 	sc->sc_if_flags = ifp->if_flags;
    879 	splx(s);
    880 	return (error);
    881 }
    882 
    883 /*
    884  * ae_intr:
    885  *
    886  *	Interrupt service routine.
    887  */
    888 int
    889 ae_intr(void *arg)
    890 {
    891 	struct ae_softc *sc = arg;
    892 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    893 	u_int32_t status, rxstatus, txstatus;
    894 	int handled = 0, txthresh;
    895 
    896 	DPRINTF(sc, ("%s: ae_intr\n", sc->sc_dev.dv_xname));
    897 
    898 #ifdef DEBUG
    899 	if (AE_IS_ENABLED(sc) == 0)
    900 		panic("%s: ae_intr: not enabled", sc->sc_dev.dv_xname);
    901 #endif
    902 
    903 	/*
    904 	 * If the interface isn't running, the interrupt couldn't
    905 	 * possibly have come from us.
    906 	 */
    907 	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
    908 	    !device_is_active(&sc->sc_dev)) {
    909 		printf("spurious?!?\n");
    910 		return (0);
    911 	}
    912 
    913 	for (;;) {
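                         /*
                          * Read the pending interrupt causes; writing the set
                          * bits back acknowledges (clears) them.
                          */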
    914 		status = AE_READ(sc, CSR_STATUS);
    915 		if (status) {
    916 			AE_WRITE(sc, CSR_STATUS, status);
    917 			AE_BARRIER(sc);
    918 		}
    919 
    920 		if ((status & sc->sc_inten) == 0)
    921 			break;
    922 
    923 		handled = 1;
    924 
    925 		rxstatus = status & sc->sc_rxint_mask;
    926 		txstatus = status & sc->sc_txint_mask;
    927 
    928 		if (rxstatus) {
     929 			/* Grab any new packets. */
    930 			ae_rxintr(sc);
    931 
    932 			if (rxstatus & STATUS_RU) {
    933 				printf("%s: receive ring overrun\n",
    934 				    sc->sc_dev.dv_xname);
    935 				/* Get the receive process going again. */
    936 				AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
    937 				AE_BARRIER(sc);
    938 				break;
    939 			}
    940 		}
    941 
    942 		if (txstatus) {
    943 			/* Sweep up transmit descriptors. */
    944 			ae_txintr(sc);
    945 
    946 			if (txstatus & STATUS_TJT)
    947 				printf("%s: transmit jabber timeout\n",
    948 				    sc->sc_dev.dv_xname);
    949 
    950 			if (txstatus & STATUS_UNF) {
    951 				/*
    952 				 * Increase our transmit threshold if
    953 				 * another is available.
    954 				 */
    955 				txthresh = sc->sc_txthresh + 1;
    956 				if (ae_txthresh[txthresh].txth_name != NULL) {
    957 					uint32_t opmode;
    958 					/* Idle the transmit process. */
    959 					opmode = AE_READ(sc, CSR_OPMODE);
    960 					ae_idle(sc, OPMODE_ST);
    961 
    962 					sc->sc_txthresh = txthresh;
    963 					opmode &=
    964 					    ~(OPMODE_TR|OPMODE_SF);
    965 					opmode |=
    966 					    ae_txthresh[txthresh].txth_opmode;
    967 					printf("%s: transmit underrun; new "
    968 					    "threshold: %s\n",
    969 					    sc->sc_dev.dv_xname,
    970 					    ae_txthresh[txthresh].txth_name);
    971 
    972 					/*
    973 					 * Set the new threshold and restart
    974 					 * the transmit process.
    975 					 */
    976 					AE_WRITE(sc, CSR_OPMODE, opmode);
    977 					AE_BARRIER(sc);
    978 				}
    979 					/*
    980 					 * XXX Log every Nth underrun from
    981 					 * XXX now on?
    982 					 */
    983 			}
    984 		}
    985 
    986 		if (status & (STATUS_TPS|STATUS_RPS)) {
    987 			if (status & STATUS_TPS)
    988 				printf("%s: transmit process stopped\n",
    989 				    sc->sc_dev.dv_xname);
    990 			if (status & STATUS_RPS)
    991 				printf("%s: receive process stopped\n",
    992 				    sc->sc_dev.dv_xname);
    993 			(void) ae_init(ifp);
    994 			break;
    995 		}
    996 
    997 		if (status & STATUS_SE) {
    998 			const char *str;
    999 
   1000 			if (status & STATUS_TX_ABORT)
   1001 				str = "tx abort";
   1002 			else if (status & STATUS_RX_ABORT)
   1003 				str = "rx abort";
   1004 			else
   1005 				str = "unknown error";
   1006 
   1007 			printf("%s: fatal system error: %s\n",
   1008 			    sc->sc_dev.dv_xname, str);
   1009 			(void) ae_init(ifp);
   1010 			break;
   1011 		}
   1012 
   1013 		/*
   1014 		 * Not handled:
   1015 		 *
   1016 		 *	Transmit buffer unavailable -- normal
   1017 		 *	condition, nothing to do, really.
   1018 		 *
    1019 		 *	General purpose timer expired -- we don't
   1020 		 *	use the general purpose timer.
   1021 		 *
   1022 		 *	Early receive interrupt -- not available on
   1023 		 *	all chips, we just use RI.  We also only
   1024 		 *	use single-segment receive DMA, so this
   1025 		 *	is mostly useless.
   1026 		 */
   1027 	}
   1028 
   1029 	/* Try to get more packets going. */
   1030 	ae_start(ifp);
   1031 
   1032 #if NRND > 0
   1033 	if (handled)
   1034 		rnd_add_uint32(&sc->sc_rnd_source, status);
   1035 #endif
   1036 	return (handled);
   1037 }
   1038 
   1039 /*
   1040  * ae_rxintr:
   1041  *
   1042  *	Helper; handle receive interrupts.
   1043  */
   1044 static void
   1045 ae_rxintr(struct ae_softc *sc)
   1046 {
   1047 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1048 	struct ether_header *eh;
   1049 	struct ae_rxsoft *rxs;
   1050 	struct mbuf *m;
   1051 	u_int32_t rxstat;
   1052 	int i, len;
   1053 
   1054 	for (i = sc->sc_rxptr;; i = AE_NEXTRX(i)) {
   1055 		rxs = &sc->sc_rxsoft[i];
   1056 
   1057 		AE_CDRXSYNC(sc, i,
   1058 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   1059 
   1060 		rxstat = sc->sc_rxdescs[i].ad_status;
   1061 
   1062 		if (rxstat & ADSTAT_OWN) {
   1063 			/*
   1064 			 * We have processed all of the receive buffers.
   1065 			 */
   1066 			break;
   1067 		}
   1068 
   1069 		/*
   1070 		 * If any collisions were seen on the wire, count one.
   1071 		 */
   1072 		if (rxstat & ADSTAT_Rx_CS)
   1073 			ifp->if_collisions++;
   1074 
   1075 		/*
   1076 		 * If an error occurred, update stats, clear the status
   1077 		 * word, and leave the packet buffer in place.  It will
   1078 		 * simply be reused the next time the ring comes around.
   1079 	 	 * If 802.1Q VLAN MTU is enabled, ignore the Frame Too Long
   1080 		 * error.
   1081 		 */
   1082 		if (rxstat & ADSTAT_ES &&
   1083 		    ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) == 0 ||
   1084 		     (rxstat & (ADSTAT_Rx_DE | ADSTAT_Rx_RF |
   1085 				ADSTAT_Rx_DB | ADSTAT_Rx_CE)) != 0)) {
   1086 #define	PRINTERR(bit, str)						\
   1087 			if (rxstat & (bit))				\
   1088 				printf("%s: receive error: %s\n",	\
   1089 				    sc->sc_dev.dv_xname, str)
   1090 			ifp->if_ierrors++;
   1091 			PRINTERR(ADSTAT_Rx_DE, "descriptor error");
   1092 			PRINTERR(ADSTAT_Rx_RF, "runt frame");
   1093 			PRINTERR(ADSTAT_Rx_TL, "frame too long");
   1094 			PRINTERR(ADSTAT_Rx_RE, "MII error");
   1095 			PRINTERR(ADSTAT_Rx_DB, "dribbling bit");
   1096 			PRINTERR(ADSTAT_Rx_CE, "CRC error");
   1097 #undef PRINTERR
   1098 			AE_INIT_RXDESC(sc, i);
   1099 			continue;
   1100 		}
   1101 
   1102 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   1103 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1104 
   1105 		/*
   1106 		 * No errors; receive the packet.  Note the chip
   1107 		 * includes the CRC with every packet.
   1108 		 */
   1109 		len = ADSTAT_Rx_LENGTH(rxstat) - ETHER_CRC_LEN;
   1110 
   1111 		/*
   1112 		 * XXX: the Atheros part can align on half words.  what
   1113 		 * is the performance implication of this?  Probably
   1114 		 * minimal, and we should use it...
   1115 		 */
   1116 #ifdef __NO_STRICT_ALIGNMENT
   1117 		/*
   1118 		 * Allocate a new mbuf cluster.  If that fails, we are
   1119 		 * out of memory, and must drop the packet and recycle
   1120 		 * the buffer that's already attached to this descriptor.
   1121 		 */
   1122 		m = rxs->rxs_mbuf;
   1123 		if (ae_add_rxbuf(sc, i) != 0) {
   1124 			ifp->if_ierrors++;
   1125 			AE_INIT_RXDESC(sc, i);
   1126 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   1127 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   1128 			continue;
   1129 		}
   1130 #else
   1131 		/*
   1132 		 * The chip's receive buffers must be 4-byte aligned.
   1133 		 * But this means that the data after the Ethernet header
   1134 		 * is misaligned.  We must allocate a new buffer and
   1135 		 * copy the data, shifted forward 2 bytes.
   1136 		 */
   1137 		MGETHDR(m, M_DONTWAIT, MT_DATA);
   1138 		if (m == NULL) {
   1139  dropit:
   1140 			ifp->if_ierrors++;
   1141 			AE_INIT_RXDESC(sc, i);
   1142 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   1143 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   1144 			continue;
   1145 		}
   1146 		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
   1147 		if (len > (MHLEN - 2)) {
   1148 			MCLGET(m, M_DONTWAIT);
   1149 			if ((m->m_flags & M_EXT) == 0) {
   1150 				m_freem(m);
   1151 				goto dropit;
   1152 			}
   1153 		}
   1154 		m->m_data += 2;
   1155 
   1156 		/*
   1157 		 * Note that we use clusters for incoming frames, so the
   1158 		 * buffer is virtually contiguous.
   1159 		 */
   1160 		memcpy(mtod(m, caddr_t), mtod(rxs->rxs_mbuf, caddr_t), len);
   1161 
   1162 		/* Allow the receive descriptor to continue using its mbuf. */
   1163 		AE_INIT_RXDESC(sc, i);
   1164 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   1165 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   1166 #endif /* __NO_STRICT_ALIGNMENT */
   1167 
   1168 		ifp->if_ipackets++;
   1169 		eh = mtod(m, struct ether_header *);
   1170 		m->m_pkthdr.rcvif = ifp;
   1171 		m->m_pkthdr.len = m->m_len = len;
   1172 
   1173 #if NBPFILTER > 0
   1174 		/*
   1175 		 * Pass this up to any BPF listeners, but only
    1176 		 * pass it up the stack if it's for us.
   1177 		 */
   1178 		if (ifp->if_bpf)
   1179 			bpf_mtap(ifp->if_bpf, m);
    1180 #endif /* NBPFILTER > 0 */
   1181 
   1182 		/* Pass it on. */
   1183 		(*ifp->if_input)(ifp, m);
   1184 	}
   1185 
   1186 	/* Update the receive pointer. */
   1187 	sc->sc_rxptr = i;
   1188 }
   1189 
   1190 /*
   1191  * ae_txintr:
   1192  *
   1193  *	Helper; handle transmit interrupts.
   1194  */
   1195 static void
   1196 ae_txintr(struct ae_softc *sc)
   1197 {
   1198 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1199 	struct ae_txsoft *txs;
   1200 	u_int32_t txstat;
   1201 
   1202 	DPRINTF(sc, ("%s: ae_txintr: sc_flags 0x%08x\n",
   1203 	    sc->sc_dev.dv_xname, sc->sc_flags));
   1204 
   1205 	ifp->if_flags &= ~IFF_OACTIVE;
   1206 
   1207 	/*
   1208 	 * Go through our Tx list and free mbufs for those
   1209 	 * frames that have been transmitted.
   1210 	 */
   1211 	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
   1212 		AE_CDTXSYNC(sc, txs->txs_lastdesc,
   1213 		    txs->txs_ndescs,
   1214 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   1215 
   1216 #ifdef AE_DEBUG
   1217 		if (ifp->if_flags & IFF_DEBUG) {
   1218 			int i;
   1219 			printf("    txsoft %p transmit chain:\n", txs);
   1220 			for (i = txs->txs_firstdesc;; i = AE_NEXTTX(i)) {
   1221 				printf("     descriptor %d:\n", i);
   1222 				printf("       ad_status:   0x%08x\n",
   1223 				    sc->sc_txdescs[i].ad_status);
   1224 				printf("       ad_ctl:      0x%08x\n",
   1225 				    sc->sc_txdescs[i].ad_ctl);
   1226 				printf("       ad_bufaddr1: 0x%08x\n",
   1227 				    sc->sc_txdescs[i].ad_bufaddr1);
   1228 				printf("       ad_bufaddr2: 0x%08x\n",
   1229 				    sc->sc_txdescs[i].ad_bufaddr2);
   1230 				if (i == txs->txs_lastdesc)
   1231 					break;
   1232 			}
   1233 		}
   1234 #endif
   1235 
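                         /*
                          * If the chip still owns the last descriptor of this
                          * frame, it has not finished transmitting it yet;
                          * stop here.
                          */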
   1236 		txstat = sc->sc_txdescs[txs->txs_lastdesc].ad_status;
   1237 		if (txstat & ADSTAT_OWN)
   1238 			break;
   1239 
   1240 		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
   1241 
   1242 		sc->sc_txfree += txs->txs_ndescs;
   1243 
   1244 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   1245 		    0, txs->txs_dmamap->dm_mapsize,
   1246 		    BUS_DMASYNC_POSTWRITE);
   1247 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   1248 		m_freem(txs->txs_mbuf);
   1249 		txs->txs_mbuf = NULL;
   1250 
   1251 		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
   1252 
   1253 		/*
   1254 		 * Check for errors and collisions.
   1255 		 */
   1256 #ifdef AE_STATS
   1257 		if (txstat & ADSTAT_Tx_UF)
   1258 			sc->sc_stats.ts_tx_uf++;
   1259 		if (txstat & ADSTAT_Tx_TO)
   1260 			sc->sc_stats.ts_tx_to++;
   1261 		if (txstat & ADSTAT_Tx_EC)
   1262 			sc->sc_stats.ts_tx_ec++;
   1263 		if (txstat & ADSTAT_Tx_LC)
   1264 			sc->sc_stats.ts_tx_lc++;
   1265 #endif
   1266 
   1267 		if (txstat & (ADSTAT_Tx_UF|ADSTAT_Tx_TO))
   1268 			ifp->if_oerrors++;
   1269 
   1270 		if (txstat & ADSTAT_Tx_EC)
   1271 			ifp->if_collisions += 16;
   1272 		else
   1273 			ifp->if_collisions += ADSTAT_Tx_COLLISIONS(txstat);
   1274 		if (txstat & ADSTAT_Tx_LC)
   1275 			ifp->if_collisions++;
   1276 
   1277 		ifp->if_opackets++;
   1278 	}
   1279 
   1280 	/*
   1281 	 * If there are no more pending transmissions, cancel the watchdog
   1282 	 * timer.
   1283 	 */
   1284 	if (txs == NULL)
   1285 		ifp->if_timer = 0;
   1286 }
   1287 
   1288 #ifdef AE_STATS
   1289 void
   1290 ae_print_stats(struct ae_softc *sc)
   1291 {
   1292 
   1293 	printf("%s: tx_uf %lu, tx_to %lu, tx_ec %lu, tx_lc %lu\n",
   1294 	    sc->sc_dev.dv_xname,
   1295 	    sc->sc_stats.ts_tx_uf, sc->sc_stats.ts_tx_to,
   1296 	    sc->sc_stats.ts_tx_ec, sc->sc_stats.ts_tx_lc);
   1297 }
   1298 #endif
   1299 
   1300 /*
   1301  * ae_reset:
   1302  *
   1303  *	Perform a soft reset on the chip.
   1304  */
   1305 void
   1306 ae_reset(struct ae_softc *sc)
   1307 {
   1308 	int i;
   1309 
   1310 	AE_WRITE(sc, CSR_BUSMODE, BUSMODE_SWR);
   1311 	AE_BARRIER(sc);
   1312 
   1313 	/*
    1314 	 * The chip doesn't take itself out of reset automatically;
    1315 	 * we must do that ourselves, after waiting at least 2us.
   1316 	 */
   1317 	delay(10);
   1318 	AE_WRITE(sc, CSR_BUSMODE, 0);
   1319 	AE_BARRIER(sc);
   1320 
   1321 	for (i = 0; i < 1000; i++) {
   1322 		/*
   1323 		 * Wait a bit for the reset to complete before peeking
   1324 		 * at the chip again.
   1325 		 */
   1326 		delay(10);
   1327 		if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR) == 0)
   1328 			break;
   1329 	}
   1330 
   1331 	if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR))
   1332 		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
   1333 
   1334 	delay(1000);
   1335 }
   1336 
   1337 /*
   1338  * ae_init:		[ ifnet interface function ]
   1339  *
   1340  *	Initialize the interface.  Must be called at splnet().
   1341  */
   1342 static int
   1343 ae_init(struct ifnet *ifp)
   1344 {
   1345 	struct ae_softc *sc = ifp->if_softc;
   1346 	struct ae_txsoft *txs;
   1347 	struct ae_rxsoft *rxs;
   1348 	uint8_t *enaddr;
   1349 	int i, error = 0;
   1350 
   1351 	if ((error = ae_enable(sc)) != 0)
   1352 		goto out;
   1353 
   1354 	/*
   1355 	 * Cancel any pending I/O.
   1356 	 */
   1357 	ae_stop(ifp, 0);
   1358 
   1359 	/*
   1360 	 * Reset the chip to a known state.
   1361 	 */
   1362 	ae_reset(sc);
   1363 
   1364 	/*
   1365 	 * Initialize the BUSMODE register.
   1366 	 */
   1367 	AE_WRITE(sc, CSR_BUSMODE,
   1368 	    /* XXX: not sure if this is a good thing or not... */
   1369 	    //BUSMODE_ALIGN_16B |
   1370 	    BUSMODE_BAR | BUSMODE_BLE | BUSMODE_PBL_4LW);
   1371 	AE_BARRIER(sc);
   1372 
   1373 	/*
   1374 	 * Initialize the transmit descriptor ring.
   1375 	 */
   1376 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
   1377 	for (i = 0; i < AE_NTXDESC; i++) {
   1378 		sc->sc_txdescs[i].ad_ctl = 0;
   1379 		sc->sc_txdescs[i].ad_bufaddr2 =
   1380 		    AE_CDTXADDR(sc, AE_NEXTTX(i));
   1381 	}
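                 /* ADCTL_ER marks the end of the ring; the chip wraps back to descriptor 0. */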
   1382 	sc->sc_txdescs[AE_NTXDESC - 1].ad_ctl |= ADCTL_ER;
   1383 	AE_CDTXSYNC(sc, 0, AE_NTXDESC,
   1384 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1385 	sc->sc_txfree = AE_NTXDESC;
   1386 	sc->sc_txnext = 0;
   1387 
   1388 	/*
   1389 	 * Initialize the transmit job descriptors.
   1390 	 */
   1391 	SIMPLEQ_INIT(&sc->sc_txfreeq);
   1392 	SIMPLEQ_INIT(&sc->sc_txdirtyq);
   1393 	for (i = 0; i < AE_TXQUEUELEN; i++) {
   1394 		txs = &sc->sc_txsoft[i];
   1395 		txs->txs_mbuf = NULL;
   1396 		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
   1397 	}
   1398 
   1399 	/*
   1400 	 * Initialize the receive descriptor and receive job
   1401 	 * descriptor rings.
   1402 	 */
   1403 	for (i = 0; i < AE_NRXDESC; i++) {
   1404 		rxs = &sc->sc_rxsoft[i];
   1405 		if (rxs->rxs_mbuf == NULL) {
   1406 			if ((error = ae_add_rxbuf(sc, i)) != 0) {
   1407 				printf("%s: unable to allocate or map rx "
   1408 				    "buffer %d, error = %d\n",
   1409 				    sc->sc_dev.dv_xname, i, error);
   1410 				/*
   1411 				 * XXX Should attempt to run with fewer receive
   1412 				 * XXX buffers instead of just failing.
   1413 				 */
   1414 				ae_rxdrain(sc);
   1415 				goto out;
   1416 			}
   1417 		} else
   1418 			AE_INIT_RXDESC(sc, i);
   1419 	}
   1420 	sc->sc_rxptr = 0;
   1421 
   1422 	/*
   1423 	 * Initialize the interrupt mask and enable interrupts.
   1424 	 */
   1425 	/* normal interrupts */
   1426 	sc->sc_inten =  STATUS_TI | STATUS_TU | STATUS_RI | STATUS_NIS;
   1427 
   1428 	/* abnormal interrupts */
   1429 	sc->sc_inten |= STATUS_TPS | STATUS_TJT | STATUS_UNF |
   1430 	    STATUS_RU | STATUS_RPS | STATUS_SE | STATUS_AIS;
   1431 
   1432 	sc->sc_rxint_mask = STATUS_RI|STATUS_RU;
   1433 	sc->sc_txint_mask = STATUS_TI|STATUS_UNF|STATUS_TJT;
   1434 
   1435 	sc->sc_rxint_mask &= sc->sc_inten;
   1436 	sc->sc_txint_mask &= sc->sc_inten;
   1437 
   1438 	AE_WRITE(sc, CSR_INTEN, sc->sc_inten);
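                 /* Clear any stale status bits; as in ae_intr, writing them back clears them. */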
   1439 	AE_WRITE(sc, CSR_STATUS, 0xffffffff);
   1440 
   1441 	/*
   1442 	 * Give the transmit and receive rings to the chip.
   1443 	 */
   1444 	AE_WRITE(sc, CSR_TXLIST, AE_CDTXADDR(sc, sc->sc_txnext));
   1445 	AE_WRITE(sc, CSR_RXLIST, AE_CDRXADDR(sc, sc->sc_rxptr));
   1446 	AE_BARRIER(sc);
   1447 
   1448 	/*
   1449 	 * Set the station address.
   1450 	 */
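                 /*
                  * The station address is split across two registers: MACLO gets
                  * octets 0-3 and MACHI gets octets 4 and 5, in the bit positions
                  * used by the writes below.
                  */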
   1451 	enaddr = LLADDR(ifp->if_sadl);
   1452 	AE_WRITE(sc, CSR_MACHI, enaddr[5] << 16 | enaddr[4]);
   1453 	AE_WRITE(sc, CSR_MACLO, enaddr[3] << 24 | enaddr[2] << 16 |
   1454 		enaddr[1] << 8 | enaddr[0]);
   1455 	AE_BARRIER(sc);
   1456 
   1457 	/*
   1458 	 * Set the receive filter.  This will start the transmit and
   1459 	 * receive processes.
   1460 	 */
   1461 	ae_filter_setup(sc);
   1462 
   1463 	/*
   1464 	 * Set the current media.
   1465 	 */
   1466 	ae_mediachange(ifp);
   1467 
   1468 	/*
   1469 	 * Start the mac.
   1470 	 */
   1471 	AE_SET(sc, CSR_MACCTL, MACCTL_RE | MACCTL_TE);
   1472 	AE_BARRIER(sc);
   1473 
   1474 	/*
   1475 	 * Write out the opmode.
   1476 	 */
   1477 	AE_WRITE(sc, CSR_OPMODE, OPMODE_SR | OPMODE_ST |
   1478 	    ae_txthresh[sc->sc_txthresh].txth_opmode);
   1479 	/*
   1480 	 * Start the receive process.
   1481 	 */
   1482 	AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
   1483 	AE_BARRIER(sc);
   1484 
   1485 	if (sc->sc_tick != NULL) {
   1486 		/* Start the one second clock. */
   1487 		callout_reset(&sc->sc_tick_callout, hz >> 3, sc->sc_tick, sc);
   1488 	}
   1489 
   1490 	/*
   1491 	 * Note that the interface is now running.
   1492 	 */
   1493 	ifp->if_flags |= IFF_RUNNING;
   1494 	ifp->if_flags &= ~IFF_OACTIVE;
   1495 	sc->sc_if_flags = ifp->if_flags;
   1496 
   1497  out:
   1498 	if (error) {
   1499 		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1500 		ifp->if_timer = 0;
   1501 		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
   1502 	}
   1503 	return (error);
   1504 }
   1505 
   1506 /*
   1507  * ae_enable:
   1508  *
   1509  *	Enable the chip.
   1510  */
   1511 static int
   1512 ae_enable(struct ae_softc *sc)
   1513 {
   1514 
   1515 	if (AE_IS_ENABLED(sc) == 0) {
   1516 		sc->sc_ih = arbus_intr_establish(sc->sc_irq, ae_intr, sc);
   1517 		if (sc->sc_ih == NULL) {
   1518 			printf("%s: unable to establish interrupt\n",
   1519 			    sc->sc_dev.dv_xname);
   1520 			return (EIO);
   1521 		}
   1522 		sc->sc_flags |= AE_ENABLED;
   1523 	}
   1524 	return (0);
   1525 }
   1526 
   1527 /*
   1528  * ae_disable:
   1529  *
   1530  *	Disable the chip.
   1531  */
   1532 static void
   1533 ae_disable(struct ae_softc *sc)
   1534 {
   1535 
   1536 	if (AE_IS_ENABLED(sc)) {
   1537 		arbus_intr_disestablish(sc->sc_ih);
   1538 		sc->sc_flags &= ~AE_ENABLED;
   1539 	}
   1540 }
   1541 
   1542 /*
   1543  * ae_power:
   1544  *
   1545  *	Power management (suspend/resume) hook.
   1546  */
   1547 static void
   1548 ae_power(int why, void *arg)
   1549 {
   1550 	struct ae_softc *sc = arg;
   1551 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1552 	int s;
   1553 
   1554 	printf("power called: %d, %x\n", why, (uint32_t)arg);
   1555 	s = splnet();
   1556 	switch (why) {
   1557 	case PWR_STANDBY:
   1558 		/* do nothing! */
   1559 		break;
   1560 	case PWR_SUSPEND:
   1561 		ae_stop(ifp, 0);
   1562 		ae_disable(sc);
   1563 		break;
   1564 	case PWR_RESUME:
   1565 		if (ifp->if_flags & IFF_UP) {
   1566 			ae_enable(sc);
   1567 			ae_init(ifp);
   1568 		}
   1569 		break;
   1570 	case PWR_SOFTSUSPEND:
   1571 	case PWR_SOFTSTANDBY:
   1572 	case PWR_SOFTRESUME:
   1573 		break;
   1574 	}
   1575 	splx(s);
   1576 }
   1577 
   1578 /*
   1579  * ae_rxdrain:
   1580  *
   1581  *	Drain the receive queue.
   1582  */
   1583 static void
   1584 ae_rxdrain(struct ae_softc *sc)
   1585 {
   1586 	struct ae_rxsoft *rxs;
   1587 	int i;
   1588 
   1589 	for (i = 0; i < AE_NRXDESC; i++) {
   1590 		rxs = &sc->sc_rxsoft[i];
   1591 		if (rxs->rxs_mbuf != NULL) {
   1592 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   1593 			m_freem(rxs->rxs_mbuf);
   1594 			rxs->rxs_mbuf = NULL;
   1595 		}
   1596 	}
   1597 }
   1598 
   1599 /*
   1600  * ae_stop:		[ ifnet interface function ]
   1601  *
   1602  *	Stop transmission on the interface.
   1603  */
   1604 static void
   1605 ae_stop(struct ifnet *ifp, int disable)
   1606 {
   1607 	struct ae_softc *sc = ifp->if_softc;
   1608 	struct ae_txsoft *txs;
   1609 
   1610 	if (sc->sc_tick != NULL) {
   1611 		/* Stop the one second clock. */
   1612 		callout_stop(&sc->sc_tick_callout);
   1613 	}
   1614 
   1615 	/* Down the MII. */
   1616 	mii_down(&sc->sc_mii);
   1617 
   1618 	/* Disable interrupts. */
   1619 	AE_WRITE(sc, CSR_INTEN, 0);
   1620 
   1621 	/* Stop the transmit and receive processes. */
   1622 	AE_WRITE(sc, CSR_OPMODE, 0);
   1623 	AE_WRITE(sc, CSR_RXLIST, 0);
   1624 	AE_WRITE(sc, CSR_TXLIST, 0);
   1625 	AE_CLR(sc, CSR_MACCTL, MACCTL_TE | MACCTL_RE);
   1626 	AE_BARRIER(sc);
   1627 
   1628 	/*
   1629 	 * Release any queued transmit buffers.
   1630 	 */
   1631 	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
   1632 		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
   1633 		if (txs->txs_mbuf != NULL) {
   1634 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   1635 			m_freem(txs->txs_mbuf);
   1636 			txs->txs_mbuf = NULL;
   1637 		}
   1638 		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
   1639 	}
   1640 
   1641 	if (disable) {
   1642 		ae_rxdrain(sc);
   1643 		ae_disable(sc);
   1644 	}
   1645 
   1646 	/*
   1647 	 * Mark the interface down and cancel the watchdog timer.
   1648 	 */
   1649 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1650 	sc->sc_if_flags = ifp->if_flags;
   1651 	ifp->if_timer = 0;
   1652 
   1653 	/*
   1654 	 * Reset the chip (needed on some flavors to actually disable it).
   1655 	 */
   1656 	ae_reset(sc);
   1657 }
   1658 
   1659 /*
   1660  * ae_add_rxbuf:
   1661  *
   1662  *	Add a receive buffer to the indicated descriptor.
   1663  */
   1664 static int
   1665 ae_add_rxbuf(struct ae_softc *sc, int idx)
   1666 {
   1667 	struct ae_rxsoft *rxs = &sc->sc_rxsoft[idx];
   1668 	struct mbuf *m;
   1669 	int error;
   1670 
   1671 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   1672 	if (m == NULL)
   1673 		return (ENOBUFS);
   1674 
   1675 	MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
   1676 	MCLGET(m, M_DONTWAIT);
   1677 	if ((m->m_flags & M_EXT) == 0) {
   1678 		m_freem(m);
   1679 		return (ENOBUFS);
   1680 	}
   1681 
   1682 	if (rxs->rxs_mbuf != NULL)
   1683 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   1684 
   1685 	rxs->rxs_mbuf = m;
   1686 
   1687 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
   1688 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
   1689 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   1690 	if (error) {
   1691 		printf("%s: can't load rx DMA map %d, error = %d\n",
   1692 		    sc->sc_dev.dv_xname, idx, error);
   1693 		panic("ae_add_rxbuf");	/* XXX */
   1694 	}
   1695 
   1696 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   1697 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   1698 
   1699 	AE_INIT_RXDESC(sc, idx);
   1700 
   1701 	return (0);
   1702 }
   1703 
   1704 /*
   1705  * ae_filter_setup:
   1706  *
   1707  *	Set the chip's receive filter.
   1708  */
   1709 static void
   1710 ae_filter_setup(struct ae_softc *sc)
   1711 {
   1712 	struct ethercom *ec = &sc->sc_ethercom;
   1713 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1714 	struct ether_multi *enm;
   1715 	struct ether_multistep step;
   1716 	uint32_t hash, mchash[2];
   1717 	uint32_t macctl = 0;
   1718 
    1719 	/*
    1720 	 * If the chip is running, we need to reset the interface;
    1721 	 * we will then revisit here (with IFF_RUNNING clear).  The
    1722 	 * chip really does not like having its multicast filter
    1723 	 * programmed without a reset.
    1724 	 */
   1725 	if (ifp->if_flags & IFF_RUNNING) {
   1726 		(void) ae_init(ifp);
   1727 		return;
   1728 	}
   1729 
   1730 	DPRINTF(sc, ("%s: ae_filter_setup: sc_flags 0x%08x\n",
   1731 	    sc->sc_dev.dv_xname, sc->sc_flags));
   1732 
   1733 	macctl = AE_READ(sc, CSR_MACCTL);
   1734 	macctl &= ~(MACCTL_PR | MACCTL_PM);
   1735 	macctl |= MACCTL_HASH;
   1736 	macctl |= MACCTL_HBD;
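         	/*
         	 * XXX: MACCTL_PR is set unconditionally here, so the chip is
         	 * left in promiscuous mode even when IFF_PROMISC is not set;
         	 * this looks like a debugging leftover.
         	 */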
   1737 	macctl |= MACCTL_PR;
   1738 
   1739 	if (ifp->if_flags & IFF_PROMISC) {
   1740 		macctl |= MACCTL_PR;
   1741 		goto allmulti;
   1742 	}
   1743 
   1744 	mchash[0] = mchash[1] = 0;
   1745 
   1746 	ETHER_FIRST_MULTI(step, ec, enm);
   1747 	while (enm != NULL) {
   1748 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   1749 			/*
   1750 			 * We must listen to a range of multicast addresses.
   1751 			 * For now, just accept all multicasts, rather than
   1752 			 * trying to set only those filter bits needed to match
   1753 			 * the range.  (At this time, the only use of address
   1754 			 * ranges is for IP multicast routing, for which the
   1755 			 * range is big enough to require all bits set.)
   1756 			 */
   1757 			goto allmulti;
   1758 		}
   1759 
    1760 		/* XXX: verify whether the chip expects big- or little-endian hashes. */
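         		/*
         		 * The low 6 bits of the CRC select one of 64 hash-table
         		 * bits: bit 5 picks the 32-bit word, bits 0-4 the bit
         		 * within that word.
         		 */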
   1761 		hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3f;
   1762 		mchash[hash >> 5] |= 1 << (hash & 0x1f);
   1763 		ETHER_NEXT_MULTI(step, enm);
   1764 	}
   1765 	ifp->if_flags &= ~IFF_ALLMULTI;
   1766 	goto setit;
   1767 
   1768  allmulti:
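         	/*
         	 * Accept all multicast traffic: set every hash bit and
         	 * (presumably) the chip's pass-all-multicast bit, MACCTL_PM.
         	 */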
   1769 	ifp->if_flags |= IFF_ALLMULTI;
   1770 	mchash[0] = mchash[1] = 0xffffffff;
   1771 	macctl |= MACCTL_PM;
   1772 
   1773  setit:
    1774 	AE_WRITE(sc, CSR_HTHI, mchash[0]);
    1775 	AE_WRITE(sc, CSR_HTLO, mchash[1]);	/* XXX: was CSR_HTHI twice; verify hi/lo word order */
   1776 
   1777 	AE_WRITE(sc, CSR_MACCTL, macctl);
   1778 	AE_BARRIER(sc);
   1779 
   1780 	DPRINTF(sc, ("%s: ae_filter_setup: returning %x\n",
   1781 		    sc->sc_dev.dv_xname, macctl));
   1782 }
   1783 
   1784 /*
   1785  * ae_idle:
   1786  *
   1787  *	Cause the transmit and/or receive processes to go idle.
   1788  */
   1789 void
   1790 ae_idle(struct ae_softc *sc, u_int32_t bits)
   1791 {
   1792 	static const char * const txstate_names[] = {
   1793 		"STOPPED",
   1794 		"RUNNING - FETCH",
   1795 		"RUNNING - WAIT",
   1796 		"RUNNING - READING",
   1797 		"-- RESERVED --",
   1798 		"RUNNING - SETUP",
   1799 		"SUSPENDED",
   1800 		"RUNNING - CLOSE",
   1801 	};
   1802 	static const char * const rxstate_names[] = {
   1803 		"STOPPED",
   1804 		"RUNNING - FETCH",
   1805 		"RUNNING - CHECK",
   1806 		"RUNNING - WAIT",
   1807 		"SUSPENDED",
   1808 		"RUNNING - CLOSE",
   1809 		"RUNNING - FLUSH",
   1810 		"RUNNING - QUEUE",
   1811 	};
   1812 
   1813 	u_int32_t csr, ackmask = 0;
   1814 	int i;
   1815 
   1816 	if (bits & OPMODE_ST)
   1817 		ackmask |= STATUS_TPS;
   1818 
   1819 	if (bits & OPMODE_SR)
   1820 		ackmask |= STATUS_RPS;
   1821 
   1822 	AE_CLR(sc, CSR_OPMODE, bits);
   1823 
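         	/* Wait up to ~10ms (1000 iterations of 10us) for the stop to take effect. */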
   1824 	for (i = 0; i < 1000; i++) {
   1825 		if (AE_ISSET(sc, CSR_STATUS, ackmask) == ackmask)
   1826 			break;
   1827 		delay(10);
   1828 	}
   1829 
   1830 	csr = AE_READ(sc, CSR_STATUS);
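         	/*
         	 * The TS/RS fields of CSR_STATUS encode the transmit and
         	 * receive DMA engine states; shifting them down by their
         	 * field offsets (20 and 17) indexes the name tables above.
         	 */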
   1831 	if ((csr & ackmask) != ackmask) {
   1832 		if ((bits & OPMODE_ST) != 0 && (csr & STATUS_TPS) == 0 &&
   1833 		    (csr & STATUS_TS) != STATUS_TS_STOPPED) {
   1834 			printf("%s: transmit process failed to idle: "
   1835 			    "state %s\n", sc->sc_dev.dv_xname,
   1836 			    txstate_names[(csr & STATUS_TS) >> 20]);
   1837 		}
   1838 		if ((bits & OPMODE_SR) != 0 && (csr & STATUS_RPS) == 0 &&
   1839 		    (csr & STATUS_RS) != STATUS_RS_STOPPED) {
   1840 			printf("%s: receive process failed to idle: "
   1841 			    "state %s\n", sc->sc_dev.dv_xname,
   1842 			    rxstate_names[(csr & STATUS_RS) >> 17]);
   1843 		}
   1844 	}
   1845 }
   1846 
   1847 /*****************************************************************************
   1848  * Generic media support functions.
   1849  *****************************************************************************/
   1850 
   1851 /*
   1852  * ae_mediastatus:	[ifmedia interface function]
   1853  *
   1854  *	Query the current media.
   1855  */
   1856 void
   1857 ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   1858 {
   1859 	struct ae_softc *sc = ifp->if_softc;
   1860 
   1861 	if (AE_IS_ENABLED(sc) == 0) {
   1862 		ifmr->ifm_active = IFM_ETHER | IFM_NONE;
   1863 		ifmr->ifm_status = 0;
   1864 		return;
   1865 	}
   1866 
   1867 	mii_pollstat(&sc->sc_mii);
   1868 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
   1869 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
   1870 }
   1871 
   1872 /*
   1873  * ae_mediachange:	[ifmedia interface function]
   1874  *
   1875  *	Update the current media.
   1876  */
   1877 int
   1878 ae_mediachange(struct ifnet *ifp)
   1879 {
   1880 	struct ae_softc *sc = ifp->if_softc;
   1881 
   1882 	if ((ifp->if_flags & IFF_UP) == 0)
   1883 		return (0);
   1884 
   1885 	mii_mediachg(&sc->sc_mii);
   1886 	return (0);
   1887 }
   1888 
   1889 /*****************************************************************************
   1890  * Support functions for MII-attached media.
   1891  *****************************************************************************/
   1892 
   1893 /*
   1894  * ae_mii_tick:
   1895  *
   1896  *	One second timer, used to tick the MII.
   1897  */
   1898 static void
   1899 ae_mii_tick(void *arg)
   1900 {
   1901 	struct ae_softc *sc = arg;
   1902 	int s;
   1903 
   1904 	if (!device_is_active(&sc->sc_dev))
   1905 		return;
   1906 
   1907 	s = splnet();
   1908 	mii_tick(&sc->sc_mii);
   1909 	splx(s);
   1910 
   1911 	callout_reset(&sc->sc_tick_callout, hz, sc->sc_tick, sc);
   1912 }
   1913 
   1914 /*
   1915  * ae_mii_statchg:	[mii interface function]
   1916  *
   1917  *	Callback from PHY when media changes.
   1918  */
   1919 static void
   1920 ae_mii_statchg(struct device *self)
   1921 {
   1922 	struct ae_softc *sc = (struct ae_softc *)self;
   1923 	uint32_t	macctl, flowc;
   1924 
   1925 	//opmode = AE_READ(sc, CSR_OPMODE);
   1926 	macctl = AE_READ(sc, CSR_MACCTL);
   1927 
   1928 	/* XXX: do we need to do this? */
   1929 	/* Idle the transmit and receive processes. */
   1930 	//ae_idle(sc, OPMODE_ST|OPMODE_SR);
   1931 
   1932 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   1933 		flowc = FLOWC_FCE;
   1934 		macctl &= ~MACCTL_DRO;
   1935 		macctl |= MACCTL_FDX;
   1936 	} else {
   1937 		flowc = 0;	/* cannot do flow control in HDX */
   1938 		macctl |= MACCTL_DRO;
   1939 		macctl &= ~MACCTL_FDX;
   1940 	}
   1941 
   1942 	AE_WRITE(sc, CSR_FLOWC, flowc);
   1943 	AE_WRITE(sc, CSR_MACCTL, macctl);
   1944 
   1945 	/* restore operational mode */
   1946 	//AE_WRITE(sc, CSR_OPMODE, opmode);
   1947 	AE_BARRIER(sc);
   1948 }
   1949 
   1950 /*
   1951  * ae_mii_readreg:
   1952  *
   1953  *	Read a PHY register.
   1954  */
   1955 static int
   1956 ae_mii_readreg(struct device *self, int phy, int reg)
   1957 {
   1958 	struct ae_softc	*sc = (struct ae_softc *)self;
   1959 	uint32_t	addr;
   1960 	int		i;
   1961 
   1962 	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT);
   1963 	AE_WRITE(sc, CSR_MIIADDR, addr);
   1964 	AE_BARRIER(sc);
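         	/*
         	 * XXX: busy-wait for the MII cycle to finish; if the PHY never
         	 * clears MIIADDR_BUSY the loop falls through and stale data is
         	 * returned without any error indication.
         	 */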
   1965 	for (i = 0; i < 100000000; i++) {
   1966 		if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
   1967 			break;
   1968 	}
   1969 
   1970 	return (AE_READ(sc, CSR_MIIDATA) & 0xffff);
   1971 }
   1972 
   1973 /*
   1974  * ae_mii_writereg:
   1975  *
   1976  *	Write a PHY register.
   1977  */
   1978 static void
   1979 ae_mii_writereg(struct device *self, int phy, int reg, int val)
   1980 {
   1981 	struct ae_softc *sc = (struct ae_softc *)self;
   1982 	uint32_t	addr;
   1983 	int		i;
   1984 
   1985 	/* write the data register */
   1986 	AE_WRITE(sc, CSR_MIIDATA, val);
   1987 
   1988 	/* write the address to latch it in */
   1989 	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT) |
   1990 	    MIIADDR_WRITE;
   1991 	AE_WRITE(sc, CSR_MIIADDR, addr);
   1992 	AE_BARRIER(sc);
   1993 
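         	/* As in ae_mii_readreg(), spin until MIIADDR_BUSY clears; a timeout is not reported. */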
   1994 	for (i = 0; i < 100000000; i++) {
   1995 		if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
   1996 			break;
   1997 	}
   1998 }
   1999