/*	$NetBSD: if_emac.c,v 1.39 2011/06/18 06:41:42 matt Exp $	*/

/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Simon Burge and Jason Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * emac(4) supports the following ibm4xx EMACs.
 *   XXXX: ZMII and 'TCP Acceleration Hardware' are not supported yet...
 *
 *            tested
 *            ------
 * 405EP	-  10/100 x2
 * 405EX/EXr	o  10/100/1000 x2 (EXr x1), STA v2, 256-bit hash table, RGMII
 * 405GP/GPr	o  10/100
 * 440EP	-  10/100 x2, ZMII
 * 440GP	-  10/100 x2, ZMII
 * 440GX	-  10/100/1000 x4, ZMII/RGMII(ch 2, 3), TAH(ch 2, 3)
 * 440SP	-  10/100/1000
 * 440SPe	-  10/100/1000, STA v2
 */
     53 
     54 #include <sys/cdefs.h>
     55 __KERNEL_RCSID(0, "$NetBSD: if_emac.c,v 1.39 2011/06/18 06:41:42 matt Exp $");
     56 
     57 #include "opt_emac.h"
     58 
     59 #include <sys/param.h>
     60 #include <sys/systm.h>
     61 #include <sys/mbuf.h>
     62 #include <sys/kernel.h>
     63 #include <sys/socket.h>
     64 #include <sys/ioctl.h>
     65 #include <sys/cpu.h>
     66 #include <sys/device.h>
     67 
     68 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
     69 
     70 #include <net/if.h>
     71 #include <net/if_dl.h>
     72 #include <net/if_media.h>
     73 #include <net/if_ether.h>
     74 
     75 #include <net/bpf.h>
     76 
     77 #include <powerpc/ibm4xx/cpu.h>
     78 #include <powerpc/ibm4xx/dcr4xx.h>
     79 #include <powerpc/ibm4xx/mal405gp.h>
     80 #include <powerpc/ibm4xx/dev/emacreg.h>
     81 #include <powerpc/ibm4xx/dev/if_emacreg.h>
     82 #include <powerpc/ibm4xx/dev/if_emacvar.h>
     83 #include <powerpc/ibm4xx/dev/malvar.h>
     84 #include <powerpc/ibm4xx/dev/opbreg.h>
     85 #include <powerpc/ibm4xx/dev/opbvar.h>
     86 #include <powerpc/ibm4xx/dev/plbvar.h>
     87 #if defined(EMAC_ZMII_PHY) || defined(EMAC_RGMII_PHY)
     88 #include <powerpc/ibm4xx/dev/rmiivar.h>
     89 #endif
     90 
     91 #include <dev/mii/miivar.h>
     92 
     93 #include "locators.h"
     94 
     95 
     96 /*
     97  * Transmit descriptor list size.  There are two Tx channels, each with
     98  * up to 256 hardware descriptors available.  We currently use one Tx
     99  * channel.  We tell the upper layers that they can queue a lot of
    100  * packets, and we go ahead and manage up to 64 of them at a time.  We
    101  * allow up to 16 DMA segments per packet.
    102  */
    103 #define	EMAC_NTXSEGS		16
    104 #define	EMAC_TXQUEUELEN		64
    105 #define	EMAC_TXQUEUELEN_MASK	(EMAC_TXQUEUELEN - 1)
    106 #define	EMAC_TXQUEUE_GC		(EMAC_TXQUEUELEN / 4)
    107 #define	EMAC_NTXDESC		256
    108 #define	EMAC_NTXDESC_MASK	(EMAC_NTXDESC - 1)
    109 #define	EMAC_NEXTTX(x)		(((x) + 1) & EMAC_NTXDESC_MASK)
    110 #define	EMAC_NEXTTXS(x)		(((x) + 1) & EMAC_TXQUEUELEN_MASK)
    111 
    112 /*
    113  * Receive descriptor list size.  There is one Rx channel with up to 256
    114  * hardware descriptors available.  We allocate 64 receive descriptors,
    115  * each with a 2k buffer (MCLBYTES).
    116  */
    117 #define	EMAC_NRXDESC		64
    118 #define	EMAC_NRXDESC_MASK	(EMAC_NRXDESC - 1)
    119 #define	EMAC_NEXTRX(x)		(((x) + 1) & EMAC_NRXDESC_MASK)
    120 #define	EMAC_PREVRX(x)		(((x) - 1) & EMAC_NRXDESC_MASK)
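
/*
 * Both rings are sized to powers of two so that the wrap macros above
 * can mask instead of taking a modulus; e.g. EMAC_NEXTTX(255) ==
 * (255 + 1) & 0xff == 0, wrapping back to the first descriptor with no
 * conditional.
 */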

/*
 * Transmit/receive descriptors that are DMA'd to the EMAC.
 */
struct emac_control_data {
	struct mal_descriptor ecd_txdesc[EMAC_NTXDESC];
	struct mal_descriptor ecd_rxdesc[EMAC_NRXDESC];
};

#define	EMAC_CDOFF(x)		offsetof(struct emac_control_data, x)
#define	EMAC_CDTXOFF(x)		EMAC_CDOFF(ecd_txdesc[(x)])
#define	EMAC_CDRXOFF(x)		EMAC_CDOFF(ecd_rxdesc[(x)])

/*
 * Software state for transmit jobs.
 */
struct emac_txsoft {
	struct mbuf *txs_mbuf;		/* head of mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive descriptors.
 */
struct emac_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct emac_softc {
	device_t sc_dev;		/* generic device information */
	int sc_instance;		/* instance no. */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power management hook */

	struct mii_data sc_mii;		/* MII/media information */
	struct callout sc_callout;	/* tick callout */

	uint32_t sc_mr1;		/* copy of Mode Register 1 */
	uint32_t sc_stacr_read;		/* STACR STAOPC read opcode */
	uint32_t sc_stacr_write;	/* STACR STAOPC write opcode */
	uint32_t sc_stacr_bits;		/* misc bits of STACR */
	bool sc_stacr_completed;	/* completed sense of STACR operations */
	int sc_htsize;			/* hash table size */

	bus_dmamap_t sc_cddmamap;	/* control data dma map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* Software state for transmit/receive descriptors. */
	struct emac_txsoft sc_txsoft[EMAC_TXQUEUELEN];
	struct emac_rxsoft sc_rxsoft[EMAC_NRXDESC];

	/* Control data structures. */
	struct emac_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->ecd_txdesc
#define	sc_rxdescs	sc_control_data->ecd_rxdesc

#ifdef EMAC_EVENT_COUNTERS
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_txintr;	/* Tx interrupts */
	struct evcnt sc_ev_rxde;	/* Rx descriptor interrupts */
	struct evcnt sc_ev_txde;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_intr;	/* General EMAC interrupts */

	struct evcnt sc_ev_txreap;	/* Calls to Tx descriptor reaper */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* EMAC_EVENT_COUNTERS */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	int sc_rxptr;			/* next ready RX descriptor/descsoft */

	void (*sc_rmii_enable)(device_t, int);		/* reduced MII enable */
	void (*sc_rmii_disable)(device_t, int);		/* reduced MII disable*/
	void (*sc_rmii_speed)(device_t, int, int);	/* reduced MII speed */
};

#ifdef EMAC_EVENT_COUNTERS
#define	EMAC_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	EMAC_EVCNT_INCR(ev)	/* nothing */
#endif

#define	EMAC_CDTXADDR(sc, x)	((sc)->sc_cddma + EMAC_CDTXOFF((x)))
#define	EMAC_CDRXADDR(sc, x)	((sc)->sc_cddma + EMAC_CDRXOFF((x)))

#define	EMAC_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > EMAC_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) *	\
		    (EMAC_NTXDESC - __x), (ops));			\
		__n -= (EMAC_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) * __n, (ops)); \
} while (/*CONSTCOND*/0)

#define	EMAC_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    EMAC_CDRXOFF((x)), sizeof(struct mal_descriptor), (ops));	\
} while (/*CONSTCOND*/0)

#define	EMAC_INIT_RXDESC(sc, x)						\
do {									\
	struct emac_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct mal_descriptor *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->md_data = __rxs->rxs_dmamap->dm_segs[0].ds_addr + 2;	\
	__rxd->md_data_len = __m->m_ext.ext_size - 2;			\
	__rxd->md_stat_ctrl = MAL_RX_EMPTY | MAL_RX_INTERRUPT |		\
	    /* Set wrap on last descriptor. */				\
	    (((x) == EMAC_NRXDESC - 1) ? MAL_RX_WRAP : 0);		\
	EMAC_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)

#define	EMAC_WRITE(sc, reg, val) \
	bus_space_write_stream_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	EMAC_READ(sc, reg) \
	bus_space_read_stream_4((sc)->sc_st, (sc)->sc_sh, (reg))

#define	EMAC_SET_FILTER(aht, crc) \
do {									\
	(aht)[3 - (((crc) >> 26) >> 4)] |= 1 << (((crc) >> 26) & 0xf);	\
} while (/*CONSTCOND*/0)
#define	EMAC_SET_FILTER256(aht, crc) \
do {									\
	(aht)[7 - (((crc) >> 24) >> 5)] |= 1 << (((crc) >> 24) & 0x1f);	\
} while (/*CONSTCOND*/0)
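
/*
 * The filter macros above hash an address by the high-order bits of its
 * big-endian CRC: the 64-bit table uses the top 6 bits to pick one of
 * 4 x 16 bits, the 256-bit table the top 8 bits to pick one of
 * 8 x 32 bits.  For example, (crc >> 26) == 0x21 makes EMAC_SET_FILTER
 * set bit 1 of aht[1].
 */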

static int	emac_match(device_t, cfdata_t, void *);
static void	emac_attach(device_t, device_t, void *);

static int	emac_intr(void *);
static void	emac_shutdown(void *);

static void	emac_start(struct ifnet *);
static int	emac_ioctl(struct ifnet *, u_long, void *);
static int	emac_init(struct ifnet *);
static void	emac_stop(struct ifnet *, int);
static void	emac_watchdog(struct ifnet *);

static int	emac_add_rxbuf(struct emac_softc *, int);
static void	emac_rxdrain(struct emac_softc *);
static int	emac_set_filter(struct emac_softc *);
static int	emac_txreap(struct emac_softc *);

static void	emac_soft_reset(struct emac_softc *);
static void	emac_smart_reset(struct emac_softc *);

static int	emac_mii_readreg(device_t, int, int);
static void	emac_mii_writereg(device_t, int, int, int);
static void	emac_mii_statchg(device_t);
static uint32_t	emac_mii_wait(struct emac_softc *);
static void	emac_mii_tick(void *);

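/*
 * emac_copy_small is a run-time tunable; when non-zero, received
 * packets that fit in a single header mbuf are copied out of the
 * receive buffer rather than handing the buffer itself up the stack
 * (see emac_rxeob_intr() below).
 */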
int		emac_copy_small = 0;

CFATTACH_DECL_NEW(emac, sizeof(struct emac_softc),
    emac_match, emac_attach, NULL, NULL);


static int
emac_match(device_t parent, cfdata_t cf, void *aux)
{
	struct opb_attach_args *oaa = aux;

	/* match only on-chip ethernet devices */
	if (strcmp(oaa->opb_name, cf->cf_name) == 0)
		return 1;

	return 0;
}

static void
emac_attach(device_t parent, device_t self, void *aux)
{
	struct opb_attach_args *oaa = aux;
	struct emac_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	const char *xname = device_xname(self);
	bus_dma_segment_t seg;
	int error, i, nseg, opb_freq, opbc, mii_phy = MII_PHY_ANY;
	const uint8_t *enaddr;
	prop_dictionary_t dict = device_properties(self);
	prop_data_t ea;

	bus_space_map(oaa->opb_bt, oaa->opb_addr, EMAC_NREG, 0, &sc->sc_sh);

	sc->sc_dev = self;
	sc->sc_instance = oaa->opb_instance;
	sc->sc_st = oaa->opb_bt;
	sc->sc_dmat = oaa->opb_dmat;

	callout_init(&sc->sc_callout, 0);

	aprint_naive("\n");
	aprint_normal(": Ethernet Media Access Controller\n");

	/* Fetch the Ethernet address. */
	ea = prop_dictionary_get(dict, "mac-address");
	if (ea == NULL) {
		aprint_error_dev(self, "unable to get mac-address property\n");
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);
	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));

#if defined(EMAC_ZMII_PHY) || defined(EMAC_RGMII_PHY)
	/* Fetch the MII offset. */
	prop_dictionary_get_uint32(dict, "mii-phy", &mii_phy);

#ifdef EMAC_ZMII_PHY
	if (oaa->opb_flags & OPB_FLAGS_EMAC_RMII_ZMII)
		zmii_attach(parent, sc->sc_instance, &sc->sc_rmii_enable,
		    &sc->sc_rmii_disable, &sc->sc_rmii_speed);
#endif
#ifdef EMAC_RGMII_PHY
	if (oaa->opb_flags & OPB_FLAGS_EMAC_RMII_RGMII)
		rgmii_attach(parent, sc->sc_instance, &sc->sc_rmii_enable,
		    &sc->sc_rmii_disable, &sc->sc_rmii_speed);
#endif
#endif

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct emac_control_data), 0, 0, &seg, 1, &nseg, 0)) != 0) {
		aprint_error_dev(self,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct emac_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct emac_control_data), 1,
	    sizeof(struct emac_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(self,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct emac_control_data), NULL,
	    0)) != 0) {
		aprint_error_dev(self,
		    "unable to load control data DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EMAC_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* Soft reset the EMAC to put the chip in a known state. */
	emac_soft_reset(sc);

	opb_freq = opb_get_frequency();
	switch (opb_freq) {
	case  50000000: opbc =  STACR_OPBC_50MHZ; break;
	case  66666666: opbc =  STACR_OPBC_66MHZ; break;
	case  83333333: opbc =  STACR_OPBC_83MHZ; break;
	case 100000000: opbc = STACR_OPBC_100MHZ; break;

	default:
		if (opb_freq > 100000000) {
			opbc = STACR_OPBC_A100MHZ;
			break;
		}
		aprint_error_dev(self, "unsupported OPB frequency %dMHz\n",
		    opb_freq / 1000 / 1000);
		goto fail_5;
	}
	if (oaa->opb_flags & OPB_FLAGS_EMAC_GBE) {
		sc->sc_mr1 =
		    MR1_RFS_GBE(MR1__FS_16KB)	|
		    MR1_TFS_GBE(MR1__FS_16KB)	|
		    MR1_TR0_MULTIPLE		|
		    MR1_OBCI(opbc);
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

		if (oaa->opb_flags & OPB_FLAGS_EMAC_STACV2) {
			sc->sc_stacr_read = STACR_STAOPC_READ;
			sc->sc_stacr_write = STACR_STAOPC_WRITE;
			sc->sc_stacr_bits = STACR_OC;
			sc->sc_stacr_completed = false;
		} else {
			sc->sc_stacr_read = STACR_READ;
			sc->sc_stacr_write = STACR_WRITE;
			sc->sc_stacr_completed = true;
		}
	} else {
		/*
		 * Set up Mode Register 1 - set receive and transmit FIFOs to
		 * maximum size, allow transmit of multiple packets (only
		 * channel 0 is used).
		 *
		 * XXX: Allow pause packets??
		 */
		sc->sc_mr1 =
		    MR1_RFS(MR1__FS_4KB) |
		    MR1_TFS(MR1__FS_2KB) |
		    MR1_TR0_MULTIPLE;

		sc->sc_stacr_read = STACR_READ;
		sc->sc_stacr_write = STACR_WRITE;
		sc->sc_stacr_bits = STACR_OPBC(opbc);
		sc->sc_stacr_completed = true;
	}

	intr_establish(oaa->opb_irq, IST_LEVEL, IPL_NET, emac_intr, sc);
	mal_intr_establish(sc->sc_instance, sc);

	if (oaa->opb_flags & OPB_FLAGS_EMAC_HT256)
		sc->sc_htsize = 256;
	else
		sc->sc_htsize = 64;

	/* Clear all interrupts */
	EMAC_WRITE(sc, EMAC_ISR, ISR_ALL);

	/*
	 * Initialise the media structures.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = emac_mii_readreg;
	mii->mii_writereg = emac_mii_writereg;
	mii->mii_statchg = emac_mii_statchg;

	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii_attach(self, mii, 0xffffffff, mii_phy, MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = emac_start;
	ifp->if_ioctl = emac_ioctl;
	ifp->if_init = emac_init;
	ifp->if_stop = emac_stop;
	ifp->if_watchdog = emac_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef EMAC_EVENT_COUNTERS
	/*
	 * Attach the event counters.
	 */
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
	    NULL, xname, "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_txde, EVCNT_TYPE_INTR,
	    NULL, xname, "txde");
	evcnt_attach_dynamic(&sc->sc_ev_rxde, EVCNT_TYPE_INTR,
	    NULL, xname, "rxde");
	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "intr");

	evcnt_attach_dynamic(&sc->sc_ev_txreap, EVCNT_TYPE_MISC,
	    NULL, xname, "txreap");
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, xname, "txdrop");
	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, xname, "tu");
#endif /* EMAC_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(emac_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error_dev(self,
		    "WARNING: unable to establish shutdown hook\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_5:
	for (i = 0; i < EMAC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_4:
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct emac_control_data));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
fail_0:
	return;
}

/*
 * EMAC General interrupt handler
 */
static int
emac_intr(void *arg)
{
	struct emac_softc *sc = arg;
	uint32_t status;

	EMAC_EVCNT_INCR(&sc->sc_ev_intr);
	status = EMAC_READ(sc, EMAC_ISR);

	/* Clear the interrupt status bits. */
	EMAC_WRITE(sc, EMAC_ISR, status);

	return 1;
}

static void
emac_shutdown(void *arg)
{
	struct emac_softc *sc = arg;

	emac_stop(&sc->sc_ethercom.ec_if, 0);
}


/*
 * ifnet interface functions
 */

static void
emac_start(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct emac_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx, ofree, seg;

	lasttx = 0;	/* XXX gcc */

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get a work queue entry.  Reclaim used Tx descriptors if
		 * we are running low.
		 */
		if (sc->sc_txsfree < EMAC_TXQUEUE_GC) {
			emac_txreap(sc);
			if (sc->sc_txsfree == 0) {
				EMAC_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;
		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments (in
		 * which case we drop it), or we were short on resources
		 * and should try again later.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				EMAC_EVCNT_INCR(&sc->sc_ev_txdrop);
				aprint_error_ifnet(ifp,
				    "Tx packet consumes too many "
				    "DMA segments, dropping...\n");
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			EMAC_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Initialize the transmit descriptor.
		 */
		firsttx = sc->sc_txnext;
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = EMAC_NEXTTX(nexttx)) {
			struct mal_descriptor *txdesc =
			    &sc->sc_txdescs[nexttx];

			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the TX_READY bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			txdesc->md_data = dmamap->dm_segs[seg].ds_addr;
			txdesc->md_data_len = dmamap->dm_segs[seg].ds_len;
			txdesc->md_stat_ctrl =
			    (txdesc->md_stat_ctrl & MAL_TX_WRAP) |
			    (nexttx == firsttx ? 0 : MAL_TX_READY) |
			    EMAC_TXC_GFCS | EMAC_TXC_GPAD;
			lasttx = nexttx;
		}

		/* Set the LAST bit on the last segment. */
		sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_LAST;

		/*
		 * Set up last segment descriptor to send an interrupt after
		 * that descriptor is transmitted, and bypass existing Tx
		 * descriptor reaping method (for now...).
		 */
		sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_INTERRUPT;

		txs->txs_lastdesc = lasttx;

		/* Sync the descriptors we're using. */
		EMAC_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].md_stat_ctrl |= MAL_TX_READY;
		EMAC_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Tell the EMAC that a new packet is available.
		 */
		EMAC_WRITE(sc, EMAC_TMR0, TMR0_GNP0 | TMR0_TFAE_2);

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = EMAC_NEXTTXS(sc->sc_txsnext);

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txfree == 0)
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_txfree != ofree)
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
}

static int
emac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct emac_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMTU:
	{
		struct ifreq *ifr = (struct ifreq *)data;
		int maxmtu;

		if (sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU)
			maxmtu = EMAC_MAX_MTU;
		else
			maxmtu = ETHERMTU;

		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > maxmtu)
			error = EINVAL;
		else if ((error = ifioctl_common(ifp, cmd, data)) != ENETRESET)
			break;
		else if (ifp->if_flags & IFF_UP)
			error = emac_init(ifp);
		else
			error = 0;
		break;
	}

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				error = emac_set_filter(sc);
			else
				error = 0;
		}
	}

	/* try to get more packets going */
	emac_start(ifp);

	splx(s);
	return error;
}

static int
emac_init(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	struct emac_rxsoft *rxs;
	const uint8_t *enaddr = CLLADDR(ifp->if_sadl);
	int error, i;

	error = 0;

	/* Cancel any pending I/O. */
	emac_stop(ifp, 0);

	/* Reset the chip to a known state. */
	emac_soft_reset(sc);

	/*
	 * Initialise the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	/* set wrap on last descriptor */
	sc->sc_txdescs[EMAC_NTXDESC - 1].md_stat_ctrl |= MAL_TX_WRAP;
	EMAC_CDTXSYNC(sc, 0, EMAC_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = EMAC_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialise the transmit job descriptors.
	 */
	for (i = 0; i < EMAC_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = EMAC_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialise the receiver descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = emac_add_rxbuf(sc, i)) != 0) {
				aprint_error_ifnet(ifp,
				    "unable to allocate or map rx buffer %d,"
				    " error = %d\n",
				    i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				emac_rxdrain(sc);
				goto out;
			}
		} else
			EMAC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Set the current media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Load the MAC address.
	 */
	EMAC_WRITE(sc, EMAC_IAHR, enaddr[0] << 8 | enaddr[1]);
	EMAC_WRITE(sc, EMAC_IALR,
	    enaddr[2] << 24 | enaddr[3] << 16 | enaddr[4] << 8 | enaddr[5]);

	/* Enable the transmit and receive channel on the MAL. */
	error = mal_start(sc->sc_instance,
	    EMAC_CDTXADDR(sc, 0), EMAC_CDRXADDR(sc, 0));
	if (error)
		goto out;

	sc->sc_mr1 &= ~MR1_JPSM;
	if (ifp->if_mtu > ETHERMTU)
		/* Enable Jumbo Packet Support Mode */
		sc->sc_mr1 |= MR1_JPSM;

	/* Set fifos, media modes. */
	EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1);

	/*
	 * Enable Individual and (possibly) Broadcast Address modes,
	 * runt packets, and strip padding.
	 */
	EMAC_WRITE(sc, EMAC_RMR, RMR_IAE | RMR_RRP | RMR_SP | RMR_TFAE_2 |
	    (ifp->if_flags & IFF_PROMISC ? RMR_PME : 0) |
	    (ifp->if_flags & IFF_BROADCAST ? RMR_BAE : 0));

	/*
	 * Set multicast filter.
	 */
	emac_set_filter(sc);

	/*
	 * Set low- and urgent-priority request thresholds.
	 */
	EMAC_WRITE(sc, EMAC_TMR1,
	    ((7 << TMR1_TLR_SHIFT) & TMR1_TLR_MASK) | /* 16 word burst */
	    ((15 << TMR1_TUR_SHIFT) & TMR1_TUR_MASK));
	/*
	 * Set Transmit Request Threshold Register.
	 */
	EMAC_WRITE(sc, EMAC_TRTR, TRTR_256);

	/*
	 * Set high and low receive watermarks.
	 */
	EMAC_WRITE(sc, EMAC_RWMR,
	    30 << RWMR_RLWM_SHIFT | 64 << RWMR_RHWM_SHIFT);

	/*
	 * Set frame gap.
	 */
	EMAC_WRITE(sc, EMAC_IPGVR, 8);

	/*
	 * Set interrupt status enable bits for EMAC.
	 */
	EMAC_WRITE(sc, EMAC_ISER,
	    ISR_TXPE |		/* TX Parity Error */
	    ISR_RXPE |		/* RX Parity Error */
	    ISR_TXUE |		/* TX Underrun Event */
	    ISR_RXOE |		/* RX Overrun Event */
	    ISR_OVR  |		/* Overrun Error */
	    ISR_PP   |		/* Pause Packet */
	    ISR_BP   |		/* Bad Packet */
	    ISR_RP   |		/* Runt Packet */
	    ISR_SE   |		/* Short Event */
	    ISR_ALE  |		/* Alignment Error */
	    ISR_BFCS |		/* Bad FCS */
	    ISR_PTLE |		/* Packet Too Long Error */
	    ISR_ORE  |		/* Out of Range Error */
	    ISR_IRE  |		/* In Range Error */
	    ISR_SE0  |		/* Signal Quality Error 0 (SQE) */
	    ISR_TE0  |		/* Transmit Error 0 */
	    ISR_MOS  |		/* MMA Operation Succeeded */
	    ISR_MOF);		/* MMA Operation Failed */

	/*
	 * Enable the transmit and receive channel on the EMAC.
	 */
	EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);

	/*
	 * ... all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_ifnet(ifp, "interface not running\n");
	}
	return error;
}

static void
emac_stop(struct ifnet *ifp, int disable)
{
	struct emac_softc *sc = ifp->if_softc;
	struct emac_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_callout);

	/* Down the MII */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	EMAC_WRITE(sc, EMAC_ISER, 0);

	/* Disable the receive and transmit channels. */
	mal_stop(sc->sc_instance);

	/* Disable the transmit enable and receive MACs. */
	EMAC_WRITE(sc, EMAC_MR0,
	    EMAC_READ(sc, EMAC_MR0) & ~(MR0_TXE | MR0_RXE));

	/* Release any queued transmit buffers. */
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	if (disable)
		emac_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static void
emac_watchdog(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	emac_txreap(sc);

	if (sc->sc_txfree != EMAC_NTXDESC) {
		aprint_error_ifnet(ifp,
		    "device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void)emac_init(ifp);
	} else if (ifp->if_flags & IFF_DEBUG)
		aprint_error_ifnet(ifp, "recovered from device timeout\n");

	/* try to get more packets going */
	emac_start(ifp);
}

static int
emac_add_rxbuf(struct emac_softc *sc, int idx)
{
	struct emac_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", idx, error);
		panic("emac_add_rxbuf");		/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	EMAC_INIT_RXDESC(sc, idx);

	return 0;
}

static void
emac_rxdrain(struct emac_softc *sc)
{
	struct emac_rxsoft *rxs;
	int i;

	for (i = 0; i < EMAC_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

static int
emac_set_filter(struct emac_softc *sc)
{
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t rmr, crc, mask, tmp, reg, gaht[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	int regs, cnt = 0, i;

	if (sc->sc_htsize == 256) {
		reg = EMAC_GAHT256(0);
		regs = 8;
	} else {
		reg = EMAC_GAHT64(0);
		regs = 4;
	}
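	/*
	 * Each hash-table register holds sc_htsize / regs bits (16 for
	 * the 64-bit table, 32 for the 256-bit one); build a mask with
	 * all of those bits set.
	 */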
	mask = (1ULL << (sc->sc_htsize / regs)) - 1;

	rmr = EMAC_READ(sc, EMAC_RMR);
	rmr &= ~(RMR_PMME | RMR_MAE);
	ifp->if_flags &= ~IFF_ALLMULTI;

	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo,
		    enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			gaht[0] = gaht[1] = gaht[2] = gaht[3] =
			    gaht[4] = gaht[5] = gaht[6] = gaht[7] = mask;
			break;
		}

		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

		if (sc->sc_htsize == 256)
			EMAC_SET_FILTER256(gaht, crc);
		else
			EMAC_SET_FILTER(gaht, crc);

		ETHER_NEXT_MULTI(step, enm);
		cnt++;
	}

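	/*
	 * If ANDing all the hash-table words together still yields the
	 * full mask, every hash bucket is set and the filter would accept
	 * everything anyway, so receive all multicast (PMME) instead.
	 */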
	for (i = 1, tmp = gaht[0]; i < regs; i++)
		tmp &= gaht[i];
	if (tmp == mask) {
		/* All categories are true. */
		ifp->if_flags |= IFF_ALLMULTI;
		rmr |= RMR_PMME;
	} else if (cnt != 0) {
		/* Some categories are true. */
		for (i = 0; i < regs; i++)
			EMAC_WRITE(sc, reg + (i << 2), gaht[i]);
		rmr |= RMR_MAE;
	}
	EMAC_WRITE(sc, EMAC_RMR, rmr);

	return 0;
}

/*
 * Reap completed Tx descriptors.
 */
static int
emac_txreap(struct emac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct emac_txsoft *txs;
	int handled, i;
	uint32_t txstat;

	EMAC_EVCNT_INCR(&sc->sc_ev_txreap);
	handled = 0;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != EMAC_TXQUEUELEN;
	    i = EMAC_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		EMAC_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = sc->sc_txdescs[txs->txs_lastdesc].md_stat_ctrl;
		if (txstat & MAL_TX_READY)
			break;

		handled = 1;

		/*
		 * Check for errors and collisions.
		 */
		if (txstat & (EMAC_TXS_UR | EMAC_TXS_ED))
			ifp->if_oerrors++;

#ifdef EMAC_EVENT_COUNTERS
		if (txstat & EMAC_TXS_UR)
			EMAC_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* EMAC_EVENT_COUNTERS */

		if (txstat &
		    (EMAC_TXS_EC | EMAC_TXS_MC | EMAC_TXS_SC | EMAC_TXS_LC)) {
			if (txstat & EMAC_TXS_EC)
				ifp->if_collisions += 16;
			else if (txstat & EMAC_TXS_MC)
				ifp->if_collisions += 2;	/* XXX? */
			else if (txstat & EMAC_TXS_SC)
				ifp->if_collisions++;
			if (txstat & EMAC_TXS_LC)
				ifp->if_collisions++;
		} else
			ifp->if_opackets++;

		if (ifp->if_flags & IFF_DEBUG) {
			if (txstat & EMAC_TXS_ED)
				aprint_error_ifnet(ifp, "excessive deferral\n");
			if (txstat & EMAC_TXS_EC)
				aprint_error_ifnet(ifp,
				    "excessive collisions\n");
		}

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == EMAC_TXQUEUELEN)
		ifp->if_timer = 0;

	return handled;
}


/*
 * Reset functions
 */

static void
emac_soft_reset(struct emac_softc *sc)
{
	uint32_t sdr;
	int t = 0;

	/*
	 * The PHY must provide a TX clock in order to perform a soft reset
	 * of the EMAC.  If none is present, select the internal clock
	 * (SDR0_MFR[E0CS,E1CS]).  After the soft reset, select the external
	 * clock.
	 */

	sdr = mfsdr(DCR_SDR0_MFR);
	sdr |= SDR0_MFR_ECS(sc->sc_instance);
	mtsdr(DCR_SDR0_MFR, sdr);

	EMAC_WRITE(sc, EMAC_MR0, MR0_SRST);

	sdr = mfsdr(DCR_SDR0_MFR);
	sdr &= ~SDR0_MFR_ECS(sc->sc_instance);
	mtsdr(DCR_SDR0_MFR, sdr);

	delay(5);

	/* wait for the reset to finish */
	while (EMAC_READ(sc, EMAC_MR0) & MR0_SRST) {
		if (++t == 1000000 /* 1sec XXXXX */) {
			aprint_error_dev(sc->sc_dev, "Soft Reset failed\n");
			return;
		}
		delay(1);
	}
}

static void
emac_smart_reset(struct emac_softc *sc)
{
	uint32_t mr0;
	int t = 0;

	mr0 = EMAC_READ(sc, EMAC_MR0);
	if (mr0 & (MR0_TXE | MR0_RXE)) {
		mr0 &= ~(MR0_TXE | MR0_RXE);
		EMAC_WRITE(sc, EMAC_MR0, mr0);

		/* wait for idle state */
		while ((EMAC_READ(sc, EMAC_MR0) & (MR0_TXI | MR0_RXI)) !=
		    (MR0_TXI | MR0_RXI)) {
			if (++t == 1000000 /* 1sec XXXXX */) {
				aprint_error_dev(sc->sc_dev,
				    "Smart Reset failed\n");
				return;
			}
			delay(1);
		}
	}
}


/*
 * MII related functions
 */

static int
emac_mii_readreg(device_t self, int phy, int reg)
{
	struct emac_softc *sc = device_private(self);
	uint32_t sta_reg;

	if (sc->sc_rmii_enable)
		sc->sc_rmii_enable(device_parent(self), sc->sc_instance);

	/* wait for PHY data transfer to complete */
	if (emac_mii_wait(sc))
		goto fail;

	sta_reg =
	    sc->sc_stacr_read		|
	    (reg << STACR_PRA_SHIFT)	|
	    (phy << STACR_PCDA_SHIFT)	|
	    sc->sc_stacr_bits;
	EMAC_WRITE(sc, EMAC_STACR, sta_reg);

	if (emac_mii_wait(sc))
		goto fail;
	sta_reg = EMAC_READ(sc, EMAC_STACR);

	if (sc->sc_rmii_disable)
		sc->sc_rmii_disable(device_parent(self), sc->sc_instance);

	if (sta_reg & STACR_PHYE)
		return 0;
	return sta_reg >> STACR_PHYD_SHIFT;

fail:
	if (sc->sc_rmii_disable)
		sc->sc_rmii_disable(device_parent(self), sc->sc_instance);
	return 0;
}

static void
emac_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct emac_softc *sc = device_private(self);
	uint32_t sta_reg;

	if (sc->sc_rmii_enable)
		sc->sc_rmii_enable(device_parent(self), sc->sc_instance);

	/* wait for PHY data transfer to complete */
	if (emac_mii_wait(sc))
		goto out;

	sta_reg =
	    (val << STACR_PHYD_SHIFT)	|
	    sc->sc_stacr_write		|
	    (reg << STACR_PRA_SHIFT)	|
	    (phy << STACR_PCDA_SHIFT)	|
	    sc->sc_stacr_bits;
	EMAC_WRITE(sc, EMAC_STACR, sta_reg);

	if (emac_mii_wait(sc))
		goto out;
	if (EMAC_READ(sc, EMAC_STACR) & STACR_PHYE)
		aprint_error_dev(sc->sc_dev, "MII PHY Error\n");

out:
	if (sc->sc_rmii_disable)
		sc->sc_rmii_disable(device_parent(self), sc->sc_instance);
}

static void
emac_mii_statchg(device_t self)
{
	struct emac_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;

	/*
	 * MR1 can only be written immediately after a reset...
	 */
	emac_smart_reset(sc);

	sc->sc_mr1 &= ~(MR1_FDE | MR1_ILE | MR1_EIFC | MR1_MF_MASK | MR1_IST);
	if (mii->mii_media_active & IFM_FDX)
		sc->sc_mr1 |= (MR1_FDE | MR1_EIFC | MR1_IST);
	if (mii->mii_media_active & IFM_FLOW)
		sc->sc_mr1 |= MR1_EIFC;
	if (mii->mii_media_active & IFM_LOOP)
		sc->sc_mr1 |= MR1_ILE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		sc->sc_mr1 |= (MR1_MF_1000MBS | MR1_IST);
		break;

	case IFM_100_TX:
		sc->sc_mr1 |= (MR1_MF_100MBS | MR1_IST);
		break;

	case IFM_10_T:
		sc->sc_mr1 |= MR1_MF_10MBS;
		break;

	case IFM_NONE:
		break;

	default:
		aprint_error_dev(self, "unknown sub-type %d\n",
		    IFM_SUBTYPE(mii->mii_media_active));
		break;
	}
	if (sc->sc_rmii_speed)
		sc->sc_rmii_speed(device_parent(self), sc->sc_instance,
		    IFM_SUBTYPE(mii->mii_media_active));

	EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1);

	/* Enable TX and RX if already RUNNING */
	if (ifp->if_flags & IFF_RUNNING)
		EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE);
}

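/*
 * Wait for an STA operation to complete.  The completed sense of
 * STACR_OC differs between STA variants: with the original STACR layout
 * sc_stacr_completed is true (OC reads as one when done), with STA v2
 * it is false (OC reads as zero when done), so the loop compares
 * against the per-chip flag instead of a fixed polarity.
 */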
static uint32_t
emac_mii_wait(struct emac_softc *sc)
{
	int i;
	uint32_t oc;

	/* wait for PHY data transfer to complete */
	i = 0;
	oc = EMAC_READ(sc, EMAC_STACR) & STACR_OC;
	while ((oc == STACR_OC) != sc->sc_stacr_completed) {
		delay(7);
		if (i++ > 5) {
			aprint_error_dev(sc->sc_dev, "MII timed out\n");
			return -1;
		}
		oc = EMAC_READ(sc, EMAC_STACR) & STACR_OC;
	}
	return 0;
}

static void
emac_mii_tick(void *arg)
{
	struct emac_softc *sc = arg;
	int s;

	if (!device_is_active(sc->sc_dev))
		return;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);
}

int
emac_txeob_intr(void *arg)
{
	struct emac_softc *sc = arg;
	int handled = 0;

	EMAC_EVCNT_INCR(&sc->sc_ev_txintr);
	handled |= emac_txreap(sc);

	/* try to get more packets going */
	emac_start(&sc->sc_ethercom.ec_if);

	return handled;
}

int
emac_rxeob_intr(void *arg)
{
	struct emac_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct emac_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat;
	int i, len;

	EMAC_EVCNT_INCR(&sc->sc_ev_rxintr);

	for (i = sc->sc_rxptr; ; i = EMAC_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		EMAC_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].md_stat_ctrl;

		if (rxstat & MAL_RX_EMPTY)
			/*
			 * We have processed all of the receive buffers.
			 */
			break;

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 */
		if (rxstat & (EMAC_RXS_OE | EMAC_RXS_BP | EMAC_RXS_SE |
		    EMAC_RXS_AE | EMAC_RXS_BFCS | EMAC_RXS_PTL | EMAC_RXS_ORE |
		    EMAC_RXS_IRE)) {
#define	PRINTERR(bit, str)					\
			if (rxstat & (bit))			\
				aprint_error_ifnet(ifp,		\
				    "receive error: %s\n", str)
			ifp->if_ierrors++;
			PRINTERR(EMAC_RXS_OE, "overrun error");
			PRINTERR(EMAC_RXS_BP, "bad packet");
			PRINTERR(EMAC_RXS_RP, "runt packet");
			PRINTERR(EMAC_RXS_SE, "short event");
			PRINTERR(EMAC_RXS_AE, "alignment error");
			PRINTERR(EMAC_RXS_BFCS, "bad FCS");
			PRINTERR(EMAC_RXS_PTL, "packet too long");
			PRINTERR(EMAC_RXS_ORE, "out of range error");
			PRINTERR(EMAC_RXS_IRE, "in range error");
#undef PRINTERR
			EMAC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note, the 405GP emac
		 * includes the CRC with every packet.
		 */
		len = sc->sc_rxdescs[i].md_data_len - ETHER_CRC_LEN;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (emac_copy_small != 0 && len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, void *),
			    mtod(rxs->rxs_mbuf, void *), len);
			EMAC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = rxs->rxs_mbuf;
			if (emac_add_rxbuf(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				EMAC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	return 1;
}

int
emac_txde_intr(void *arg)
{
	struct emac_softc *sc = arg;

	EMAC_EVCNT_INCR(&sc->sc_ev_txde);
	aprint_error_dev(sc->sc_dev, "emac_txde_intr\n");
	return 1;
}

int
emac_rxde_intr(void *arg)
{
	struct emac_softc *sc = arg;
	int i;

	EMAC_EVCNT_INCR(&sc->sc_ev_rxde);
	aprint_error_dev(sc->sc_dev, "emac_rxde_intr\n");
	/*
	 * XXX!
	 * This is a bit drastic; we just drop all descriptors that aren't
	 * "clean".  We should probably send any that are up the stack.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		EMAC_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (sc->sc_rxdescs[i].md_data_len != MCLBYTES)
			EMAC_INIT_RXDESC(sc, i);
	}

	return 1;
}