if_temac.c revision 1.15
      1 /* 	$NetBSD: if_temac.c,v 1.15 2019/05/29 06:21:57 msaitoh Exp $ */
      2 
      3 /*
      4  * Copyright (c) 2006 Jachym Holecek
      5  * All rights reserved.
      6  *
      7  * Written for DFC Design, s.r.o.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  *
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  *
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Driver for Xilinx LocalLink TEMAC as wired on the GSRD platform.
     34  *
     35  * TODO:
     36  * 	- Optimize
     37  * 	- Checksum offload
     38  * 	- Address filters
     39  * 	- Support jumbo frames
     40  */
     41 
     42 #include <sys/cdefs.h>
     43 __KERNEL_RCSID(0, "$NetBSD: if_temac.c,v 1.15 2019/05/29 06:21:57 msaitoh Exp $");
     44 
     45 
     46 #include <sys/param.h>
     47 #include <sys/systm.h>
     48 #include <sys/mbuf.h>
     49 #include <sys/kernel.h>
     50 #include <sys/socket.h>
     51 #include <sys/ioctl.h>
     52 #include <sys/device.h>
     53 #include <sys/bus.h>
     54 #include <sys/cpu.h>
     55 
     56 #include <uvm/uvm_extern.h>
     57 
     58 #include <net/if.h>
     59 #include <net/if_dl.h>
     60 #include <net/if_media.h>
     61 #include <net/if_ether.h>
     62 
     63 #include <net/bpf.h>
     64 
     65 #include <powerpc/ibm4xx/cpu.h>
     66 
     67 #include <evbppc/virtex/idcr.h>
     68 #include <evbppc/virtex/dev/xcvbusvar.h>
     69 #include <evbppc/virtex/dev/cdmacreg.h>
     70 #include <evbppc/virtex/dev/temacreg.h>
     71 #include <evbppc/virtex/dev/temacvar.h>
     72 
     73 #include <dev/mii/miivar.h>
     74 
     75 
      76 /* This is outside of the TEMAC's DCR window, so we have to hardcode it. */
     77 #define DCR_ETH_BASE 		0x0030
     78 
     79 #define	TEMAC_REGDEBUG 		0
     80 #define	TEMAC_RXDEBUG 		0
     81 #define	TEMAC_TXDEBUG 		0
     82 
     83 #if TEMAC_RXDEBUG > 0 || TEMAC_TXDEBUG > 0
     84 #define	TEMAC_DEBUG 		1
     85 #else
     86 #define	TEMAC_DEBUG 		0
     87 #endif
     88 
     89 #if TEMAC_REGDEBUG > 0
     90 #define	TRACEREG(arg) 		printf arg
     91 #else
     92 #define	TRACEREG(arg) 		/* nop */
     93 #endif
     94 
     95 /* DMA control chains take up one (16KB) page. */
     96 #define TEMAC_NTXDESC 		256
     97 #define TEMAC_NRXDESC 		256
     98 
     99 #define TEMAC_TXQLEN 		64 	/* Software Tx queue length */
    100 #define TEMAC_NTXSEG 		16 	/* Maximum Tx segments per packet */
    101 
    102 #define TEMAC_NRXSEG 		1 	/* Maximum Rx segments per packet */
    103 #define TEMAC_RXPERIOD 		1 	/* Interrupt every N descriptors. */
    104 #define TEMAC_RXTIMO_HZ 	100 	/* Rx reaper frequency */
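         /*
          * With TEMAC_RXPERIOD == 1 every Rx descriptor raises an interrupt;
          * in addition, the TEMAC_RXTIMO_HZ callout (temac_rxtimo()) reaps
          * the Rx ring periodically so reception does not stall (see the
          * note at the end of temac_rxreap()).
          */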
    105 
    106 /* Next Tx descriptor and descriptor's offset WRT sc_cdaddr. */
    107 #define TEMAC_TXSINC(n, i) 	(((n) + TEMAC_TXQLEN + (i)) % TEMAC_TXQLEN)
    108 #define TEMAC_TXINC(n, i) 	(((n) + TEMAC_NTXDESC + (i)) % TEMAC_NTXDESC)
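         /*
          * The "+ TEMAC_TXQLEN" / "+ TEMAC_NTXDESC" bias keeps the modulus
          * positive when the increment is negative, e.g.
          * TEMAC_TXINC(0, -1) == (0 + 256 - 1) % 256 == 255 (the last slot);
          * temac_start() and temac_rxreap() rely on this to step backwards.
          */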
    109 
    110 #define TEMAC_TXSNEXT(n) 	TEMAC_TXSINC((n), 1)
    111 #define TEMAC_TXNEXT(n) 	TEMAC_TXINC((n), 1)
    112 #define TEMAC_TXDOFF(n) 	(offsetof(struct temac_control, cd_txdesc) + \
    113 				 (n) * sizeof(struct cdmac_descr))
    114 
    115 /* Next Rx descriptor and descriptor's offset WRT sc_cdaddr. */
    116 #define TEMAC_RXINC(n, i) 	(((n) + TEMAC_NRXDESC + (i)) % TEMAC_NRXDESC)
    117 #define TEMAC_RXNEXT(n) 	TEMAC_RXINC((n), 1)
    118 #define TEMAC_RXDOFF(n) 	(offsetof(struct temac_control, cd_rxdesc) + \
    119 				 (n) * sizeof(struct cdmac_descr))
    120 #define TEMAC_ISINTR(i) 	(((i) % TEMAC_RXPERIOD) == 0)
    121 #define TEMAC_ISLAST(i) 	((i) == (TEMAC_NRXDESC - 1))
    122 
    123 
    124 struct temac_control {
    125 	struct cdmac_descr 	cd_txdesc[TEMAC_NTXDESC];
    126 	struct cdmac_descr 	cd_rxdesc[TEMAC_NRXDESC];
    127 };
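         /*
          * Both descriptor rings live in this single structure, allocated as
          * one contiguous bus_dmamem region in temac_attach(); the
          * TEMAC_TXDOFF()/TEMAC_RXDOFF() macros turn a ring index into a byte
          * offset within it, which added to sc_cdaddr yields the descriptor's
          * bus address as seen by the CDMAC.
          */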
    128 
    129 struct temac_txsoft {
    130 	bus_dmamap_t 		txs_dmap;
    131 	struct mbuf 		*txs_mbuf;
    132 	int 			txs_last;
    133 };
    134 
    135 struct temac_rxsoft {
    136 	bus_dmamap_t 		rxs_dmap;
    137 	struct mbuf 		*rxs_mbuf;
    138 };
    139 
    140 struct temac_softc {
    141 	device_t 		sc_dev;
    142 	struct ethercom 	sc_ec;
    143 #define sc_if 			sc_ec.ec_if
    144 
    145 	/* Peripheral registers */
    146 	bus_space_tag_t 	sc_iot;
    147 	bus_space_handle_t 	sc_ioh;
    148 
    149 	/* CDMAC channel registers */
    150 	bus_space_tag_t 	sc_dma_rxt;
    151 	bus_space_handle_t 	sc_dma_rxh; 	/* Rx channel */
    152 	bus_space_handle_t 	sc_dma_rsh; 	/* Rx status */
    153 
    154 	bus_space_tag_t 	sc_dma_txt;
    155 	bus_space_handle_t 	sc_dma_txh; 	/* Tx channel */
    156 	bus_space_handle_t 	sc_dma_tsh; 	/* Tx status */
    157 
    158 	struct temac_txsoft 	sc_txsoft[TEMAC_TXQLEN];
    159 	struct temac_rxsoft 	sc_rxsoft[TEMAC_NRXDESC];
    160 
    161 	struct callout 		sc_rx_timo;
    162 	struct callout 		sc_mii_tick;
    163 	struct mii_data 	sc_mii;
    164 
    165 	bus_dmamap_t 		sc_control_dmap;
    166 #define sc_cdaddr 		sc_control_dmap->dm_segs[0].ds_addr
    167 
    168 	struct temac_control 	*sc_control_data;
    169 #define sc_rxdescs 		sc_control_data->cd_rxdesc
    170 #define sc_txdescs 		sc_control_data->cd_txdesc
    171 
    172 	int 			sc_txbusy;
    173 
    174 	int 			sc_txfree;
    175 	int 			sc_txcur;
    176 	int 			sc_txreap;
    177 
    178 	int 			sc_rxreap;
    179 
    180 	int 			sc_txsfree;
    181 	int 			sc_txscur;
    182 	int 			sc_txsreap;
    183 
    184 	int 			sc_dead; 	/* Rx/Tx DMA error (fatal) */
    185 	int 			sc_rx_drained;
    186 
    187 	int 			sc_rx_chan;
    188 	int 			sc_tx_chan;
    189 
    190 	void 			*sc_sdhook;
    191 	void 			*sc_rx_ih;
    192 	void 			*sc_tx_ih;
    193 
    194 	bus_dma_tag_t 		sc_dmat;
    195 };
    196 
    197 /* Device interface. */
    198 static void 	temac_attach(device_t, device_t, void *);
    199 
    200 /* Ifnet interface. */
    201 static int 	temac_init(struct ifnet *);
    202 static int 	temac_ioctl(struct ifnet *, u_long, void *);
    203 static void 	temac_start(struct ifnet *);
    204 static void 	temac_stop(struct ifnet *, int);
    205 
    206 /* Media management. */
    207 static int	temac_mii_readreg(device_t, int, int, uint16_t *);
    208 static void	temac_mii_statchg(struct ifnet *);
    209 static void	temac_mii_tick(void *);
    210 static int	temac_mii_writereg(device_t, int, int, uint16_t);
    211 
    212 /* Indirect hooks. */
    213 static void 	temac_shutdown(void *);
    214 static void 	temac_rx_intr(void *);
    215 static void 	temac_tx_intr(void *);
    216 
    217 /* Tools. */
    218 static inline void 	temac_rxcdsync(struct temac_softc *, int, int, int);
    219 static inline void 	temac_txcdsync(struct temac_softc *, int, int, int);
    220 static void 		temac_txreap(struct temac_softc *);
    221 static void 		temac_rxreap(struct temac_softc *);
    222 static int 		temac_rxalloc(struct temac_softc *, int, int);
    223 static void 		temac_rxtimo(void *);
    224 static void 		temac_rxdrain(struct temac_softc *);
    225 static void 		temac_reset(struct temac_softc *);
    226 static void 		temac_txkick(struct temac_softc *);
    227 
    228 /* Register access. */
    229 static inline void 	gmi_write_8(uint32_t, uint32_t, uint32_t);
    230 static inline void 	gmi_write_4(uint32_t, uint32_t);
    231 static inline void 	gmi_read_8(uint32_t, uint32_t *, uint32_t *);
    232 static inline uint32_t 	gmi_read_4(uint32_t);
     233 static inline int 	hif_wait_stat(uint32_t);
    234 
    235 #define cdmac_rx_stat(sc) \
    236     bus_space_read_4((sc)->sc_dma_rxt, (sc)->sc_dma_rsh, 0 /* XXX hack */)
    237 
    238 #define cdmac_rx_reset(sc)						      \
    239     bus_space_write_4((sc)->sc_dma_rxt, (sc)->sc_dma_rsh, 0, CDMAC_STAT_RESET)
    240 
    241 #define cdmac_rx_start(sc, val)						      \
    242     bus_space_write_4((sc)->sc_dma_rxt, (sc)->sc_dma_rxh, CDMAC_CURDESC, (val))
    243 
    244 #define cdmac_tx_stat(sc) \
    245     bus_space_read_4((sc)->sc_dma_txt, (sc)->sc_dma_tsh, 0 /* XXX hack */)
    246 
    247 #define cdmac_tx_reset(sc) \
    248     bus_space_write_4((sc)->sc_dma_txt, (sc)->sc_dma_tsh, 0, CDMAC_STAT_RESET)
    249 
    250 #define cdmac_tx_start(sc, val)						      \
    251     bus_space_write_4((sc)->sc_dma_txt, (sc)->sc_dma_txh, CDMAC_CURDESC, (val))
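         /*
          * The cdmac_*_start() macros write a descriptor's bus address into
          * the channel's CDMAC_CURDESC register, which sets the engine off on
          * that chain (see temac_init() and temac_txkick()); the stat and
          * reset macros poke the channel's separate status register.
          */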
    252 
    253 
    254 CFATTACH_DECL_NEW(temac, sizeof(struct temac_softc),
    255     xcvbus_child_match, temac_attach, NULL, NULL);
    256 
    257 
    258 /*
    259  * Private bus utilities.
    260  */
    261 static inline int
    262 hif_wait_stat(uint32_t mask)
    263 {
    264 	int 			i = 0;
    265 	int			rv = 0;
    266 
    267 	while (mask != (mfidcr(IDCR_HIF_STAT) & mask)) {
    268 		if (i++ > 100) {
    269 			printf("%s: timeout waiting for 0x%08x\n",
    270 			    __func__, mask);
    271 			rv = ETIMEDOUT;
    272 			break;
    273 		}
    274 		delay(5);
    275 	}
    276 
    277 	TRACEREG(("%s: stat %#08x loops %d\n", __func__, mask, i));
    278 	return rv;
    279 }
    280 
    281 static inline void
    282 gmi_write_4(uint32_t addr, uint32_t lo)
    283 {
    284 	mtidcr(IDCR_HIF_ARG0, lo);
    285 	mtidcr(IDCR_HIF_CTRL, (addr & HIF_CTRL_GMIADDR) | HIF_CTRL_WRITE);
    286 	hif_wait_stat(HIF_STAT_GMIWR);
    287 
    288 	TRACEREG(("%s: %#08x <- %#08x\n", __func__, addr, lo));
    289 }
    290 
    291 static inline void
    292 gmi_write_8(uint32_t addr, uint32_t lo, uint32_t hi)
    293 {
    294 	mtidcr(IDCR_HIF_ARG1, hi);
    295 	gmi_write_4(addr, lo);
    296 }
    297 
    298 static inline void
    299 gmi_read_8(uint32_t addr, uint32_t *lo, uint32_t *hi)
    300 {
    301 	*lo = gmi_read_4(addr);
    302 	*hi = mfidcr(IDCR_HIF_ARG1);
    303 }
    304 
    305 static inline uint32_t
    306 gmi_read_4(uint32_t addr)
    307 {
    308 	uint32_t 		res;
    309 
    310 	mtidcr(IDCR_HIF_CTRL, addr & HIF_CTRL_GMIADDR);
    311 	hif_wait_stat(HIF_STAT_GMIRR);
    312 
    313 	res = mfidcr(IDCR_HIF_ARG0);
    314 	TRACEREG(("%s:  %#08x -> %#08x\n", __func__, addr, res));
    315 	return (res);
    316 }
    317 
    318 /*
    319  * Generic device.
    320  */
    321 static void
    322 temac_attach(device_t parent, device_t self, void *aux)
    323 {
    324 	struct xcvbus_attach_args *vaa = aux;
    325 	struct ll_dmac 		*rx = vaa->vaa_rx_dmac;
    326 	struct ll_dmac 		*tx = vaa->vaa_tx_dmac;
    327 	struct temac_softc 	*sc = device_private(self);
    328 	struct ifnet 		*ifp = &sc->sc_if;
    329 	struct mii_data 	*mii = &sc->sc_mii;
    330 	uint8_t 		enaddr[ETHER_ADDR_LEN];
    331 	bus_dma_segment_t 	seg;
    332 	int 			error, nseg, i;
    333 	const char * const xname = device_xname(self);
    334 
    335 	aprint_normal(": TEMAC\n"); 	/* XXX will be LL_TEMAC, PLB_TEMAC */
    336 
    337 	KASSERT(rx);
    338 	KASSERT(tx);
    339 
    340 	sc->sc_dev = self;
    341 	sc->sc_dmat = vaa->vaa_dmat;
    342 	sc->sc_dead = 0;
    343 	sc->sc_rx_drained = 1;
    344 	sc->sc_txbusy = 0;
    345 	sc->sc_iot = vaa->vaa_iot;
    346 	sc->sc_dma_rxt = rx->dmac_iot;
    347 	sc->sc_dma_txt = tx->dmac_iot;
    348 
    349 	/*
    350 	 * Map HIF and receive/transmit dmac registers.
    351 	 */
    352 	if ((error = bus_space_map(vaa->vaa_iot, vaa->vaa_addr, TEMAC_SIZE, 0,
    353 	    &sc->sc_ioh)) != 0) {
    354 		aprint_error_dev(self, "could not map registers\n");
    355 		goto fail_0;
    356 	}
    357 
    358 	if ((error = bus_space_map(sc->sc_dma_rxt, rx->dmac_ctrl_addr,
    359 	    CDMAC_CTRL_SIZE, 0, &sc->sc_dma_rxh)) != 0) {
    360 		aprint_error_dev(self, "could not map Rx control registers\n");
    361 		goto fail_0;
    362 	}
    363 	if ((error = bus_space_map(sc->sc_dma_rxt, rx->dmac_stat_addr,
    364 	    CDMAC_STAT_SIZE, 0, &sc->sc_dma_rsh)) != 0) {
    365 		aprint_error_dev(self, "could not map Rx status register\n");
    366 		goto fail_0;
    367 	}
    368 
    369 	if ((error = bus_space_map(sc->sc_dma_txt, tx->dmac_ctrl_addr,
    370 	    CDMAC_CTRL_SIZE, 0, &sc->sc_dma_txh)) != 0) {
    371 		aprint_error_dev(self, "could not map Tx control registers\n");
    372 		goto fail_0;
    373 	}
    374 	if ((error = bus_space_map(sc->sc_dma_txt, tx->dmac_stat_addr,
    375 	    CDMAC_STAT_SIZE, 0, &sc->sc_dma_tsh)) != 0) {
    376 		aprint_error_dev(self, "could not map Tx status register\n");
    377 		goto fail_0;
    378 	}
    379 
    380 	/*
    381 	 * Allocate and initialize DMA control chains.
    382 	 */
    383 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
    384 	    sizeof(struct temac_control), 8, 0, &seg, 1, &nseg, 0)) != 0) {
    385 	    	aprint_error_dev(self, "could not allocate control data\n");
    386 		goto fail_0;
    387 	}
    388 
    389 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
    390 	    sizeof(struct temac_control),
    391 	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
    392 	    	aprint_error_dev(self, "could not map control data\n");
    393 		goto fail_1;
    394 	}
    395 
    396 	if ((error = bus_dmamap_create(sc->sc_dmat,
    397 	    sizeof(struct temac_control), 1,
    398 	    sizeof(struct temac_control), 0, 0, &sc->sc_control_dmap)) != 0) {
    399 	    	aprint_error_dev(self,
    400 		    "could not create control data DMA map\n");
    401 		goto fail_2;
    402 	}
    403 
    404 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_control_dmap,
    405 	    sc->sc_control_data, sizeof(struct temac_control), NULL, 0)) != 0) {
    406 	    	aprint_error_dev(self, "could not load control data DMA map\n");
    407 		goto fail_3;
    408 	}
    409 
    410 	/*
    411 	 * Link descriptor chains.
    412 	 */
    413 	memset(sc->sc_control_data, 0, sizeof(struct temac_control));
    414 
    415 	for (i = 0; i < TEMAC_NTXDESC; i++) {
    416 		sc->sc_txdescs[i].desc_next = sc->sc_cdaddr +
    417 		    TEMAC_TXDOFF(TEMAC_TXNEXT(i));
    418 		sc->sc_txdescs[i].desc_stat = CDMAC_STAT_DONE;
    419 	}
    420 	for (i = 0; i < TEMAC_NRXDESC; i++) {
    421 		sc->sc_rxdescs[i].desc_next = sc->sc_cdaddr +
    422 		    TEMAC_RXDOFF(TEMAC_RXNEXT(i));
     423 		sc->sc_rxdescs[i].desc_stat = CDMAC_STAT_DONE;
    424 	}
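          	/*
          	 * Each desc_next now holds the bus address of the following
          	 * descriptor, wrapping at the end of the array, so both chains
          	 * form rings the CDMAC can walk on its own.
          	 */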
    425 
    426 	bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap, 0,
    427 	    sizeof(struct temac_control),
    428 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    429 
    430 	/*
    431 	 * Initialize software state for transmit/receive jobs.
    432 	 */
    433 	for (i = 0; i < TEMAC_TXQLEN; i++) {
    434 		if ((error = bus_dmamap_create(sc->sc_dmat,
    435 		    ETHER_MAX_LEN_JUMBO, TEMAC_NTXSEG, ETHER_MAX_LEN_JUMBO,
    436 		    0, 0, &sc->sc_txsoft[i].txs_dmap)) != 0) {
    437 		    	aprint_error_dev(self,
    438 			    "could not create Tx DMA map %d\n",
    439 		    	    i);
    440 			goto fail_4;
    441 		}
    442 		sc->sc_txsoft[i].txs_mbuf = NULL;
    443 		sc->sc_txsoft[i].txs_last = 0;
    444 	}
    445 
    446 	for (i = 0; i < TEMAC_NRXDESC; i++) {
    447 		if ((error = bus_dmamap_create(sc->sc_dmat,
    448 		    MCLBYTES, TEMAC_NRXSEG, MCLBYTES, 0, 0,
    449 		    &sc->sc_rxsoft[i].rxs_dmap)) != 0) {
    450 		    	aprint_error_dev(self,
    451 			    "could not create Rx DMA map %d\n", i);
    452 			goto fail_5;
    453 		}
    454 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
    455 	}
    456 
    457 	/*
    458 	 * Setup transfer interrupt handlers.
    459 	 */
    460 	error = ENOMEM;
    461 
    462 	sc->sc_rx_ih = ll_dmac_intr_establish(rx->dmac_chan,
    463 	    temac_rx_intr, sc);
    464 	if (sc->sc_rx_ih == NULL) {
    465 		aprint_error_dev(self, "could not establish Rx interrupt\n");
    466 		goto fail_5;
    467 	}
    468 
    469 	sc->sc_tx_ih = ll_dmac_intr_establish(tx->dmac_chan,
    470 	    temac_tx_intr, sc);
    471 	if (sc->sc_tx_ih == NULL) {
    472 		aprint_error_dev(self, "could not establish Tx interrupt\n");
    473 		goto fail_6;
    474 	}
    475 
    476 	/* XXXFreza: faked, should read unicast address filter. */
    477 	enaddr[0] = 0x00;
    478 	enaddr[1] = 0x11;
    479 	enaddr[2] = 0x17;
    480 	enaddr[3] = 0xff;
    481 	enaddr[4] = 0xff;
    482 	enaddr[5] = 0x01;
    483 
    484 	/*
    485 	 * Initialize the TEMAC.
    486 	 */
    487 	temac_reset(sc);
    488 
    489 	/* Configure MDIO link. */
    490 	gmi_write_4(TEMAC_GMI_MGMTCF, GMI_MGMT_CLKDIV_100MHz | GMI_MGMT_MDIO);
    491 
    492 	/* Initialize PHY. */
    493 	mii->mii_ifp = ifp;
    494 	mii->mii_readreg = temac_mii_readreg;
    495 	mii->mii_writereg = temac_mii_writereg;
    496 	mii->mii_statchg = temac_mii_statchg;
    497 	sc->sc_ec.ec_mii = mii;
    498 	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
    499 
    500 	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
    501 	    MII_OFFSET_ANY, 0);
    502 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    503 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
    504 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
    505 	} else {
    506 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
    507 	}
    508 
    509 	/* Hold PHY in reset. */
    510 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET, TEMAC_RESET_PHY);
    511 
    512 	/* Reset EMAC. */
    513 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET,
    514 	    TEMAC_RESET_EMAC);
    515 	delay(10000);
    516 
     517 	/* Reset peripheral, which wakes the PHY and EMAC. */
    518 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET,
    519 	    TEMAC_RESET_PERIPH);
    520 	delay(40000);
    521 
    522 	/* (Re-)Configure MDIO link. */
    523 	gmi_write_4(TEMAC_GMI_MGMTCF, GMI_MGMT_CLKDIV_100MHz | GMI_MGMT_MDIO);
    524 
    525 	/*
    526 	 * Hook up with network stack.
    527 	 */
    528 	strcpy(ifp->if_xname, xname);
    529 	ifp->if_softc = sc;
    530 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    531 	ifp->if_ioctl = temac_ioctl;
    532 	ifp->if_start = temac_start;
    533 	ifp->if_init = temac_init;
    534 	ifp->if_stop = temac_stop;
    535 	ifp->if_watchdog = NULL;
    536 	IFQ_SET_READY(&ifp->if_snd);
    537 	IFQ_SET_MAXLEN(&ifp->if_snd, TEMAC_TXQLEN);
    538 
    539 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    540 
    541 	if_attach(ifp);
    542 	ether_ifattach(ifp, enaddr);
    543 
    544 	sc->sc_sdhook = shutdownhook_establish(temac_shutdown, sc);
    545 	if (sc->sc_sdhook == NULL)
    546 		aprint_error_dev(self,
    547 		    "WARNING: unable to establish shutdown hook\n");
    548 
    549 	callout_setfunc(&sc->sc_mii_tick, temac_mii_tick, sc);
    550 	callout_setfunc(&sc->sc_rx_timo, temac_rxtimo, sc);
    551 
     552 	return;
    553 
    554  fail_6:
    555 	ll_dmac_intr_disestablish(rx->dmac_chan, sc->sc_rx_ih);
    556 	i = TEMAC_NRXDESC;
    557  fail_5:
    558  	for (--i; i >= 0; i--)
    559  		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxsoft[i].rxs_dmap);
    560 	i = TEMAC_TXQLEN;
    561  fail_4:
    562  	for (--i; i >= 0; i--)
    563  		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txsoft[i].txs_dmap);
    564  fail_3:
    565 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_control_dmap);
    566  fail_2:
    567 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
    568 	    sizeof(struct temac_control));
    569  fail_1:
    570 	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
    571  fail_0:
    572  	aprint_error_dev(self, "error = %d\n", error);
    573 }
    574 
    575 /*
    576  * Network device.
    577  */
    578 static int
    579 temac_init(struct ifnet *ifp)
    580 {
    581 	struct temac_softc 	*sc = (struct temac_softc *)ifp->if_softc;
    582 	uint32_t 		rcr, tcr;
    583 	int 			i, error;
    584 
    585 	/* Reset DMA channels. */
    586 	cdmac_tx_reset(sc);
    587 	cdmac_rx_reset(sc);
    588 
    589 	/* Set current media. */
    590 	if ((error = ether_mediachange(ifp)) != 0)
    591 		return error;
    592 
    593 	callout_schedule(&sc->sc_mii_tick, hz);
    594 
    595 	/* Enable EMAC engine. */
    596 	rcr = (gmi_read_4(TEMAC_GMI_RXCF1) | GMI_RX_ENABLE) &
    597 	    ~(GMI_RX_JUMBO | GMI_RX_FCS);
    598 	gmi_write_4(TEMAC_GMI_RXCF1, rcr);
    599 
    600 	tcr = (gmi_read_4(TEMAC_GMI_TXCF) | GMI_TX_ENABLE) &
    601 	    ~(GMI_TX_JUMBO | GMI_TX_FCS);
    602 	gmi_write_4(TEMAC_GMI_TXCF, tcr);
    603 
    604 	/* XXXFreza: Force promiscuous mode, for now. */
    605 	gmi_write_4(TEMAC_GMI_AFM, GMI_AFM_PROMISC);
    606 	ifp->if_flags |= IFF_PROMISC;
    607 
    608 	/* Rx/Tx queues are drained -- either from attach() or stop(). */
    609 	sc->sc_txsfree = TEMAC_TXQLEN;
    610 	sc->sc_txsreap = 0;
    611 	sc->sc_txscur = 0;
    612 
    613 	sc->sc_txfree = TEMAC_NTXDESC;
    614 	sc->sc_txreap = 0;
    615 	sc->sc_txcur = 0;
    616 
    617 	sc->sc_rxreap = 0;
    618 
    619 	/* Allocate and map receive buffers. */
    620 	if (sc->sc_rx_drained) {
    621 		for (i = 0; i < TEMAC_NRXDESC; i++) {
    622 			if ((error = temac_rxalloc(sc, i, 1)) != 0) {
    623 				aprint_error_dev(sc->sc_dev,
    624 				    "failed to allocate Rx descriptor %d\n",
    625 				    i);
    626 				temac_rxdrain(sc);
    627 				return (error);
    628 			}
    629 		}
    630 		sc->sc_rx_drained = 0;
    631 
    632 		temac_rxcdsync(sc, 0, TEMAC_NRXDESC,
    633 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    634 		cdmac_rx_start(sc, sc->sc_cdaddr + TEMAC_RXDOFF(0));
    635 	}
    636 
    637 	ifp->if_flags |= IFF_RUNNING;
    638 	ifp->if_flags &= ~IFF_OACTIVE;
    639 
    640 	return (0);
    641 }
    642 
    643 static int
    644 temac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    645 {
    646 	struct temac_softc 	*sc = (struct temac_softc *)ifp->if_softc;
    647 	int 			s, ret;
    648 
    649 	s = splnet();
    650 	if (sc->sc_dead)
    651 		ret = EIO;
    652 	else
    653 		ret = ether_ioctl(ifp, cmd, data);
    654 	splx(s);
    655 	return (ret);
    656 }
    657 
    658 static void
    659 temac_start(struct ifnet *ifp)
    660 {
    661 	struct temac_softc 	*sc = (struct temac_softc *)ifp->if_softc;
    662 	struct temac_txsoft 	*txs;
    663 	struct mbuf 		*m;
    664 	bus_dmamap_t 		dmap;
    665 	int 			error, head, nsegs, i;
    666 
    667 	nsegs = 0;
    668 	head = sc->sc_txcur;
    669 	txs = NULL; 		/* gcc */
    670 
    671 	if (sc->sc_dead)
    672 		return;
    673 
    674 	KASSERT(sc->sc_txfree >= 0);
    675 	KASSERT(sc->sc_txsfree >= 0);
    676 
    677 	/*
    678 	 * Push mbufs into descriptor chain until we drain the interface
    679 	 * queue or run out of descriptors. We'll mark the first segment
     680 	 * as "done" in the hope that we might put the CDMAC interrupt above
     681 	 * IPL_NET and have it start jobs & mark packets for GC preemptively
     682 	 * for us -- creativity due to limitations in the CDMAC transfer engine
    683 	 * (it really consumes lists, not circular queues, AFAICS).
    684 	 *
    685 	 * We schedule one interrupt per Tx batch.
    686 	 */
    687 	while (1) {
    688 		IFQ_POLL(&ifp->if_snd, m);
    689 		if (m == NULL)
    690 			break;
    691 
    692 		if (sc->sc_txsfree == 0) {
    693 			ifp->if_flags |= IFF_OACTIVE;
    694 			break;
    695 		}
    696 
    697 		txs = &sc->sc_txsoft[sc->sc_txscur];
    698 		dmap = txs->txs_dmap;
    699 
     700 		if (txs->txs_mbuf != NULL)
     701 			printf("temac: stale Tx mbuf\n");
     702 		if (txs->txs_last)
     703 			printf("temac: stale Tx last flag\n");
    704 
    705 		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m,
    706 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT)) != 0) {
    707 		    	if (error == EFBIG) {
    708 		    		aprint_error_dev(sc->sc_dev,
    709 				    "Tx consumes too many segments, dropped\n");
    710 				IFQ_DEQUEUE(&ifp->if_snd, m);
    711 				m_freem(m);
    712 				continue;
    713 		    	} else {
    714 		    		aprint_debug_dev(sc->sc_dev,
    715 				    "Tx stall due to resource shortage\n");
    716 		    		break;
    717 			}
    718 		}
    719 
    720 		/*
    721 		 * If we're short on DMA descriptors, notify upper layers
    722 		 * and leave this packet for later.
    723 		 */
    724 		if (dmap->dm_nsegs > sc->sc_txfree) {
    725 			bus_dmamap_unload(sc->sc_dmat, dmap);
    726 			ifp->if_flags |= IFF_OACTIVE;
    727 			break;
    728 		}
    729 
    730 		IFQ_DEQUEUE(&ifp->if_snd, m);
    731 
    732 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
    733 		    BUS_DMASYNC_PREWRITE);
    734 		txs->txs_mbuf = m;
    735 
    736 		/*
    737 		 * Map the packet into descriptor chain. XXX We'll want
    738 		 * to fill checksum offload commands here.
    739 		 *
    740 		 * We would be in a race if we weren't blocking CDMAC intr
    741 		 * at this point -- we need to be locked against txreap()
    742 		 * because of dmasync ops.
    743 		 */
    744 
    745 		temac_txcdsync(sc, sc->sc_txcur, dmap->dm_nsegs,
    746 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    747 
    748 		for (i = 0; i < dmap->dm_nsegs; i++) {
    749 			sc->sc_txdescs[sc->sc_txcur].desc_addr =
    750 			    dmap->dm_segs[i].ds_addr;
    751 			sc->sc_txdescs[sc->sc_txcur].desc_size =
    752 			    dmap->dm_segs[i].ds_len;
    753 			sc->sc_txdescs[sc->sc_txcur].desc_stat =
    754 			    (i == 0 			? CDMAC_STAT_SOP : 0) |
    755 			    (i == (dmap->dm_nsegs - 1) 	? CDMAC_STAT_EOP : 0);
    756 
    757 			sc->sc_txcur = TEMAC_TXNEXT(sc->sc_txcur);
    758 		}
    759 
    760 		sc->sc_txfree -= dmap->dm_nsegs;
    761 		nsegs += dmap->dm_nsegs;
    762 
    763 		sc->sc_txscur = TEMAC_TXSNEXT(sc->sc_txscur);
    764 		sc->sc_txsfree--;
    765 	}
    766 
    767 	/* Get data running if we queued any. */
    768 	if (nsegs > 0) {
    769 		int 		tail = TEMAC_TXINC(sc->sc_txcur, -1);
    770 
    771 		/* Mark the last packet in this job. */
    772 		txs->txs_last = 1;
    773 
    774 		/* Mark the last descriptor in this job. */
    775 		sc->sc_txdescs[tail].desc_stat |= CDMAC_STAT_STOP |
    776 		    CDMAC_STAT_INTR;
    777 		temac_txcdsync(sc, head, nsegs,
    778 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    779 
    780 		temac_txkick(sc);
    781 #if TEMAC_TXDEBUG > 0
    782 		aprint_debug_dev(sc->sc_dev,
    783 		    "start:  txcur  %03d -> %03d, nseg %03d\n",
    784 		    head, sc->sc_txcur, nsegs);
    785 #endif
    786 	}
    787 }
    788 
    789 static void
    790 temac_stop(struct ifnet *ifp, int disable)
    791 {
    792 	struct temac_softc 	*sc = (struct temac_softc *)ifp->if_softc;
    793 	struct temac_txsoft 	*txs;
    794 	int 			i;
    795 
    796 #if TEMAC_DEBUG > 0
    797 	aprint_debug_dev(sc->sc_dev, "stop\n");
    798 #endif
    799 
    800 	/* Down the MII. */
    801 	callout_stop(&sc->sc_mii_tick);
    802 	mii_down(&sc->sc_mii);
    803 
    804 	/* Stop the engine. */
    805 	temac_reset(sc);
    806 
    807 	/* Drain buffers queues (unconditionally). */
    808 	temac_rxdrain(sc);
    809 
    810 	for (i = 0; i < TEMAC_TXQLEN; i++) {
    811 		txs = &sc->sc_txsoft[i];
    812 
    813 		if (txs->txs_mbuf != NULL) {
    814 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmap);
    815 			m_freem(txs->txs_mbuf);
    816 			txs->txs_mbuf = NULL;
    817 			txs->txs_last = 0;
    818 		}
    819 	}
    820 	sc->sc_txbusy = 0;
    821 
    822 	/* Acknowledge we're down. */
    823 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
    824 }
    825 
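         /*
          * MII access uses the same HIF/IDCR indirection as the GMI helpers
          * above: the PHY and register numbers go into HIF_ARG0, the transfer
          * is triggered through HIF_CTRL, completion is polled with
          * hif_wait_stat(), and any result is read back from HIF_ARG0.
          */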
    826 static int
    827 temac_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
    828 {
    829 	int rv;
    830 
    831 	mtidcr(IDCR_HIF_ARG0, (phy << 5) | reg);
    832 	mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_ADDR);
    833 
    834 	if ((rv = hif_wait_stat(HIF_STAT_MIIRR)) != 0)
    835 		return rv;
    836 
    837 	*val = mfidcr(IDCR_HIF_ARG0) & 0xffff;
    838 	return 0;
    839 }
    840 
    841 static int
    842 temac_mii_writereg(device_t self, int phy, int reg, uint16_t val)
    843 {
    844 	mtidcr(IDCR_HIF_ARG0, val);
    845 	mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_WRVAL | HIF_CTRL_WRITE);
    846 	mtidcr(IDCR_HIF_ARG0, (phy << 5) | reg);
    847 	mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_ADDR | HIF_CTRL_WRITE);
    848 	return hif_wait_stat(HIF_STAT_MIIWR);
    849 }
    850 
    851 static void
    852 temac_mii_statchg(struct ifnet *ifp)
    853 {
    854 	struct temac_softc 	*sc = ifp->if_softc;
    855 	uint32_t 		rcf, tcf, mmc;
    856 
    857 	/* Full/half duplex link. */
    858 	rcf = gmi_read_4(TEMAC_GMI_RXCF1);
    859 	tcf = gmi_read_4(TEMAC_GMI_TXCF);
    860 
    861 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
    862 		gmi_write_4(TEMAC_GMI_RXCF1, rcf & ~GMI_RX_HDX);
    863 		gmi_write_4(TEMAC_GMI_TXCF, tcf & ~GMI_TX_HDX);
    864 	} else {
    865 		gmi_write_4(TEMAC_GMI_RXCF1, rcf | GMI_RX_HDX);
    866 		gmi_write_4(TEMAC_GMI_TXCF, tcf | GMI_TX_HDX);
    867 	}
    868 
    869 	/* Link speed. */
    870 	mmc = gmi_read_4(TEMAC_GMI_MMC) & ~GMI_MMC_SPEED_MASK;
    871 
    872 	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
    873 	case IFM_10_T:
    874 		/*
    875 		 * XXXFreza: the GMAC is not happy with 10Mbit ethernet,
    876 		 * although the documentation claims it's supported. Maybe
    877 		 * it's just my equipment...
    878 		 */
    879 		mmc |= GMI_MMC_SPEED_10;
    880 		break;
    881 	case IFM_100_TX:
    882 		mmc |= GMI_MMC_SPEED_100;
    883 		break;
    884 	case IFM_1000_T:
    885 		mmc |= GMI_MMC_SPEED_1000;
    886 		break;
    887 	}
    888 
    889 	gmi_write_4(TEMAC_GMI_MMC, mmc);
    890 }
    891 
    892 static void
    893 temac_mii_tick(void *arg)
    894 {
    895 	struct temac_softc 	*sc = (struct temac_softc *)arg;
    896 	int 			s;
    897 
    898 	if (!device_is_active(sc->sc_dev))
    899 		return;
    900 
    901 	s = splnet();
    902 	mii_tick(&sc->sc_mii);
    903 	splx(s);
    904 
    905 	callout_schedule(&sc->sc_mii_tick, hz);
    906 }
    907 
    908 /*
    909  * External hooks.
    910  */
    911 static void
    912 temac_shutdown(void *arg)
    913 {
    914 	struct temac_softc 	*sc = (struct temac_softc *)arg;
    915 
    916 	temac_reset(sc);
    917 }
    918 
    919 static void
    920 temac_tx_intr(void *arg)
    921 {
    922 	struct temac_softc 	*sc = (struct temac_softc *)arg;
    923 	uint32_t 		stat;
    924 
    925 	/* XXX: We may need to splnet() here if cdmac(4) changes. */
    926 
    927 	if ((stat = cdmac_tx_stat(sc)) & CDMAC_STAT_ERROR) {
    928 		aprint_error_dev(sc->sc_dev,
    929 		    "transmit DMA is toast (%#08x), halted!\n",
    930 		    stat);
    931 
    932 		/* XXXFreza: how to signal this upstream? */
    933 		temac_stop(&sc->sc_if, 1);
    934 		sc->sc_dead = 1;
    935 	}
    936 
    937 #if TEMAC_DEBUG > 0
    938 	aprint_debug_dev(sc->sc_dev, "tx intr 0x%08x\n", stat);
    939 #endif
    940 	temac_txreap(sc);
    941 }
    942 
    943 static void
    944 temac_rx_intr(void *arg)
    945 {
    946 	struct temac_softc 	*sc = (struct temac_softc *)arg;
    947 	uint32_t 		stat;
    948 
    949 	/* XXX: We may need to splnet() here if cdmac(4) changes. */
    950 
    951 	if ((stat = cdmac_rx_stat(sc)) & CDMAC_STAT_ERROR) {
    952 		aprint_error_dev(sc->sc_dev,
    953 		    "receive DMA is toast (%#08x), halted!\n",
    954 		    stat);
    955 
    956 		/* XXXFreza: how to signal this upstream? */
    957 		temac_stop(&sc->sc_if, 1);
    958 		sc->sc_dead = 1;
    959 	}
    960 
    961 #if TEMAC_DEBUG > 0
    962 	aprint_debug_dev(sc->sc_dev, "rx intr 0x%08x\n", stat);
    963 #endif
    964 	temac_rxreap(sc);
    965 }
    966 
    967 /*
    968  * Utils.
    969  */
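         /*
          * temac_txcdsync()/temac_rxcdsync() sync "cnt" descriptors starting
          * at index "first"; a range that wraps past the end of the ring is
          * split into two bus_dmamap_sync() calls so only in-bounds offsets
          * are ever passed down.
          */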
    970 static inline void
    971 temac_txcdsync(struct temac_softc *sc, int first, int cnt, int flag)
    972 {
    973 	if ((first + cnt) > TEMAC_NTXDESC) {
    974 		bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
    975 		    TEMAC_TXDOFF(first),
    976 		    sizeof(struct cdmac_descr) * (TEMAC_NTXDESC - first),
    977 		    flag);
    978 		cnt = (first + cnt) % TEMAC_NTXDESC;
    979 		first = 0;
    980 	}
    981 
    982 	bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
    983 	    TEMAC_TXDOFF(first),
    984 	    sizeof(struct cdmac_descr) * cnt,
    985 	    flag);
    986 }
    987 
    988 static inline void
    989 temac_rxcdsync(struct temac_softc *sc, int first, int cnt, int flag)
    990 {
    991 	if ((first + cnt) > TEMAC_NRXDESC) {
    992 		bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
    993 		    TEMAC_RXDOFF(first),
    994 		    sizeof(struct cdmac_descr) * (TEMAC_NRXDESC - first),
    995 		    flag);
    996 		cnt = (first + cnt) % TEMAC_NRXDESC;
    997 		first = 0;
    998 	}
    999 
   1000 	bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
   1001 	    TEMAC_RXDOFF(first),
   1002 	    sizeof(struct cdmac_descr) * cnt,
   1003 	    flag);
   1004 }
   1005 
   1006 static void
   1007 temac_txreap(struct temac_softc *sc)
   1008 {
   1009 	struct temac_txsoft 	*txs;
   1010 	bus_dmamap_t 		dmap;
   1011 	int 			sent = 0;
   1012 
   1013 	/*
   1014 	 * Transmit interrupts happen on the last descriptor of Tx jobs.
   1015 	 * Hence, every time we're called (and we assume txintr is our
    1016 	 * only caller!), we reap packets up to and including the one
   1017 	 * marked as last-in-batch.
   1018 	 *
    1019 	 * XXX we rely on making EXACTLY one batch per intr, no more
   1020 	 */
   1021 	while (sc->sc_txsfree != TEMAC_TXQLEN) {
   1022 		txs = &sc->sc_txsoft[sc->sc_txsreap];
   1023 		dmap = txs->txs_dmap;
   1024 
   1025 		sc->sc_txreap = TEMAC_TXINC(sc->sc_txreap, dmap->dm_nsegs);
   1026 		sc->sc_txfree += dmap->dm_nsegs;
   1027 
   1028 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmap);
   1029 		m_freem(txs->txs_mbuf);
   1030 		txs->txs_mbuf = NULL;
   1031 
   1032 		sc->sc_if.if_opackets++;
   1033 		sent = 1;
   1034 
   1035 		sc->sc_txsreap = TEMAC_TXSNEXT(sc->sc_txsreap);
   1036 		sc->sc_txsfree++;
   1037 
   1038 		if (txs->txs_last) {
   1039 			txs->txs_last = 0;
   1040 			sc->sc_txbusy = 0; 	/* channel stopped now */
   1041 
   1042 			temac_txkick(sc);
   1043 			break;
   1044 		}
   1045 	}
   1046 
   1047 	if (sent && (sc->sc_if.if_flags & IFF_OACTIVE))
   1048 		sc->sc_if.if_flags &= ~IFF_OACTIVE;
   1049 }
   1050 
   1051 static int
   1052 temac_rxalloc(struct temac_softc *sc, int which, int verbose)
   1053 {
   1054 	struct temac_rxsoft 	*rxs;
   1055 	struct mbuf 		*m;
   1056 	uint32_t 		stat;
   1057 	int 			error;
   1058 
   1059 	rxs = &sc->sc_rxsoft[which];
   1060 
   1061 	/* The mbuf itself is not our problem, just clear DMA related stuff. */
   1062 	if (rxs->rxs_mbuf != NULL) {
   1063 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmap);
   1064 		rxs->rxs_mbuf = NULL;
   1065 	}
   1066 
   1067 	/*
   1068 	 * We would like to store mbuf and dmap in application specific
   1069 	 * fields of the descriptor, but that doesn't work for Rx. Shame
   1070 	 * on Xilinx for this (and for the useless timer architecture).
   1071 	 *
   1072 	 * Hence each descriptor needs its own soft state. We may want
   1073 	 * to merge multiple rxs's into a monster mbuf when we support
   1074 	 * jumbo frames though. Also, we use single set of indexing
    1075 	 * jumbo frames though. Also, we use a single set of indexing
   1076 	 */
   1077 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   1078 	if (m == NULL) {
   1079 		if (verbose)
   1080 			aprint_debug_dev(sc->sc_dev,
   1081 			    "out of Rx header mbufs\n");
   1082 		return (ENOBUFS);
   1083 	}
   1084 	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
   1085 
   1086 	MCLGET(m, M_DONTWAIT);
   1087 	if ((m->m_flags & M_EXT) == 0) {
   1088 		if (verbose)
   1089 			aprint_debug_dev(sc->sc_dev,
   1090 			    "out of Rx cluster mbufs\n");
   1091 		m_freem(m);
   1092 		return (ENOBUFS);
   1093 	}
   1094 
   1095 	rxs->rxs_mbuf = m;
   1096 	m->m_pkthdr.len = m->m_len = MCLBYTES;
   1097 
    1098 	/* Make sure the payload after the Ethernet header is 4-byte aligned. */
   1099 	m_adj(m, 2);
   1100 
   1101 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmap, m,
   1102 	    BUS_DMA_NOWAIT);
   1103 	if (error) {
   1104 		if (verbose)
   1105 			aprint_debug_dev(sc->sc_dev,
   1106 			    "could not map Rx descriptor %d, error = %d\n",
   1107 			    which, error);
   1108 
   1109 		rxs->rxs_mbuf = NULL;
   1110 		m_freem(m);
   1111 
   1112 		return (error);
   1113 	}
   1114 
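          	/*
          	 * Request an interrupt every TEMAC_RXPERIOD-th descriptor and
          	 * make the engine stop on the ring's last slot, from where
          	 * temac_rxreap() restarts it at TEMAC_RXDOFF(0).
          	 */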
   1115 	stat =
   1116 	    (TEMAC_ISINTR(which) ? CDMAC_STAT_INTR : 0) |
   1117 	    (TEMAC_ISLAST(which) ? CDMAC_STAT_STOP : 0);
   1118 
   1119 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmap, 0,
   1120 	    rxs->rxs_dmap->dm_mapsize, BUS_DMASYNC_PREREAD);
   1121 
   1122 	/* Descriptor post-sync, if needed, left to the caller. */
   1123 
   1124 	sc->sc_rxdescs[which].desc_addr = rxs->rxs_dmap->dm_segs[0].ds_addr;
   1125 	sc->sc_rxdescs[which].desc_size  = rxs->rxs_dmap->dm_segs[0].ds_len;
   1126 	sc->sc_rxdescs[which].desc_stat = stat;
   1127 
   1128 	/* Descriptor pre-sync, if needed, left to the caller. */
   1129 
   1130 	return (0);
   1131 }
   1132 
   1133 static void
   1134 temac_rxreap(struct temac_softc *sc)
   1135 {
   1136 	struct ifnet 		*ifp = &sc->sc_if;
   1137 	uint32_t 		stat, rxstat, rxsize;
   1138 	struct mbuf 		*m;
   1139 	int 			nseg, head, tail;
   1140 
   1141 	head = sc->sc_rxreap;
   1142 	tail = 0; 		/* gcc */
   1143 	nseg = 0;
   1144 
   1145 	/*
   1146 	 * Collect finished entries on the Rx list, kick DMA if we hit
   1147 	 * the end. DMA will always stop on the last descriptor in chain,
   1148 	 * so it will never hit a reap-in-progress descriptor.
   1149 	 */
   1150 	while (1) {
   1151 		/* Maybe we previously failed to refresh this one? */
   1152 		if (sc->sc_rxsoft[sc->sc_rxreap].rxs_mbuf == NULL) {
   1153 			if (temac_rxalloc(sc, sc->sc_rxreap, 0) != 0)
   1154 				break;
   1155 
   1156 			sc->sc_rxreap = TEMAC_RXNEXT(sc->sc_rxreap);
   1157 			continue;
   1158 		}
   1159 		temac_rxcdsync(sc, sc->sc_rxreap, 1,
   1160 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1161 
   1162 		stat = sc->sc_rxdescs[sc->sc_rxreap].desc_stat;
   1163 		m = NULL;
   1164 
   1165 		if ((stat & CDMAC_STAT_DONE) == 0)
   1166 			break;
   1167 
    1168 		/* Count any descriptor we've collected, regardless of status. */
    1169 		nseg++;
   1170 
   1171 		/* XXXFreza: This won't work for jumbo frames. */
   1172 
   1173 		if ((stat & (CDMAC_STAT_EOP | CDMAC_STAT_SOP)) !=
   1174 		    (CDMAC_STAT_EOP | CDMAC_STAT_SOP)) {
   1175 		    	aprint_error_dev(sc->sc_dev,
   1176 			    "Rx packet doesn't fit in one descriptor, "
   1177 			    "stat = %#08x\n", stat);
   1178 			goto badframe;
   1179 		}
   1180 
   1181 		/* Dissect TEMAC footer if this is end of packet. */
   1182 		rxstat = sc->sc_rxdescs[sc->sc_rxreap].desc_rxstat;
   1183 		rxsize = sc->sc_rxdescs[sc->sc_rxreap].desc_rxsize &
   1184 		    RXSIZE_MASK;
   1185 
   1186 		if ((rxstat & RXSTAT_GOOD) == 0 ||
   1187 		    (rxstat & RXSTAT_SICK) != 0) {
   1188 		    	aprint_error_dev(sc->sc_dev,
   1189 			    "corrupt Rx packet, rxstat = %#08x\n",
   1190 		    	    rxstat);
   1191 			goto badframe;
   1192 		}
   1193 
   1194 		/* We are now bound to succeed. */
   1195 		bus_dmamap_sync(sc->sc_dmat,
   1196 		    sc->sc_rxsoft[sc->sc_rxreap].rxs_dmap, 0,
   1197 		    sc->sc_rxsoft[sc->sc_rxreap].rxs_dmap->dm_mapsize,
   1198 		    BUS_DMASYNC_POSTREAD);
   1199 
   1200 		m = sc->sc_rxsoft[sc->sc_rxreap].rxs_mbuf;
   1201 		m_set_rcvif(m, ifp);
   1202 		m->m_pkthdr.len = m->m_len = rxsize;
   1203 
   1204  badframe:
   1205  		/* Get ready for more work. */
   1206 		tail = sc->sc_rxreap;
   1207 		sc->sc_rxreap = TEMAC_RXNEXT(sc->sc_rxreap);
   1208 
   1209  		/* On failures we reuse the descriptor and go ahead. */
   1210  		if (m == NULL) {
   1211 			sc->sc_rxdescs[tail].desc_stat =
   1212 			    (TEMAC_ISINTR(tail) ? CDMAC_STAT_INTR : 0) |
   1213 			    (TEMAC_ISLAST(tail) ? CDMAC_STAT_STOP : 0);
   1214 
   1215 			ifp->if_ierrors++;
   1216 			continue;
   1217  		}
   1218 
   1219 		if_percpuq_enqueue(ifp->if_percpuq, m);
   1220 
   1221 		/* Refresh descriptor, bail out if we're out of buffers. */
   1222 		if (temac_rxalloc(sc, tail, 1) != 0) {
   1223  			sc->sc_rxreap = TEMAC_RXINC(sc->sc_rxreap, -1);
   1224  			aprint_error_dev(sc->sc_dev, "Rx give up for now\n");
   1225 			break;
   1226 		}
   1227 	}
   1228 
   1229 	/* We may now have a contiguous ready-to-go chunk of descriptors. */
   1230 	if (nseg > 0) {
   1231 #if TEMAC_RXDEBUG > 0
   1232 		aprint_debug_dev(sc->sc_dev,
   1233 		    "rxreap: rxreap %03d -> %03d, nseg %03d\n",
   1234 		    head, sc->sc_rxreap, nseg);
   1235 #endif
   1236 		temac_rxcdsync(sc, head, nseg,
   1237 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1238 
   1239 		if (TEMAC_ISLAST(tail))
   1240 			cdmac_rx_start(sc, sc->sc_cdaddr + TEMAC_RXDOFF(0));
   1241 	}
   1242 
   1243 	/* Ensure maximum Rx latency is kept under control. */
   1244 	callout_schedule(&sc->sc_rx_timo, hz / TEMAC_RXTIMO_HZ);
   1245 }
   1246 
   1247 static void
   1248 temac_rxtimo(void *arg)
   1249 {
   1250 	struct temac_softc 	*sc = (struct temac_softc *)arg;
   1251 	int 			s;
   1252 
   1253 	/* We run TEMAC_RXTIMO_HZ times/sec to ensure Rx doesn't stall. */
   1254 	s = splnet();
   1255 	temac_rxreap(sc);
   1256 	splx(s);
   1257 }
   1258 
   1259 static void
   1260 temac_reset(struct temac_softc *sc)
   1261 {
   1262 	uint32_t 		rcr, tcr;
   1263 
   1264 	/* Kill CDMAC channels. */
   1265 	cdmac_tx_reset(sc);
   1266 	cdmac_rx_reset(sc);
   1267 
   1268 	/* Disable receiver. */
   1269 	rcr = gmi_read_4(TEMAC_GMI_RXCF1) & ~GMI_RX_ENABLE;
   1270 	gmi_write_4(TEMAC_GMI_RXCF1, rcr);
   1271 
   1272 	/* Disable transmitter. */
   1273 	tcr = gmi_read_4(TEMAC_GMI_TXCF) & ~GMI_TX_ENABLE;
   1274 	gmi_write_4(TEMAC_GMI_TXCF, tcr);
   1275 }
   1276 
   1277 static void
   1278 temac_rxdrain(struct temac_softc *sc)
   1279 {
   1280 	struct temac_rxsoft 	*rxs;
   1281 	int 			i;
   1282 
   1283 	for (i = 0; i < TEMAC_NRXDESC; i++) {
   1284 		rxs = &sc->sc_rxsoft[i];
   1285 
   1286 		if (rxs->rxs_mbuf != NULL) {
   1287 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmap);
   1288 			m_freem(rxs->rxs_mbuf);
   1289 			rxs->rxs_mbuf = NULL;
   1290 		}
   1291 	}
   1292 
   1293 	sc->sc_rx_drained = 1;
   1294 }
   1295 
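         /*
          * Kick the Tx channel at the oldest not-yet-reaped descriptor, but
          * only if that slot actually has work queued (it owns an mbuf) and
          * the channel is not already busy with a batch.
          */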
   1296 static void
   1297 temac_txkick(struct temac_softc *sc)
   1298 {
   1299 	if (sc->sc_txsoft[sc->sc_txsreap].txs_mbuf != NULL &&
   1300 	    sc->sc_txbusy == 0) {
   1301 		cdmac_tx_start(sc, sc->sc_cdaddr + TEMAC_TXDOFF(sc->sc_txreap));
   1302 		sc->sc_txbusy = 1;
   1303 	}
   1304 }
   1305