dwc_gmac.c revision 1.12
/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */
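
/*
 * Bus frontends are expected to fill in the bus-independent softc
 * fields (sc_dev, sc_bst, sc_bsh, sc_dmat), hook up the interrupt and
 * then call dwc_gmac_attach()/dwc_gmac_intr().  A minimal sketch with
 * hypothetical frontend names (foo_*; the real per-SoC glue differs):
 *
 *	static int
 *	foo_gmac_intr(void *arg)
 *	{
 *		return dwc_gmac_intr(arg);
 *	}
 *
 *	static void
 *	foo_gmac_attach(device_t parent, device_t self, void *aux)
 *	{
 *		struct dwc_gmac_softc *sc = device_private(self);
 *
 *		sc->sc_dev = self;
 *		sc->sc_bst = ...;	// register space tag from the bus
 *		sc->sc_bsh = ...;	// mapped GMAC register window
 *		sc->sc_dmat = ...;	// DMA tag from the bus
 *		// establish foo_gmac_intr() at IPL_NET here, then:
 *		dwc_gmac_attach(sc, 2);	// example MDC clock divider select,
 *					// masked into GMAC_MII_CLKMASK
 *	}
 */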

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.12 2014/10/19 13:04:24 jmcneill Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
			 uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);

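/*
 * All descriptors live in one contiguous DMA allocation, the RX ring
 * first and the TX ring directly after it (see
 * dwc_gmac_alloc_dma_rings() below), so TX descriptor offsets are
 * biased by the RX ring size.  Both ring sizes are powers of two,
 * which lets the *_NEXT() macros wrap with a simple bitwise AND.
 */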
#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))

#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE|	\
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
#endif

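/*
 * Bus-independent attach: determine the MAC address, reset the core,
 * set up the DMA descriptor rings, attach the MII bus and the network
 * interface, and finally unmask the MAC and DMA interrupts.
 */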
void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, we have not attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Enable interrupts
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}
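/*
 * Soft-reset the core via the DMA bus mode register and wait up to
 * 30ms (3000 * 10us) for the device to clear the reset bit again.
 */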
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

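/*
 * MDIO access: write the PHY address, register number, clock divider
 * and busy bit to the MII address register, then poll until the busy
 * bit clears; the MII data register then holds the value read (or has
 * latched the value to be written).  All accesses are serialized by
 * sc_mdio_lock.
 */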
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN | DDESC_CNTL_RXINT);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		/* keep the RXCHAIN/RXINT bits set up by dwc_gmac_alloc_rx_ring() */
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN | DDESC_CNTL_RXINT);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
}

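/*
 * Allocate a single physically contiguous chunk of memory holding all
 * RX descriptors followed by all TX descriptors, covered by one DMA
 * map, so a single dmamap suffices for descriptor syncs on both rings.
 */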
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the following ones to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			/* sync the buffer map itself, not the descriptor ring */
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	/* flush the zeroed descriptors before the device looks at them */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

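/*
 * Sync the TX descriptors in [start, end); if the range wraps past
 * the end of the ring, this takes two bus_dmamap_sync() calls.
 */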
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
			AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX)
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

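/*
 * if_init handler: program the DMA bus mode, the address filter and
 * the RX/TX descriptor base registers, then start both DMA engines.
 */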
static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST |
	    __SHIFTIN(GMAC_BUSMODE_PRIORXTX_41, GMAC_BUSMODE_PRIORXTX) |
	    __SHIFTIN(8, GMCA_BUSMODE_PBL));

	/*
	 * Set up address filter (XXX for testing only: promiscuous)
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    AWIN_GMAC_MAC_FFILT_PR);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_STOREFORWARD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add mbuf chain m0 to the TX ring.
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueueing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;

		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			dwc_gmac_stop(ifp, 0);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = dwc_gmac_init(ifp);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * If setting debug or promiscuous mode, do not reset
			 * the chip; for everything else, call dwc_gmac_init()
			 * which will trigger a reset.
			 */
			/* XXX - for now always init */
			error = dwc_gmac_init(ifp);
			break;
		case 0:
			break;
		}

		break;

	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		error = 0;
		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
			if (ifp->if_flags & IFF_RUNNING) {
				/* XXX update multicast filter (not implemented yet) */
			}
		}
		break;
	}

	splx(s);

	return error;
}

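/*
 * Reclaim completed transmissions: walk the TX ring from t_next and
 * stop at the first descriptor still owned by the device, unloading
 * and freeing the mbufs of all finished packets on the way.
 */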
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t flags;
	int i;

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
	    i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		desc = &sc->sc_txq.t_desc[i];
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		flags = le32toh(desc->ddesc_status);

		if (flags & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;
		sc->sc_ec.ec_if.if_opackets++;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
	}
}

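/*
 * Harvest received packets: for every descriptor handed back by the
 * device, try to substitute a freshly allocated mbuf cluster and pass
 * the filled one up the stack; if allocation or reloading fails, the
 * packet is dropped and its buffer recycled, so the ring never runs
 * dry.
 */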
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {

#ifdef DWC_GMAC_DEBUG
		printf("rx int: checking desc #%d\n", i);
#endif

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV) {
#ifdef DWC_GMAC_DEBUG
			printf("status %08x, still owned by device\n", status);
#endif
			break;
		}

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			printf("status %08x, RX error, skipping\n", status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		printf("rx int: device is done with #%d, len: %d\n", i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up; if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		bpf_mtap(ifp, m);
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip:
		/* hand the descriptor back to the device, keeping RXCHAIN set */
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN | DDESC_CNTL_RXINT);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;
}

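/*
 * Interrupt handler, called from the bus frontend: handle MII events,
 * dispatch RX/TX completions, count error interrupts and acknowledge
 * all handled DMA status bits.  Returns nonzero if the interrupt was
 * meant for us.
 */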
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr + i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr + i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}
#endif