/* $NetBSD: dwc_gmac.c,v 1.21 2014/10/25 18:00:25 joerg Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product pages can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.21 2014/10/25 18:00:25 joerg Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
			 uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);

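/*
 * RX and TX descriptors share one DMA area, RX ring first, so TX
 * descriptor N lives at byte offset (AWGE_RX_RING_COUNT + N) *
 * sizeof(struct dwc_gmac_dev_dmadesc).  The *_NEXT() wrap macros
 * assume both ring counts are powers of two.
 */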
#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))

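/*
 * Default DMA interrupts: TX/RX completion, the normal and abnormal
 * summary bits, fatal bus errors and TX underflow.
 */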
#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE|	\
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int s;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
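	/* the low 3 bits select the MDIO clock divider (GMAC_MII_CLKMASK) */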
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't read MAC address\n");
			return;
		}

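		/* ADDR0LO holds MAC bytes 0-3 LSB first, ADDR0HI bytes 4-5 */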
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to cleanup, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Enable interrupts
	 */
	s = splnet();
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	splx(s);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}

static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
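
	/* set the soft-reset bit and give the core up to 3000 * 10us to clear it */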
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

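	/*
	 * The address registers take the MAC little-endian:
	 * ADDR0LO = bytes 0-3, ADDR0HI = bytes 4-5.
	 */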
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

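	/* wait for the core to clear GMAC_MII_BUSY, then fetch the result */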
	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

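		/*
		 * Enter the descriptor in chained mode, point it at
		 * its successor and hand ownership to the device.
		 */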
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
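		/*
		 * Every DMA segment gets a descriptor of its own, so
		 * allow a map with up to AWGE_TX_RING_COUNT segments.
		 */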
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
			AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX)
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST |
	    __SHIFTIN(GMAC_BUSMODE_PRIORXTX_41, GMAC_BUSMODE_PRIORXTX) |
	    __SHIFTIN(8, GMCA_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_STOREFORWARD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

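		/*
		 * Writing any value to TXPOLL makes the DMA engine
		 * re-poll the TX descriptor list.
		 */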
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

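	/* stop both DMA engines, then flush what is left in the TX FIFO */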
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

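	/*
	 * Build one chained descriptor per DMA segment; TXFIRST marks
	 * the first, TXLAST|TXINT the last so the device interrupts us
	 * when the whole packet has been sent.
	 */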
	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueueing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;

		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			dwc_gmac_stop(ifp, 0);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = dwc_gmac_init(ifp);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * If setting debug or promiscuous mode, do not reset
			 * the chip; for everything else, call dwc_gmac_init()
			 * which will trigger a reset.
			 */
			/* XXX - for now always init */
			error = dwc_gmac_init(ifp);
			break;
		case 0:
			break;
		}

		break;

	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			dwc_gmac_setmulti(sc);
		break;
	}

	splx(s);

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t flags;
	int i;

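	/*
	 * Reclaim descriptors, oldest first, until we find one the
	 * device still owns.
	 */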
	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
	    i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		desc = &sc->sc_txq.t_desc[i];
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		flags = le32toh(desc->ddesc_status);

		if (flags & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;
		sc->sc_ec.ec_if.if_opackets++;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
	}
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

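	/* scan from r_cur until we hit a descriptor the device still owns */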
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up, if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_flags |= M_HASFCS;

		bpf_mtap(ifp, m);
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip:
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt;
	int h, mcnt;

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		ffilt |= AWIN_GMAC_MAC_FFILT_PM;
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
		    ffilt);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
		    0xffffffff);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
		    0xffffffff);
		return;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~AWIN_GMAC_MAC_FFILT_PM;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

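		/*
		 * Hash filtering: the upper 6 bits of the inverted
		 * little-endian CRC32 of the address select one of the
		 * 64 bits in HTHIGH/HTLOW.
		 */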
		h = (~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}
#endif