/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product pages can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.8 2014/10/08 18:24:21 martin Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
			 uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);

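/*
 * Both descriptor rings live in one contiguous DMA allocation:
 * AWGE_RX_RING_COUNT RX descriptors first, followed by
 * AWGE_TX_RING_COUNT TX descriptors.  The *_DESC_OFFSET macros
 * translate a ring index into a byte offset within that allocation;
 * the *_NEXT macros advance an index with wrap-around (this relies
 * on both ring sizes being powers of two).
 */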
#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define	RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))


#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_ETE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE|GMAC_DMA_INT_TUE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)


#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Enable interrupts
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}

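/*
 * Soft-reset the core: set the reset bit in the DMA bus mode register
 * and poll until the hardware clears it again, or the roughly 30ms
 * budget (3000 x 10us) expires.
 */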
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

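/*
 * The hardware stores the MAC address little endian in a register
 * pair: for 00:01:02:03:04:05, ADDR0LO holds 0x03020100 and the low
 * half of ADDR0HI holds 0x0504.
 */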
static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

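/*
 * MDIO access: encode the PHY address, register number and (assuming
 * sc_mii_clk is the MDC clock divider selection) the clock bits into
 * the MII address register, set the busy bit, and poll until the
 * hardware clears it again (up to 1000 x 10us).  For reads the result
 * is then available in the MII data register; for writes the data
 * register is latched out to the PHY.
 */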
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

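/*
 * Set up the RX ring: one mbuf cluster and one DMA map per slot, each
 * descriptor pointing at its buffer and chained (circularly) to the
 * next, with ownership of every descriptor handed to the device so it
 * can start filling them.
 */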
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK));
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK));
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
}

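/*
 * Allocate the shared descriptor memory following the usual bus_dma
 * sequence: create the map, allocate and map the memory, then load
 * the map.  The single allocation is then split between the RX and
 * TX rings.
 */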
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the following descriptors to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

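/*
 * Sync the TX descriptors in [start, end).  Because the ring is
 * circular the range may wrap, in which case it is split into one
 * sync up to the end of the ring and one from the start of the ring.
 */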
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		/* XXX */
		break;
	case IFM_1000_T:
		/* XXX */
		break;
	}
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Set up the DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

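/*
 * if_start handler: drain the interface send queue into the TX ring
 * via dwc_gmac_queue(), then flush and (re)start the TX DMA engine
 * for everything queued in this pass.
 */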
static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE,
		    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE,
		    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_TXSTART);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
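/*
 * bus_dmamap_load_mbuf() may split the chain into several DMA
 * segments; one descriptor is consumed per segment.  All descriptors
 * except the first are marked device-owned as they are filled; the
 * first is handed over only when the chain is complete, so the DMA
 * engine never picks up a partially built chain.
 */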
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	/* struct dwc_gmac_softc *sc = ifp->if_softc; */
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			/* setmulti */;
		break;
	}

	splx(s);

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t flags;
	int i;

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
	    i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		desc = &sc->sc_txq.t_desc[i];
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		flags = le32toh(desc->ddesc_status);
		if (flags & DDESC_STATUS_OWNEDBYDEV)
			break;
		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;
		sc->sc_ec.ec_if.if_opackets++;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
	}
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev, "rx intr\n");
	/* XXX */
#endif
}

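/*
 * Interrupt handler: check the MAC interrupt status for MII/link
 * events, then dispatch TX/RX completion and error handling based on
 * the DMA status register, which is acknowledged by writing the
 * handled bits back.
 */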
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr + i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NIE\0"
	    "b\x0f""AIE\0"
	    "b\x0e""ERE\0"
	    "b\x0d""FBE\0"
	    "b\x0a""ETE\0"
	    "b\x09""RWE\0"
	    "b\x08""RSE\0"
	    "b\x07""RUE\0"
	    "b\x06""RIE\0"
	    "b\x05""UNE\0"
	    "b\x04""OVE\0"
	    "b\x03""TJE\0"
	    "b\x02""TUE\0"
	    "b\x01""TSE\0"
	    "b\x00""TIE\0"
	    "\0", dma_status);
	printf("%s: INTR status: %08x, DMA status: %s\n",
	    device_xname(sc->sc_dev),
	    status, buf);

	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);

	panic("%s", msg);
}
#endif