      1 /* $NetBSD: dwc_gmac.c,v 1.50 2018/06/26 06:48:00 msaitoh Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Matt Thomas of 3am Software Foundry and Martin Husemann.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * This driver supports the Synopsys DesignWare GMAC core, as found
     34  * on Allwinner A20 SoCs and others.
     35  *
     36  * Real documentation does not seem to be publicly available; the
     37  * marketing product documents can be found here:
     38  *
     39  *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
     40  */
     41 
     42 #include <sys/cdefs.h>
     43 
     44 __KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.50 2018/06/26 06:48:00 msaitoh Exp $");
     45 
     46 /* #define	DWC_GMAC_DEBUG	1 */
     47 
     48 #ifdef _KERNEL_OPT
     49 #include "opt_inet.h"
     50 #include "opt_net_mpsafe.h"
     51 #endif
     52 
     53 #include <sys/param.h>
     54 #include <sys/bus.h>
     55 #include <sys/device.h>
     56 #include <sys/intr.h>
     57 #include <sys/systm.h>
     58 #include <sys/sockio.h>
     59 #include <sys/cprng.h>
     60 
     61 #include <net/if.h>
     62 #include <net/if_ether.h>
     63 #include <net/if_media.h>
     64 #include <net/bpf.h>
     65 #ifdef INET
     66 #include <netinet/if_inarp.h>
     67 #endif
     68 
     69 #include <dev/mii/miivar.h>
     70 
     71 #include <dev/ic/dwc_gmac_reg.h>
     72 #include <dev/ic/dwc_gmac_var.h>
     73 
     74 static int dwc_gmac_miibus_read_reg(device_t, int, int);
     75 static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
     76 static void dwc_gmac_miibus_statchg(struct ifnet *);
     77 
     78 static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
     79 static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
     80 			 uint8_t enaddr[ETHER_ADDR_LEN]);
     81 static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
     82 static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
     83 static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
     84 static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
     85 static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
     86 static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
     87 static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
     88 static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
     89 static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
     90 static int dwc_gmac_init(struct ifnet *ifp);
     91 static int dwc_gmac_init_locked(struct ifnet *ifp);
     92 static void dwc_gmac_stop(struct ifnet *ifp, int disable);
     93 static void dwc_gmac_stop_locked(struct ifnet *ifp, int disable);
     94 static void dwc_gmac_start(struct ifnet *ifp);
     95 static void dwc_gmac_start_locked(struct ifnet *ifp);
     96 static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
     97 static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
     98 static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
     99 static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
    100 static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
    101 static int dwc_gmac_ifflags_cb(struct ethercom *);
    102 static uint32_t	bitrev32(uint32_t x);
    103 
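        /*
         * Ring geometry helpers.  Both rings live in a single DMA
         * allocation: AWGE_RX_RING_COUNT RX descriptors first, followed
         * by AWGE_TX_RING_COUNT TX descriptors (see
         * dwc_gmac_alloc_dma_rings).  The *_NEXT macros rely on the
         * ring counts being powers of two.
         */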
    104 #define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
    105 				    *sizeof(struct dwc_gmac_dev_dmadesc))
    106 #define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))
    107 
    108 #define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
    109 #define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
    110 
    113 #define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
    114 				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
    115 				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)
    116 
    117 #define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
    118 				GMAC_DMA_INT_FBE|	\
    119 				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
    120 				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
    121 				GMAC_DMA_INT_TJE)
    122 
    123 #define	AWIN_DEF_MAC_INTRMASK	\
    124 	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
    125 	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)
    126 
    127 #ifdef DWC_GMAC_DEBUG
    128 static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
    129 static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
    130 static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
    131 static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
    132 static void dwc_dump_status(struct dwc_gmac_softc *sc);
    133 static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
    134 #endif
    135 
    136 #ifdef NET_MPSAFE
    137 #define DWCGMAC_MPSAFE	1
    138 #endif
    139 
    140 void
    141 dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
    142 {
    143 	uint8_t enaddr[ETHER_ADDR_LEN];
    144 	uint32_t maclo, machi;
    145 	struct mii_data * const mii = &sc->sc_mii;
    146 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
    147 	prop_dictionary_t dict;
    148 	int rv;
    149 
    150 	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
    151 	sc->sc_mii_clk = mii_clk & 7;
    152 
    153 	dict = device_properties(sc->sc_dev);
    154 	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
    155 	if (ea != NULL) {
    156 		/*
    157 		 * If the MAC address is overridden by a device property,
    158 		 * use that.
    159 		 */
    160 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
    161 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
    162 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
    163 	} else {
    164 		/*
    165 		 * If we did not get an externally configured address,
    166 		 * try to read one from the current filter setup
    167 		 * before resetting the chip.
    168 		 */
    169 		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    170 		    AWIN_GMAC_MAC_ADDR0LO);
    171 		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    172 		    AWIN_GMAC_MAC_ADDR0HI);
    173 
    174 		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
    175 			/* no valid address set, fake a locally administered one */
    176 			maclo = 0x00f2 | (cprng_strong32() << 16);
    177 			machi = cprng_strong32();
    178 		}
    179 
    180 		enaddr[0] = maclo & 0x0ff;
    181 		enaddr[1] = (maclo >> 8) & 0x0ff;
    182 		enaddr[2] = (maclo >> 16) & 0x0ff;
    183 		enaddr[3] = (maclo >> 24) & 0x0ff;
    184 		enaddr[4] = machi & 0x0ff;
    185 		enaddr[5] = (machi >> 8) & 0x0ff;
    186 	}
    187 
    188 	/*
    189 	 * Init chip and do initial setup
    190 	 */
    191 	if (dwc_gmac_reset(sc) != 0)
    192 		return;	/* not much to clean up, we haven't attached yet */
    193 	dwc_gmac_write_hwaddr(sc, enaddr);
    194 	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
    195 	    ether_sprintf(enaddr));
    196 
    197 	/*
    198 	 * Allocate Tx and Rx rings
    199 	 */
    200 	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
    201 		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
    202 		goto fail;
    203 	}
    204 
    205 	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
    206 		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
    207 		goto fail;
    208 	}
    209 
    210 	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
    211 		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
    212 		goto fail;
    213 	}
    214 
    215 	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
    216 	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
    217 	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
    218 
    219 	/*
    220 	 * Prepare interface data
    221 	 */
    222 	ifp->if_softc = sc;
    223 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
    224 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    225 #ifdef DWCGMAC_MPSAFE
    226 	ifp->if_extflags = IFEF_MPSAFE;
    227 #endif
    228 	ifp->if_ioctl = dwc_gmac_ioctl;
    229 	ifp->if_start = dwc_gmac_start;
    230 	ifp->if_init = dwc_gmac_init;
    231 	ifp->if_stop = dwc_gmac_stop;
    232 	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
    233 	IFQ_SET_READY(&ifp->if_snd);
    234 
    235 	/*
    236 	 * Attach MII subdevices
    237 	 */
    238 	sc->sc_ec.ec_mii = &sc->sc_mii;
    239 	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
    240 	mii->mii_ifp = ifp;
    241 	mii->mii_readreg = dwc_gmac_miibus_read_reg;
    242 	mii->mii_writereg = dwc_gmac_miibus_write_reg;
    243 	mii->mii_statchg = dwc_gmac_miibus_statchg;
    244 	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
    245 	    MIIF_DOPAUSE);
    246 
    247 	if (LIST_EMPTY(&mii->mii_phys)) {
    248 		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
    249 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
    250 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
    251 	} else {
    252 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
    253 	}
    254 
    255 	/*
    256 	 * We can support 802.1Q VLAN-sized frames.
    257 	 */
    258 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    259 
    260 	/*
    261 	 * Ready; attach the interface and register it with the
    262 	 * network stack.
    263 	 */
    264 	rv = if_initialize(ifp);
    265 	if (rv != 0)
    266 		goto fail_2;
    267 	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
    268 	if_deferred_start_init(ifp, NULL);
    269 	ether_ifattach(ifp, enaddr);
    270 	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
    271 	if_register(ifp);
    272 
    273 	/*
    274 	 * Enable interrupts
    275 	 */
    276 	mutex_enter(sc->sc_lock);
    277 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
    278 	    AWIN_DEF_MAC_INTRMASK);
    279 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
    280 	    GMAC_DEF_DMA_INT_MASK);
    281 	mutex_exit(sc->sc_lock);
    282 
    283 	return;
    284 fail_2:
    285 	ifmedia_removeall(&mii->mii_media);
    286 	mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
    287 	mutex_destroy(&sc->sc_txq.t_mtx);
    288 	mutex_destroy(&sc->sc_rxq.r_mtx);
    289 	mutex_obj_free(sc->sc_lock);
    290 fail:
    291 	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
    292 	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
    293 	dwc_gmac_free_dma_rings(sc);
    294 	mutex_destroy(&sc->sc_mdio_lock);
    295 }
    296 
    297 
    298 
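        /*
         * Soft-reset the core: set the reset bit in the DMA bus mode
         * register and poll until the device clears it again, giving it
         * up to 3000 * 10us = 30ms before declaring failure.
         */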
    299 static int
    300 dwc_gmac_reset(struct dwc_gmac_softc *sc)
    301 {
    302 	size_t cnt;
    303 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
    304 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
    305 	for (cnt = 0; cnt < 3000; cnt++) {
    306 		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
    307 		    & GMAC_BUSMODE_RESET) == 0)
    308 			return 0;
    309 		delay(10);
    310 	}
    311 
    312 	aprint_error_dev(sc->sc_dev, "reset timed out\n");
    313 	return EIO;
    314 }
    315 
    316 static void
    317 dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    318     uint8_t enaddr[ETHER_ADDR_LEN])
    319 {
    320 	uint32_t hi, lo;
    321 
    322 	hi = enaddr[4] | (enaddr[5] << 8);
    323 	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
    324 	    | (enaddr[3] << 24);
    325 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
    326 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
    327 }
    328 
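        /*
         * MDIO read: encode PHY address, register and clock divider into
         * the MII address register with GMAC_MII_BUSY set, then poll (up
         * to 1000 * 10us) for BUSY to clear before fetching the result
         * from the MII data register.  Note that a timeout yields 0,
         * which is indistinguishable from a register reading as zero.
         */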
    329 static int
    330 dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
    331 {
    332 	struct dwc_gmac_softc * const sc = device_private(self);
    333 	uint16_t mii;
    334 	size_t cnt;
    335 	int rv = 0;
    336 
    337 	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
    338 	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
    339 	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
    340 	    | GMAC_MII_BUSY;
    341 
    342 	mutex_enter(&sc->sc_mdio_lock);
    343 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
    344 
    345 	for (cnt = 0; cnt < 1000; cnt++) {
    346 		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    347 		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
    348 			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    349 			    AWIN_GMAC_MAC_MIIDATA);
    350 			break;
    351 		}
    352 		delay(10);
    353 	}
    354 
    355 	mutex_exit(&sc->sc_mdio_lock);
    356 
    357 	return rv;
    358 }
    359 
    360 static void
    361 dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
    362 {
    363 	struct dwc_gmac_softc * const sc = device_private(self);
    364 	uint16_t mii;
    365 	size_t cnt;
    366 
    367 	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
    368 	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
    369 	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
    370 	    | GMAC_MII_BUSY | GMAC_MII_WRITE;
    371 
    372 	mutex_enter(&sc->sc_mdio_lock);
    373 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
    374 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
    375 
    376 	for (cnt = 0; cnt < 1000; cnt++) {
    377 		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    378 		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
    379 			break;
    380 		delay(10);
    381 	}
    382 
    383 	mutex_exit(&sc->sc_mdio_lock);
    384 }
    385 
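        /*
         * Populate the RX ring: one mbuf cluster per descriptor, the
         * descriptors chained through ddesc_next and all initially
         * owned by the device.
         */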
    386 static int
    387 dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    388 	struct dwc_gmac_rx_ring *ring)
    389 {
    390 	struct dwc_gmac_rx_data *data;
    391 	bus_addr_t physaddr;
    392 	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
    393 	int error, i, next;
    394 
    395 	ring->r_cur = ring->r_next = 0;
    396 	memset(ring->r_desc, 0, descsize);
    397 
    398 	/*
    399 	 * Pre-allocate Rx buffers and populate Rx ring.
    400 	 */
    401 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
    402 		struct dwc_gmac_dev_dmadesc *desc;
    403 
    404 		data = &sc->sc_rxq.r_data[i];
    405 
    406 		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
    407 		if (data->rd_m == NULL) {
    408 			aprint_error_dev(sc->sc_dev,
    409 			    "could not allocate rx mbuf #%d\n", i);
    410 			error = ENOMEM;
    411 			goto fail;
    412 		}
    413 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
    414 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
    415 		if (error != 0) {
    416 			aprint_error_dev(sc->sc_dev,
    417 			    "could not create DMA map\n");
    418 			data->rd_map = NULL;
    419 			goto fail;
    420 		}
    421 		MCLGET(data->rd_m, M_DONTWAIT);
    422 		if (!(data->rd_m->m_flags & M_EXT)) {
    423 			aprint_error_dev(sc->sc_dev,
    424 			    "could not allocate mbuf cluster #%d\n", i);
    425 			error = ENOMEM;
    426 			goto fail;
    427 		}
    428 
    429 		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
    430 		    mtod(data->rd_m, void *), MCLBYTES, NULL,
    431 		    BUS_DMA_READ | BUS_DMA_NOWAIT);
    432 		if (error != 0) {
    433 			aprint_error_dev(sc->sc_dev,
    434 			    "could not load rx buf DMA map #%d", i);
    435 			goto fail;
    436 		}
    437 		physaddr = data->rd_map->dm_segs[0].ds_addr;
    438 
    439 		desc = &sc->sc_rxq.r_desc[i];
    440 		desc->ddesc_data = htole32(physaddr);
    441 		next = RX_NEXT(i);
    442 		desc->ddesc_next = htole32(ring->r_physaddr
    443 		    + next * sizeof(*desc));
    444 		desc->ddesc_cntl = htole32(
    445 		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
    446 		    DDESC_CNTL_RXCHAIN);
    447 		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
    448 	}
    449 
    450 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
    451 	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
    452 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
    453 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
    454 	    ring->r_physaddr);
    455 
    456 	return 0;
    457 
    458 fail:
    459 	dwc_gmac_free_rx_ring(sc, ring);
    460 	return error;
    461 }
    462 
    463 static void
    464 dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    465 	struct dwc_gmac_rx_ring *ring)
    466 {
    467 	struct dwc_gmac_dev_dmadesc *desc;
    468 	int i;
    469 
    470 	mutex_enter(&ring->r_mtx);
    471 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
    472 		desc = &sc->sc_rxq.r_desc[i];
    473 		desc->ddesc_cntl = htole32(
    474 		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
    475 		    DDESC_CNTL_RXCHAIN);
    476 		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
    477 	}
    478 
    479 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
    480 	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
    481 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
    482 
    483 	ring->r_cur = ring->r_next = 0;
    484 	/* reset DMA address to start of ring */
    485 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
    486 	    sc->sc_rxq.r_physaddr);
    487 	mutex_exit(&ring->r_mtx);
    488 }
    489 
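        /*
         * Allocate one contiguous, coherent chunk of memory holding all
         * AWGE_TOTAL_RING_COUNT descriptors, to be carved up between the
         * RX and TX rings below.
         */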
    490 static int
    491 dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
    492 {
    493 	const size_t descsize = AWGE_TOTAL_RING_COUNT *
    494 		sizeof(struct dwc_gmac_dev_dmadesc);
    495 	int error, nsegs;
    496 	void *rings;
    497 
    498 	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
    499 	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
    500 	if (error != 0) {
    501 		aprint_error_dev(sc->sc_dev,
    502 		    "could not create desc DMA map\n");
    503 		sc->sc_dma_ring_map = NULL;
    504 		goto fail;
    505 	}
    506 
    507 	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
    508 	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
    509 	if (error != 0) {
    510 		aprint_error_dev(sc->sc_dev,
    511 		    "could not map DMA memory\n");
    512 		goto fail;
    513 	}
    514 
    515 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
    516 	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
    517 	if (error != 0) {
    518 		aprint_error_dev(sc->sc_dev,
    519 		    "could not allocate DMA memory\n");
    520 		goto fail;
    521 	}
    522 
    523 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
    524 	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
    525 	if (error != 0) {
    526 		aprint_error_dev(sc->sc_dev,
    527 		    "could not load desc DMA map\n");
    528 		goto fail;
    529 	}
    530 
    531 	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
    532 	sc->sc_rxq.r_desc = rings;
    533 	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
    534 
    535 	/* and the remaining descriptors to the TX side */
    536 	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
    537 	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
    538 	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);
    539 
    540 	return 0;
    541 
    542 fail:
    543 	dwc_gmac_free_dma_rings(sc);
    544 	return error;
    545 }
    546 
    547 static void
    548 dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
    549 {
    550 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
    551 	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
    552 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
    553 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
    554 	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
    555 	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
    556 }
    557 
    558 static void
    559 dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
    560 {
    561 	struct dwc_gmac_rx_data *data;
    562 	int i;
    563 
    564 	if (ring->r_desc == NULL)
    565 		return;
    566 
    567 
    568 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
    569 		data = &ring->r_data[i];
    570 
    571 		if (data->rd_map != NULL) {
    572 			/* sync only the loaded rx buffer */
    573 			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
    574 			    data->rd_map->dm_mapsize,
    575 			    BUS_DMASYNC_POSTREAD);
    576 			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
    577 			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
    578 		}
    579 		if (data->rd_m != NULL)
    580 			m_freem(data->rd_m);
    581 	}
    582 }
    583 
    584 static int
    585 dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    586 	struct dwc_gmac_tx_ring *ring)
    587 {
    588 	int i, error = 0;
    589 
    590 	ring->t_queued = 0;
    591 	ring->t_cur = ring->t_next = 0;
    592 
    593 	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
    594 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    595 	    TX_DESC_OFFSET(0),
    596 	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
    597 	    BUS_DMASYNC_POSTWRITE);
    598 
    599 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    600 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
    601 		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
    602 		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
    603 		    &ring->t_data[i].td_map);
    604 		if (error != 0) {
    605 			aprint_error_dev(sc->sc_dev,
    606 			    "could not create TX DMA map #%d\n", i);
    607 			ring->t_data[i].td_map = NULL;
    608 			goto fail;
    609 		}
    610 		ring->t_desc[i].ddesc_next = htole32(
    611 		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
    612 		    *TX_NEXT(i));
    613 	}
    614 
    615 	return 0;
    616 
    617 fail:
    618 	dwc_gmac_free_tx_ring(sc, ring);
    619 	return error;
    620 }
    621 
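        /*
         * Sync the TX descriptors in the range [start, end).  The ring
         * is circular, so a wrapping range is split into two syncs:
         * from 'start' to the end of the ring, and from the start of
         * the ring to 'end'.
         */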
    622 static void
    623 dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
    624 {
    625 	/* 'end' points one descriptor beyond the last one we want to sync */
    626 	if (end > start) {
    627 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    628 		    TX_DESC_OFFSET(start),
    629 		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
    630 		    ops);
    631 		return;
    632 	}
    633 	/* sync from 'start' to end of ring */
    634 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    635 	    TX_DESC_OFFSET(start),
    636 	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
    637 	    ops);
    638 	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
    639 		/* sync from start of ring to 'end' */
    640 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    641 		    TX_DESC_OFFSET(0),
    642 		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
    643 		    ops);
    644 	}
    645 }
    646 
    647 static void
    648 dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    649 	struct dwc_gmac_tx_ring *ring)
    650 {
    651 	int i;
    652 
    653 	mutex_enter(&ring->t_mtx);
    654 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    655 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
    656 
    657 		if (data->td_m != NULL) {
    658 			bus_dmamap_sync(sc->sc_dmat, data->td_active,
    659 			    0, data->td_active->dm_mapsize,
    660 			    BUS_DMASYNC_POSTWRITE);
    661 			bus_dmamap_unload(sc->sc_dmat, data->td_active);
    662 			m_freem(data->td_m);
    663 			data->td_m = NULL;
    664 		}
    665 	}
    666 
    667 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    668 	    TX_DESC_OFFSET(0),
    669 	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
    670 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
    671 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
    672 	    sc->sc_txq.t_physaddr);
    673 
    674 	ring->t_queued = 0;
    675 	ring->t_cur = ring->t_next = 0;
    676 	mutex_exit(&ring->t_mtx);
    677 }
    678 
    679 static void
    680 dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    681 	struct dwc_gmac_tx_ring *ring)
    682 {
    683 	int i;
    684 
    685 	/* unload the maps */
    686 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    687 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
    688 
    689 		if (data->td_m != NULL) {
    690 			bus_dmamap_sync(sc->sc_dmat, data->td_active,
    691 			    0, data->td_active->dm_mapsize,
    692 			    BUS_DMASYNC_POSTWRITE);
    693 			bus_dmamap_unload(sc->sc_dmat, data->td_active);
    694 			m_freem(data->td_m);
    695 			data->td_m = NULL;
    696 		}
    697 	}
    698 
    699 	/* and actually free them */
    700 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    701 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
    702 
    703 		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
    704 	}
    705 }
    706 
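        /*
         * Media change callback: program the MAC configuration register
         * for the negotiated speed (MIISEL selects the 10/100 interface,
         * FES100 selects 100MBit) and duplex, and set up pause frame
         * handling accordingly.  The 0x200 value written to the flow
         * control register is presumably the pause time.
         */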
    707 static void
    708 dwc_gmac_miibus_statchg(struct ifnet *ifp)
    709 {
    710 	struct dwc_gmac_softc * const sc = ifp->if_softc;
    711 	struct mii_data * const mii = &sc->sc_mii;
    712 	uint32_t conf, flow;
    713 
    714 	/*
    715 	 * Set MII or GMII interface based on the speed
    716 	 * negotiated by the PHY.
    717 	 */
    718 	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
    719 	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
    720 	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
    721 	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
    722 	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
    723 	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
    724 	    | AWIN_GMAC_MAC_CONF_ACS
    725 	    | AWIN_GMAC_MAC_CONF_RXENABLE
    726 	    | AWIN_GMAC_MAC_CONF_TXENABLE;
    727 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
    728 	case IFM_10_T:
    729 		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
    730 		break;
    731 	case IFM_100_TX:
    732 		conf |= AWIN_GMAC_MAC_CONF_FES100 |
    733 			AWIN_GMAC_MAC_CONF_MIISEL;
    734 		break;
    735 	case IFM_1000_T:
    736 		break;
    737 	}
    738 	if (sc->sc_set_speed)
    739 		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));
    740 
    741 	flow = 0;
    742 	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
    743 		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
    744 		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
    745 	}
    746 	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
    747 		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
    748 	}
    749 	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
    750 		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
    751 	}
    752 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    753 	    AWIN_GMAC_MAC_FLOWCTRL, flow);
    754 
    755 #ifdef DWC_GMAC_DEBUG
    756 	aprint_normal_dev(sc->sc_dev,
    757 	    "setting MAC conf register: %08x\n", conf);
    758 #endif
    759 
    760 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    761 	    AWIN_GMAC_MAC_CONF, conf);
    762 }
    763 
    764 static int
    765 dwc_gmac_init(struct ifnet *ifp)
    766 {
    767 	struct dwc_gmac_softc *sc = ifp->if_softc;
    768 
    769 	mutex_enter(sc->sc_lock);
    770 	int ret = dwc_gmac_init_locked(ifp);
    771 	mutex_exit(sc->sc_lock);
    772 
    773 	return ret;
    774 }
    775 
    776 static int
    777 dwc_gmac_init_locked(struct ifnet *ifp)
    778 {
    779 	struct dwc_gmac_softc *sc = ifp->if_softc;
    780 	uint32_t ffilt;
    781 
    782 	if (ifp->if_flags & IFF_RUNNING)
    783 		return 0;
    784 
    785 	dwc_gmac_stop_locked(ifp, 0);
    786 
    787 	/*
    788 	 * Configure DMA burst/transfer mode and RX/TX priorities.
    789 	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
    790 	 */
    791 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
    792 	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
    793 	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
    794 	    __SHIFTIN(2, GMAC_BUSMODE_PBL));
    795 
    796 	/*
    797 	 * Set up address filter
    798 	 */
    799 	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
    800 	if (ifp->if_flags & IFF_PROMISC) {
    801 		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
    802 	} else {
    803 		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
    804 	}
    805 	if (ifp->if_flags & IFF_BROADCAST) {
    806 		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
    807 	} else {
    808 		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
    809 	}
    810 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
    811 
    812 	/*
    813 	 * Set up multicast filter
    814 	 */
    815 	dwc_gmac_setmulti(sc);
    816 
    817 	/*
    818 	 * Set up dma pointer for RX and TX ring
    819 	 */
    820 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
    821 	    sc->sc_rxq.r_physaddr);
    822 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
    823 	    sc->sc_txq.t_physaddr);
    824 
    825 	/*
    826 	 * Start RX/TX part
    827 	 */
    828 	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
    829 	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
    830 		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
    831 	}
    832 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);
    833 
    834 	sc->sc_stopping = false;
    835 
    836 	ifp->if_flags |= IFF_RUNNING;
    837 	ifp->if_flags &= ~IFF_OACTIVE;
    838 
    839 	return 0;
    840 }
    841 
    842 static void
    843 dwc_gmac_start(struct ifnet *ifp)
    844 {
    845 	struct dwc_gmac_softc *sc = ifp->if_softc;
    846 #ifdef DWCGMAC_MPSAFE
    847 	KASSERT(if_is_mpsafe(ifp));
    848 #endif
    849 
    850 	mutex_enter(sc->sc_lock);
    851 	if (!sc->sc_stopping) {
    852 		mutex_enter(&sc->sc_txq.t_mtx);
    853 		dwc_gmac_start_locked(ifp);
    854 		mutex_exit(&sc->sc_txq.t_mtx);
    855 	}
    856 	mutex_exit(sc->sc_lock);
    857 }
    858 
    859 static void
    860 dwc_gmac_start_locked(struct ifnet *ifp)
    861 {
    862 	struct dwc_gmac_softc *sc = ifp->if_softc;
    863 	int old = sc->sc_txq.t_queued;
    864 	int start = sc->sc_txq.t_cur;
    865 	struct mbuf *m0;
    866 
    867 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
    868 		return;
    869 
    870 	for (;;) {
    871 		IFQ_POLL(&ifp->if_snd, m0);
    872 		if (m0 == NULL)
    873 			break;
    874 		if (dwc_gmac_queue(sc, m0) != 0) {
    875 			ifp->if_flags |= IFF_OACTIVE;
    876 			break;
    877 		}
    878 		IFQ_DEQUEUE(&ifp->if_snd, m0);
    879 		bpf_mtap(ifp, m0, BPF_D_OUT);
    880 		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
    881 			ifp->if_flags |= IFF_OACTIVE;
    882 			break;
    883 		}
    884 	}
    885 
    886 	if (sc->sc_txq.t_queued != old) {
    887 		/* packets have been queued, kick it off */
    888 		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
    889 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
    890 
    891 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    892 		    AWIN_GMAC_DMA_TXPOLL, ~0U);
    893 #ifdef DWC_GMAC_DEBUG
    894 		dwc_dump_status(sc);
    895 #endif
    896 	}
    897 }
    898 
    899 static void
    900 dwc_gmac_stop(struct ifnet *ifp, int disable)
    901 {
    902 	struct dwc_gmac_softc *sc = ifp->if_softc;
    903 
    904 	mutex_enter(sc->sc_lock);
    905 	dwc_gmac_stop_locked(ifp, disable);
    906 	mutex_exit(sc->sc_lock);
    907 }
    908 
    909 static void
    910 dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
    911 {
    912 	struct dwc_gmac_softc *sc = ifp->if_softc;
    913 
    914 	sc->sc_stopping = true;
    915 
    916 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    917 	    AWIN_GMAC_DMA_OPMODE,
    918 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    919 	        AWIN_GMAC_DMA_OPMODE)
    920 		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
    921 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    922 	    AWIN_GMAC_DMA_OPMODE,
    923 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    924 	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
    925 
    926 	mii_down(&sc->sc_mii);
    927 	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
    928 	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
    929 
    930 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
    931 }
    932 
    933 /*
    934  * Add m0 to the TX ring; the first descriptor is handed to the device last.
    935  */
    936 static int
    937 dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
    938 {
    939 	struct dwc_gmac_dev_dmadesc *desc = NULL;
    940 	struct dwc_gmac_tx_data *data = NULL;
    941 	bus_dmamap_t map;
    942 	uint32_t flags, len, status;
    943 	int error, i, first;
    944 
    945 #ifdef DWC_GMAC_DEBUG
    946 	aprint_normal_dev(sc->sc_dev,
    947 	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
    948 #endif
    949 
    950 	first = sc->sc_txq.t_cur;
    951 	map = sc->sc_txq.t_data[first].td_map;
    952 
    953 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
    954 	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
    955 	if (error != 0) {
    956 		aprint_error_dev(sc->sc_dev, "could not map mbuf "
    957 		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
    958 		return error;
    959 	}
    960 
    961 	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
    962 		bus_dmamap_unload(sc->sc_dmat, map);
    963 		return ENOBUFS;
    964 	}
    965 
    966 	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
    967 	status = 0;
    968 	for (i = 0; i < map->dm_nsegs; i++) {
    969 		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
    970 		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
    971 
    972 		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
    973 		len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);
    974 
    975 #ifdef DWC_GMAC_DEBUG
    976 		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
    977 		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
    978 		    (unsigned long)map->dm_segs[i].ds_addr,
    979 		    (unsigned long)map->dm_segs[i].ds_len,
    980 		    flags, len);
    981 #endif
    982 
    983 		desc->ddesc_cntl = htole32(len|flags);
    984 		flags &= ~DDESC_CNTL_TXFIRST;
    985 
    986 		/*
    987 		 * Defer passing ownership of the first descriptor
    988 		 * until we are done.
    989 		 */
    990 		desc->ddesc_status = htole32(status);
    991 		status |= DDESC_STATUS_OWNEDBYDEV;
    992 
    993 		sc->sc_txq.t_queued++;
    994 		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
    995 	}
    996 
    997 	desc->ddesc_cntl |= htole32(DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT);
    998 
    999 	data->td_m = m0;
   1000 	data->td_active = map;
   1001 
   1002 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
   1003 	    BUS_DMASYNC_PREWRITE);
   1004 
   1005 	/* Pass first to device */
   1006 	sc->sc_txq.t_desc[first].ddesc_status =
   1007 	    htole32(DDESC_STATUS_OWNEDBYDEV);
   1008 
   1009 	return 0;
   1010 }
   1011 
   1012 /*
   1013  * If the interface is up and running, only modify the receive
   1014  * filter when setting promiscuous or debug mode.  Otherwise fall
   1015  * through to ether_ioctl, which will reset the chip.
   1016  */
   1017 static int
   1018 dwc_gmac_ifflags_cb(struct ethercom *ec)
   1019 {
   1020 	struct ifnet *ifp = &ec->ec_if;
   1021 	struct dwc_gmac_softc *sc = ifp->if_softc;
   1022 	int ret = 0;
   1023 
   1024 	mutex_enter(sc->sc_lock);
   1025 	int change = ifp->if_flags ^ sc->sc_if_flags;
   1026 	sc->sc_if_flags = ifp->if_flags;
   1027 
   1028 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
   1029 		ret = ENETRESET;
   1030 		goto out;
   1031 	}
   1032 	if ((change & IFF_PROMISC) != 0) {
   1033 		dwc_gmac_setmulti(sc);
   1034 	}
   1035 out:
   1036 	mutex_exit(sc->sc_lock);
   1037 
   1038 	return ret;
   1039 }
   1040 
   1041 static int
   1042 dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   1043 {
   1044 	struct dwc_gmac_softc *sc = ifp->if_softc;
   1045 	int error = 0;
   1046 
   1047 	int s = splnet();
   1048 	error = ether_ioctl(ifp, cmd, data);
   1049 
   1050 #ifdef DWCGMAC_MPSAFE
   1051 	splx(s);
   1052 #endif
   1053 
   1054 	if (error == ENETRESET) {
   1055 		error = 0;
   1056 		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   1057 			;	/* nothing else to do */
   1058 		else if (ifp->if_flags & IFF_RUNNING) {
   1059 			/*
   1060 			 * Multicast list has changed; set the hardware filter
   1061 			 * accordingly.
   1062 			 */
   1063 			mutex_enter(sc->sc_lock);
   1064 			dwc_gmac_setmulti(sc);
   1065 			mutex_exit(sc->sc_lock);
   1066 		}
   1067 	}
   1068 
   1069 	/* Try to get things going again */
   1070 	if (ifp->if_flags & IFF_UP)
   1071 		dwc_gmac_start(ifp);
   1072 	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
   1073 
   1074 #ifndef DWCGMAC_MPSAFE
   1075 	splx(s);
   1076 #endif
   1077 
   1078 	return error;
   1079 }
   1080 
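        /*
         * TX completion: walk the ring from t_next, reclaiming
         * descriptors the device has handed back (OWNEDBYDEV clear),
         * unloading the DMA maps and freeing the transmitted mbufs.
         */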
   1081 static void
   1082 dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
   1083 {
   1084 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1085 	struct dwc_gmac_tx_data *data;
   1086 	struct dwc_gmac_dev_dmadesc *desc;
   1087 	uint32_t status;
   1088 	int i, nsegs;
   1089 
   1090 	mutex_enter(&sc->sc_txq.t_mtx);
   1091 
   1092 	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
   1093 #ifdef DWC_GMAC_DEBUG
   1094 		aprint_normal_dev(sc->sc_dev,
   1095 		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
   1096 		    i, sc->sc_txq.t_queued);
   1097 #endif
   1098 
   1099 		/*
   1100 		 * i+1 does not need to be a valid descriptor;
   1101 		 * this is just a convenient way to sync a
   1102 		 * single tx descriptor (i).
   1103 		 */
   1104 		dwc_gmac_txdesc_sync(sc, i, i+1,
   1105 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   1106 
   1107 		desc = &sc->sc_txq.t_desc[i];
   1108 		status = le32toh(desc->ddesc_status);
   1109 		if (status & DDESC_STATUS_OWNEDBYDEV)
   1110 			break;
   1111 
   1112 		data = &sc->sc_txq.t_data[i];
   1113 		if (data->td_m == NULL)
   1114 			continue;
   1115 
   1116 		ifp->if_opackets++;
   1117 		nsegs = data->td_active->dm_nsegs;
   1118 		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
   1119 		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1120 		bus_dmamap_unload(sc->sc_dmat, data->td_active);
   1121 
   1122 #ifdef DWC_GMAC_DEBUG
   1123 		aprint_normal_dev(sc->sc_dev,
   1124 		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
   1125 		    "freeing mbuf %p\n", i, data->td_m);
   1126 #endif
   1127 
   1128 		m_freem(data->td_m);
   1129 		data->td_m = NULL;
   1130 
   1131 		sc->sc_txq.t_queued -= nsegs;
   1132 	}
   1133 
   1134 	sc->sc_txq.t_next = i;
   1135 
   1136 	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
   1137 		ifp->if_flags &= ~IFF_OACTIVE;
   1138 	}
   1139 	mutex_exit(&sc->sc_txq.t_mtx);
   1140 }
   1141 
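        /*
         * RX completion: process descriptors from r_cur until we hit
         * one still owned by the device.  A replacement cluster is
         * allocated before a packet is passed up; if that fails, the
         * packet is dropped and the old buffer recycled, so the ring
         * never runs out of buffers.
         */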
   1142 static void
   1143 dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
   1144 {
   1145 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1146 	struct dwc_gmac_dev_dmadesc *desc;
   1147 	struct dwc_gmac_rx_data *data;
   1148 	bus_addr_t physaddr;
   1149 	uint32_t status;
   1150 	struct mbuf *m, *mnew;
   1151 	int i, len, error;
   1152 
   1153 	mutex_enter(&sc->sc_rxq.r_mtx);
   1154 	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
   1155 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
   1156 		    RX_DESC_OFFSET(i), sizeof(*desc),
   1157 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   1158 		desc = &sc->sc_rxq.r_desc[i];
   1159 		data = &sc->sc_rxq.r_data[i];
   1160 
   1161 		status = le32toh(desc->ddesc_status);
   1162 		if (status & DDESC_STATUS_OWNEDBYDEV)
   1163 			break;
   1164 
   1165 		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
   1166 #ifdef DWC_GMAC_DEBUG
   1167 			aprint_normal_dev(sc->sc_dev,
   1168 			    "RX error: descriptor status %08x, skipping\n",
   1169 			    status);
   1170 #endif
   1171 			ifp->if_ierrors++;
   1172 			goto skip;
   1173 		}
   1174 
   1175 		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);
   1176 
   1177 #ifdef DWC_GMAC_DEBUG
   1178 		aprint_normal_dev(sc->sc_dev,
   1179 		    "rx int: device is done with descriptor #%d, len: %d\n",
   1180 		    i, len);
   1181 #endif
   1182 
   1183 		/*
   1184 		 * Try to get a new mbuf before passing this one
   1185 		 * up; if that fails, drop the packet and reuse
   1186 		 * the existing one.
   1187 		 */
   1188 		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
   1189 		if (mnew == NULL) {
   1190 			ifp->if_ierrors++;
   1191 			goto skip;
   1192 		}
   1193 		MCLGET(mnew, M_DONTWAIT);
   1194 		if ((mnew->m_flags & M_EXT) == 0) {
   1195 			m_freem(mnew);
   1196 			ifp->if_ierrors++;
   1197 			goto skip;
   1198 		}
   1199 
   1200 		/* unload old DMA map */
   1201 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
   1202 		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1203 		bus_dmamap_unload(sc->sc_dmat, data->rd_map);
   1204 
   1205 		/* and reload with new mbuf */
   1206 		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
   1207 		    mtod(mnew, void*), MCLBYTES, NULL,
   1208 		    BUS_DMA_READ | BUS_DMA_NOWAIT);
   1209 		if (error != 0) {
   1210 			m_freem(mnew);
   1211 			/* try to reload old mbuf */
   1212 			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
   1213 			    mtod(data->rd_m, void*), MCLBYTES, NULL,
   1214 			    BUS_DMA_READ | BUS_DMA_NOWAIT);
   1215 			if (error != 0) {
   1216 				panic("%s: could not load old rx mbuf",
   1217 				    device_xname(sc->sc_dev));
   1218 			}
   1219 			ifp->if_ierrors++;
   1220 			goto skip;
   1221 		}
   1222 		physaddr = data->rd_map->dm_segs[0].ds_addr;
   1223 
   1224 		/*
   1225 		 * New mbuf loaded, update RX ring and continue
   1226 		 */
   1227 		m = data->rd_m;
   1228 		data->rd_m = mnew;
   1229 		desc->ddesc_data = htole32(physaddr);
   1230 
   1231 		/* finalize mbuf */
   1232 		m->m_pkthdr.len = m->m_len = len;
   1233 		m_set_rcvif(m, ifp);
   1234 		m->m_flags |= M_HASFCS;
   1235 
   1236 		if_percpuq_enqueue(sc->sc_ipq, m);
   1237 
   1238 skip:
   1239 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
   1240 		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
   1241 		desc->ddesc_cntl = htole32(
   1242 		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
   1243 		    DDESC_CNTL_RXCHAIN);
   1244 		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
   1245 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
   1246 		    RX_DESC_OFFSET(i), sizeof(*desc),
   1247 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1248 	}
   1249 
   1250 	/* update RX pointer */
   1251 	sc->sc_rxq.r_cur = i;
   1252 
   1253 	mutex_exit(&sc->sc_rxq.r_mtx);
   1254 }
   1255 
   1256 /*
   1257  * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
   1258  */
   1259 static uint32_t
   1260 bitrev32(uint32_t x)
   1261 {
   1262 	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
   1263 	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
   1264 	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
   1265 	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
   1266 
   1267 	return (x >> 16) | (x << 16);
   1268 }
   1269 
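        /*
         * Program the 64 bit multicast hash filter: the hash index of
         * an address is the top 6 bits of the bit-reversed
         * (complemented) CRC32 of that address; bit 5 of the index
         * selects the HTLOW/HTHIGH register and the low 5 bits the bit
         * within it.  Address ranges fall back to ALLMULTI.
         */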
   1270 static void
   1271 dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
   1272 {
   1273 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
   1274 	struct ether_multi *enm;
   1275 	struct ether_multistep step;
   1276 	uint32_t hashes[2] = { 0, 0 };
   1277 	uint32_t ffilt, h;
   1278 	int mcnt;
   1279 
   1280 	KASSERT(mutex_owned(sc->sc_lock));
   1281 
   1282 	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
   1283 
   1284 	if (ifp->if_flags & IFF_PROMISC) {
   1285 		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
   1286 		goto special_filter;
   1287 	}
   1288 
   1289 	ifp->if_flags &= ~IFF_ALLMULTI;
   1290 	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);
   1291 
   1292 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
   1293 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);
   1294 
   1295 	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
   1296 	mcnt = 0;
   1297 	while (enm != NULL) {
   1298 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   1299 		    ETHER_ADDR_LEN) != 0) {
   1300 			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
   1301 			ifp->if_flags |= IFF_ALLMULTI;
   1302 			goto special_filter;
   1303 		}
   1304 
   1305 		h = bitrev32(
   1306 			~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
   1307 		    ) >> 26;
   1308 		hashes[h >> 5] |= (1 << (h & 0x1f));
   1309 
   1310 		mcnt++;
   1311 		ETHER_NEXT_MULTI(step, enm);
   1312 	}
   1313 
   1314 	if (mcnt)
   1315 		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
   1316 	else
   1317 		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;
   1318 
   1319 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
   1320 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
   1321 	    hashes[0]);
   1322 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
   1323 	    hashes[1]);
   1324 	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
   1325 
   1326 #ifdef DWC_GMAC_DEBUG
   1327 	dwc_gmac_dump_ffilt(sc, ffilt);
   1328 #endif
   1329 	return;
   1330 
   1331 special_filter:
   1332 #ifdef DWC_GMAC_DEBUG
   1333 	dwc_gmac_dump_ffilt(sc, ffilt);
   1334 #endif
   1335 	/* no MAC hashes, ALLMULTI or PROMISC */
   1336 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
   1337 	    ffilt);
   1338 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
   1339 	    0xffffffff);
   1340 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
   1341 	    0xffffffff);
   1342 	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
   1343 }
   1344 
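        /*
         * Interrupt handler, called from the bus glue.  MAC interrupts
         * (MII/link changes) trigger a PHY status poll; the DMA status
         * register dispatches TX/RX completion and error handling and
         * is acknowledged by writing the handled bits back.  Returns
         * nonzero if the interrupt was ours.
         */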
   1345 int
   1346 dwc_gmac_intr(struct dwc_gmac_softc *sc)
   1347 {
   1348 	uint32_t status, dma_status;
   1349 	int rv = 0;
   1350 
   1351 	if (sc->sc_stopping)
   1352 		return 0;
   1353 
   1354 	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
   1355 	if (status & AWIN_GMAC_MII_IRQ) {
   1356 		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1357 		    AWIN_GMAC_MII_STATUS);
   1358 		rv = 1;
   1359 		mii_pollstat(&sc->sc_mii);
   1360 	}
   1361 
   1362 	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1363 	    AWIN_GMAC_DMA_STATUS);
   1364 
   1365 	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
   1366 		rv = 1;
   1367 
   1368 	if (dma_status & GMAC_DMA_INT_TIE)
   1369 		dwc_gmac_tx_intr(sc);
   1370 
   1371 	if (dma_status & GMAC_DMA_INT_RIE)
   1372 		dwc_gmac_rx_intr(sc);
   1373 
   1374 	/*
   1375 	 * Check error conditions
   1376 	 */
   1377 	if (dma_status & GMAC_DMA_INT_ERRORS) {
   1378 		sc->sc_ec.ec_if.if_oerrors++;
   1379 #ifdef DWC_GMAC_DEBUG
   1380 		dwc_dump_and_abort(sc, "interrupt error condition");
   1381 #endif
   1382 	}
   1383 
   1384 	/* ack interrupt */
   1385 	if (dma_status)
   1386 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
   1387 		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);
   1388 
   1389 	/*
   1390 	 * Get more packets
   1391 	 */
   1392 	if (rv)
   1393 		if_schedule_deferred_start(&sc->sc_ec.ec_if);
   1394 
   1395 	return rv;
   1396 }
   1397 
   1398 #ifdef DWC_GMAC_DEBUG
   1399 static void
   1400 dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
   1401 {
   1402 	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
   1403 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
   1404 	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
   1405 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
   1406 	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
   1407 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
   1408 	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
   1409 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
   1410 	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
   1411 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
   1412 	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
   1413 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
   1414 	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
   1415 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
   1416 	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
   1417 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
   1418 	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
   1419 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
   1420 	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
   1421 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
   1422 	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
   1423 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
   1424 	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
   1425 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
   1426 }
   1427 
   1428 static void
   1429 dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
   1430 {
   1431 	int i;
   1432 
   1433 	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
   1434 	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
   1435 	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
   1436 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
   1437 		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
   1438 		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
   1439 		    "data: %08x next: %08x\n",
   1440 		    i, sc->sc_txq.t_physaddr +
   1441 			i*sizeof(struct dwc_gmac_dev_dmadesc),
   1442 		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
   1443 		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
   1444 	}
   1445 }
   1446 
   1447 static void
   1448 dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
   1449 {
   1450 	int i;
   1451 
   1452 	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
   1453 	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
   1454 	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
   1455 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
   1456 		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
   1457 		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
   1458 		    "data: %08x next: %08x\n",
   1459 		    i, sc->sc_rxq.r_physaddr +
   1460 			i*sizeof(struct dwc_gmac_dev_dmadesc),
   1461 		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
   1462 		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
   1463 	}
   1464 }
   1465 
   1466 static void
   1467 dwc_dump_status(struct dwc_gmac_softc *sc)
   1468 {
   1469 	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1470 	     AWIN_GMAC_MAC_INTR);
   1471 	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1472 	     AWIN_GMAC_DMA_STATUS);
   1473 	char buf[200];
   1474 
   1475 	/* print interrupt state */
   1476 	snprintb(buf, sizeof(buf), "\177\20"
   1477 	    "b\x10""NI\0"
   1478 	    "b\x0f""AI\0"
   1479 	    "b\x0e""ER\0"
   1480 	    "b\x0d""FB\0"
   1481 	    "b\x0a""ET\0"
   1482 	    "b\x09""RW\0"
   1483 	    "b\x08""RS\0"
   1484 	    "b\x07""RU\0"
   1485 	    "b\x06""RI\0"
   1486 	    "b\x05""UN\0"
   1487 	    "b\x04""OV\0"
   1488 	    "b\x03""TJ\0"
   1489 	    "b\x02""TU\0"
   1490 	    "b\x01""TS\0"
   1491 	    "b\x00""TI\0"
   1492 	    "\0", dma_status);
   1493 	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
   1494 	    status, buf);
   1495 }
   1496 
   1497 static void
   1498 dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
   1499 {
   1500 	dwc_dump_status(sc);
   1501 	dwc_gmac_dump_ffilt(sc,
   1502 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
   1503 	dwc_gmac_dump_dma(sc);
   1504 	dwc_gmac_dump_tx_desc(sc);
   1505 	dwc_gmac_dump_rx_desc(sc);
   1506 
   1507 	panic("%s", msg);
   1508 }
   1509 
   1510 static void
        dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
   1511 {
   1512 	char buf[200];
   1513 
   1514 	/* print filter setup */
   1515 	snprintb(buf, sizeof(buf), "\177\20"
   1516 	    "b\x1f""RA\0"
   1517 	    "b\x0a""HPF\0"
   1518 	    "b\x09""SAF\0"
   1519 	    "b\x08""SAIF\0"
   1520 	    "b\x05""DBF\0"
   1521 	    "b\x04""PM\0"
   1522 	    "b\x03""DAIF\0"
   1523 	    "b\x02""HMC\0"
   1524 	    "b\x01""HUC\0"
   1525 	    "b\x00""PR\0"
   1526 	    "\0", ffilt);
   1527 	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
   1528 }
   1529 #endif
   1530