/* $NetBSD: dwc_gmac.c,v 1.36.2.2 2017/03/20 06:57:28 pgoyette Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: dwc_gmac.c,v 1.36.2.2 2017/03/20 06:57:28 pgoyette Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
			 uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static int dwc_gmac_init_locked(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_stop_locked(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static void dwc_gmac_start_locked(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t	bitrev32(uint32_t x);

#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
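
/*
 * The "& (COUNT-1)" wrap in TX_NEXT/RX_NEXT relies on AWGE_TX_RING_COUNT
 * and AWGE_RX_RING_COUNT (from dwc_gmac_var.h) being powers of two, so
 * the mask is equivalent to "% COUNT".  For example, with a ring of 32
 * descriptors, TX_NEXT(31) == 0.
 */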

#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE|	\
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)
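
/*
 * GMAC_DEF_DMA_INT_MASK combines the per-event interrupts serviced below
 * (TX/RX completion, fatal bus error, TX underflow) with the NIE/AIE
 * summary enables; in the usual DesignWare scheme an individual event is
 * only delivered when its corresponding summary bit is also set.
 */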

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

#ifdef NET_MPSAFE
#define DWCGMAC_MPSAFE	1
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_START_MPSAFE;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready; attach the interface.
	 */
	if_initialize(ifp);
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}


static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}
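
/*
 * Worked example for the register layout above: for the (made-up) MAC
 * address 00:11:22:33:44:55, octet 0 is 0x00 and ends up in the low byte
 * of ADDR0LO, giving lo = 0x33221100 and hi = 0x00005544.  The attach
 * code reverses exactly this packing when it recovers an address from
 * the filter registers.
 */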

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}
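
/*
 * Both MDIO accessors follow the same handshake: set up phy/register/clock
 * divider in MIIADDR with GMAC_MII_BUSY set (plus GMAC_MII_WRITE and a
 * MIIDATA value for writes), then poll until the hardware clears the busy
 * bit, giving up after 1000 * 10us = 10ms.  A timed-out read returns 0
 * rather than an error, so callers cannot distinguish it from a register
 * that legitimately reads as zero.
 */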

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}
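
/*
 * The RX ring is set up in "chained" mode: DDESC_CNTL_RXCHAIN tells the
 * DMA engine that ddesc_next holds the bus address of the following
 * descriptor instead of a second buffer, and RX_NEXT() makes the last
 * descriptor point back at the first, so the engine cycles through the
 * ring without any end-of-ring marker.
 */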

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the remaining ones to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}
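
/*
 * Both rings live in one physically contiguous allocation:
 *
 *	r_physaddr -> | RX desc 0 .. N-1 | TX desc 0 .. M-1 |
 *
 * which is why TX_DESC_OFFSET() biases its index by AWGE_RX_RING_COUNT
 * while RX_DESC_OFFSET() does not.
 */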

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last one we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}
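
/*
 * Example of the wrap-around case above: with a (hypothetical) ring of
 * 32 descriptors, a call with start = 30 and end = 2 syncs descriptors
 * 30 and 31 in the first bus_dmamap_sync() call and descriptors 0 and 1
 * in the second, while the straight case start = 4, end = 7 syncs 4..6
 * in a single call.
 */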

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
			AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}
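
/*
 * Interface selection above follows the usual DesignWare convention:
 * MIISEL routes the MAC through the 10/100 (MII) port and FES100 then
 * selects 100Mbit/s, so 10baseT sets MIISEL alone, 100baseTX sets both,
 * and 1000baseT leaves both clear to use the GMII/RGMII path.
 */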

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
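
/*
 * GMAC_DMA_OP_{RX,TX}STOREFORWARD above puts both FIFOs into
 * store-and-forward mode: a frame is only forwarded (to the wire, or to
 * host memory) once it is complete in the FIFO, which avoids underflow
 * at the cost of a little latency compared to cut-through thresholds.
 */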

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}
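
/*
 * The TXPOLL write above acts as a doorbell: for this DMA engine the
 * value written is irrelevant, any write to the transmit poll demand
 * register makes the engine re-fetch the descriptor it is parked on
 * instead of waiting for its next polling interval.
 */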

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len, status;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	status = 0;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		desc->ddesc_status = htole32(status);
		status |= DDESC_STATUS_OWNEDBYDEV;

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	desc->ddesc_cntl |= htole32(DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass first descriptor to device */
	sc->sc_txq.t_desc[first].ddesc_status =
	    htole32(DDESC_STATUS_OWNEDBYDEV);

	return 0;
}
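
/*
 * Ownership handoff: inside the loop every descriptor except the first
 * is marked OWNEDBYDEV right away; the first one is flipped only after
 * the whole chain (and its data, via the PREWRITE sync) is in place, so
 * the device can never start on a half-built packet.  The mbuf and the
 * active map are recorded on the *last* segment's slot, which is where
 * dwc_gmac_tx_intr() looks for them when it reclaims the chain.
 */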

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	int change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
			if (ifp->if_flags & IFF_RUNNING) {
				/*
				 * Multicast list has changed; set the
				 * hardware filter accordingly.
				 */
				mutex_enter(sc->sc_lock);
				dwc_gmac_setmulti(sc);
				mutex_exit(sc->sc_lock);
			}
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t status;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i+1 does not need to be a valid descriptor here;
		 * passing it as 'end' is just a convenient way to make
		 * dwc_gmac_txdesc_sync() sync the single descriptor i.
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		ifp->if_opackets++;
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up; if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}
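
/*
 * Note on M_HASFCS above: the frame length the chip reports includes the
 * 4-byte Ethernet FCS, which is still present at the end of the buffer;
 * setting M_HASFCS leaves it to the upper layer (ether_input) to strip
 * those bytes rather than trimming them here.
 */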

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}
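
/*
 * The swaps above work on progressively larger groups: adjacent bits,
 * then 2-bit pairs, nibbles, bytes, and finally the two 16-bit halves.
 * For example, bitrev32(0x00000001) == 0x80000000 and
 * bitrev32(0x12345678) == 0x1e6a2c48.
 */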

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

		h = bitrev32(
			~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}
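
/*
 * Hash filter layout: the hardware hashes an incoming multicast address
 * with CRC-32 and uses the top 6 bits as an index into a 64-bit table,
 * HTLOW holding bits 0-31 and HTHIGH bits 32-63.  The computation above
 * mirrors that: ether_crc32_le() of the 6 address bytes, complemented
 * and bit-reversed, then ">> 26" keeps the 6 most significant bits, and
 * "h >> 5" / "h & 0x1f" pick the word and bit within hashes[].
 */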

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupts; the DMA status bits are write-one-to-clear */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif