/* $NetBSD: dwc_gmac.c,v 1.89 2024/07/14 09:31:55 skrll Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.89 2024/07/14 09:31:55 skrll Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
static int dwc_gmac_init(struct ifnet *);
static int dwc_gmac_init_locked(struct ifnet *);
static void dwc_gmac_stop(struct ifnet *, int);
static void dwc_gmac_stop_locked(struct ifnet *, int);
static void dwc_gmac_start(struct ifnet *);
static void dwc_gmac_start_locked(struct ifnet *);
static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

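/*
 * The core exists with two DMA descriptor layouts; which one is in use
 * is determined at attach time from the hardware feature register
 * (GMAC_DMA_FEAT_ENHANCED_DESC).  All descriptor accesses go through
 * one of the two method tables below, keeping the rest of the driver
 * layout-agnostic.
 */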
static const struct dwc_gmac_desc_methods desc_methods_standard = {
	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_std_set_len,
	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_std_set_len,
	.rx_get_len = dwc_gmac_desc_std_get_len,
	.rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_enh_set_len,
	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_enh_set_len,
	.rx_get_len = dwc_gmac_desc_enh_get_len,
	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
};

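/*
 * RX and TX descriptors share one DMA area: the AWGE_RX_RING_COUNT RX
 * descriptors come first, followed by the AWGE_TX_RING_COUNT TX
 * descriptors (see dwc_gmac_alloc_dma_rings()); the *_DESC_OFFSET
 * macros compute byte offsets into that area.  The ring sizes are
 * powers of two, so the *_NEXT macros can wrap with a simple mask.
 */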
#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT + (N)) \
				    * sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N) + 1) & (AWGE_TX_RING_COUNT - 1))

#define	RX_DESC_OFFSET(N)	((N) * sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N) + 1) & (AWGE_RX_RING_COUNT - 1))

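/*
 * DMA interrupts enabled by default: normal/abnormal summary, TX and
 * RX completion, fatal bus error and TX underflow.  The conditions in
 * GMAC_DMA_INT_ERRORS are the ones counted as errors in dwc_gmac_intr().
 */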
#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
				GMAC_DMA_INT_FBE |	\
				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
static void dwc_dump_status(struct dwc_gmac_softc *);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi, ver, hwft;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return ENXIO;	/* not much to cleanup, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	hwft = 0;
	if (ver >= 0x35) {
		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_HWFEATURES);
		aprint_normal_dev(sc->sc_dev,
		    "HW feature mask: %x\n", hwft);
	}

	if (sizeof(bus_addr_t) > 4) {
		int error = bus_dmatag_subregion(sc->sc_dmat, 0, __MASK(32),
		    &sc->sc_dmat, BUS_DMA_WAITOK);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create DMA subregion\n");
			return ENOMEM;
		}
	}

	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
		aprint_normal_dev(sc->sc_dev,
		    "Using enhanced descriptor format\n");
		sc->sc_descm = &desc_methods_enhanced;
	} else {
		sc->sc_descm = &desc_methods_standard;
	}
	if (hwft & GMAC_DMA_FEAT_RMON) {
		uint32_t val;

		/* Mask all MMC interrupts */
		val = 0xffffffff;
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_RX_INT_MSK, val);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_TX_INT_MSK, val);
	}

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach interface
	 */
	if_initialize(ifp);
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);

	return ENXIO;
}

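/*
 * Soft-reset the core: set the busmode reset bit and poll until the
 * hardware clears it again (up to 30000 * 10us = 300ms).
 */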
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t hi, lo;

	hi = enaddr[4] | (enaddr[5] << 8);
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | ((uint32_t)enaddr[3] << 24);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}

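/*
 * MDIO access: write the PHY address, register number, clock divider
 * and busy bit to MIIADDR (for writes, MIIDATA is loaded first), then
 * poll until the busy bit clears; 1000 polls of 10us give a 10ms
 * timeout.
 */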
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

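/*
 * RX buffers: one mbuf cluster per descriptor, usable length capped at
 * AWGE_MAX_PACKET; each descriptor is chained to the next via
 * ddesc_next and handed to the device immediately.
 */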
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t rxringsz = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, rxringsz);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		data->rd_m->m_len = data->rd_m->m_pkthdr.len
		    = data->rd_m->m_ext.ext_size;
		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
			data->rd_m->m_len = data->rd_m->m_pkthdr.len
			    = AWGE_MAX_PACKET;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    RX_DESC_OFFSET(0),
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t ringsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, ringsize, 1, ringsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, ringsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    ringsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    ringsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT * sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    * TX_NEXT(i));
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

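/*
 * Sync a (possibly wrapping) range of TX descriptors; a wrapped range
 * is handled as two separate bus_dmamap_sync() calls.
 */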
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT) - TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0),
		    ops);
	}
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
			AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
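		/* advertise a pause time of 0x200 quanta (512 bit times each) */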
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	sc->sc_txbusy = false;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	if (sc->sc_txbusy)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			sc->sc_txbusy = true;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			sc->sc_txbusy = true;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

	ifp->if_flags &= ~IFF_RUNNING;
	sc->sc_txbusy = false;
}

/*
 * Map mbuf chain m0 and append it to the TX ring, one descriptor per
 * DMA segment.  Ownership of the first descriptor is passed to the
 * device only after all following descriptors are set up, so the DMA
 * engine never sees a partially built chain.
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	data->td_m = m0;
	data->td_active = map;

	/* sync the packet buffer */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* sync the new descriptors - ownership not transferred yet */
	dwc_gmac_txdesc_sync(sc, first, sc->sc_txq.t_cur,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	u_short change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

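/*
 * TX completion: walk the ring from t_next, reclaim descriptors the
 * device has handed back, unload their DMA maps and free the mbufs.
 */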
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i + 1 need not be a valid descriptor index; it is
		 * only used to make txdesc_sync() cover the single
		 * descriptor i.
		 */
		dwc_gmac_txdesc_sync(sc, i, i + 1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		if_statinc(ifp, if_opackets);
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_txbusy = false;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

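/*
 * RX completion: for each descriptor handed back by the device,
 * replace its mbuf with a fresh cluster and pass the old one up the
 * stack; on any allocation failure the packet is dropped and the old
 * buffer recycled.
 */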
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    le32toh(desc->ddesc_status0));
#endif
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one up;
		 * if that fails, drop the packet and reuse the
		 * existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
		if (mnew->m_len > AWGE_MAX_PACKET) {
			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
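		/* the hardware leaves the Ethernet FCS on the frame */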
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ec;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto special_filter;
		}

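		/*
		 * The hash filter uses the top 6 bits of the inverted
		 * big-endian CRC32 of the address: bit 5 selects the
		 * HTHIGH/HTLOW word, bits 0-4 the bit within it.
		 */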
		h = ~ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = ifp->if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

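/*
 * Interrupt handler: dispatch MII link events, TX/RX completion and
 * error summaries from the DMA status register, ack what was seen and
 * schedule a deferred if_start when progress was made.
 */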
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		if_statinc(&sc->sc_ec.ec_if, if_oerrors);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	rnd_add_uint32(&sc->rnd_source, dma_status);

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}

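/*
 * Descriptor accessors.  The OWN bit lives in the first status word in
 * both descriptor layouts, so a single pair of ownership helpers
 * serves TX and RX in the standard and the enhanced format alike.
 */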
static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
		__SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl |
		DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
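	/* the RX chain bit in the standard layout shares TXCHAIN's position */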
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}

static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
		__SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_RDES0_ES | DDESC_RDES0_LE));
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc);
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0), AWGE_TX_RING_COUNT * descsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i * sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc);
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    RX_DESC_OFFSET(0), AWGE_RX_RING_COUNT * descsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i * sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif