      1 /* $NetBSD: dwc_gmac.c,v 1.97 2025/10/04 04:44:20 thorpej Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Matt Thomas of 3am Software Foundry and Martin Husemann.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * This driver supports the Synopsys DesignWare GMAC core, as found
     34  * on Allwinner A20 SoCs and others.
     35  *
     36  * Real documentation does not seem to be available; the marketing
     37  * product pages can be found here:
     38  *
     39  *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
     40  */
     41 
     42 /*
     43  * Lock order:
     44  *
     45  *	IFNET_LOCK -> sc_mcast_lock
     46  *	IFNET_LOCK -> sc_intr_lock -> {sc_txq.t_mtx, sc_rxq.r_mtx}
     47  */
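/*
 * For example, dwc_gmac_start() enters sc_intr_lock and then
 * dwc_gmac_start_locked() enters sc_txq.t_mtx, releasing in the
 * reverse order:
 *
 *	mutex_enter(sc->sc_intr_lock);
 *	mutex_enter(&sc->sc_txq.t_mtx);
 *	... queue packets, kick the DMA engine ...
 *	mutex_exit(&sc->sc_txq.t_mtx);
 *	mutex_exit(sc->sc_intr_lock);
 */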
     48 
     49 #include <sys/cdefs.h>
     50 
     51 __KERNEL_RCSID(0, "$NetBSD: dwc_gmac.c,v 1.97 2025/10/04 04:44:20 thorpej Exp $");
     52 
     53 /* #define	DWC_GMAC_DEBUG	1 */
     54 
     55 #ifdef _KERNEL_OPT
     56 #include "opt_inet.h"
     57 #endif
     58 
     59 #include <sys/param.h>
     60 #include <sys/bus.h>
     61 #include <sys/device.h>
     62 #include <sys/intr.h>
     63 #include <sys/systm.h>
     64 #include <sys/sockio.h>
     65 #include <sys/cprng.h>
     66 #include <sys/rndsource.h>
     67 
     68 #include <net/if.h>
     69 #include <net/if_ether.h>
     70 #include <net/if_media.h>
     71 #include <net/bpf.h>
     72 #ifdef INET
     73 #include <netinet/if_inarp.h>
     74 #endif
     75 
     76 #include <dev/mii/miivar.h>
     77 
     78 #include <dev/ic/dwc_gmac_reg.h>
     79 #include <dev/ic/dwc_gmac_var.h>
     80 
     81 static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
     82 static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
     83 static void dwc_gmac_miibus_statchg(struct ifnet *);
     84 
     85 static int dwc_gmac_reset(struct dwc_gmac_softc *);
     86 static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t[ETHER_ADDR_LEN]);
     87 static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
     88 static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
     89 static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
     90 static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
     91 static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
     92 static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
     93 static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
     94 static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
     95 static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
     96 static int dwc_gmac_init(struct ifnet *);
     97 static void dwc_gmac_stop(struct ifnet *, int);
     98 static void dwc_gmac_start(struct ifnet *);
     99 static void dwc_gmac_start_locked(struct ifnet *);
    100 static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
    101 static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
    102 static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
    103 static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
    104 static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
    105 static int dwc_gmac_ifflags_cb(struct ethercom *);
    106 static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
    107 static int  dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
    108 static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
    109 static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
    110 static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
    111 static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
    112 static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
    113 static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
    114 static int  dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
    115 static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
    116 static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
    117 static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
    118 static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
    119 static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
    120 static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
    121 static int  dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);
    122 
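/*
 * Descriptor accesses are indirected through one of the two method
 * tables below, so the ring management code is independent of the
 * descriptor layout; dwc_gmac_attach() selects the enhanced table
 * when the hardware feature register advertises
 * GMAC_DMA_FEAT_ENHANCED_DESC, and the standard table otherwise.
 */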
    123 static const struct dwc_gmac_desc_methods desc_methods_standard = {
    124 	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
    125 	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
    126 	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
    127 	.tx_set_len = dwc_gmac_desc_std_set_len,
    128 	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
    129 	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
    130 	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
    131 	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
    132 	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
    133 	.rx_set_len = dwc_gmac_desc_std_set_len,
    134 	.rx_get_len = dwc_gmac_desc_std_get_len,
    135 	.rx_has_error = dwc_gmac_desc_std_rx_has_error
    136 };
    137 
    138 static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
    139 	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
    140 	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
    141 	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
    142 	.tx_set_len = dwc_gmac_desc_enh_set_len,
    143 	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
    144 	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
    145 	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
    146 	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
    147 	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
    148 	.rx_set_len = dwc_gmac_desc_enh_set_len,
    149 	.rx_get_len = dwc_gmac_desc_enh_get_len,
    150 	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
    151 };
    152 
    153 
    154 #define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT + (N)) \
    155 				    * sizeof(struct dwc_gmac_dev_dmadesc))
    156 #define	TX_NEXT(N)		(((N) + 1) & (AWGE_TX_RING_COUNT - 1))
    157 
    158 #define RX_DESC_OFFSET(N)	((N) * sizeof(struct dwc_gmac_dev_dmadesc))
    159 #define	RX_NEXT(N)		(((N) + 1) & (AWGE_RX_RING_COUNT - 1))
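
/*
 * The *_NEXT() macros wrap by masking, which only works when the ring
 * counts are powers of two.  A compile-time check (a sketch, using
 * powerof2() from <sys/param.h>) could read:
 *
 *	__CTASSERT(powerof2(AWGE_RX_RING_COUNT));
 *	__CTASSERT(powerof2(AWGE_TX_RING_COUNT));
 */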
    160 
    161 
    162 
    163 #define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
    164 				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
    165 				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)
    166 
    167 #define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
    168 				GMAC_DMA_INT_FBE |	\
    169 				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
    170 				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
    171 				GMAC_DMA_INT_TJE)
    172 
    173 #define	AWIN_DEF_MAC_INTRMASK	\
    174 	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
    175 	AWIN_GMAC_MAC_INT_LINKCHG)
    176 
    177 #ifdef DWC_GMAC_DEBUG
    178 static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
    179 static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
    180 static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
    181 static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
    182 static void dwc_dump_status(struct dwc_gmac_softc *);
    183 static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
    184 #endif
    185 
    186 int
    187 dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
    188 {
    189 	uint8_t enaddr[ETHER_ADDR_LEN];
    190 	uint32_t maclo, machi, hwft;
    191 	struct mii_data * const mii = &sc->sc_mii;
    192 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
    193 
    194 	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
    195 	sc->sc_mii_clk = mii_clk & 7;
    196 
    197 	if (! ether_getaddr(sc->sc_dev, enaddr)) {
    198 		/*
    199 		 * If we did not get an externally configured address,
    200 		 * try to read one from the current filter setup
    201 		 * before resetting the chip.
    202 		 */
    203 		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    204 		    AWIN_GMAC_MAC_ADDR0LO);
    205 		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    206 		    AWIN_GMAC_MAC_ADDR0HI);
    207 
    208 		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
    209 			/* fake MAC address */
    210 			maclo = 0x00f2 | (cprng_strong32() << 16);
    211 			machi = cprng_strong32();
    212 		}
    213 
    214 		enaddr[0] = maclo & 0x0ff;
    215 		enaddr[1] = (maclo >> 8) & 0x0ff;
    216 		enaddr[2] = (maclo >> 16) & 0x0ff;
    217 		enaddr[3] = (maclo >> 24) & 0x0ff;
    218 		enaddr[4] = machi & 0x0ff;
    219 		enaddr[5] = (machi >> 8) & 0x0ff;
    220 	}
    221 
    222 	const uint32_t ver =
    223 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
    224 	const uint32_t snpsver =
    225 	    __SHIFTOUT(ver, AWIN_GMAC_MAC_VERSION_SNPSVER_MASK);
    226 	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", snpsver);
    227 
    228 	/*
    229 	 * Init chip and do initial setup
    230 	 */
    231 	if (dwc_gmac_reset(sc) != 0)
    232 		return ENXIO;	/* not much to cleanup, haven't attached yet */
    233 	dwc_gmac_write_hwaddr(sc, enaddr);
    234 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
    235 	    ether_sprintf(enaddr));
    236 
    237 	hwft = 0;
    238 	if (snpsver >= 0x35) {
    239 		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    240 		    AWIN_GMAC_DMA_HWFEATURES);
    241 		aprint_normal_dev(sc->sc_dev,
    242 		    "HW feature mask: %x\n", hwft);
    243 	}
    244 
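	/*
	 * The descriptors carry only 32-bit buffer/next pointers, so on
	 * platforms with a wider bus_addr_t all DMA memory must come
	 * from the low 4GB; restrict the DMA tag accordingly.
	 */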
    245 	if (sizeof(bus_addr_t) > 4) {
    246 		int error = bus_dmatag_subregion(sc->sc_dmat, 0, __MASK(32),
    247 		    &sc->sc_dmat, BUS_DMA_WAITOK);
    248 		if (error != 0) {
    249 			aprint_error_dev(sc->sc_dev,
    250 			    "failed to create DMA subregion\n");
    251 			return ENOMEM;
    252 		}
    253 	}
    254 
    255 	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
    256 		aprint_normal_dev(sc->sc_dev,
    257 		    "Using enhanced descriptor format\n");
    258 		sc->sc_descm = &desc_methods_enhanced;
    259 	} else {
    260 		sc->sc_descm = &desc_methods_standard;
    261 	}
    262 	if (hwft & GMAC_DMA_FEAT_RMON) {
    263 		uint32_t val;
    264 
    265 		/* Mask all MMC interrupts */
    266 		val = 0xffffffff;
    267 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    268 		    GMAC_MMC_RX_INT_MSK, val);
    269 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    270 		    GMAC_MMC_TX_INT_MSK, val);
    271 	}
    272 
    273 	/*
    274 	 * Allocate Tx and Rx rings
    275 	 */
    276 	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
    277 		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
    278 		goto fail;
    279 	}
    280 
    281 	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
    282 		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
    283 		goto fail;
    284 	}
    285 
    286 	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
    287 		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
    288 		goto fail;
    289 	}
    290 
    291 	sc->sc_stopping = false;
    292 	sc->sc_txbusy = false;
    293 
    294 	sc->sc_mcast_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
    295 	sc->sc_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
    296 	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
    297 	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
    298 
    299 	/*
    300 	 * Prepare interface data
    301 	 */
    302 	ifp->if_softc = sc;
    303 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
    304 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    305 	ifp->if_extflags = IFEF_MPSAFE;
    306 	ifp->if_ioctl = dwc_gmac_ioctl;
    307 	ifp->if_start = dwc_gmac_start;
    308 	ifp->if_init = dwc_gmac_init;
    309 	ifp->if_stop = dwc_gmac_stop;
    310 	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
    311 	IFQ_SET_READY(&ifp->if_snd);
    312 
    313 	/*
    314 	 * Attach MII subdevices
    315 	 */
    316 	sc->sc_ec.ec_mii = &sc->sc_mii;
    317 	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
    318 	mii->mii_ifp = ifp;
    319 	mii->mii_readreg = dwc_gmac_miibus_read_reg;
    320 	mii->mii_writereg = dwc_gmac_miibus_write_reg;
    321 	mii->mii_statchg = dwc_gmac_miibus_statchg;
    322 	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
    323 	    MIIF_DOPAUSE);
    324 
    325 	if (LIST_EMPTY(&mii->mii_phys)) {
    326 		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
    327 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
    328 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
    329 	} else {
    330 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
    331 	}
    332 
    333 	/*
    334 	 * We can support 802.1Q VLAN-sized frames.
    335 	 */
    336 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    337 
    338 	/*
    339 	 * Ready; attach the interface.
    340 	 */
    342 	if_initialize(ifp);
    343 	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
    344 	if_deferred_start_init(ifp, NULL);
    345 	ether_ifattach(ifp, enaddr);
    346 	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
    347 	if_register(ifp);
    348 	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
    349 	    RND_TYPE_NET, RND_FLAG_DEFAULT);
    350 
    351 	/*
    352 	 * Enable interrupts
    353 	 */
    354 	mutex_enter(sc->sc_intr_lock);
    355 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
    356 	    AWIN_DEF_MAC_INTRMASK);
    357 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
    358 	    GMAC_DEF_DMA_INT_MASK);
    359 	mutex_exit(sc->sc_intr_lock);
    360 
    361 	return 0;
    362 
    363 fail:
    364 	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
    365 	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
    366 	dwc_gmac_free_dma_rings(sc);
    367 	mutex_destroy(&sc->sc_mdio_lock);
    368 
    369 	return ENXIO;
    370 }
    371 
    372 
    373 
    374 static int
    375 dwc_gmac_reset(struct dwc_gmac_softc *sc)
    376 {
    377 	size_t cnt;
    378 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
    379 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
    380 	    | GMAC_BUSMODE_RESET);
    381 	for (cnt = 0; cnt < 30000; cnt++) {
    382 		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
    383 		    & GMAC_BUSMODE_RESET) == 0)
    384 			return 0;
    385 		delay(10);
    386 	}
    387 
    388 	aprint_error_dev(sc->sc_dev, "reset timed out\n");
    389 	return EIO;
    390 }
    391 
    392 static void
    393 dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    394     uint8_t enaddr[ETHER_ADDR_LEN])
    395 {
    396 	uint32_t hi, lo;
    397 
    398 	hi = enaddr[4] | (enaddr[5] << 8);
    399 	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
    400 	    | ((uint32_t)enaddr[3] << 24);
    401 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
    402 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
    403 }
    404 
    405 static int
    406 dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
    407 {
    408 	struct dwc_gmac_softc * const sc = device_private(self);
    409 	uint16_t mii;
    410 	size_t cnt;
    411 
    412 	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
    413 	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
    414 	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
    415 	    | GMAC_MII_BUSY;
    416 
    417 	mutex_enter(&sc->sc_mdio_lock);
    418 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
    419 
    420 	for (cnt = 0; cnt < 1000; cnt++) {
    421 		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    422 		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
    423 			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    424 			    AWIN_GMAC_MAC_MIIDATA);
    425 			break;
    426 		}
    427 		delay(10);
    428 	}
    429 
    430 	mutex_exit(&sc->sc_mdio_lock);
    431 
    432 	if (cnt >= 1000)
    433 		return ETIMEDOUT;
    434 
    435 	return 0;
    436 }
    437 
    438 static int
    439 dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
    440 {
    441 	struct dwc_gmac_softc * const sc = device_private(self);
    442 	uint16_t mii;
    443 	size_t cnt;
    444 
    445 	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
    446 	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
    447 	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
    448 	    | GMAC_MII_BUSY | GMAC_MII_WRITE;
    449 
    450 	mutex_enter(&sc->sc_mdio_lock);
    451 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
    452 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
    453 
    454 	for (cnt = 0; cnt < 1000; cnt++) {
    455 		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    456 		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
    457 			break;
    458 		delay(10);
    459 	}
    460 
    461 	mutex_exit(&sc->sc_mdio_lock);
    462 
    463 	if (cnt >= 1000)
    464 		return ETIMEDOUT;
    465 
    466 	return 0;
    467 }
    468 
    469 static int
    470 dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    471 	struct dwc_gmac_rx_ring *ring)
    472 {
    473 	struct dwc_gmac_rx_data *data;
    474 	bus_addr_t physaddr;
    475 	const size_t rxringsz = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
    476 	int error, i, next;
    477 
    478 	ring->r_cur = ring->r_next = 0;
    479 	memset(ring->r_desc, 0, rxringsz);
    480 
    481 	/*
    482 	 * Pre-allocate Rx buffers and populate Rx ring.
    483 	 */
    484 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
    485 		struct dwc_gmac_dev_dmadesc *desc;
    486 
    487 		data = &sc->sc_rxq.r_data[i];
    488 
    489 		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
    490 		if (data->rd_m == NULL) {
    491 			aprint_error_dev(sc->sc_dev,
    492 			    "could not allocate rx mbuf #%d\n", i);
    493 			error = ENOMEM;
    494 			goto fail;
    495 		}
    496 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
    497 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
    498 		if (error != 0) {
    499 			aprint_error_dev(sc->sc_dev,
    500 			    "could not create DMA map\n");
    501 			data->rd_map = NULL;
    502 			goto fail;
    503 		}
    504 		MCLGET(data->rd_m, M_DONTWAIT);
    505 		if (!(data->rd_m->m_flags & M_EXT)) {
    506 			aprint_error_dev(sc->sc_dev,
    507 			    "could not allocate mbuf cluster #%d\n", i);
    508 			error = ENOMEM;
    509 			goto fail;
    510 		}
    511 		data->rd_m->m_len = data->rd_m->m_pkthdr.len
    512 		    = data->rd_m->m_ext.ext_size;
    513 		m_adj(data->rd_m, ETHER_ALIGN);
    514 		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
    515 			data->rd_m->m_len = data->rd_m->m_pkthdr.len
    516 			    = AWGE_MAX_PACKET;
    517 		}
    518 
    519 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
    520 		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
    521 		if (error != 0) {
    522 			aprint_error_dev(sc->sc_dev,
    523 			    "could not load rx buf DMA map #%d", i);
    524 			goto fail;
    525 		}
    526 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
    527 		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
    528 		physaddr = data->rd_map->dm_segs[0].ds_addr;
    529 
    530 		desc = &sc->sc_rxq.r_desc[i];
    531 		desc->ddesc_data = htole32(physaddr);
    532 		next = RX_NEXT(i);
    533 		desc->ddesc_next = htole32(ring->r_physaddr
    534 		    + next * sizeof(*desc));
    535 		sc->sc_descm->rx_init_flags(desc);
    536 		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
    537 		sc->sc_descm->rx_set_owned_by_dev(desc);
    538 	}
    539 
    540 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    541 	    RX_DESC_OFFSET(0),
    542 	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
    543 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    544 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
    545 	    ring->r_physaddr);
    546 
    547 	return 0;
    548 
    549 fail:
    550 	dwc_gmac_free_rx_ring(sc, ring);
    551 	return error;
    552 }
    553 
    554 static void
    555 dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    556 	struct dwc_gmac_rx_ring *ring)
    557 {
    558 	struct dwc_gmac_dev_dmadesc *desc;
    559 	struct dwc_gmac_rx_data *data;
    560 	int i;
    561 
    562 	mutex_enter(&ring->r_mtx);
    563 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
    564 		desc = &sc->sc_rxq.r_desc[i];
    565 		data = &sc->sc_rxq.r_data[i];
    566 		sc->sc_descm->rx_init_flags(desc);
    567 		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
    568 		sc->sc_descm->rx_set_owned_by_dev(desc);
    569 	}
    570 
    571 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
    572 	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
    573 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    574 
    575 	ring->r_cur = ring->r_next = 0;
    576 	/* reset DMA address to start of ring */
    577 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
    578 	    sc->sc_rxq.r_physaddr);
    579 	mutex_exit(&ring->r_mtx);
    580 }
    581 
    582 static int
    583 dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
    584 {
    585 	const size_t ringsize = AWGE_TOTAL_RING_COUNT *
    586 		sizeof(struct dwc_gmac_dev_dmadesc);
    587 	int error, nsegs;
    588 	void *rings;
    589 
    590 	error = bus_dmamap_create(sc->sc_dmat, ringsize, 1, ringsize, 0,
    591 	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
    592 	if (error != 0) {
    593 		aprint_error_dev(sc->sc_dev,
    594 		    "could not create desc DMA map\n");
    595 		sc->sc_dma_ring_map = NULL;
    596 		goto fail;
    597 	}
    598 
    599 	error = bus_dmamem_alloc(sc->sc_dmat, ringsize, PAGE_SIZE, 0,
    600 	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
    601 	if (error != 0) {
    602 		aprint_error_dev(sc->sc_dev,
    603 		    "could not allocate DMA memory\n");
    604 		goto fail;
    605 	}
    606 
    607 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
    608 	    ringsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
    609 	if (error != 0) {
    610 		aprint_error_dev(sc->sc_dev,
    611 		    "could not map DMA memory\n");
    612 		goto fail;
    613 	}
    614 
    615 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
    616 	    ringsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
    617 	if (error != 0) {
    618 		aprint_error_dev(sc->sc_dev,
    619 		    "could not load desc DMA map\n");
    620 		goto fail;
    621 	}
    622 
    623 	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
    624 	sc->sc_rxq.r_desc = rings;
    625 	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
    626 
    627 	/* and the remaining descriptors to the TX side */
    628 	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
    629 	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
    630 	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc);
    631 
    632 	return 0;
    633 
    634 fail:
    635 	dwc_gmac_free_dma_rings(sc);
    636 	return error;
    637 }
    638 
    639 static void
    640 dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
    641 {
    642 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
    643 	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
    644 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
    645 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
    646 	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
    647 	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
    648 }
    649 
    650 static void
    651 dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
    652 {
    653 	struct dwc_gmac_rx_data *data;
    654 	int i;
    655 
    656 	if (ring->r_desc == NULL)
    657 		return;
    658 
    659 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
    660 		data = &ring->r_data[i];
    661 
    662 		if (data->rd_map != NULL) {
    663 			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
    664 			    AWGE_RX_RING_COUNT
    665 				* sizeof(struct dwc_gmac_dev_dmadesc),
    666 			    BUS_DMASYNC_POSTREAD);
    667 			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
    668 			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
    669 		}
    670 		m_freem(data->rd_m);
    671 	}
    672 }
    673 
    674 static int
    675 dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    676 	struct dwc_gmac_tx_ring *ring)
    677 {
    678 	int i, error = 0;
    679 
    680 	ring->t_queued = 0;
    681 	ring->t_cur = ring->t_next = 0;
    682 
    683 	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT * sizeof(*ring->t_desc));
    684 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    685 	    TX_DESC_OFFSET(0),
    686 	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
    687 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    688 
    689 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    690 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
    691 		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
    692 		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
    693 		    &ring->t_data[i].td_map);
    694 		if (error != 0) {
    695 			aprint_error_dev(sc->sc_dev,
    696 			    "could not create TX DMA map #%d\n", i);
    697 			ring->t_data[i].td_map = NULL;
    698 			goto fail;
    699 		}
    700 		ring->t_desc[i].ddesc_next = htole32(
    701 		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
    702 		    * TX_NEXT(i));
    703 	}
    704 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    705 	    TX_DESC_OFFSET(0),
    706 	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
    707 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    708 
    709 	return 0;
    710 
    711 fail:
    712 	dwc_gmac_free_tx_ring(sc, ring);
    713 	return error;
    714 }
    715 
    716 static void
    717 dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
    718 {
    719 	/* 'end' points one descriptor beyond the last one we want to sync */
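	/*
	 * Worked example (for illustration only, assuming a
	 * 512-descriptor ring): start == 510, end == 2 wraps, so the
	 * code below first syncs descriptors 510-511 and then
	 * descriptors 0-1.
	 */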
    720 	if (end > start) {
    721 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    722 		    TX_DESC_OFFSET(start),
    723 		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(start),
    724 		    ops);
    725 		return;
    726 	}
    727 	/* sync from 'start' to end of ring */
    728 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    729 	    TX_DESC_OFFSET(start),
    730 	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT) - TX_DESC_OFFSET(start),
    731 	    ops);
    732 	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
    733 		/* sync from start of ring to 'end' */
    734 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    735 		    TX_DESC_OFFSET(0),
    736 		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0),
    737 		    ops);
    738 	}
    739 }
    740 
    741 static void
    742 dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    743 	struct dwc_gmac_tx_ring *ring)
    744 {
    745 	int i;
    746 
    747 	mutex_enter(&ring->t_mtx);
    748 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    749 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
    750 
    751 		if (data->td_m != NULL) {
    752 			bus_dmamap_sync(sc->sc_dmat, data->td_active,
    753 			    0, data->td_active->dm_mapsize,
    754 			    BUS_DMASYNC_POSTWRITE);
    755 			bus_dmamap_unload(sc->sc_dmat, data->td_active);
    756 			m_freem(data->td_m);
    757 			data->td_m = NULL;
    758 		}
    759 	}
    760 
    761 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    762 	    TX_DESC_OFFSET(0),
    763 	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
    764 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    765 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
    766 	    sc->sc_txq.t_physaddr);
    767 
    768 	ring->t_queued = 0;
    769 	ring->t_cur = ring->t_next = 0;
    770 	mutex_exit(&ring->t_mtx);
    771 }
    772 
    773 static void
    774 dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    775 	struct dwc_gmac_tx_ring *ring)
    776 {
    777 	int i;
    778 
    779 	/* unload the maps */
    780 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    781 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
    782 
    783 		if (data->td_m != NULL) {
    784 			bus_dmamap_sync(sc->sc_dmat, data->td_active,
    785 			    0, data->td_map->dm_mapsize,
    786 			    BUS_DMASYNC_POSTWRITE);
    787 			bus_dmamap_unload(sc->sc_dmat, data->td_active);
    788 			m_freem(data->td_m);
    789 			data->td_m = NULL;
    790 		}
    791 	}
    792 
    793 	/* and actually free them */
    794 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    795 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
    796 
    797 		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
    798 	}
    799 }
    800 
    801 static void
    802 dwc_gmac_miibus_statchg(struct ifnet *ifp)
    803 {
    804 	struct dwc_gmac_softc * const sc = ifp->if_softc;
    805 	struct mii_data * const mii = &sc->sc_mii;
    806 	uint32_t conf, flow;
    807 
    808 	/*
    809 	 * Set MII or GMII interface based on the speed
    810 	 * negotiated by the PHY.
    811 	 */
    812 	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
    813 	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
    814 	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
    815 	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
    816 	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
    817 	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
    818 	    | AWIN_GMAC_MAC_CONF_RXENABLE
    819 	    | AWIN_GMAC_MAC_CONF_TXENABLE;
    820 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
    821 	case IFM_10_T:
    822 		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
    823 		break;
    824 	case IFM_100_TX:
    825 		conf |= AWIN_GMAC_MAC_CONF_FES100 |
    826 			AWIN_GMAC_MAC_CONF_MIISEL;
    827 		break;
    828 	case IFM_1000_T:
    829 		break;
    830 	}
    831 	if (sc->sc_set_speed)
    832 		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));
    833 
    834 	flow = 0;
    835 	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
    836 		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
    837 		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
    838 	}
    839 	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
    840 		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
    841 	}
    842 	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
    843 		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
    844 	}
    845 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    846 	    AWIN_GMAC_MAC_FLOWCTRL, flow);
    847 
    848 #ifdef DWC_GMAC_DEBUG
    849 	aprint_normal_dev(sc->sc_dev,
    850 	    "setting MAC conf register: %08x\n", conf);
    851 #endif
    852 
    853 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    854 	    AWIN_GMAC_MAC_CONF, conf);
    855 }
    856 
    857 static int
    858 dwc_gmac_init(struct ifnet *ifp)
    859 {
    860 	struct dwc_gmac_softc * const sc = ifp->if_softc;
    861 	uint32_t ffilt;
    862 
    863 	ASSERT_SLEEPABLE();
    864 	KASSERT(IFNET_LOCKED(ifp));
    865 	KASSERT(ifp == &sc->sc_ec.ec_if);
    866 
    867 	dwc_gmac_stop(ifp, 0);
    868 
    869 	/*
    870 	 * Configure DMA burst/transfer mode and RX/TX priorities.
    871 	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
    872 	 */
    873 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
    874 	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
    875 	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
    876 	    __SHIFTIN(2, GMAC_BUSMODE_PBL));
    877 
    878 	/*
    879 	 * Set up address filter
    880 	 */
    881 	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
    882 	if (ifp->if_flags & IFF_PROMISC) {
    883 		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
    884 	} else {
    885 		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
    886 	}
    887 	if (ifp->if_flags & IFF_BROADCAST) {
    888 		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
    889 	} else {
    890 		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
    891 	}
    892 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
    893 
    894 	/*
    895 	 * Set up multicast filter
    896 	 */
    897 	mutex_enter(sc->sc_mcast_lock);
    898 	dwc_gmac_setmulti(sc);
    899 	mutex_exit(sc->sc_mcast_lock);
    900 
    901 	/*
    902 	 * Set up dma pointer for RX and TX ring
    903 	 */
    904 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
    905 	    sc->sc_rxq.r_physaddr);
    906 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
    907 	    sc->sc_txq.t_physaddr);
    908 
    909 	/*
    910 	 * Start RX/TX part
    911 	 */
    912 	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
    913 	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
    914 		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
    915 	}
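	/*
	 * With store-and-forward the DMA engine handles a frame only
	 * once it is complete in the FIFO; in threshold mode (forced
	 * via DWC_GMAC_FORCE_THRESH_DMA_MODE) it starts at a FIFO fill
	 * threshold instead, trading latency for underrun risk.
	 */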
    916 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);
    917 #ifdef DWC_GMAC_DEBUG
    918 	aprint_normal_dev(sc->sc_dev,
    919 	    "setting DMA opmode register: %08x\n", opmode);
    920 #endif
    921 
    922 	ifp->if_flags |= IFF_RUNNING;
    923 	sc->sc_if_flags = ifp->if_flags;
    924 
    925 	mutex_enter(sc->sc_intr_lock);
    926 	sc->sc_stopping = false;
    927 	mutex_exit(sc->sc_intr_lock);
    928 
    929 	mutex_enter(&sc->sc_txq.t_mtx);
    930 	sc->sc_txbusy = false;
    931 	mutex_exit(&sc->sc_txq.t_mtx);
    932 
    933 	return 0;
    934 }
    935 
    936 static void
    937 dwc_gmac_start(struct ifnet *ifp)
    938 {
    939 	struct dwc_gmac_softc * const sc = ifp->if_softc;
    940 	KASSERT(if_is_mpsafe(ifp));
    941 
    942 	mutex_enter(sc->sc_intr_lock);
    943 	if (!sc->sc_stopping) {
    944 		dwc_gmac_start_locked(ifp);
    945 	}
    946 	mutex_exit(sc->sc_intr_lock);
    947 }
    948 
    949 static void
    950 dwc_gmac_start_locked(struct ifnet *ifp)
    951 {
    952 	struct dwc_gmac_softc * const sc = ifp->if_softc;
    953 	int old = sc->sc_txq.t_queued;
    954 	int start = sc->sc_txq.t_cur;
    955 	struct mbuf *m0;
    956 
    957 	KASSERT(mutex_owned(sc->sc_intr_lock));
    958 
    959 	mutex_enter(&sc->sc_txq.t_mtx);
    960 	if (sc->sc_txbusy) {
    961 		mutex_exit(&sc->sc_txq.t_mtx);
    962 		return;
    963 	}
    964 
    965 	for (;;) {
    966 		IFQ_POLL(&ifp->if_snd, m0);
    967 		if (m0 == NULL)
    968 			break;
    969 		if (dwc_gmac_queue(sc, m0) != 0) {
    970 			sc->sc_txbusy = true;
    971 			break;
    972 		}
    973 		IFQ_DEQUEUE(&ifp->if_snd, m0);
    974 		bpf_mtap(ifp, m0, BPF_D_OUT);
    975 		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
    976 			sc->sc_txbusy = true;
    977 			break;
    978 		}
    979 	}
    980 
    981 	if (sc->sc_txq.t_queued != old) {
    982 		/* packets have been queued, kick it off */
    983 		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
    984 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    985 
    986 #ifdef DWC_GMAC_DEBUG
    987 		dwc_dump_status(sc);
    988 #endif
    989 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    990 		    AWIN_GMAC_DMA_TXPOLL, ~0U);
    991 	}
    992 	mutex_exit(&sc->sc_txq.t_mtx);
    993 }
    994 
    995 static void
    996 dwc_gmac_stop(struct ifnet *ifp, int disable)
    997 {
    998 	struct dwc_gmac_softc * const sc = ifp->if_softc;
    999 
   1000 	ASSERT_SLEEPABLE();
   1001 	KASSERT(IFNET_LOCKED(ifp));
   1002 
   1003 	ifp->if_flags &= ~IFF_RUNNING;
   1004 
   1005 	mutex_enter(sc->sc_mcast_lock);
   1006 	sc->sc_if_flags = ifp->if_flags;
   1007 	mutex_exit(sc->sc_mcast_lock);
   1008 
   1009 	mutex_enter(sc->sc_intr_lock);
   1010 	sc->sc_stopping = true;
   1011 	mutex_exit(sc->sc_intr_lock);
   1012 
   1013 	mutex_enter(&sc->sc_txq.t_mtx);
   1014 	sc->sc_txbusy = false;
   1015 	mutex_exit(&sc->sc_txq.t_mtx);
   1016 
   1017 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
   1018 	    AWIN_GMAC_DMA_OPMODE,
   1019 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1020 		AWIN_GMAC_DMA_OPMODE)
   1021 		& ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
   1022 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
   1023 	    AWIN_GMAC_DMA_OPMODE,
   1024 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1025 		AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
   1026 
   1027 	mii_down(&sc->sc_mii);
   1028 	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
   1029 	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
   1030 }
   1031 
   1032 /*
   1033  * Add m0 to the TX ring
   1034  */
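/*
 * Ownership handoff: every descriptor of the chain except the first
 * is given to the device as it is built; the first descriptor's OWN
 * bit is set only after the packet buffer and the descriptors have
 * been synced, so the device never observes a partially built chain.
 */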
   1035 static int
   1036 dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
   1037 {
   1038 	struct dwc_gmac_dev_dmadesc *desc = NULL;
   1039 	struct dwc_gmac_tx_data *data = NULL;
   1040 	bus_dmamap_t map;
   1041 	int error, i, first;
   1042 
   1043 #ifdef DWC_GMAC_DEBUG
   1044 	aprint_normal_dev(sc->sc_dev,
   1045 	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
   1046 #endif
   1047 
   1048 	first = sc->sc_txq.t_cur;
   1049 	map = sc->sc_txq.t_data[first].td_map;
   1050 
   1051 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
   1052 	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   1053 	if (error != 0) {
   1054 		aprint_error_dev(sc->sc_dev, "could not map mbuf "
   1055 		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
   1056 		return error;
   1057 	}
   1058 
   1059 	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
   1060 		bus_dmamap_unload(sc->sc_dmat, map);
   1061 		return ENOBUFS;
   1062 	}
   1063 
   1064 	for (i = 0; i < map->dm_nsegs; i++) {
   1065 		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
   1066 		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
   1067 
   1068 		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
   1069 
   1070 #ifdef DWC_GMAC_DEBUG
   1071 		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
   1072 		    "len %lu\n", sc->sc_txq.t_cur,
   1073 		    (unsigned long)map->dm_segs[i].ds_addr,
   1074 		    (unsigned long)map->dm_segs[i].ds_len);
   1075 #endif
   1076 
   1077 		sc->sc_descm->tx_init_flags(desc);
   1078 		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);
   1079 
   1080 		if (i == 0)
   1081 			sc->sc_descm->tx_set_first_frag(desc);
   1082 
   1083 		/*
   1084 		 * Defer passing ownership of the first descriptor
   1085 		 * until we are done.
   1086 		 */
   1087 		if (i != 0)
   1088 			sc->sc_descm->tx_set_owned_by_dev(desc);
   1089 
   1090 		sc->sc_txq.t_queued++;
   1091 		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
   1092 	}
   1093 
   1094 	sc->sc_descm->tx_set_last_frag(desc);
   1095 
   1096 	data->td_m = m0;
   1097 	data->td_active = map;
   1098 
   1099 	/* sync the packet buffer */
   1100 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
   1101 	    BUS_DMASYNC_PREWRITE);
   1102 
   1103 	/* sync the new descriptors - ownership not transferred yet */
   1104 	dwc_gmac_txdesc_sync(sc, first, sc->sc_txq.t_cur,
   1105 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1106 
   1107 	/* Pass first to device */
   1108 	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);
   1109 
   1110 	return 0;
   1111 }
   1112 
   1113 /*
   1114  * If the interface is up and running, only modify the receive
   1115  * filter when setting promiscuous or debug mode.  Otherwise
   1116  * return ENETRESET, and ether_ioctl() will reset the chip.
   1117  */
   1118 static int
   1119 dwc_gmac_ifflags_cb(struct ethercom *ec)
   1120 {
   1121 	struct ifnet * const ifp = &ec->ec_if;
   1122 	struct dwc_gmac_softc * const sc = ifp->if_softc;
   1123 	int ret = 0;
   1124 
   1125 	KASSERT(IFNET_LOCKED(ifp));
   1126 	mutex_enter(sc->sc_mcast_lock);
   1127 
   1128 	u_short change = ifp->if_flags ^ sc->sc_if_flags;
   1129 	sc->sc_if_flags = ifp->if_flags;
   1130 
   1131 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   1132 		ret = ENETRESET;
   1133 	} else if ((change & IFF_PROMISC) != 0) {
   1134 		dwc_gmac_setmulti(sc);
   1135 	}
   1136 
   1137 	mutex_exit(sc->sc_mcast_lock);
   1138 
   1139 	return ret;
   1140 }
   1141 
   1142 static int
   1143 dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   1144 {
   1145 	struct dwc_gmac_softc * const sc = ifp->if_softc;
   1146 	int error = 0;
   1147 
   1148 	switch (cmd) {
   1149 	case SIOCADDMULTI:
   1150 	case SIOCDELMULTI:
   1151 		break;
   1152 	default:
   1153 		KASSERT(IFNET_LOCKED(ifp));
   1154 	}
   1155 
   1156 	const int s = splnet();
   1157 	error = ether_ioctl(ifp, cmd, data);
   1158 	splx(s);
   1159 
   1160 	if (error == ENETRESET) {
   1161 		error = 0;
   1162 		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   1163 			mutex_enter(sc->sc_mcast_lock);
   1164 			if (sc->sc_if_flags & IFF_RUNNING) {
   1165 				/*
   1166 				 * Multicast list has changed; set the hardware
   1167 				 * filter accordingly.
   1168 				 */
   1169 				dwc_gmac_setmulti(sc);
   1170 			}
   1171 			mutex_exit(sc->sc_mcast_lock);
   1172 		}
   1173 	}
   1174 
   1175 	return error;
   1176 }
   1177 
   1178 static void
   1179 dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
   1180 {
   1181 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
   1182 	struct dwc_gmac_tx_data *data;
   1183 	struct dwc_gmac_dev_dmadesc *desc;
   1184 	int i, nsegs;
   1185 
   1186 	mutex_enter(&sc->sc_txq.t_mtx);
   1187 
   1188 	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
   1189 #ifdef DWC_GMAC_DEBUG
   1190 		aprint_normal_dev(sc->sc_dev,
   1191 		    "%s: checking desc #%d (t_queued: %d)\n", __func__,
   1192 		    i, sc->sc_txq.t_queued);
   1193 #endif
   1194 
   1195 		/*
   1196 		 * i + 1 does not need to be a valid descriptor index;
   1197 		 * it is merely how dwc_gmac_txdesc_sync() is told to
   1198 		 * sync the single TX descriptor (i).
   1199 		 */
   1200 		dwc_gmac_txdesc_sync(sc, i, i + 1,
   1201 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1202 
   1203 		desc = &sc->sc_txq.t_desc[i];
   1204 		if (sc->sc_descm->tx_is_owned_by_dev(desc))
   1205 			break;
   1206 
   1207 		data = &sc->sc_txq.t_data[i];
   1208 		if (data->td_m == NULL)
   1209 			continue;
   1210 
   1211 		if_statinc(ifp, if_opackets);
   1212 		nsegs = data->td_active->dm_nsegs;
   1213 		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
   1214 		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1215 		bus_dmamap_unload(sc->sc_dmat, data->td_active);
   1216 
   1217 #ifdef DWC_GMAC_DEBUG
   1218 		aprint_normal_dev(sc->sc_dev,
   1219 		    "%s: done with packet at desc #%d, freeing mbuf %p\n",
   1220 		    __func__, i, data->td_m);
   1221 #endif
   1222 
   1223 		m_freem(data->td_m);
   1224 		data->td_m = NULL;
   1225 
   1226 		sc->sc_txq.t_queued -= nsegs;
   1227 	}
   1228 
   1229 	sc->sc_txq.t_next = i;
   1230 
   1231 	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
   1232 		sc->sc_txbusy = false;
   1233 	}
   1234 	mutex_exit(&sc->sc_txq.t_mtx);
   1235 }
   1236 
   1237 static void
   1238 dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
   1239 {
   1240 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
   1241 	struct dwc_gmac_dev_dmadesc *desc;
   1242 	struct dwc_gmac_rx_data *data;
   1243 	bus_addr_t physaddr;
   1244 	struct mbuf *m, *mnew;
   1245 	int i, len, error;
   1246 
   1247 	mutex_enter(&sc->sc_rxq.r_mtx);
   1248 	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
   1249 #ifdef DWC_GMAC_DEBUG
   1250 		aprint_normal_dev(sc->sc_dev, "%s: checking desc #%d\n",
   1251 		    __func__, i);
   1252 #endif
   1253 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
   1254 		    RX_DESC_OFFSET(i), sizeof(*desc),
   1255 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1256 		desc = &sc->sc_rxq.r_desc[i];
   1257 		data = &sc->sc_rxq.r_data[i];
   1258 
   1259 		if (sc->sc_descm->rx_is_owned_by_dev(desc))
   1260 			break;
   1261 
   1262 		if (sc->sc_descm->rx_has_error(desc)) {
   1263 #ifdef DWC_GMAC_DEBUG
   1264 			aprint_normal_dev(sc->sc_dev,
   1265 			    "%s: RX error: status %08x, skipping\n",
   1266 			    __func__, le32toh(desc->ddesc_status0));
   1267 #endif
   1268 			if_statinc(ifp, if_ierrors);
   1269 			goto skip;
   1270 		}
   1271 
   1272 		len = sc->sc_descm->rx_get_len(desc);
   1273 
   1274 #ifdef DWC_GMAC_DEBUG
   1275 		aprint_normal_dev(sc->sc_dev,
   1276 		    "%s: device is done with descriptor #%d, len: %d\n",
   1277 		    __func__, i, len);
   1278 #endif
   1279 
   1280 		/*
   1281 		 * Try to get a new mbuf before passing this one
   1282 		 * up; if that fails, drop the packet and reuse
   1283 		 * the existing mbuf.
   1284 		 */
   1285 		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
   1286 		if (mnew == NULL) {
   1287 			if_statinc(ifp, if_ierrors);
   1288 			goto skip;
   1289 		}
   1290 		MCLGET(mnew, M_DONTWAIT);
   1291 		if ((mnew->m_flags & M_EXT) == 0) {
   1292 			m_freem(mnew);
   1293 			if_statinc(ifp, if_ierrors);
   1294 			goto skip;
   1295 		}
   1296 		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
   1297 		m_adj(mnew, ETHER_ALIGN);
   1298 		if (mnew->m_len > AWGE_MAX_PACKET) {
   1299 			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
   1300 		}
   1301 
   1302 		/* unload old DMA map */
   1303 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
   1304 		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1305 		bus_dmamap_unload(sc->sc_dmat, data->rd_map);
   1306 
   1307 		/* and reload with new mbuf */
   1308 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
   1309 		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
   1310 		if (error != 0) {
   1311 			m_freem(mnew);
   1312 			/* try to reload old mbuf */
   1313 			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
   1314 			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
   1315 			if (error != 0) {
   1316 				panic("%s: could not load old rx mbuf",
   1317 				    device_xname(sc->sc_dev));
   1318 			}
   1319 			if_statinc(ifp, if_ierrors);
   1320 			goto skip;
   1321 		}
   1322 		physaddr = data->rd_map->dm_segs[0].ds_addr;
   1323 
   1324 #ifdef DWC_GMAC_DEBUG
   1325 		aprint_normal_dev(sc->sc_dev,
   1326 		    "%s: receiving packet at desc #%d, using mbuf %p\n",
   1327 		    __func__, i, data->rd_m);
   1328 #endif
   1329 		/*
   1330 		 * New mbuf loaded, update RX ring and continue
   1331 		 */
   1332 		m = data->rd_m;
   1333 		data->rd_m = mnew;
   1334 		desc->ddesc_data = htole32(physaddr);
   1335 
   1336 		/* finalize mbuf */
   1337 		m->m_pkthdr.len = m->m_len = len;
   1338 		m_set_rcvif(m, ifp);
   1339 		m->m_flags |= M_HASFCS;
   1340 
   1341 		if_percpuq_enqueue(sc->sc_ipq, m);
   1342 
   1343 skip:
   1344 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
   1345 		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
   1346 
   1347 		sc->sc_descm->rx_init_flags(desc);
   1348 		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
   1349 
   1350 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
   1351 		    RX_DESC_OFFSET(i), sizeof(*desc),
   1352 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1353 
   1354 		sc->sc_descm->rx_set_owned_by_dev(desc);
   1355 
   1356 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
   1357 		    RX_DESC_OFFSET(i), sizeof(*desc),
   1358 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1359 	}
   1360 
   1361 	/* update RX pointer */
   1362 	sc->sc_rxq.r_cur = i;
   1363 
   1364 	mutex_exit(&sc->sc_rxq.r_mtx);
   1365 }
   1366 
   1367 static void
   1368 dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
   1369 {
   1370 	struct ether_multi *enm;
   1371 	struct ether_multistep step;
   1372 	struct ethercom *ec = &sc->sc_ec;
   1373 	uint32_t hashes[2] = { 0, 0 };
   1374 	uint32_t ffilt, h;
   1375 	int mcnt;
   1376 
   1377 	KASSERT(mutex_owned(sc->sc_mcast_lock));
   1378 
   1379 	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
   1380 
   1381 	if (sc->sc_if_flags & IFF_PROMISC) {
   1382 		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
   1383 		goto special_filter;
   1384 	}
   1385 
   1386 	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);
   1387 
   1388 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
   1389 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);
   1390 
   1391 	ETHER_LOCK(ec);
   1392 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   1393 	ETHER_FIRST_MULTI(step, ec, enm);
   1394 	mcnt = 0;
   1395 	while (enm != NULL) {
   1396 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   1397 		    ETHER_ADDR_LEN) != 0) {
   1398 			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
   1399 			ec->ec_flags |= ETHER_F_ALLMULTI;
   1400 			ETHER_UNLOCK(ec);
   1401 			goto special_filter;
   1402 		}
   1403 
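		/*
		 * The upper 6 bits of the inverted CRC index a 64-bit
		 * hash table: bit 5 of the index selects the HTHIGH or
		 * HTLOW register, the low 5 bits select the bit in it.
		 */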
   1404 		h = ~ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
   1405 		hashes[h >> 5] |= (1 << (h & 0x1f));
   1406 
   1407 		mcnt++;
   1408 		ETHER_NEXT_MULTI(step, enm);
   1409 	}
   1410 	ETHER_UNLOCK(ec);
   1411 
   1412 	if (mcnt)
   1413 		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
   1414 	else
   1415 		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;
   1416 
   1417 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
   1418 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
   1419 	    hashes[0]);
   1420 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
   1421 	    hashes[1]);
   1422 
   1423 #ifdef DWC_GMAC_DEBUG
   1424 	dwc_gmac_dump_ffilt(sc, ffilt);
   1425 #endif
   1426 	return;
   1427 
   1428 special_filter:
   1429 #ifdef DWC_GMAC_DEBUG
   1430 	dwc_gmac_dump_ffilt(sc, ffilt);
   1431 #endif
   1432 	/* no MAC hashes, ALLMULTI or PROMISC */
   1433 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
   1434 	    ffilt);
   1435 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
   1436 	    0xffffffff);
   1437 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
   1438 	    0xffffffff);
   1439 }
   1440 
   1441 int
   1442 dwc_gmac_intr(struct dwc_gmac_softc *sc)
   1443 {
   1444 	uint32_t status, dma_status;
   1445 	int rv = 0;
   1446 
   1447 	mutex_enter(sc->sc_intr_lock);
   1448 	if (sc->sc_stopping) {
   1449 		mutex_exit(sc->sc_intr_lock);
   1450 		return 0;
   1451 	}
   1452 
   1453 	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
   1454 	if (status & AWIN_GMAC_MII_IRQ) {
   1455 		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1456 		    AWIN_GMAC_MII_STATUS);
   1457 		rv = 1;
   1458 		mii_pollstat(&sc->sc_mii);
   1459 	}
   1460 
   1461 	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1462 	    AWIN_GMAC_DMA_STATUS);
   1463 
   1464 	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
   1465 		rv = 1;
   1466 
   1467 	if (dma_status & GMAC_DMA_INT_TIE)
   1468 		dwc_gmac_tx_intr(sc);
   1469 
   1470 	if (dma_status & GMAC_DMA_INT_RIE)
   1471 		dwc_gmac_rx_intr(sc);
   1472 
   1473 	/*
   1474 	 * Check error conditions
   1475 	 */
   1476 	if (dma_status & GMAC_DMA_INT_ERRORS) {
   1477 		if_statinc(&sc->sc_ec.ec_if, if_oerrors);
   1478 #ifdef DWC_GMAC_DEBUG
   1479 		dwc_dump_and_abort(sc, "interrupt error condition");
   1480 #endif
   1481 	}
   1482 
   1483 	rnd_add_uint32(&sc->rnd_source, dma_status);
   1484 
   1485 	/* ack interrupt */
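	/* (the DMA status register is write-one-to-clear, so writing
	 *  back the observed bits acknowledges exactly those events) */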
   1486 	if (dma_status)
   1487 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
   1488 		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);
   1489 
   1490 	/*
   1491 	 * Get more packets
   1492 	 */
   1493 	if (rv)
   1494 		if_schedule_deferred_start(&sc->sc_ec.ec_if);
   1495 
   1496 	mutex_exit(sc->sc_intr_lock);
   1497 
   1498 	return rv;
   1499 }
   1500 
   1501 static void
   1502 dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
   1503 {
   1504 
   1505 	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
   1506 }
   1507 
   1508 static int
   1509 dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
   1510 {
   1511 
   1512 	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
   1513 }
   1514 
   1515 static void
   1516 dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
   1517 {
   1518 	uint32_t cntl = le32toh(desc->ddesc_cntl1);
   1519 
   1520 	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
   1521 		__SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
   1522 }
   1523 
   1524 static uint32_t
   1525 dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
   1526 {
   1527 
   1528 	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
   1529 }
   1530 
   1531 static void
   1532 dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
   1533 {
   1534 
   1535 	desc->ddesc_status0 = 0;
   1536 	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
   1537 }
   1538 
   1539 static void
   1540 dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
   1541 {
   1542 	uint32_t cntl = le32toh(desc->ddesc_cntl1);
   1543 
   1544 	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
   1545 }
   1546 
   1547 static void
   1548 dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
   1549 {
   1550 	uint32_t cntl = le32toh(desc->ddesc_cntl1);
   1551 
   1552 	desc->ddesc_cntl1 = htole32(cntl |
   1553 		DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
   1554 }
   1555 
   1556 static void
   1557 dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
   1558 {
   1559 
   1560 	desc->ddesc_status0 = 0;
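	/*
	 * In the standard descriptor layout the RX chain bit sits at
	 * the same position as the TX chain bit, hence the reuse of
	 * DDESC_CNTL_TXCHAIN here.
	 */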
   1561 	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
   1562 }
   1563 
   1564 static int
   1565 dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{
   1566 	return !!(le32toh(desc->ddesc_status0) &
   1567 		(DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
   1568 }
   1569 
static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
		__SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_RDES0_ES | DDESC_RDES0_LE));
}

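/*
 * Debug helpers: dump the DMA registers, both descriptor rings and the
 * frame filter setup.  Only compiled when DWC_GMAC_DEBUG is defined.
 */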
#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, " status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int en.: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, " cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, " cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur txb: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rxb: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc);

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0), AWGE_TX_RING_COUNT * descsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (size_t i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%3zu (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr + i * descsz,
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc);

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    RX_DESC_OFFSET(0), AWGE_RX_RING_COUNT * descsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (size_t i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		char buf[200];

		if (!sc->sc_descm->rx_is_owned_by_dev(desc)) {
			/* decode the receive status bits */
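			/*
			 * snprintb(3) bit descriptions: the leading \177\20
			 * selects the "new" format with base-16 output;
			 * "b" entries name single bits, "f" entries name
			 * multi-bit fields (position, then width).
			 */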
			snprintb(buf, sizeof(buf),
			    "\177\20"
			    "b\x1e"	"daff\0"
			    "f\x10\xe"	"frlen\0"
			    "b\x0f"	"error\0"
			    "b\x0e"	"rxtrunc\0"	/* descriptor error? */
			    "b\x0d"	"saff\0"
			    "b\x0c"	"giantframe\0"	/* length error? */
			    "b\x0b"	"damaged\0"
			    "b\x0a"	"vlan\0"
			    "b\x09"	"first\0"
			    "b\x08"	"last\0"
			    "b\x07"	"giant\0"
			    "b\x06"	"collision\0"
			    "b\x05"	"ether\0"
			    "b\x04"	"watchdog\0"
			    "b\x03"	"miierror\0"
			    "b\x02"	"dribbling\0"
			    "b\x01"	"crc\0"
			    "\0", le32toh(desc->ddesc_status0));
		}

		aprint_normal("#%3zu (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x %s\n",
		    i, sc->sc_rxq.r_physaddr + i * descsz,
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next),
		    sc->sc_descm->rx_is_owned_by_dev(desc) ? "" : buf);
	}
}

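/*
 * Print the raw MAC interrupt status and a decoded version of the
 * DMA status register.
 */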
static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf),
	    "\177\20"
	    "b\x1c"	"GPI\0"
	    "b\x1b"	"GMC\0"
	    "b\x1a"	"GLI\0"
	    "f\x17\x3"	"EB\0"
	    "f\x14\x3"	"TPS\0"
	    "f\x11\x3"	"RPS\0"
	    "b\x10"	"NI\0"
	    "b\x0f"	"AI\0"
	    "b\x0e"	"ER\0"
	    "b\x0d"	"FB\0"
	    "b\x0a"	"ET\0"
	    "b\x09"	"RW\0"
	    "b\x08"	"RS\0"
	    "b\x07"	"RU\0"
	    "b\x06"	"RI\0"
	    "b\x05"	"UN\0"
	    "b\x04"	"OV\0"
	    "b\x03"	"TJ\0"
	    "b\x02"	"TU\0"
	    "b\x01"	"TS\0"
	    "b\x00"	"TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

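/*
 * Dump the complete controller state and panic; called from the
 * interrupt error path above when DWC_GMAC_DEBUG is enabled.
 */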
static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif