/* $NetBSD: dwc_gmac.c,v 1.90 2024/07/14 09:38:41 skrll Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: dwc_gmac.c,v 1.90 2024/07/14 09:38:41 skrll Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
static int dwc_gmac_init(struct ifnet *);
static int dwc_gmac_init_locked(struct ifnet *);
static void dwc_gmac_stop(struct ifnet *, int);
static void dwc_gmac_stop_locked(struct ifnet *, int);
static void dwc_gmac_start(struct ifnet *);
static void dwc_gmac_start_locked(struct ifnet *);
static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

static const struct dwc_gmac_desc_methods desc_methods_standard = {
	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_std_set_len,
	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_std_set_len,
	.rx_get_len = dwc_gmac_desc_std_get_len,
	.rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_enh_set_len,
	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_enh_set_len,
	.rx_get_len = dwc_gmac_desc_enh_get_len,
	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
};
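
/*
 * The core can use two incompatible DMA descriptor layouts, and which
 * one is in use is only known at attach time (see the
 * GMAC_DMA_FEAT_ENHANCED_DESC check in dwc_gmac_attach()).  All
 * descriptor accesses therefore go through one of the two method
 * tables above, selected once into sc->sc_descm, so the rest of the
 * driver stays format-agnostic.  The OWN bit handling is shared:
 * both formats keep it in ddesc_status0.
 */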


#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT + (N)) \
				    * sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N) + 1) & (AWGE_TX_RING_COUNT - 1))

#define RX_DESC_OFFSET(N)	((N) * sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N) + 1) & (AWGE_RX_RING_COUNT - 1))
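
/*
 * A note on the macros above: both rings live in one DMA allocation,
 * AWGE_RX_RING_COUNT RX descriptors followed by AWGE_TX_RING_COUNT TX
 * descriptors, which is why TX_DESC_OFFSET() is biased by the RX ring
 * size.  The "& (COUNT - 1)" wrap in TX_NEXT()/RX_NEXT() is only
 * correct if both ring counts are powers of two.
 */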



#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
				GMAC_DMA_INT_FBE |	\
				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
static void dwc_dump_status(struct dwc_gmac_softc *);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi, ver, hwft;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return ENXIO;	/* not much to clean up; we haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	hwft = 0;
	if (ver >= 0x35) {
		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_HWFEATURES);
		aprint_normal_dev(sc->sc_dev,
		    "HW feature mask: %x\n", hwft);
	}

	if (sizeof(bus_addr_t) > 4) {
		int error = bus_dmatag_subregion(sc->sc_dmat, 0, __MASK(32),
		    &sc->sc_dmat, BUS_DMA_WAITOK);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create DMA subregion\n");
			return ENOMEM;
		}
	}

	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
		aprint_normal_dev(sc->sc_dev,
		    "Using enhanced descriptor format\n");
		sc->sc_descm = &desc_methods_enhanced;
	} else {
		sc->sc_descm = &desc_methods_standard;
	}
	if (hwft & GMAC_DMA_FEAT_RMON) {
		uint32_t val;

		/* Mask all MMC interrupts */
		val = 0xffffffff;
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_RX_INT_MSK, val);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_TX_INT_MSK, val);
	}

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach interface
	 */
	if_initialize(ifp);
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);

	return ENXIO;
}
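/*
 * Soft-reset the DMA engine: set GMAC_BUSMODE_RESET and poll for the
 * bit to self-clear.  With 30000 iterations of delay(10) this allows
 * roughly 300 ms before giving up.
 */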
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t hi, lo;

	hi = enaddr[4] | (enaddr[5] << 8);
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | ((uint32_t)enaddr[3] << 24);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}

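/*
 * MDIO access: a PHY register transaction is started by writing the
 * PHY address, register number, clock divider and the BUSY flag into
 * the MII address register, then polling until the hardware clears
 * BUSY (here up to 1000 * 10 us = 10 ms).  sc_mdio_lock serializes
 * concurrent MDIO users.
 */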
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t rxringsz = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, rxringsz);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		data->rd_m->m_len = data->rd_m->m_pkthdr.len
		    = data->rd_m->m_ext.ext_size;
		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
			data->rd_m->m_len = data->rd_m->m_pkthdr.len
			    = AWGE_MAX_PACKET;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    RX_DESC_OFFSET(0),
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

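/*
 * Allocate one physically contiguous, page-aligned chunk for all
 * AWGE_TOTAL_RING_COUNT descriptors and split it between the RX and
 * TX rings below; a single bus_dmamap (sc_dma_ring_map) then covers
 * both rings.
 */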
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t ringsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, ringsize, 1, ringsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, ringsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    ringsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    ringsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT * sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    * TX_NEXT(i));
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT) - TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0),
		    ops);
	}
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
			AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD |
		    GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE,
	    opmode);
#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting DMA opmode register: %08x\n", opmode);
#endif

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	sc->sc_txbusy = false;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	if (sc->sc_txbusy)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			sc->sc_txbusy = true;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			sc->sc_txbusy = true;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

	ifp->if_flags &= ~IFF_RUNNING;
	sc->sc_txbusy = false;
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	data->td_m = m0;
	data->td_active = map;

	/* sync the packet buffer */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* sync the new descriptors - ownership not transferred yet */
	dwc_gmac_txdesc_sync(sc, first, sc->sc_txq.t_cur,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	u_short change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

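/*
 * TX completion: walk the ring from t_next, reclaiming descriptors the
 * device has handed back (OWN bit clear).  Only the slot holding the
 * last segment of a packet has td_m set; once we reach it, the whole
 * chain is unloaded and freed, and t_queued drops by the segment count.
 */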
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: checking desc #%d (t_queued: %d)\n", __func__,
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i + 1 need not be a valid descriptor index; the
		 * range (i, i + 1] is simply how we ask
		 * dwc_gmac_txdesc_sync() to sync the single TX
		 * descriptor i.
		 */
		dwc_gmac_txdesc_sync(sc, i, i + 1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		if_statinc(ifp, if_opackets);
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: done with packet at desc #%d, freeing mbuf %p\n",
		    __func__, i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_txbusy = false;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

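/*
 * RX completion: for each descriptor the device has handed back, try
 * to allocate and map a fresh mbuf cluster first; only on success is
 * the received buffer passed up the stack.  If allocation or mapping
 * fails, the old mbuf is recycled and the packet dropped, so the ring
 * never loses a buffer.  M_HASFCS tells the stack that the trailing
 * CRC is still attached.
 */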
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "%s: checking desc #%d\n",
		    __func__, i);
#endif
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "%s: RX error: status %08x, skipping\n",
			    __func__, le32toh(desc->ddesc_status0));
#endif
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: device is done with descriptor #%d, len: %d\n",
		    __func__, i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one up;
		 * if that fails, drop the packet and reuse the
		 * existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
		if (mnew->m_len > AWGE_MAX_PACKET) {
			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: receiving packet at desc #%d, using mbuf %p\n",
		    __func__, i, data->rd_m);
#endif
		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ec;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto special_filter;
		}

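		/*
		 * The 64-bit hash filter (HTHIGH:HTLOW) is indexed by
		 * the top 6 bits of the complemented big-endian CRC32
		 * of the MAC address; e.g. h = 0x25 sets bit 5 in
		 * HTHIGH (hashes[1]).
		 */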
		h = ~ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = ifp->if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

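/*
 * Interrupt handler, called from the bus front end.  MAC interrupts
 * (MII/link) are dispatched to mii_pollstat(), DMA interrupts to the
 * TX/RX handlers above.  The DMA status bits are acknowledged by
 * writing them back, and a deferred if_start is scheduled whenever
 * anything of interest happened.
 */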
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		if_statinc(&sc->sc_ec.ec_if, if_oerrors);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	rnd_add_uint32(&sc->rnd_source, dma_status);

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}

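/*
 * Descriptor accessors.  Ownership transfer works the same way in
 * both formats: the OWN bit in ddesc_status0 hands a descriptor to
 * the device, and the device clears it when done, so the driver must
 * sync and re-check the bit before touching a descriptor.
 */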
static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
		__SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl |
		DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}

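/*
 * Enhanced ("alternate") descriptor accessors.  As the code below
 * shows, the layout differs from the standard one mainly in where the
 * TX control bits live: they move from ddesc_cntl1 (TDES1) into
 * ddesc_status0 (TDES0), and the buffer size mask differs, hence the
 * separate method set.
 */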
static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
		__SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_RDES0_ES | DDESC_RDES0_LE));
}

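/*
 * Debug helpers, compiled in only when DWC_GMAC_DEBUG is defined:
 * dump the DMA engine registers, both descriptor rings, the decoded
 * interrupt status and the frame filter setup.
 */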
#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, " status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int en.: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, " cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, " cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur txb: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rxb: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc);

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");

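	/* Complete any pending DMA before the CPU inspects the ring. */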
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0), AWGE_TX_RING_COUNT * descsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (size_t i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%3zu (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr + i * descsz,
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc);

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    RX_DESC_OFFSET(0), AWGE_RX_RING_COUNT * descsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (size_t i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		char buf[200];

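		/*
		 * snprintb(3) new-style bit decoding: "\177\20" selects
		 * the new format with base 16, 'b' entries name single
		 * bits and 'f' entries name multi-bit fields.
		 */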
		if (!sc->sc_descm->rx_is_owned_by_dev(desc)) {
			/* decode the RX descriptor status bits */
			snprintb(buf, sizeof(buf),
			    "\177\20"
			    "b\x1e"	"daff\0"
			    "f\x10\xe"	"frlen\0"
			    "b\x0f"	"error\0"
			    "b\x0e"	"rxtrunc\0"	/* descriptor error? */
			    "b\x0d"	"saff\0"
			    "b\x0c"	"giantframe\0"	/* length error? */
			    "b\x0b"	"damaged\0"
			    "b\x0a"	"vlan\0"
			    "b\x09"	"first\0"
			    "b\x08"	"last\0"
			    "b\x07"	"giant\0"
			    "b\x06"	"collision\0"
			    "b\x05"	"ether\0"
			    "b\x04"	"watchdog\0"
			    "b\x03"	"miierror\0"
			    "b\x02"	"dribbling\0"
			    "b\x01"	"crc\0"
			    "\0", le32toh(desc->ddesc_status0));
		}

		aprint_normal("#%3zu (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x %s\n",
		    i, sc->sc_rxq.r_physaddr + i * descsz,
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next),
		    sc->sc_descm->rx_is_owned_by_dev(desc) ? "" : buf);
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf),
	    "\177\20"
	    "b\x1c"	"GPI\0"
	    "b\x1b"	"GMC\0"
	    "b\x1a"	"GLI\0"
	    "f\x17\x3"	"EB\0"
	    "f\x14\x3"	"TPS\0"
	    "f\x11\x3"	"RPS\0"
	    "b\x10"	"NI\0"
	    "b\x0f"	"AI\0"
	    "b\x0e"	"ER\0"
	    "b\x0d"	"FB\0"
	    "b\x0a"	"ET\0"
	    "b\x09"	"RW\0"
	    "b\x08"	"RS\0"
	    "b\x07"	"RU\0"
	    "b\x06"	"RI\0"
	    "b\x05"	"UN\0"
	    "b\x04"	"OV\0"
	    "b\x03"	"TJ\0"
	    "b\x02"	"TU\0"
	    "b\x01"	"TS\0"
	    "b\x00"	"TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif