      1 /* $NetBSD: dwc_gmac.c,v 1.82 2024/02/27 08:25:38 skrll Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Matt Thomas of 3am Software Foundry and Martin Husemann.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * This driver supports the Synopsys DesignWare GMAC core, as found
     34  * on Allwinner A20 SoCs and others.
     35  *
     36  * Real documentation does not seem to be available; the marketing
     37  * product documents can be found here:
     38  *
     39  *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
     40  */
     41 
     42 #include <sys/cdefs.h>
     43 
     44 __KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.82 2024/02/27 08:25:38 skrll Exp $");
     45 
     46 /* #define	DWC_GMAC_DEBUG	1 */
     47 
     48 #ifdef _KERNEL_OPT
     49 #include "opt_inet.h"
     50 #endif
     51 
     52 #include <sys/param.h>
     53 #include <sys/bus.h>
     54 #include <sys/device.h>
     55 #include <sys/intr.h>
     56 #include <sys/systm.h>
     57 #include <sys/sockio.h>
     58 #include <sys/cprng.h>
     59 #include <sys/rndsource.h>
     60 
     61 #include <net/if.h>
     62 #include <net/if_ether.h>
     63 #include <net/if_media.h>
     64 #include <net/bpf.h>
     65 #ifdef INET
     66 #include <netinet/if_inarp.h>
     67 #endif
     68 
     69 #include <dev/mii/miivar.h>
     70 
     71 #include <dev/ic/dwc_gmac_reg.h>
     72 #include <dev/ic/dwc_gmac_var.h>
     73 
     74 static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
     75 static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
     76 static void dwc_gmac_miibus_statchg(struct ifnet *);
     77 
     78 static int dwc_gmac_reset(struct dwc_gmac_softc *);
     79 static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t[ETHER_ADDR_LEN]);
     80 static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
     81 static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
     82 static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
     83 static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
     84 static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
     85 static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
     86 static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
     87 static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
     88 static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
     89 static int dwc_gmac_init(struct ifnet *);
     90 static int dwc_gmac_init_locked(struct ifnet *);
     91 static void dwc_gmac_stop(struct ifnet *, int);
     92 static void dwc_gmac_stop_locked(struct ifnet *, int);
     93 static void dwc_gmac_start(struct ifnet *);
     94 static void dwc_gmac_start_locked(struct ifnet *);
     95 static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
     96 static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
     97 static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
     98 static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
     99 static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
    100 static int dwc_gmac_ifflags_cb(struct ethercom *);
    101 static uint32_t	bitrev32(uint32_t);
    102 static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
    103 static int  dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
    104 static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
    105 static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
    106 static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
    107 static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
    108 static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
    109 static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
    110 static int  dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
    111 static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
    112 static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
    113 static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
    114 static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
    115 static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
    116 static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
    117 static int  dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);
    118 
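        /*
         * Two descriptor method tables are provided.  In the standard
         * layout the chaining and first/last-segment bits live in
         * ddesc_cntl1; the enhanced layout moves the TX bits into
         * ddesc_status0 (TDES0) while enhanced RX keeps its chain bit
         * in ddesc_cntl1 (RDES1).  The OWN bit is in ddesc_status0 in
         * both layouts, so the ownership helpers are shared.
         */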
    119 static const struct dwc_gmac_desc_methods desc_methods_standard = {
    120 	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
    121 	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
    122 	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
    123 	.tx_set_len = dwc_gmac_desc_std_set_len,
    124 	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
    125 	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
    126 	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
    127 	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
    128 	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
    129 	.rx_set_len = dwc_gmac_desc_std_set_len,
    130 	.rx_get_len = dwc_gmac_desc_std_get_len,
    131 	.rx_has_error = dwc_gmac_desc_std_rx_has_error
    132 };
    133 
    134 static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
    135 	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
    136 	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
    137 	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
    138 	.tx_set_len = dwc_gmac_desc_enh_set_len,
    139 	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
    140 	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
    141 	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
    142 	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
    143 	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
    144 	.rx_set_len = dwc_gmac_desc_enh_set_len,
    145 	.rx_get_len = dwc_gmac_desc_enh_get_len,
    146 	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
    147 };
    148 
    149 
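        /*
         * All descriptors live in one shared DMA area: the first
         * AWGE_RX_RING_COUNT descriptors form the RX ring and the TX
         * ring follows (see dwc_gmac_alloc_dma_rings()).  The _NEXT()
         * macros assume both ring counts are powers of two.
         */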
    150 #define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT + (N)) \
    151 				    * sizeof(struct dwc_gmac_dev_dmadesc))
    152 #define	TX_NEXT(N)		(((N) + 1) & (AWGE_TX_RING_COUNT - 1))
    153 
    154 #define RX_DESC_OFFSET(N)	((N) * sizeof(struct dwc_gmac_dev_dmadesc))
    155 #define	RX_NEXT(N)		(((N) + 1) & (AWGE_RX_RING_COUNT - 1))
    156 
    157 
    158 
    159 #define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
    160 				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
    161 				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)
    162 
    163 #define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
    164 				GMAC_DMA_INT_FBE |	\
    165 				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
    166 				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
    167 				GMAC_DMA_INT_TJE)
    168 
    169 #define	AWIN_DEF_MAC_INTRMASK	\
    170 	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
    171 	AWIN_GMAC_MAC_INT_LINKCHG)
    172 
    173 #ifdef DWC_GMAC_DEBUG
    174 static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
    175 static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
    176 static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
    177 static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
    178 static void dwc_dump_status(struct dwc_gmac_softc *);
    179 static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
    180 #endif
    181 
    182 int
    183 dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
    184 {
    185 	uint8_t enaddr[ETHER_ADDR_LEN];
    186 	uint32_t maclo, machi, ver, hwft;
    187 	struct mii_data * const mii = &sc->sc_mii;
    188 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
    189 	prop_dictionary_t dict;
    190 
    191 	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
    192 	sc->sc_mii_clk = mii_clk & 7;
    193 
    194 	dict = device_properties(sc->sc_dev);
    195 	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
    196 	if (ea != NULL) {
    197 		/*
    198 		 * If the MAC address is overridden by a device property,
    199 		 * use that.
    200 		 */
    201 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
    202 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
    203 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
    204 	} else {
    205 		/*
    206 		 * If we did not get an externally configured address,
    207 		 * try to read one from the current filter setup
    208 		 * before resetting the chip.
    209 		 */
    210 		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    211 		    AWIN_GMAC_MAC_ADDR0LO);
    212 		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    213 		    AWIN_GMAC_MAC_ADDR0HI);
    214 
    215 		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
    216 			/* address unset (all ones); fake a locally administered one */
    217 			maclo = 0x00f2 | (cprng_strong32() << 16);
    218 			machi = cprng_strong32();
    219 		}
    220 
    221 		enaddr[0] = maclo & 0x0ff;
    222 		enaddr[1] = (maclo >> 8) & 0x0ff;
    223 		enaddr[2] = (maclo >> 16) & 0x0ff;
    224 		enaddr[3] = (maclo >> 24) & 0x0ff;
    225 		enaddr[4] = machi & 0x0ff;
    226 		enaddr[5] = (machi >> 8) & 0x0ff;
    227 	}
    228 
    229 	ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
    230 	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);
    231 
    232 	/*
    233 	 * Init chip and do initial setup
    234 	 */
    235 	if (dwc_gmac_reset(sc) != 0)
    236 		return ENXIO;	/* not much to clean up, haven't attached yet */
    237 	dwc_gmac_write_hwaddr(sc, enaddr);
    238 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
    239 	    ether_sprintf(enaddr));
    240 
    241 	hwft = 0;
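        	/*
        	 * The DMA hardware feature register is only present on
        	 * cores reporting version 0x35 (3.5) or newer.
        	 */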
    242 	if (ver >= 0x35) {
    243 		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    244 		    AWIN_GMAC_DMA_HWFEATURES);
    245 		aprint_normal_dev(sc->sc_dev,
    246 		    "HW feature mask: %x\n", hwft);
    247 	}
    248 	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
    249 		aprint_normal_dev(sc->sc_dev,
    250 		    "Using enhanced descriptor format\n");
    251 		sc->sc_descm = &desc_methods_enhanced;
    252 	} else {
    253 		sc->sc_descm = &desc_methods_standard;
    254 	}
    255 	if (hwft & GMAC_DMA_FEAT_RMON) {
    256 		uint32_t val;
    257 
    258 		/* Mask all MMC interrupts */
    259 		val = 0xffffffff;
    260 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    261 		    GMAC_MMC_RX_INT_MSK, val);
    262 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    263 		    GMAC_MMC_TX_INT_MSK, val);
    264 	}
    265 
    266 	/*
    267 	 * Allocate Tx and Rx rings
    268 	 */
    269 	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
    270 		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
    271 		goto fail;
    272 	}
    273 
    274 	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
    275 		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
    276 		goto fail;
    277 	}
    278 
    279 	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
    280 		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
    281 		goto fail;
    282 	}
    283 
    284 	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
    285 	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
    286 	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
    287 
    288 	/*
    289 	 * Prepare interface data
    290 	 */
    291 	ifp->if_softc = sc;
    292 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
    293 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    294 #ifdef DWCGMAC_MPSAFE
    295 	ifp->if_extflags = IFEF_MPSAFE;
    296 #endif
    297 	ifp->if_ioctl = dwc_gmac_ioctl;
    298 	ifp->if_start = dwc_gmac_start;
    299 	ifp->if_init = dwc_gmac_init;
    300 	ifp->if_stop = dwc_gmac_stop;
    301 	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
    302 	IFQ_SET_READY(&ifp->if_snd);
    303 
    304 	/*
    305 	 * Attach MII subdevices
    306 	 */
    307 	sc->sc_ec.ec_mii = &sc->sc_mii;
    308 	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
    309 	mii->mii_ifp = ifp;
    310 	mii->mii_readreg = dwc_gmac_miibus_read_reg;
    311 	mii->mii_writereg = dwc_gmac_miibus_write_reg;
    312 	mii->mii_statchg = dwc_gmac_miibus_statchg;
    313 	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
    314 	    MIIF_DOPAUSE);
    315 
    316 	if (LIST_EMPTY(&mii->mii_phys)) {
    317 		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
    318 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
    319 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
    320 	} else {
    321 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
    322 	}
    323 
    324 	/*
    325 	 * We can support 802.1Q VLAN-sized frames.
    326 	 */
    327 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    328 
    329 	/*
    330 	 * Ready; attach the interface.
    331 	 */
    333 	if_initialize(ifp);
    334 	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
    335 	if_deferred_start_init(ifp, NULL);
    336 	ether_ifattach(ifp, enaddr);
    337 	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
    338 	if_register(ifp);
    339 	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
    340 	    RND_TYPE_NET, RND_FLAG_DEFAULT);
    341 
    342 	/*
    343 	 * Enable interrupts
    344 	 */
    345 	mutex_enter(sc->sc_lock);
    346 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
    347 	    AWIN_DEF_MAC_INTRMASK);
    348 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
    349 	    GMAC_DEF_DMA_INT_MASK);
    350 	mutex_exit(sc->sc_lock);
    351 
    352 	return 0;
    353 
    354 fail:
    355 	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
    356 	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
    357 	dwc_gmac_free_dma_rings(sc);
    358 	mutex_destroy(&sc->sc_mdio_lock);
    359 
    360 	return ENXIO;
    361 }
    362 
    363 
    364 
    365 static int
    366 dwc_gmac_reset(struct dwc_gmac_softc *sc)
    367 {
    368 	size_t cnt;
    369 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
    370 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
    371 	    | GMAC_BUSMODE_RESET);
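        	/* Wait up to ~300ms (30000 * 10us) for the reset bit to clear. */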
    372 	for (cnt = 0; cnt < 30000; cnt++) {
    373 		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
    374 		    & GMAC_BUSMODE_RESET) == 0)
    375 			return 0;
    376 		delay(10);
    377 	}
    378 
    379 	aprint_error_dev(sc->sc_dev, "reset timed out\n");
    380 	return EIO;
    381 }
    382 
    383 static void
    384 dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    385     uint8_t enaddr[ETHER_ADDR_LEN])
    386 {
    387 	uint32_t hi, lo;
    388 
    389 	hi = enaddr[4] | (enaddr[5] << 8);
    390 	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
    391 	    | ((uint32_t)enaddr[3] << 24);
    392 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
    393 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
    394 }
    395 
    396 static int
    397 dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
    398 {
    399 	struct dwc_gmac_softc * const sc = device_private(self);
    400 	uint16_t mii;
    401 	size_t cnt;
    402 
    403 	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
    404 	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
    405 	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
    406 	    | GMAC_MII_BUSY;
    407 
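        	/*
        	 * Start the transfer with GMAC_MII_BUSY set, then poll (up
        	 * to 1000 * 10us = 10ms) for the core to clear the busy bit
        	 * before picking up the result.
        	 */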
    408 	mutex_enter(&sc->sc_mdio_lock);
    409 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
    410 
    411 	for (cnt = 0; cnt < 1000; cnt++) {
    412 		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    413 		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
    414 			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    415 			    AWIN_GMAC_MAC_MIIDATA);
    416 			break;
    417 		}
    418 		delay(10);
    419 	}
    420 
    421 	mutex_exit(&sc->sc_mdio_lock);
    422 
    423 	if (cnt >= 1000)
    424 		return ETIMEDOUT;
    425 
    426 	return 0;
    427 }
    428 
    429 static int
    430 dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
    431 {
    432 	struct dwc_gmac_softc * const sc = device_private(self);
    433 	uint16_t mii;
    434 	size_t cnt;
    435 
    436 	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
    437 	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
    438 	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
    439 	    | GMAC_MII_BUSY | GMAC_MII_WRITE;
    440 
    441 	mutex_enter(&sc->sc_mdio_lock);
    442 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
    443 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
    444 
    445 	for (cnt = 0; cnt < 1000; cnt++) {
    446 		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    447 		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
    448 			break;
    449 		delay(10);
    450 	}
    451 
    452 	mutex_exit(&sc->sc_mdio_lock);
    453 
    454 	if (cnt >= 1000)
    455 		return ETIMEDOUT;
    456 
    457 	return 0;
    458 }
    459 
    460 static int
    461 dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    462 	struct dwc_gmac_rx_ring *ring)
    463 {
    464 	struct dwc_gmac_rx_data *data;
    465 	bus_addr_t physaddr;
    466 	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
    467 	int error, i, next;
    468 
    469 	ring->r_cur = ring->r_next = 0;
    470 	memset(ring->r_desc, 0, descsize);
    471 
    472 	/*
    473 	 * Pre-allocate Rx buffers and populate Rx ring.
    474 	 */
    475 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
    476 		struct dwc_gmac_dev_dmadesc *desc;
    477 
    478 		data = &sc->sc_rxq.r_data[i];
    479 
    480 		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
    481 		if (data->rd_m == NULL) {
    482 			aprint_error_dev(sc->sc_dev,
    483 			    "could not allocate rx mbuf #%d\n", i);
    484 			error = ENOMEM;
    485 			goto fail;
    486 		}
    487 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
    488 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
    489 		if (error != 0) {
    490 			aprint_error_dev(sc->sc_dev,
    491 			    "could not create DMA map\n");
    492 			data->rd_map = NULL;
    493 			goto fail;
    494 		}
    495 		MCLGET(data->rd_m, M_DONTWAIT);
    496 		if (!(data->rd_m->m_flags & M_EXT)) {
    497 			aprint_error_dev(sc->sc_dev,
    498 			    "could not allocate mbuf cluster #%d\n", i);
    499 			error = ENOMEM;
    500 			goto fail;
    501 		}
    502 		data->rd_m->m_len = data->rd_m->m_pkthdr.len
    503 		    = data->rd_m->m_ext.ext_size;
    504 		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
    505 			data->rd_m->m_len = data->rd_m->m_pkthdr.len
    506 			    = AWGE_MAX_PACKET;
    507 		}
    508 
    509 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
    510 		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
    511 		if (error != 0) {
    512 			aprint_error_dev(sc->sc_dev,
    513 			    "could not load rx buf DMA map #%d\n", i);
    514 			goto fail;
    515 		}
    516 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
    517 		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
    518 		physaddr = data->rd_map->dm_segs[0].ds_addr;
    519 
    520 		desc = &sc->sc_rxq.r_desc[i];
    521 		desc->ddesc_data = htole32(physaddr);
    522 		next = RX_NEXT(i);
    523 		desc->ddesc_next = htole32(ring->r_physaddr
    524 		    + next * sizeof(*desc));
    525 		sc->sc_descm->rx_init_flags(desc);
    526 		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
    527 		sc->sc_descm->rx_set_owned_by_dev(desc);
    528 	}
    529 
    530 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
    531 	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
    532 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
    533 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
    534 	    ring->r_physaddr);
    535 
    536 	return 0;
    537 
    538 fail:
    539 	dwc_gmac_free_rx_ring(sc, ring);
    540 	return error;
    541 }
    542 
    543 static void
    544 dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    545 	struct dwc_gmac_rx_ring *ring)
    546 {
    547 	struct dwc_gmac_dev_dmadesc *desc;
    548 	struct dwc_gmac_rx_data *data;
    549 	int i;
    550 
    551 	mutex_enter(&ring->r_mtx);
    552 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
    553 		desc = &sc->sc_rxq.r_desc[i];
    554 		data = &sc->sc_rxq.r_data[i];
    555 		sc->sc_descm->rx_init_flags(desc);
    556 		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
    557 		sc->sc_descm->rx_set_owned_by_dev(desc);
    558 	}
    559 
    560 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
    561 	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
    562 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    563 
    564 	ring->r_cur = ring->r_next = 0;
    565 	/* reset DMA address to start of ring */
    566 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
    567 	    sc->sc_rxq.r_physaddr);
    568 	mutex_exit(&ring->r_mtx);
    569 }
    570 
    571 static int
    572 dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
    573 {
    574 	const size_t descsize = AWGE_TOTAL_RING_COUNT *
    575 		sizeof(struct dwc_gmac_dev_dmadesc);
    576 	int error, nsegs;
    577 	void *rings;
    578 
    579 	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
    580 	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
    581 	if (error != 0) {
    582 		aprint_error_dev(sc->sc_dev,
    583 		    "could not create desc DMA map\n");
    584 		sc->sc_dma_ring_map = NULL;
    585 		goto fail;
    586 	}
    587 
    588 	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
    589 	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
    590 	if (error != 0) {
    591 		aprint_error_dev(sc->sc_dev,
    592 		    "could not allocate DMA memory\n");
    593 		goto fail;
    594 	}
    595 
    596 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
    597 	    descsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
    598 	if (error != 0) {
    599 		aprint_error_dev(sc->sc_dev,
    600 		    "could not map DMA memory\n");
    601 		goto fail;
    602 	}
    603 
    604 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
    605 	    descsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
    606 	if (error != 0) {
    607 		aprint_error_dev(sc->sc_dev,
    608 		    "could not load desc DMA map\n");
    609 		goto fail;
    610 	}
    611 
    612 	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
    613 	sc->sc_rxq.r_desc = rings;
    614 	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
    615 
    616 	/* and the remaining descriptors to the TX side */
    617 	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
    618 	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
    619 	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc);
    620 
    621 	return 0;
    622 
    623 fail:
    624 	dwc_gmac_free_dma_rings(sc);
    625 	return error;
    626 }
    627 
    628 static void
    629 dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
    630 {
    631 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
    632 	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
    633 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
    634 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
    635 	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
    636 	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
    637 }
    638 
    639 static void
    640 dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
    641 {
    642 	struct dwc_gmac_rx_data *data;
    643 	int i;
    644 
    645 	if (ring->r_desc == NULL)
    646 		return;
    647 
    648 
    649 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
    650 		data = &ring->r_data[i];
    651 
    652 		if (data->rd_map != NULL) {
    653 			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
    654 			    data->rd_map->dm_mapsize,
    655 			    BUS_DMASYNC_POSTREAD);
    657 			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
    658 			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
    659 		}
    660 		if (data->rd_m != NULL)
    661 			m_freem(data->rd_m);
    662 	}
    663 }
    664 
    665 static int
    666 dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    667 	struct dwc_gmac_tx_ring *ring)
    668 {
    669 	int i, error = 0;
    670 
    671 	ring->t_queued = 0;
    672 	ring->t_cur = ring->t_next = 0;
    673 
    674 	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT * sizeof(*ring->t_desc));
    675 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    676 	    TX_DESC_OFFSET(0),
    677 	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
    678 	    BUS_DMASYNC_POSTWRITE);
    679 
    680 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    681 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
    682 		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
    683 		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
    684 		    &ring->t_data[i].td_map);
    685 		if (error != 0) {
    686 			aprint_error_dev(sc->sc_dev,
    687 			    "could not create TX DMA map #%d\n", i);
    688 			ring->t_data[i].td_map = NULL;
    689 			goto fail;
    690 		}
    691 		ring->t_desc[i].ddesc_next = htole32(
    692 		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
    693 		    * TX_NEXT(i));
    694 	}
    695 
    696 	return 0;
    697 
    698 fail:
    699 	dwc_gmac_free_tx_ring(sc, ring);
    700 	return error;
    701 }
    702 
    703 static void
    704 dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
    705 {
    706 	/* 'end' points one descriptor beyond the last one we want to sync */
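        	/*
        	 * E.g. start == AWGE_TX_RING_COUNT - 2 with end == 2 wraps:
        	 * the last two descriptors are synced first, then the first
        	 * two.
        	 */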
    707 	if (end > start) {
    708 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    709 		    TX_DESC_OFFSET(start),
    710 		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(start),
    711 		    ops);
    712 		return;
    713 	}
    714 	/* sync from 'start' to end of ring */
    715 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    716 	    TX_DESC_OFFSET(start),
    717 	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT) - TX_DESC_OFFSET(start),
    718 	    ops);
    719 	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
    720 		/* sync from start of ring to 'end' */
    721 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    722 		    TX_DESC_OFFSET(0),
    723 		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0),
    724 		    ops);
    725 	}
    726 }
    727 
    728 static void
    729 dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    730 	struct dwc_gmac_tx_ring *ring)
    731 {
    732 	int i;
    733 
    734 	mutex_enter(&ring->t_mtx);
    735 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    736 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
    737 
    738 		if (data->td_m != NULL) {
    739 			bus_dmamap_sync(sc->sc_dmat, data->td_active,
    740 			    0, data->td_active->dm_mapsize,
    741 			    BUS_DMASYNC_POSTWRITE);
    742 			bus_dmamap_unload(sc->sc_dmat, data->td_active);
    743 			m_freem(data->td_m);
    744 			data->td_m = NULL;
    745 		}
    746 	}
    747 
    748 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    749 	    TX_DESC_OFFSET(0),
    750 	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
    751 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    752 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
    753 	    sc->sc_txq.t_physaddr);
    754 
    755 	ring->t_queued = 0;
    756 	ring->t_cur = ring->t_next = 0;
    757 	mutex_exit(&ring->t_mtx);
    758 }
    759 
    760 static void
    761 dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    762 	struct dwc_gmac_tx_ring *ring)
    763 {
    764 	int i;
    765 
    766 	/* unload the maps */
    767 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    768 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
    769 
    770 		if (data->td_m != NULL) {
    771 			bus_dmamap_sync(sc->sc_dmat, data->td_active,
    772 			    0, data->td_map->dm_mapsize,
    773 			    BUS_DMASYNC_POSTWRITE);
    774 			bus_dmamap_unload(sc->sc_dmat, data->td_active);
    775 			m_freem(data->td_m);
    776 			data->td_m = NULL;
    777 		}
    778 	}
    779 
    780 	/* and actually free them */
    781 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    782 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
    783 
    784 		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
    785 	}
    786 }
    787 
    788 static void
    789 dwc_gmac_miibus_statchg(struct ifnet *ifp)
    790 {
    791 	struct dwc_gmac_softc * const sc = ifp->if_softc;
    792 	struct mii_data * const mii = &sc->sc_mii;
    793 	uint32_t conf, flow;
    794 
    795 	/*
    796 	 * Set MII or GMII interface based on the speed
    797 	 * negotiated by the PHY.
    798 	 */
    799 	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
    800 	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
    801 	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
    802 	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
    803 	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
    804 	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
    805 	    | AWIN_GMAC_MAC_CONF_RXENABLE
    806 	    | AWIN_GMAC_MAC_CONF_TXENABLE;
    807 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
    808 	case IFM_10_T:
    809 		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
    810 		break;
    811 	case IFM_100_TX:
    812 		conf |= AWIN_GMAC_MAC_CONF_FES100 |
    813 			AWIN_GMAC_MAC_CONF_MIISEL;
    814 		break;
    815 	case IFM_1000_T:
    816 		break;
    817 	}
    818 	if (sc->sc_set_speed)
    819 		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));
    820 
    821 	flow = 0;
    822 	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
    823 		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
    824 		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
    825 	}
    826 	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
    827 		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
    828 	}
    829 	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
    830 		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
    831 	}
    832 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    833 	    AWIN_GMAC_MAC_FLOWCTRL, flow);
    834 
    835 #ifdef DWC_GMAC_DEBUG
    836 	aprint_normal_dev(sc->sc_dev,
    837 	    "setting MAC conf register: %08x\n", conf);
    838 #endif
    839 
    840 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    841 	    AWIN_GMAC_MAC_CONF, conf);
    842 }
    843 
    844 static int
    845 dwc_gmac_init(struct ifnet *ifp)
    846 {
    847 	struct dwc_gmac_softc *sc = ifp->if_softc;
    848 
    849 	mutex_enter(sc->sc_lock);
    850 	int ret = dwc_gmac_init_locked(ifp);
    851 	mutex_exit(sc->sc_lock);
    852 
    853 	return ret;
    854 }
    855 
    856 static int
    857 dwc_gmac_init_locked(struct ifnet *ifp)
    858 {
    859 	struct dwc_gmac_softc *sc = ifp->if_softc;
    860 	uint32_t ffilt;
    861 
    862 	if (ifp->if_flags & IFF_RUNNING)
    863 		return 0;
    864 
    865 	dwc_gmac_stop_locked(ifp, 0);
    866 
    867 	/*
    868 	 * Configure DMA burst/transfer mode and RX/TX priorities.
    869 	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
    870 	 */
    871 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
    872 	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
    873 	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
    874 	    __SHIFTIN(2, GMAC_BUSMODE_PBL));
    875 
    876 	/*
    877 	 * Set up address filter
    878 	 */
    879 	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
    880 	if (ifp->if_flags & IFF_PROMISC) {
    881 		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
    882 	} else {
    883 		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
    884 	}
    885 	if (ifp->if_flags & IFF_BROADCAST) {
    886 		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
    887 	} else {
    888 		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
    889 	}
    890 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
    891 
    892 	/*
    893 	 * Set up multicast filter
    894 	 */
    895 	dwc_gmac_setmulti(sc);
    896 
    897 	/*
    898 	 * Set up dma pointer for RX and TX ring
    899 	 */
    900 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
    901 	    sc->sc_rxq.r_physaddr);
    902 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
    903 	    sc->sc_txq.t_physaddr);
    904 
    905 	/*
    906 	 * Start RX/TX part
    907 	 */
    908 	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
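        	/*
        	 * Prefer store-and-forward mode unless the platform
        	 * attachment requested threshold mode by setting
        	 * DWC_GMAC_FORCE_THRESH_DMA_MODE.
        	 */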
    909 	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
    910 		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
    911 	}
    912 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);
    913 
    914 	sc->sc_stopping = false;
    915 
    916 	ifp->if_flags |= IFF_RUNNING;
    917 	sc->sc_txbusy = false;
    918 
    919 	return 0;
    920 }
    921 
    922 static void
    923 dwc_gmac_start(struct ifnet *ifp)
    924 {
    925 	struct dwc_gmac_softc *sc = ifp->if_softc;
    926 #ifdef DWCGMAC_MPSAFE
    927 	KASSERT(if_is_mpsafe(ifp));
    928 #endif
    929 
    930 	mutex_enter(sc->sc_lock);
    931 	if (!sc->sc_stopping) {
    932 		mutex_enter(&sc->sc_txq.t_mtx);
    933 		dwc_gmac_start_locked(ifp);
    934 		mutex_exit(&sc->sc_txq.t_mtx);
    935 	}
    936 	mutex_exit(sc->sc_lock);
    937 }
    938 
    939 static void
    940 dwc_gmac_start_locked(struct ifnet *ifp)
    941 {
    942 	struct dwc_gmac_softc *sc = ifp->if_softc;
    943 	int old = sc->sc_txq.t_queued;
    944 	int start = sc->sc_txq.t_cur;
    945 	struct mbuf *m0;
    946 
    947 	if ((ifp->if_flags & IFF_RUNNING) == 0)
    948 		return;
    949 	if (sc->sc_txbusy)
    950 		return;
    951 
    952 	for (;;) {
    953 		IFQ_POLL(&ifp->if_snd, m0);
    954 		if (m0 == NULL)
    955 			break;
    956 		if (dwc_gmac_queue(sc, m0) != 0) {
    957 			sc->sc_txbusy = true;
    958 			break;
    959 		}
    960 		IFQ_DEQUEUE(&ifp->if_snd, m0);
    961 		bpf_mtap(ifp, m0, BPF_D_OUT);
    962 		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
    963 			sc->sc_txbusy = true;
    964 			break;
    965 		}
    966 	}
    967 
    968 	if (sc->sc_txq.t_queued != old) {
    969 		/* packets have been queued, kick it off */
    970 		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
    971 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    972 
    973 #ifdef DWC_GMAC_DEBUG
    974 		dwc_dump_status(sc);
    975 #endif
    976 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    977 		    AWIN_GMAC_DMA_TXPOLL, ~0U);
    978 	}
    979 }
    980 
    981 static void
    982 dwc_gmac_stop(struct ifnet *ifp, int disable)
    983 {
    984 	struct dwc_gmac_softc *sc = ifp->if_softc;
    985 
    986 	mutex_enter(sc->sc_lock);
    987 	dwc_gmac_stop_locked(ifp, disable);
    988 	mutex_exit(sc->sc_lock);
    989 }
    990 
    991 static void
    992 dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
    993 {
    994 	struct dwc_gmac_softc *sc = ifp->if_softc;
    995 
    996 	sc->sc_stopping = true;
    997 
    998 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    999 	    AWIN_GMAC_DMA_OPMODE,
   1000 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1001 		AWIN_GMAC_DMA_OPMODE)
   1002 		& ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
   1003 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
   1004 	    AWIN_GMAC_DMA_OPMODE,
   1005 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1006 		AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
   1007 
   1008 	mii_down(&sc->sc_mii);
   1009 	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
   1010 	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
   1011 
   1012 	ifp->if_flags &= ~IFF_RUNNING;
   1013 	sc->sc_txbusy = false;
   1014 }
   1015 
   1016 /*
   1017  * Add m0 to the TX ring
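         *
         * One descriptor is used per DMA segment; ownership of the
         * first descriptor is passed to the device last, so the DMA
         * engine never sees a partially built chain.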
   1018  */
   1019 static int
   1020 dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
   1021 {
   1022 	struct dwc_gmac_dev_dmadesc *desc = NULL;
   1023 	struct dwc_gmac_tx_data *data = NULL;
   1024 	bus_dmamap_t map;
   1025 	int error, i, first;
   1026 
   1027 #ifdef DWC_GMAC_DEBUG
   1028 	aprint_normal_dev(sc->sc_dev,
   1029 	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
   1030 #endif
   1031 
   1032 	first = sc->sc_txq.t_cur;
   1033 	map = sc->sc_txq.t_data[first].td_map;
   1034 
   1035 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
   1036 	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   1037 	if (error != 0) {
   1038 		aprint_error_dev(sc->sc_dev, "could not map mbuf "
   1039 		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
   1040 		return error;
   1041 	}
   1042 
   1043 	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
   1044 		bus_dmamap_unload(sc->sc_dmat, map);
   1045 		return ENOBUFS;
   1046 	}
   1047 
   1048 	for (i = 0; i < map->dm_nsegs; i++) {
   1049 		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
   1050 		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
   1051 
   1052 		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
   1053 
   1054 #ifdef DWC_GMAC_DEBUG
   1055 		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
   1056 		    "len %lu\n", sc->sc_txq.t_cur,
   1057 		    (unsigned long)map->dm_segs[i].ds_addr,
   1058 		    (unsigned long)map->dm_segs[i].ds_len);
   1059 #endif
   1060 
   1061 		sc->sc_descm->tx_init_flags(desc);
   1062 		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);
   1063 
   1064 		if (i == 0)
   1065 			sc->sc_descm->tx_set_first_frag(desc);
   1066 
   1067 		/*
   1068 		 * Defer passing ownership of the first descriptor
   1069 		 * until we are done.
   1070 		 */
   1071 		if (i != 0)
   1072 			sc->sc_descm->tx_set_owned_by_dev(desc);
   1073 
   1074 		sc->sc_txq.t_queued++;
   1075 		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
   1076 	}
   1077 
   1078 	sc->sc_descm->tx_set_last_frag(desc);
   1079 
   1080 	data->td_m = m0;
   1081 	data->td_active = map;
   1082 
   1083 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
   1084 	    BUS_DMASYNC_PREWRITE);
   1085 
   1086 	/* Pass first to device */
   1087 	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);
   1088 
   1089 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
   1090 	    BUS_DMASYNC_PREWRITE);
   1091 
   1092 	return 0;
   1093 }
   1094 
   1095 /*
   1096  * If the interface is up and running, only modify the receive
   1097  * filter when setting promiscuous or debug mode.  Otherwise fall
   1098  * through to ether_ioctl, which will reset the chip.
   1099  */
   1100 static int
   1101 dwc_gmac_ifflags_cb(struct ethercom *ec)
   1102 {
   1103 	struct ifnet *ifp = &ec->ec_if;
   1104 	struct dwc_gmac_softc *sc = ifp->if_softc;
   1105 	int ret = 0;
   1106 
   1107 	mutex_enter(sc->sc_lock);
   1108 	u_short change = ifp->if_flags ^ sc->sc_if_flags;
   1109 	sc->sc_if_flags = ifp->if_flags;
   1110 
   1111 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   1112 		ret = ENETRESET;
   1113 		goto out;
   1114 	}
   1115 	if ((change & IFF_PROMISC) != 0) {
   1116 		dwc_gmac_setmulti(sc);
   1117 	}
   1118 out:
   1119 	mutex_exit(sc->sc_lock);
   1120 
   1121 	return ret;
   1122 }
   1123 
   1124 static int
   1125 dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   1126 {
   1127 	struct dwc_gmac_softc *sc = ifp->if_softc;
   1128 	int error = 0;
   1129 
   1130 	int s = splnet();
   1131 	error = ether_ioctl(ifp, cmd, data);
   1132 
   1133 #ifdef DWCGMAC_MPSAFE
   1134 	splx(s);
   1135 #endif
   1136 
   1137 	if (error == ENETRESET) {
   1138 		error = 0;
   1139 		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   1140 			;
   1141 		else if (ifp->if_flags & IFF_RUNNING) {
   1142 			/*
   1143 			 * Multicast list has changed; set the hardware filter
   1144 			 * accordingly.
   1145 			 */
   1146 			mutex_enter(sc->sc_lock);
   1147 			dwc_gmac_setmulti(sc);
   1148 			mutex_exit(sc->sc_lock);
   1149 		}
   1150 	}
   1151 
   1152 	/* Try to get things going again */
   1153 	if (ifp->if_flags & IFF_UP)
   1154 		dwc_gmac_start(ifp);
   1155 	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
   1156 
   1157 #ifndef DWCGMAC_MPSAFE
   1158 	splx(s);
   1159 #endif
   1160 
   1161 	return error;
   1162 }
   1163 
   1164 static void
   1165 dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
   1166 {
   1167 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1168 	struct dwc_gmac_tx_data *data;
   1169 	struct dwc_gmac_dev_dmadesc *desc;
   1170 	int i, nsegs;
   1171 
   1172 	mutex_enter(&sc->sc_txq.t_mtx);
   1173 
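        	/*
        	 * Reclaim descriptors the device is done with, stopping at
        	 * the first one still owned by the DMA engine.
        	 */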
   1174 	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
   1175 #ifdef DWC_GMAC_DEBUG
   1176 		aprint_normal_dev(sc->sc_dev,
   1177 		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
   1178 		    i, sc->sc_txq.t_queued);
   1179 #endif
   1180 
   1181 		/*
   1182 		 * i + 1 need not be a valid descriptor index; it is
   1183 		 * just a convention used here to sync a single TX
   1184 		 * descriptor (i).
   1185 		 */
   1186 		dwc_gmac_txdesc_sync(sc, i, i + 1,
   1187 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1188 
   1189 		desc = &sc->sc_txq.t_desc[i];
   1190 		if (sc->sc_descm->tx_is_owned_by_dev(desc))
   1191 			break;
   1192 
   1193 		data = &sc->sc_txq.t_data[i];
   1194 		if (data->td_m == NULL)
   1195 			continue;
   1196 
   1197 		if_statinc(ifp, if_opackets);
   1198 		nsegs = data->td_active->dm_nsegs;
   1199 		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
   1200 		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1201 		bus_dmamap_unload(sc->sc_dmat, data->td_active);
   1202 
   1203 #ifdef DWC_GMAC_DEBUG
   1204 		aprint_normal_dev(sc->sc_dev,
   1205 		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
   1206 		    "freeing mbuf %p\n", i, data->td_m);
   1207 #endif
   1208 
   1209 		m_freem(data->td_m);
   1210 		data->td_m = NULL;
   1211 
   1212 		sc->sc_txq.t_queued -= nsegs;
   1213 	}
   1214 
   1215 	sc->sc_txq.t_next = i;
   1216 
   1217 	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
   1218 		sc->sc_txbusy = false;
   1219 	}
   1220 	mutex_exit(&sc->sc_txq.t_mtx);
   1221 }
   1222 
   1223 static void
   1224 dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
   1225 {
   1226 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1227 	struct dwc_gmac_dev_dmadesc *desc;
   1228 	struct dwc_gmac_rx_data *data;
   1229 	bus_addr_t physaddr;
   1230 	struct mbuf *m, *mnew;
   1231 	int i, len, error;
   1232 
   1233 	mutex_enter(&sc->sc_rxq.r_mtx);
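        	/*
        	 * Walk the ring from the last processed slot and stop at
        	 * the first descriptor still owned by the device.
        	 */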
   1234 	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
   1235 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
   1236 		    RX_DESC_OFFSET(i), sizeof(*desc),
   1237 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1238 		desc = &sc->sc_rxq.r_desc[i];
   1239 		data = &sc->sc_rxq.r_data[i];
   1240 
   1241 		if (sc->sc_descm->rx_is_owned_by_dev(desc))
   1242 			break;
   1243 
   1244 		if (sc->sc_descm->rx_has_error(desc)) {
   1245 #ifdef DWC_GMAC_DEBUG
   1246 			aprint_normal_dev(sc->sc_dev,
   1247 			    "RX error: descriptor status %08x, skipping\n",
   1248 			    le32toh(desc->ddesc_status0));
   1249 #endif
   1250 			if_statinc(ifp, if_ierrors);
   1251 			goto skip;
   1252 		}
   1253 
   1254 		len = sc->sc_descm->rx_get_len(desc);
   1255 
   1256 #ifdef DWC_GMAC_DEBUG
   1257 		aprint_normal_dev(sc->sc_dev,
   1258 		    "rx int: device is done with descriptor #%d, len: %d\n",
   1259 		    i, len);
   1260 #endif
   1261 
   1262 		/*
   1263 		 * Try to get a new mbuf before passing this one
   1264 		 * up; if that fails, drop the packet and reuse
   1265 		 * the existing one.
   1266 		 */
   1267 		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
   1268 		if (mnew == NULL) {
   1269 			if_statinc(ifp, if_ierrors);
   1270 			goto skip;
   1271 		}
   1272 		MCLGET(mnew, M_DONTWAIT);
   1273 		if ((mnew->m_flags & M_EXT) == 0) {
   1274 			m_freem(mnew);
   1275 			if_statinc(ifp, if_ierrors);
   1276 			goto skip;
   1277 		}
   1278 		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
   1279 		if (mnew->m_len > AWGE_MAX_PACKET) {
   1280 			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
   1281 		}
   1282 
   1283 		/* unload old DMA map */
   1284 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
   1285 		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1286 		bus_dmamap_unload(sc->sc_dmat, data->rd_map);
   1287 
   1288 		/* and reload with new mbuf */
   1289 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
   1290 		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
   1291 		if (error != 0) {
   1292 			m_freem(mnew);
   1293 			/* try to reload old mbuf */
   1294 			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
   1295 			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
   1296 			if (error != 0) {
   1297 				panic("%s: could not load old rx mbuf",
   1298 				    device_xname(sc->sc_dev));
   1299 			}
   1300 			if_statinc(ifp, if_ierrors);
   1301 			goto skip;
   1302 		}
   1303 		physaddr = data->rd_map->dm_segs[0].ds_addr;
   1304 
   1305 		/*
   1306 		 * New mbuf loaded, update RX ring and continue
   1307 		 */
   1308 		m = data->rd_m;
   1309 		data->rd_m = mnew;
   1310 		desc->ddesc_data = htole32(physaddr);
   1311 
   1312 		/* finalize mbuf */
   1313 		m->m_pkthdr.len = m->m_len = len;
   1314 		m_set_rcvif(m, ifp);
   1315 		m->m_flags |= M_HASFCS;
   1316 
   1317 		if_percpuq_enqueue(sc->sc_ipq, m);
   1318 
   1319 skip:
   1320 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
   1321 		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
   1322 
   1323 		sc->sc_descm->rx_init_flags(desc);
   1324 		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
   1325 		sc->sc_descm->rx_set_owned_by_dev(desc);
   1326 
   1327 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
   1328 		    RX_DESC_OFFSET(i), sizeof(*desc),
   1329 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1330 	}
   1331 
   1332 	/* update RX pointer */
   1333 	sc->sc_rxq.r_cur = i;
   1334 
   1335 	mutex_exit(&sc->sc_rxq.r_mtx);
   1336 }
   1337 
   1338 /*
   1339  * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
   1340  */
   1341 static uint32_t
   1342 bitrev32(uint32_t x)
   1343 {
   1344 	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
   1345 	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
   1346 	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
   1347 	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
   1348 
   1349 	return (x >> 16) | (x << 16);
   1350 }
   1351 
   1352 static void
   1353 dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
   1354 {
   1355 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
   1356 	struct ether_multi *enm;
   1357 	struct ether_multistep step;
   1358 	struct ethercom *ec = &sc->sc_ec;
   1359 	uint32_t hashes[2] = { 0, 0 };
   1360 	uint32_t ffilt, h;
   1361 	int mcnt;
   1362 
   1363 	KASSERT(mutex_owned(sc->sc_lock));
   1364 
   1365 	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
   1366 
   1367 	if (ifp->if_flags & IFF_PROMISC) {
   1368 		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
   1369 		goto special_filter;
   1370 	}
   1371 
   1372 	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);
   1373 
   1374 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
   1375 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);
   1376 
   1377 	ETHER_LOCK(ec);
   1378 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   1379 	ETHER_FIRST_MULTI(step, ec, enm);
   1380 	mcnt = 0;
   1381 	while (enm != NULL) {
   1382 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   1383 		    ETHER_ADDR_LEN) != 0) {
   1384 			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
   1385 			ec->ec_flags |= ETHER_F_ALLMULTI;
   1386 			ETHER_UNLOCK(ec);
   1387 			goto special_filter;
   1388 		}
   1389 
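        		/*
        		 * The hash filter uses the upper 6 bits of the
        		 * bit-reversed CRC32 of the address: bit 5 selects
        		 * HTHIGH vs HTLOW, the lower 5 bits the bit within.
        		 */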
   1390 		h = bitrev32(
   1391 			~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
   1392 		    ) >> 26;
   1393 		hashes[h >> 5] |= (1 << (h & 0x1f));
   1394 
   1395 		mcnt++;
   1396 		ETHER_NEXT_MULTI(step, enm);
   1397 	}
   1398 	ETHER_UNLOCK(ec);
   1399 
   1400 	if (mcnt)
   1401 		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
   1402 	else
   1403 		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;
   1404 
   1405 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
   1406 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
   1407 	    hashes[0]);
   1408 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
   1409 	    hashes[1]);
   1410 	sc->sc_if_flags = ifp->if_flags;
   1411 
   1412 #ifdef DWC_GMAC_DEBUG
   1413 	dwc_gmac_dump_ffilt(sc, ffilt);
   1414 #endif
   1415 	return;
   1416 
   1417 special_filter:
   1418 #ifdef DWC_GMAC_DEBUG
   1419 	dwc_gmac_dump_ffilt(sc, ffilt);
   1420 #endif
   1421 	/* no MAC hashes, ALLMULTI or PROMISC */
   1422 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
   1423 	    ffilt);
   1424 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
   1425 	    0xffffffff);
   1426 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
   1427 	    0xffffffff);
   1428 	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
   1429 }
   1430 
   1431 int
   1432 dwc_gmac_intr(struct dwc_gmac_softc *sc)
   1433 {
   1434 	uint32_t status, dma_status;
   1435 	int rv = 0;
   1436 
   1437 	if (sc->sc_stopping)
   1438 		return 0;
   1439 
   1440 	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
   1441 	if (status & AWIN_GMAC_MII_IRQ) {
   1442 		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1443 		    AWIN_GMAC_MII_STATUS);
   1444 		rv = 1;
   1445 		mii_pollstat(&sc->sc_mii);
   1446 	}
   1447 
   1448 	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1449 	    AWIN_GMAC_DMA_STATUS);
   1450 
   1451 	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
   1452 		rv = 1;
   1453 
   1454 	if (dma_status & GMAC_DMA_INT_TIE)
   1455 		dwc_gmac_tx_intr(sc);
   1456 
   1457 	if (dma_status & GMAC_DMA_INT_RIE)
   1458 		dwc_gmac_rx_intr(sc);
   1459 
   1460 	/*
   1461 	 * Check error conditions
   1462 	 */
   1463 	if (dma_status & GMAC_DMA_INT_ERRORS) {
   1464 		if_statinc(&sc->sc_ec.ec_if, if_oerrors);
   1465 #ifdef DWC_GMAC_DEBUG
   1466 		dwc_dump_and_abort(sc, "interrupt error condition");
   1467 #endif
   1468 	}
   1469 
   1470 	rnd_add_uint32(&sc->rnd_source, dma_status);
   1471 
   1472 	/* ack interrupt */
   1473 	if (dma_status)
   1474 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
   1475 		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);
   1476 
   1477 	/*
   1478 	 * Get more packets
   1479 	 */
   1480 	if (rv)
   1481 		if_schedule_deferred_start(&sc->sc_ec.ec_if);
   1482 
   1483 	return rv;
   1484 }
   1485 
   1486 static void
   1487 dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
   1488 {
   1489 
   1490 	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
   1491 }
   1492 
   1493 static int
   1494 dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
   1495 {
   1496 
   1497 	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
   1498 }
   1499 
   1500 static void
   1501 dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
   1502 {
   1503 	uint32_t cntl = le32toh(desc->ddesc_cntl1);
   1504 
   1505 	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
   1506 		__SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
   1507 }
   1508 
   1509 static uint32_t
   1510 dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
   1511 {
   1512 
   1513 	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
   1514 }
   1515 
   1516 static void
   1517 dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
   1518 {
   1519 
   1520 	desc->ddesc_status0 = 0;
   1521 	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
   1522 }
   1523 
   1524 static void
   1525 dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
   1526 {
   1527 	uint32_t cntl = le32toh(desc->ddesc_cntl1);
   1528 
   1529 	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
   1530 }
   1531 
   1532 static void
   1533 dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
   1534 {
   1535 	uint32_t cntl = le32toh(desc->ddesc_cntl1);
   1536 
   1537 	desc->ddesc_cntl1 = htole32(cntl |
   1538 		DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
   1539 }
   1540 
   1541 static void
   1542 dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
   1543 {
   1544 
   1545 	desc->ddesc_status0 = 0;
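        	/*
        	 * The chained-descriptor bit occupies the same position in
        	 * RX and TX descriptors of the standard format, hence the
        	 * TX define.
        	 */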
   1546 	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
   1547 }
   1548 
   1549 static int
   1550 dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
        {
   1551 	return !!(le32toh(desc->ddesc_status0) &
   1552 		(DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
   1553 }
   1554 
   1555 static void
   1556 dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
   1557 {
   1558 	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);
   1559 
   1560 	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
   1561 		__SHIFTIN(len, DDESC_DES1_SIZE1MASK));
   1562 }
   1563 
   1564 static uint32_t
   1565 dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
   1566 {
   1567 
   1568 	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
   1569 }
   1570 
   1571 static void
   1572 dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
   1573 {
   1574 
   1575 	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
   1576 	desc->ddesc_cntl1 = 0;
   1577 }
   1578 
   1579 static void
   1580 dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
   1581 {
   1582 	uint32_t tdes0 = le32toh(desc->ddesc_status0);
   1583 
   1584 	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
   1585 }
   1586 
   1587 static void
   1588 dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
   1589 {
   1590 	uint32_t tdes0 = le32toh(desc->ddesc_status0);
   1591 
   1592 	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
   1593 }
   1594 
   1595 static void
   1596 dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
   1597 {
   1598 
   1599 	desc->ddesc_status0 = 0;
   1600 	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
   1601 }
   1602 
   1603 static int
   1604 dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
   1605 {
   1606 
   1607 	return !!(le32toh(desc->ddesc_status0) &
   1608 		(DDESC_RDES0_ES | DDESC_RDES0_LE));
   1609 }
   1610 
   1611 #ifdef DWC_GMAC_DEBUG
   1612 static void
   1613 dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
   1614 {
   1615 	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
   1616 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
   1617 	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
   1618 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
   1619 	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
   1620 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
   1621 	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
   1622 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
   1623 	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
   1624 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
   1625 	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
   1626 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
   1627 	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
   1628 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
   1629 	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
   1630 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
   1631 	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
   1632 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
   1633 	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
   1634 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
   1635 	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
   1636 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
   1637 	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
   1638 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
   1639 }
   1640 
   1641 static void
   1642 dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
   1643 {
   1644 	int i;
   1645 
   1646 	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
   1647 	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
   1648 	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
   1649 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
   1650 		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
   1651 		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
   1652 		    "data: %08x next: %08x\n",
   1653 		    i, sc->sc_txq.t_physaddr +
   1654 			i * sizeof(struct dwc_gmac_dev_dmadesc),
   1655 		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
   1656 		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
   1657 	}
   1658 }
   1659 
   1660 static void
   1661 dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
   1662 {
   1663 	int i;
   1664 
   1665 	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
   1666 	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
   1667 	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
   1668 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
   1669 		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
   1670 		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
   1671 		    "data: %08x next: %08x\n",
   1672 		    i, sc->sc_rxq.r_physaddr +
   1673 			i * sizeof(struct dwc_gmac_dev_dmadesc),
   1674 		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
   1675 		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
   1676 	}
   1677 }
   1678 
   1679 static void
   1680 dwc_dump_status(struct dwc_gmac_softc *sc)
   1681 {
   1682 	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1683 	     AWIN_GMAC_MAC_INTR);
   1684 	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1685 	     AWIN_GMAC_DMA_STATUS);
   1686 	char buf[200];
   1687 
   1688 	/* print interrupt state */
   1689 	snprintb(buf, sizeof(buf), "\177\20"
   1690 	    "b\x10""NI\0"
   1691 	    "b\x0f""AI\0"
   1692 	    "b\x0e""ER\0"
   1693 	    "b\x0d""FB\0"
   1694 	    "b\x0a""ET\0"
   1695 	    "b\x09""RW\0"
   1696 	    "b\x08""RS\0"
   1697 	    "b\x07""RU\0"
   1698 	    "b\x06""RI\0"
   1699 	    "b\x05""UN\0"
   1700 	    "b\x04""OV\0"
   1701 	    "b\x03""TJ\0"
   1702 	    "b\x02""TU\0"
   1703 	    "b\x01""TS\0"
   1704 	    "b\x00""TI\0"
   1705 	    "\0", dma_status);
   1706 	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
   1707 	    status, buf);
   1708 }
   1709 
   1710 static void
   1711 dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
   1712 {
   1713 	dwc_dump_status(sc);
   1714 	dwc_gmac_dump_ffilt(sc,
   1715 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
   1716 	dwc_gmac_dump_dma(sc);
   1717 	dwc_gmac_dump_tx_desc(sc);
   1718 	dwc_gmac_dump_rx_desc(sc);
   1719 
   1720 	panic("%s", msg);
   1721 }
   1722 
   1723 static void
        dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
   1724 {
   1725 	char buf[200];
   1726 
   1727 	/* print filter setup */
   1728 	snprintb(buf, sizeof(buf), "\177\20"
   1729 	    "b\x1f""RA\0"
   1730 	    "b\x0a""HPF\0"
   1731 	    "b\x09""SAF\0"
   1732 	    "b\x08""SAIF\0"
   1733 	    "b\x05""DBF\0"
   1734 	    "b\x04""PM\0"
   1735 	    "b\x03""DAIF\0"
   1736 	    "b\x02""HMC\0"
   1737 	    "b\x01""HUC\0"
   1738 	    "b\x00""PR\0"
   1739 	    "\0", ffilt);
   1740 	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
   1741 }
   1742 #endif
   1743