      1 /* $NetBSD: dwc_gmac.c,v 1.96 2025/02/16 18:54:49 jakllsch Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Matt Thomas of 3am Software Foundry and Martin Husemann.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * This driver supports the Synopsys DesignWare GMAC core, as found
     34  * on the Allwinner A20 and other SoCs.
     35  *
     36  * Real documentation does not seem to be available; the marketing
     37  * product documents can be found here:
     38  *
     39  *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
     40  */
     41 
     42 /*
     43  * Lock order:
     44  *
     45  *	IFNET_LOCK -> sc_mcast_lock
     46  *	IFNET_LOCK -> sc_intr_lock -> {sc_txq.t_mtx, sc_rxq.r_mtx}
     47  */
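        /*
         * Example (illustrative): a code path that needs both the interrupt
         * lock and a queue lock must take them in the order above:
         *
         *	mutex_enter(sc->sc_intr_lock);
         *	mutex_enter(&sc->sc_txq.t_mtx);
         *	...
         *	mutex_exit(&sc->sc_txq.t_mtx);
         *	mutex_exit(sc->sc_intr_lock);
         */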
     48 
     49 #include <sys/cdefs.h>
     50 
     51 __KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.96 2025/02/16 18:54:49 jakllsch Exp $");
     52 
     53 /* #define	DWC_GMAC_DEBUG	1 */
     54 
     55 #ifdef _KERNEL_OPT
     56 #include "opt_inet.h"
     57 #endif
     58 
     59 #include <sys/param.h>
     60 #include <sys/bus.h>
     61 #include <sys/device.h>
     62 #include <sys/intr.h>
     63 #include <sys/systm.h>
     64 #include <sys/sockio.h>
     65 #include <sys/cprng.h>
     66 #include <sys/rndsource.h>
     67 
     68 #include <net/if.h>
     69 #include <net/if_ether.h>
     70 #include <net/if_media.h>
     71 #include <net/bpf.h>
     72 #ifdef INET
     73 #include <netinet/if_inarp.h>
     74 #endif
     75 
     76 #include <dev/mii/miivar.h>
     77 
     78 #include <dev/ic/dwc_gmac_reg.h>
     79 #include <dev/ic/dwc_gmac_var.h>
     80 
     81 static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
     82 static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
     83 static void dwc_gmac_miibus_statchg(struct ifnet *);
     84 
     85 static int dwc_gmac_reset(struct dwc_gmac_softc *);
     86 static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t[ETHER_ADDR_LEN]);
     87 static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
     88 static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
     89 static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
     90 static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
     91 static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
     92 static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
     93 static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
     94 static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
     95 static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
     96 static int dwc_gmac_init(struct ifnet *);
     97 static void dwc_gmac_stop(struct ifnet *, int);
     98 static void dwc_gmac_start(struct ifnet *);
     99 static void dwc_gmac_start_locked(struct ifnet *);
    100 static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
    101 static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
    102 static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
    103 static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
    104 static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
    105 static int dwc_gmac_ifflags_cb(struct ethercom *);
    106 static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
    107 static int  dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
    108 static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
    109 static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
    110 static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
    111 static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
    112 static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
    113 static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
    114 static int  dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
    115 static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
    116 static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
    117 static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
    118 static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
    119 static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
    120 static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
    121 static int  dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);
    122 
    123 static const struct dwc_gmac_desc_methods desc_methods_standard = {
    124 	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
    125 	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
    126 	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
    127 	.tx_set_len = dwc_gmac_desc_std_set_len,
    128 	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
    129 	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
    130 	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
    131 	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
    132 	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
    133 	.rx_set_len = dwc_gmac_desc_std_set_len,
    134 	.rx_get_len = dwc_gmac_desc_std_get_len,
    135 	.rx_has_error = dwc_gmac_desc_std_rx_has_error
    136 };
    137 
    138 static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
    139 	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
    140 	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
    141 	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
    142 	.tx_set_len = dwc_gmac_desc_enh_set_len,
    143 	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
    144 	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
    145 	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
    146 	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
    147 	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
    148 	.rx_set_len = dwc_gmac_desc_enh_set_len,
    149 	.rx_get_len = dwc_gmac_desc_enh_get_len,
    150 	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
    151 };
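        /*
         * Callers never test the descriptor format directly; they dispatch
         * through sc->sc_descm, e.g. (illustrative):
         *
         *	sc->sc_descm->rx_init_flags(desc);
         *	sc->sc_descm->rx_set_len(desc, len);
         *	sc->sc_descm->rx_set_owned_by_dev(desc);
         */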
    152 
    153 
    154 #define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT + (N)) \
    155 				    * sizeof(struct dwc_gmac_dev_dmadesc))
    156 #define	TX_NEXT(N)		(((N) + 1) & (AWGE_TX_RING_COUNT - 1))
    157 
    158 #define RX_DESC_OFFSET(N)	((N) * sizeof(struct dwc_gmac_dev_dmadesc))
    159 #define	RX_NEXT(N)		(((N) + 1) & (AWGE_RX_RING_COUNT - 1))
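        /*
         * TX_NEXT()/RX_NEXT() wrap with "(N + 1) & (COUNT - 1)", which
         * assumes the ring counts are powers of two; e.g. (illustrative)
         * if AWGE_TX_RING_COUNT were 32, TX_NEXT(31) == (32 & 31) == 0.
         */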
    160 
    161 
    162 
    163 #define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
    164 				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
    165 				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)
    166 
    167 #define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
    168 				GMAC_DMA_INT_FBE |	\
    169 				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
    170 				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
    171 				GMAC_DMA_INT_TJE)
    172 
    173 #define	AWIN_DEF_MAC_INTRMASK	\
    174 	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
    175 	AWIN_GMAC_MAC_INT_LINKCHG)
    176 
    177 #ifdef DWC_GMAC_DEBUG
    178 static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
    179 static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
    180 static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
    181 static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
    182 static void dwc_dump_status(struct dwc_gmac_softc *);
    183 static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
    184 #endif
    185 
    186 int
    187 dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
    188 {
    189 	uint8_t enaddr[ETHER_ADDR_LEN];
    190 	uint32_t maclo, machi, hwft;
    191 	struct mii_data * const mii = &sc->sc_mii;
    192 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
    193 	prop_dictionary_t dict;
    194 
    195 	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
    196 	sc->sc_mii_clk = mii_clk & 7;
    197 
    198 	dict = device_properties(sc->sc_dev);
    199 	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
    200 	if (ea != NULL) {
    201 		/*
    202 		 * If the MAC address is overridden by a device property,
    203 		 * use that.
    204 		 */
    205 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
    206 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
    207 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
    208 	} else {
    209 		/*
    210 		 * If we did not get an externally configure address,
    211 		 * If we did not get an externally configured address,
    212 		 * try to read one from the current filter setup
    213 		 * before resetting the chip.
    214 		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    215 		    AWIN_GMAC_MAC_ADDR0LO);
    216 		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    217 		    AWIN_GMAC_MAC_ADDR0HI);
    218 
    219 		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
    220 			/* fake a locally administered unicast MAC address */
    221 			maclo = 0x00f2 | (cprng_strong32() << 16);
    222 			machi = cprng_strong32();
    223 		}
    224 
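        		/*
        		 * The ADDR0 registers are little-endian; e.g.
        		 * (illustrative) maclo 0x44332211 with machi 0x6655
        		 * yields 11:22:33:44:55:66.
        		 */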
    225 		enaddr[0] = maclo & 0x0ff;
    226 		enaddr[1] = (maclo >> 8) & 0x0ff;
    227 		enaddr[2] = (maclo >> 16) & 0x0ff;
    228 		enaddr[3] = (maclo >> 24) & 0x0ff;
    229 		enaddr[4] = machi & 0x0ff;
    230 		enaddr[5] = (machi >> 8) & 0x0ff;
    231 	}
    232 
    233 	const uint32_t ver =
    234 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
    235 	const uint32_t snpsver =
    236 	    __SHIFTOUT(ver, AWIN_GMAC_MAC_VERSION_SNPSVER_MASK);
    237 	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", snpsver);
    238 
    239 	/*
    240 	 * Init chip and do initial setup
    241 	 */
    242 	if (dwc_gmac_reset(sc) != 0)
    243 		return ENXIO;	/* not much to clean up; we haven't attached yet */
    244 	dwc_gmac_write_hwaddr(sc, enaddr);
    245 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
    246 	    ether_sprintf(enaddr));
    247 
    248 	hwft = 0;
    249 	if (snpsver >= 0x35) {
    250 		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    251 		    AWIN_GMAC_DMA_HWFEATURES);
    252 		aprint_normal_dev(sc->sc_dev,
    253 		    "HW feature mask: %x\n", hwft);
    254 	}
    255 
    256 	if (sizeof(bus_addr_t) > 4) {
    257 		int error = bus_dmatag_subregion(sc->sc_dmat, 0, __MASK(32),
    258 		    &sc->sc_dmat, BUS_DMA_WAITOK);
    259 		if (error != 0) {
    260 			aprint_error_dev(sc->sc_dev,
    261 			    "failed to create DMA subregion\n");
    262 			return ENOMEM;
    263 		}
    264 	}
    265 
    266 	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
    267 		aprint_normal_dev(sc->sc_dev,
    268 		    "Using enhanced descriptor format\n");
    269 		sc->sc_descm = &desc_methods_enhanced;
    270 	} else {
    271 		sc->sc_descm = &desc_methods_standard;
    272 	}
    273 	if (hwft & GMAC_DMA_FEAT_RMON) {
    274 		uint32_t val;
    275 
    276 		/* Mask all MMC interrupts */
    277 		val = 0xffffffff;
    278 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    279 		    GMAC_MMC_RX_INT_MSK, val);
    280 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    281 		    GMAC_MMC_TX_INT_MSK, val);
    282 	}
    283 
    284 	/*
    285 	 * Allocate Tx and Rx rings
    286 	 */
    287 	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
    288 		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
    289 		goto fail;
    290 	}
    291 
    292 	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
    293 		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
    294 		goto fail;
    295 	}
    296 
    297 	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
    298 		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
    299 		goto fail;
    300 	}
    301 
    302 	sc->sc_stopping = false;
    303 	sc->sc_txbusy = false;
    304 
    305 	sc->sc_mcast_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
    306 	sc->sc_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
    307 	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
    308 	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
    309 
    310 	/*
    311 	 * Prepare interface data
    312 	 */
    313 	ifp->if_softc = sc;
    314 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
    315 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    316 	ifp->if_extflags = IFEF_MPSAFE;
    317 	ifp->if_ioctl = dwc_gmac_ioctl;
    318 	ifp->if_start = dwc_gmac_start;
    319 	ifp->if_init = dwc_gmac_init;
    320 	ifp->if_stop = dwc_gmac_stop;
    321 	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
    322 	IFQ_SET_READY(&ifp->if_snd);
    323 
    324 	/*
    325 	 * Attach MII subdevices
    326 	 */
    327 	sc->sc_ec.ec_mii = &sc->sc_mii;
    328 	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
    329 	mii->mii_ifp = ifp;
    330 	mii->mii_readreg = dwc_gmac_miibus_read_reg;
    331 	mii->mii_writereg = dwc_gmac_miibus_write_reg;
    332 	mii->mii_statchg = dwc_gmac_miibus_statchg;
    333 	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
    334 	    MIIF_DOPAUSE);
    335 
    336 	if (LIST_EMPTY(&mii->mii_phys)) {
    337 		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
    338 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
    339 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
    340 	} else {
    341 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
    342 	}
    343 
    344 	/*
    345 	 * We can support 802.1Q VLAN-sized frames.
    346 	 */
    347 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    348 
    349 	/*
    350 	 * Ready, attach interface
    351 	 */
    352 	/* Attach the interface. */
    353 	if_initialize(ifp);
    354 	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
    355 	if_deferred_start_init(ifp, NULL);
    356 	ether_ifattach(ifp, enaddr);
    357 	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
    358 	if_register(ifp);
    359 	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
    360 	    RND_TYPE_NET, RND_FLAG_DEFAULT);
    361 
    362 	/*
    363 	 * Enable interrupts
    364 	 */
    365 	mutex_enter(sc->sc_intr_lock);
    366 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
    367 	    AWIN_DEF_MAC_INTRMASK);
    368 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
    369 	    GMAC_DEF_DMA_INT_MASK);
    370 	mutex_exit(sc->sc_intr_lock);
    371 
    372 	return 0;
    373 
    374 fail:
    375 	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
    376 	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
    377 	dwc_gmac_free_dma_rings(sc);
    378 	mutex_destroy(&sc->sc_mdio_lock);
    379 
    380 	return ENXIO;
    381 }
    382 
    383 
    384 
    385 static int
    386 dwc_gmac_reset(struct dwc_gmac_softc *sc)
    387 {
    388 	size_t cnt;
    389 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
    390 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
    391 	    | GMAC_BUSMODE_RESET);
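        	/* Poll for completion: 30000 iterations * 10 us is ~300 ms. */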
    392 	for (cnt = 0; cnt < 30000; cnt++) {
    393 		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
    394 		    & GMAC_BUSMODE_RESET) == 0)
    395 			return 0;
    396 		delay(10);
    397 	}
    398 
    399 	aprint_error_dev(sc->sc_dev, "reset timed out\n");
    400 	return EIO;
    401 }
    402 
    403 static void
    404 dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    405     uint8_t enaddr[ETHER_ADDR_LEN])
    406 {
    407 	uint32_t hi, lo;
    408 
    409 	hi = enaddr[4] | (enaddr[5] << 8);
    410 	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
    411 	    | ((uint32_t)enaddr[3] << 24);
    412 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
    413 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
    414 }
    415 
    416 static int
    417 dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
    418 {
    419 	struct dwc_gmac_softc * const sc = device_private(self);
    420 	uint16_t mii;
    421 	size_t cnt;
    422 
    423 	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
    424 	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
    425 	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
    426 	    | GMAC_MII_BUSY;
    427 
    428 	mutex_enter(&sc->sc_mdio_lock);
    429 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
    430 
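        	/* Wait for GMAC_MII_BUSY to clear: up to 1000 * 10 us = 10 ms. */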
    431 	for (cnt = 0; cnt < 1000; cnt++) {
    432 		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    433 		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
    434 			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    435 			    AWIN_GMAC_MAC_MIIDATA);
    436 			break;
    437 		}
    438 		delay(10);
    439 	}
    440 
    441 	mutex_exit(&sc->sc_mdio_lock);
    442 
    443 	if (cnt >= 1000)
    444 		return ETIMEDOUT;
    445 
    446 	return 0;
    447 }
    448 
    449 static int
    450 dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
    451 {
    452 	struct dwc_gmac_softc * const sc = device_private(self);
    453 	uint16_t mii;
    454 	size_t cnt;
    455 
    456 	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
    457 	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
    458 	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
    459 	    | GMAC_MII_BUSY | GMAC_MII_WRITE;
    460 
    461 	mutex_enter(&sc->sc_mdio_lock);
    462 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
    463 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
    464 
    465 	for (cnt = 0; cnt < 1000; cnt++) {
    466 		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
    467 		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
    468 			break;
    469 		delay(10);
    470 	}
    471 
    472 	mutex_exit(&sc->sc_mdio_lock);
    473 
    474 	if (cnt >= 1000)
    475 		return ETIMEDOUT;
    476 
    477 	return 0;
    478 }
    479 
    480 static int
    481 dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    482 	struct dwc_gmac_rx_ring *ring)
    483 {
    484 	struct dwc_gmac_rx_data *data;
    485 	bus_addr_t physaddr;
    486 	const size_t rxringsz = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
    487 	int error, i, next;
    488 
    489 	ring->r_cur = ring->r_next = 0;
    490 	memset(ring->r_desc, 0, rxringsz);
    491 
    492 	/*
    493 	 * Pre-allocate Rx buffers and populate Rx ring.
    494 	 */
    495 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
    496 		struct dwc_gmac_dev_dmadesc *desc;
    497 
    498 		data = &sc->sc_rxq.r_data[i];
    499 
    500 		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
    501 		if (data->rd_m == NULL) {
    502 			aprint_error_dev(sc->sc_dev,
    503 			    "could not allocate rx mbuf #%d\n", i);
    504 			error = ENOMEM;
    505 			goto fail;
    506 		}
    507 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
    508 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
    509 		if (error != 0) {
    510 			aprint_error_dev(sc->sc_dev,
    511 			    "could not create DMA map\n");
    512 			data->rd_map = NULL;
    513 			goto fail;
    514 		}
    515 		MCLGET(data->rd_m, M_DONTWAIT);
    516 		if (!(data->rd_m->m_flags & M_EXT)) {
    517 			aprint_error_dev(sc->sc_dev,
    518 			    "could not allocate mbuf cluster #%d\n", i);
    519 			error = ENOMEM;
    520 			goto fail;
    521 		}
    522 		data->rd_m->m_len = data->rd_m->m_pkthdr.len
    523 		    = data->rd_m->m_ext.ext_size;
    524 		m_adj(data->rd_m, ETHER_ALIGN);
    525 		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
    526 			data->rd_m->m_len = data->rd_m->m_pkthdr.len
    527 			    = AWGE_MAX_PACKET;
    528 		}
    529 
    530 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
    531 		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
    532 		if (error != 0) {
    533 			aprint_error_dev(sc->sc_dev,
    534 		    "could not load rx buf DMA map #%d\n", i);
    535 			goto fail;
    536 		}
    537 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
    538 		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
    539 		physaddr = data->rd_map->dm_segs[0].ds_addr;
    540 
    541 		desc = &sc->sc_rxq.r_desc[i];
    542 		desc->ddesc_data = htole32(physaddr);
    543 		next = RX_NEXT(i);
    544 		desc->ddesc_next = htole32(ring->r_physaddr
    545 		    + next * sizeof(*desc));
    546 		sc->sc_descm->rx_init_flags(desc);
    547 		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
    548 		sc->sc_descm->rx_set_owned_by_dev(desc);
    549 	}
    550 
    551 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    552 	    RX_DESC_OFFSET(0),
    553 	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
    554 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    555 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
    556 	    ring->r_physaddr);
    557 
    558 	return 0;
    559 
    560 fail:
    561 	dwc_gmac_free_rx_ring(sc, ring);
    562 	return error;
    563 }
    564 
    565 static void
    566 dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    567 	struct dwc_gmac_rx_ring *ring)
    568 {
    569 	struct dwc_gmac_dev_dmadesc *desc;
    570 	struct dwc_gmac_rx_data *data;
    571 	int i;
    572 
    573 	mutex_enter(&ring->r_mtx);
    574 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
    575 		desc = &sc->sc_rxq.r_desc[i];
    576 		data = &sc->sc_rxq.r_data[i];
    577 		sc->sc_descm->rx_init_flags(desc);
    578 		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
    579 		sc->sc_descm->rx_set_owned_by_dev(desc);
    580 	}
    581 
    582 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
    583 	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
    584 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    585 
    586 	ring->r_cur = ring->r_next = 0;
    587 	/* reset DMA address to start of ring */
    588 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
    589 	    sc->sc_rxq.r_physaddr);
    590 	mutex_exit(&ring->r_mtx);
    591 }
    592 
    593 static int
    594 dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
    595 {
    596 	const size_t ringsize = AWGE_TOTAL_RING_COUNT *
    597 		sizeof(struct dwc_gmac_dev_dmadesc);
    598 	int error, nsegs;
    599 	void *rings;
    600 
    601 	error = bus_dmamap_create(sc->sc_dmat, ringsize, 1, ringsize, 0,
    602 	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
    603 	if (error != 0) {
    604 		aprint_error_dev(sc->sc_dev,
    605 		    "could not create desc DMA map\n");
    606 		sc->sc_dma_ring_map = NULL;
    607 		goto fail;
    608 	}
    609 
    610 	error = bus_dmamem_alloc(sc->sc_dmat, ringsize, PAGE_SIZE, 0,
    611 	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
    612 	if (error != 0) {
    613 		aprint_error_dev(sc->sc_dev,
    614 		    "could not allocate DMA memory\n");
    615 		goto fail;
    616 	}
    617 
    618 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
    619 	    ringsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
    620 	if (error != 0) {
    621 		aprint_error_dev(sc->sc_dev,
    622 		    "could not map DMA memory\n");
    623 		goto fail;
    624 	}
    625 
    626 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
    627 	    ringsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
    628 	if (error != 0) {
    629 		aprint_error_dev(sc->sc_dev,
    630 		    "could not load desc DMA map\n");
    631 		goto fail;
    632 	}
    633 
    634 	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
    635 	sc->sc_rxq.r_desc = rings;
    636 	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
    637 
    638 	/* and the remaining descriptors to the TX side */
    639 	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
    640 	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
    641 	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc);
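        	/*
        	 * Resulting layout of the single allocation (illustrative):
        	 *
        	 *	r_physaddr -> AWGE_RX_RING_COUNT RX descriptors
        	 *	t_physaddr -> AWGE_TX_RING_COUNT TX descriptors
        	 */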
    642 
    643 	return 0;
    644 
    645 fail:
    646 	dwc_gmac_free_dma_rings(sc);
    647 	return error;
    648 }
    649 
    650 static void
    651 dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
    652 {
    653 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
    654 	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
    655 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
    656 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
    657 	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
    658 	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
    659 }
    660 
    661 static void
    662 dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
    663 {
    664 	struct dwc_gmac_rx_data *data;
    665 	int i;
    666 
    667 	if (ring->r_desc == NULL)
    668 		return;
    669 
    670 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
    671 		data = &ring->r_data[i];
    672 
    673 		if (data->rd_map != NULL) {
    674 			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
    675 			    data->rd_map->dm_mapsize,
    676 			    BUS_DMASYNC_POSTREAD);
    678 			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
    679 			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
    680 		}
    681 		m_freem(data->rd_m);
    682 	}
    683 }
    684 
    685 static int
    686 dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    687 	struct dwc_gmac_tx_ring *ring)
    688 {
    689 	int i, error = 0;
    690 
    691 	ring->t_queued = 0;
    692 	ring->t_cur = ring->t_next = 0;
    693 
    694 	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT * sizeof(*ring->t_desc));
    695 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    696 	    TX_DESC_OFFSET(0),
    697 	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
    698 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    699 
    700 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    701 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
    702 		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
    703 		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
    704 		    &ring->t_data[i].td_map);
    705 		if (error != 0) {
    706 			aprint_error_dev(sc->sc_dev,
    707 			    "could not create TX DMA map #%d\n", i);
    708 			ring->t_data[i].td_map = NULL;
    709 			goto fail;
    710 		}
    711 		ring->t_desc[i].ddesc_next = htole32(
    712 		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
    713 		    * TX_NEXT(i));
    714 	}
    715 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    716 	    TX_DESC_OFFSET(0),
    717 	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
    718 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    719 
    720 	return 0;
    721 
    722 fail:
    723 	dwc_gmac_free_tx_ring(sc, ring);
    724 	return error;
    725 }
    726 
    727 static void
    728 dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
    729 {
    730 	/* 'end' points one descriptor beyond the last one we want to sync */
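        	/*
        	 * Example (illustrative): if AWGE_TX_RING_COUNT were 32,
        	 * start == 30 and end == 1, the first sync below covers
        	 * descriptors 30-31 and the second covers descriptor 0.
        	 */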
    731 	if (end > start) {
    732 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    733 		    TX_DESC_OFFSET(start),
    734 		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(start),
    735 		    ops);
    736 		return;
    737 	}
    738 	/* sync from 'start' to end of ring */
    739 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    740 	    TX_DESC_OFFSET(start),
    741 	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT) - TX_DESC_OFFSET(start),
    742 	    ops);
    743 	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
    744 		/* sync from start of ring to 'end' */
    745 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    746 		    TX_DESC_OFFSET(0),
    747 		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0),
    748 		    ops);
    749 	}
    750 }
    751 
    752 static void
    753 dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    754 	struct dwc_gmac_tx_ring *ring)
    755 {
    756 	int i;
    757 
    758 	mutex_enter(&ring->t_mtx);
    759 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    760 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
    761 
    762 		if (data->td_m != NULL) {
    763 			bus_dmamap_sync(sc->sc_dmat, data->td_active,
    764 			    0, data->td_active->dm_mapsize,
    765 			    BUS_DMASYNC_POSTWRITE);
    766 			bus_dmamap_unload(sc->sc_dmat, data->td_active);
    767 			m_freem(data->td_m);
    768 			data->td_m = NULL;
    769 		}
    770 	}
    771 
    772 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
    773 	    TX_DESC_OFFSET(0),
    774 	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
    775 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    776 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
    777 	    sc->sc_txq.t_physaddr);
    778 
    779 	ring->t_queued = 0;
    780 	ring->t_cur = ring->t_next = 0;
    781 	mutex_exit(&ring->t_mtx);
    782 }
    783 
    784 static void
    785 dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    786 	struct dwc_gmac_tx_ring *ring)
    787 {
    788 	int i;
    789 
    790 	/* unload the maps */
    791 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    792 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
    793 
    794 		if (data->td_m != NULL) {
    795 			bus_dmamap_sync(sc->sc_dmat, data->td_active,
    796 			    0, data->td_active->dm_mapsize,
    797 			    BUS_DMASYNC_POSTWRITE);
    798 			bus_dmamap_unload(sc->sc_dmat, data->td_active);
    799 			m_freem(data->td_m);
    800 			data->td_m = NULL;
    801 		}
    802 	}
    803 
    804 	/* and actually free them */
    805 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
    806 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
    807 
    808 		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
    809 	}
    810 }
    811 
    812 static void
    813 dwc_gmac_miibus_statchg(struct ifnet *ifp)
    814 {
    815 	struct dwc_gmac_softc * const sc = ifp->if_softc;
    816 	struct mii_data * const mii = &sc->sc_mii;
    817 	uint32_t conf, flow;
    818 
    819 	/*
    820 	 * Set MII or GMII interface based on the speed
    821 	 * negotiated by the PHY.
    822 	 */
    823 	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
    824 	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
    825 	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
    826 	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
    827 	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
    828 	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
    829 	    | AWIN_GMAC_MAC_CONF_RXENABLE
    830 	    | AWIN_GMAC_MAC_CONF_TXENABLE;
    831 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
    832 	case IFM_10_T:
    833 		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
    834 		break;
    835 	case IFM_100_TX:
    836 		conf |= AWIN_GMAC_MAC_CONF_FES100 |
    837 			AWIN_GMAC_MAC_CONF_MIISEL;
    838 		break;
    839 	case IFM_1000_T:
    840 		break;
    841 	}
    842 	if (sc->sc_set_speed)
    843 		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));
    844 
    845 	flow = 0;
    846 	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
    847 		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
    848 		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
    849 	}
    850 	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
    851 		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
    852 	}
    853 	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
    854 		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
    855 	}
    856 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    857 	    AWIN_GMAC_MAC_FLOWCTRL, flow);
    858 
    859 #ifdef DWC_GMAC_DEBUG
    860 	aprint_normal_dev(sc->sc_dev,
    861 	    "setting MAC conf register: %08x\n", conf);
    862 #endif
    863 
    864 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
    865 	    AWIN_GMAC_MAC_CONF, conf);
    866 }
    867 
    868 static int
    869 dwc_gmac_init(struct ifnet *ifp)
    870 {
    871 	struct dwc_gmac_softc * const sc = ifp->if_softc;
    872 	uint32_t ffilt;
    873 
    874 	ASSERT_SLEEPABLE();
    875 	KASSERT(IFNET_LOCKED(ifp));
    876 	KASSERT(ifp == &sc->sc_ec.ec_if);
    877 
    878 	dwc_gmac_stop(ifp, 0);
    879 
    880 	/*
    881 	 * Configure DMA burst/transfer mode and RX/TX priorities.
    882 	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
    883 	 */
    884 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
    885 	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
    886 	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
    887 	    __SHIFTIN(2, GMAC_BUSMODE_PBL));
    888 
    889 	/*
    890 	 * Set up address filter
    891 	 */
    892 	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
    893 	if (ifp->if_flags & IFF_PROMISC) {
    894 		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
    895 	} else {
    896 		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
    897 	}
    898 	if (ifp->if_flags & IFF_BROADCAST) {
    899 		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
    900 	} else {
    901 		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
    902 	}
    903 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
    904 
    905 	/*
    906 	 * Set up multicast filter
    907 	 */
    908 	mutex_enter(sc->sc_mcast_lock);
    909 	dwc_gmac_setmulti(sc);
    910 	mutex_exit(sc->sc_mcast_lock);
    911 
    912 	/*
    913 	 * Set up dma pointer for RX and TX ring
    914 	 */
    915 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
    916 	    sc->sc_rxq.r_physaddr);
    917 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
    918 	    sc->sc_txq.t_physaddr);
    919 
    920 	/*
    921 	 * Start RX/TX part
    922 	 */
    923 	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
    924 	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
    925 		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
    926 	}
    927 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);
    928 #ifdef DWC_GMAC_DEBUG
    929 	aprint_normal_dev(sc->sc_dev,
    930 	    "setting DMA opmode register: %08x\n", opmode);
    931 #endif
    932 
    933 	ifp->if_flags |= IFF_RUNNING;
    934 	sc->sc_if_flags = ifp->if_flags;
    935 
    936 	mutex_enter(sc->sc_intr_lock);
    937 	sc->sc_stopping = false;
    938 	mutex_exit(sc->sc_intr_lock);
    939 
    940 	mutex_enter(&sc->sc_txq.t_mtx);
    941 	sc->sc_txbusy = false;
    942 	mutex_exit(&sc->sc_txq.t_mtx);
    943 
    944 	return 0;
    945 }
    946 
    947 static void
    948 dwc_gmac_start(struct ifnet *ifp)
    949 {
    950 	struct dwc_gmac_softc * const sc = ifp->if_softc;
    951 	KASSERT(if_is_mpsafe(ifp));
    952 
    953 	mutex_enter(sc->sc_intr_lock);
    954 	if (!sc->sc_stopping) {
    955 		dwc_gmac_start_locked(ifp);
    956 	}
    957 	mutex_exit(sc->sc_intr_lock);
    958 }
    959 
    960 static void
    961 dwc_gmac_start_locked(struct ifnet *ifp)
    962 {
    963 	struct dwc_gmac_softc * const sc = ifp->if_softc;
    964 	int old = sc->sc_txq.t_queued;
    965 	int start = sc->sc_txq.t_cur;
    966 	struct mbuf *m0;
    967 
    968 	KASSERT(mutex_owned(sc->sc_intr_lock));
    969 
    970 	mutex_enter(&sc->sc_txq.t_mtx);
    971 	if (sc->sc_txbusy) {
    972 		mutex_exit(&sc->sc_txq.t_mtx);
    973 		return;
    974 	}
    975 
    976 	for (;;) {
    977 		IFQ_POLL(&ifp->if_snd, m0);
    978 		if (m0 == NULL)
    979 			break;
    980 		if (dwc_gmac_queue(sc, m0) != 0) {
    981 			sc->sc_txbusy = true;
    982 			break;
    983 		}
    984 		IFQ_DEQUEUE(&ifp->if_snd, m0);
    985 		bpf_mtap(ifp, m0, BPF_D_OUT);
    986 		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
    987 			sc->sc_txbusy = true;
    988 			break;
    989 		}
    990 	}
    991 
    992 	if (sc->sc_txq.t_queued != old) {
    993 		/* packets have been queued, kick it off */
    994 		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
    995 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    996 
    997 #ifdef DWC_GMAC_DEBUG
    998 		dwc_dump_status(sc);
    999 #endif
   1000 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
   1001 		    AWIN_GMAC_DMA_TXPOLL, ~0U);
   1002 	}
   1003 	mutex_exit(&sc->sc_txq.t_mtx);
   1004 }
   1005 
   1006 static void
   1007 dwc_gmac_stop(struct ifnet *ifp, int disable)
   1008 {
   1009 	struct dwc_gmac_softc * const sc = ifp->if_softc;
   1010 
   1011 	ASSERT_SLEEPABLE();
   1012 	KASSERT(IFNET_LOCKED(ifp));
   1013 
   1014 	ifp->if_flags &= ~IFF_RUNNING;
   1015 
   1016 	mutex_enter(sc->sc_mcast_lock);
   1017 	sc->sc_if_flags = ifp->if_flags;
   1018 	mutex_exit(sc->sc_mcast_lock);
   1019 
   1020 	mutex_enter(sc->sc_intr_lock);
   1021 	sc->sc_stopping = true;
   1022 	mutex_exit(sc->sc_intr_lock);
   1023 
   1024 	mutex_enter(&sc->sc_txq.t_mtx);
   1025 	sc->sc_txbusy = false;
   1026 	mutex_exit(&sc->sc_txq.t_mtx);
   1027 
   1028 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
   1029 	    AWIN_GMAC_DMA_OPMODE,
   1030 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1031 		AWIN_GMAC_DMA_OPMODE)
   1032 		& ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
   1033 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
   1034 	    AWIN_GMAC_DMA_OPMODE,
   1035 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1036 		AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
   1037 
   1038 	mii_down(&sc->sc_mii);
   1039 	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
   1040 	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
   1041 }
   1042 
   1043 /*
   1044  * Add m0 to the TX ring
   1045  */
   1046 static int
   1047 dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
   1048 {
   1049 	struct dwc_gmac_dev_dmadesc *desc = NULL;
   1050 	struct dwc_gmac_tx_data *data = NULL;
   1051 	bus_dmamap_t map;
   1052 	int error, i, first;
   1053 
   1054 #ifdef DWC_GMAC_DEBUG
   1055 	aprint_normal_dev(sc->sc_dev,
   1056 	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
   1057 #endif
   1058 
   1059 	first = sc->sc_txq.t_cur;
   1060 	map = sc->sc_txq.t_data[first].td_map;
   1061 
   1062 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
   1063 	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   1064 	if (error != 0) {
   1065 		aprint_error_dev(sc->sc_dev, "could not map mbuf "
   1066 		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
   1067 		return error;
   1068 	}
   1069 
   1070 	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
   1071 		bus_dmamap_unload(sc->sc_dmat, map);
   1072 		return ENOBUFS;
   1073 	}
   1074 
   1075 	for (i = 0; i < map->dm_nsegs; i++) {
   1076 		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
   1077 		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
   1078 
   1079 		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
   1080 
   1081 #ifdef DWC_GMAC_DEBUG
   1082 		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
   1083 		    "len %lu\n", sc->sc_txq.t_cur,
   1084 		    (unsigned long)map->dm_segs[i].ds_addr,
   1085 		    (unsigned long)map->dm_segs[i].ds_len);
   1086 #endif
   1087 
   1088 		sc->sc_descm->tx_init_flags(desc);
   1089 		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);
   1090 
   1091 		if (i == 0)
   1092 			sc->sc_descm->tx_set_first_frag(desc);
   1093 
   1094 		/*
   1095 		 * Defer passing ownership of the first descriptor until we
   1096 		 * are done, so the device never sees a half-built chain.
   1097 		 */
   1098 		if (i != 0)
   1099 			sc->sc_descm->tx_set_owned_by_dev(desc);
   1100 
   1101 		sc->sc_txq.t_queued++;
   1102 		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
   1103 	}
   1104 
   1105 	sc->sc_descm->tx_set_last_frag(desc);
   1106 
   1107 	data->td_m = m0;
   1108 	data->td_active = map;
   1109 
   1110 	/* sync the packet buffer */
   1111 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
   1112 	    BUS_DMASYNC_PREWRITE);
   1113 
   1114 	/* sync the new descriptors - ownership not transferred yet */
   1115 	dwc_gmac_txdesc_sync(sc, first, sc->sc_txq.t_cur,
   1116 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1117 
   1118 	/* Pass first to device */
   1119 	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);
   1120 
   1121 	return 0;
   1122 }
   1123 
   1124 /*
   1125  * If the interface is up and running, only modify the receive
   1126  * filter when setting promiscuous or debug mode.  Otherwise fall
   1127  * through to ether_ioctl, which will reset the chip.
   1128  */
   1129 static int
   1130 dwc_gmac_ifflags_cb(struct ethercom *ec)
   1131 {
   1132 	struct ifnet * const ifp = &ec->ec_if;
   1133 	struct dwc_gmac_softc * const sc = ifp->if_softc;
   1134 	int ret = 0;
   1135 
   1136 	KASSERT(IFNET_LOCKED(ifp));
   1137 	mutex_enter(sc->sc_mcast_lock);
   1138 
   1139 	u_short change = ifp->if_flags ^ sc->sc_if_flags;
   1140 	sc->sc_if_flags = ifp->if_flags;
   1141 
   1142 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   1143 		ret = ENETRESET;
   1144 	} else  if ((change & IFF_PROMISC) != 0) {
   1145 		dwc_gmac_setmulti(sc);
   1146 	}
   1147 
   1148 	mutex_exit(sc->sc_mcast_lock);
   1149 
   1150 	return ret;
   1151 }
   1152 
   1153 static int
   1154 dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   1155 {
   1156 	struct dwc_gmac_softc * const sc = ifp->if_softc;
   1157 	int error = 0;
   1158 
   1159 	switch (cmd) {
   1160 	case SIOCADDMULTI:
   1161 	case SIOCDELMULTI:
   1162 		break;
   1163 	default:
   1164 		KASSERT(IFNET_LOCKED(ifp));
   1165 	}
   1166 
   1167 	const int s = splnet();
   1168 	error = ether_ioctl(ifp, cmd, data);
   1169 	splx(s);
   1170 
   1171 	if (error == ENETRESET) {
   1172 		error = 0;
   1173 		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   1174 			mutex_enter(sc->sc_mcast_lock);
   1175 			if (sc->sc_if_flags & IFF_RUNNING) {
   1176 				/*
   1177 				 * Multicast list has changed; set the hardware
   1178 				 * filter accordingly.
   1179 				 */
   1180 				dwc_gmac_setmulti(sc);
   1181 			}
   1182 			mutex_exit(sc->sc_mcast_lock);
   1183 		}
   1184 	}
   1185 
   1186 	return error;
   1187 }
   1188 
   1189 static void
   1190 dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
   1191 {
   1192 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
   1193 	struct dwc_gmac_tx_data *data;
   1194 	struct dwc_gmac_dev_dmadesc *desc;
   1195 	int i, nsegs;
   1196 
   1197 	mutex_enter(&sc->sc_txq.t_mtx);
   1198 
   1199 	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
   1200 #ifdef DWC_GMAC_DEBUG
   1201 		aprint_normal_dev(sc->sc_dev,
   1202 		    "%s: checking desc #%d (t_queued: %d)\n", __func__,
   1203 		    i, sc->sc_txq.t_queued);
   1204 #endif
   1205 
   1206 		/*
   1207 		 * i + 1 need not be a valid descriptor index; it is
   1208 		 * merely how dwc_gmac_txdesc_sync() is told to sync the
   1209 		 * single TX descriptor i.
   1210 		 */
   1211 		dwc_gmac_txdesc_sync(sc, i, i + 1,
   1212 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1213 
   1214 		desc = &sc->sc_txq.t_desc[i];
   1215 		if (sc->sc_descm->tx_is_owned_by_dev(desc))
   1216 			break;
   1217 
   1218 		data = &sc->sc_txq.t_data[i];
   1219 		if (data->td_m == NULL)
   1220 			continue;
   1221 
   1222 		if_statinc(ifp, if_opackets);
   1223 		nsegs = data->td_active->dm_nsegs;
   1224 		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
   1225 		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1226 		bus_dmamap_unload(sc->sc_dmat, data->td_active);
   1227 
   1228 #ifdef DWC_GMAC_DEBUG
   1229 		aprint_normal_dev(sc->sc_dev,
   1230 		    "%s: done with packet at desc #%d, freeing mbuf %p\n",
   1231 		    __func__, i, data->td_m);
   1232 #endif
   1233 
   1234 		m_freem(data->td_m);
   1235 		data->td_m = NULL;
   1236 
   1237 		sc->sc_txq.t_queued -= nsegs;
   1238 	}
   1239 
   1240 	sc->sc_txq.t_next = i;
   1241 
   1242 	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
   1243 		sc->sc_txbusy = false;
   1244 	}
   1245 	mutex_exit(&sc->sc_txq.t_mtx);
   1246 }
   1247 
   1248 static void
   1249 dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
   1250 {
   1251 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
   1252 	struct dwc_gmac_dev_dmadesc *desc;
   1253 	struct dwc_gmac_rx_data *data;
   1254 	bus_addr_t physaddr;
   1255 	struct mbuf *m, *mnew;
   1256 	int i, len, error;
   1257 
   1258 	mutex_enter(&sc->sc_rxq.r_mtx);
   1259 	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
   1260 #ifdef DWC_GMAC_DEBUG
   1261 		aprint_normal_dev(sc->sc_dev, "%s: checking desc #%d\n",
   1262 		    __func__, i);
   1263 #endif
   1264 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
   1265 		    RX_DESC_OFFSET(i), sizeof(*desc),
   1266 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1267 		desc = &sc->sc_rxq.r_desc[i];
   1268 		data = &sc->sc_rxq.r_data[i];
   1269 
   1270 		if (sc->sc_descm->rx_is_owned_by_dev(desc))
   1271 			break;
   1272 
   1273 		if (sc->sc_descm->rx_has_error(desc)) {
   1274 #ifdef DWC_GMAC_DEBUG
   1275 			aprint_normal_dev(sc->sc_dev,
   1276 			    "%s: RX error: status %08x, skipping\n",
   1277 			    __func__, le32toh(desc->ddesc_status0));
   1278 #endif
   1279 			if_statinc(ifp, if_ierrors);
   1280 			goto skip;
   1281 		}
   1282 
   1283 		len = sc->sc_descm->rx_get_len(desc);
   1284 
   1285 #ifdef DWC_GMAC_DEBUG
   1286 		aprint_normal_dev(sc->sc_dev,
   1287 		    "%s: device is done with descriptor #%d, len: %d\n",
   1288 		    __func__, i, len);
   1289 #endif
   1290 
   1291 		/*
   1292 		 * Try to get a new mbuf before passing this one
   1293 		 * up; if that fails, drop the packet and reuse
   1294 		 * the existing one.
   1295 		 */
   1296 		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
   1297 		if (mnew == NULL) {
   1298 			if_statinc(ifp, if_ierrors);
   1299 			goto skip;
   1300 		}
   1301 		MCLGET(mnew, M_DONTWAIT);
   1302 		if ((mnew->m_flags & M_EXT) == 0) {
   1303 			m_freem(mnew);
   1304 			if_statinc(ifp, if_ierrors);
   1305 			goto skip;
   1306 		}
   1307 		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
   1308 		m_adj(mnew, ETHER_ALIGN);
   1309 		if (mnew->m_len > AWGE_MAX_PACKET) {
   1310 			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
   1311 		}
   1312 
   1313 		/* unload old DMA map */
   1314 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
   1315 		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1316 		bus_dmamap_unload(sc->sc_dmat, data->rd_map);
   1317 
   1318 		/* and reload with new mbuf */
   1319 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
   1320 		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
   1321 		if (error != 0) {
   1322 			m_freem(mnew);
   1323 			/* try to reload old mbuf */
   1324 			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
   1325 			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
   1326 			if (error != 0) {
   1327 				panic("%s: could not load old rx mbuf",
   1328 				    device_xname(sc->sc_dev));
   1329 			}
   1330 			if_statinc(ifp, if_ierrors);
   1331 			goto skip;
   1332 		}
   1333 		physaddr = data->rd_map->dm_segs[0].ds_addr;
   1334 
   1335 #ifdef DWC_GMAC_DEBUG
   1336 		aprint_normal_dev(sc->sc_dev,
   1337 		    "%s: receiving packet at desc #%d, using mbuf %p\n",
   1338 		    __func__, i, data->rd_m);
   1339 #endif
   1340 		/*
   1341 		 * New mbuf loaded, update RX ring and continue
   1342 		 */
   1343 		m = data->rd_m;
   1344 		data->rd_m = mnew;
   1345 		desc->ddesc_data = htole32(physaddr);
   1346 
   1347 		/* finalize mbuf */
   1348 		m->m_pkthdr.len = m->m_len = len;
   1349 		m_set_rcvif(m, ifp);
   1350 		m->m_flags |= M_HASFCS;
   1351 
   1352 		if_percpuq_enqueue(sc->sc_ipq, m);
   1353 
   1354 skip:
   1355 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
   1356 		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
   1357 
   1358 		sc->sc_descm->rx_init_flags(desc);
   1359 		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
   1360 
   1361 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
   1362 		    RX_DESC_OFFSET(i), sizeof(*desc),
   1363 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1364 
   1365 		sc->sc_descm->rx_set_owned_by_dev(desc);
   1366 
   1367 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
   1368 		    RX_DESC_OFFSET(i), sizeof(*desc),
   1369 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1370 	}
   1371 
   1372 	/* update RX pointer */
   1373 	sc->sc_rxq.r_cur = i;
   1374 
   1375 	mutex_exit(&sc->sc_rxq.r_mtx);
   1376 }
   1377 
   1378 static void
   1379 dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
   1380 {
   1381 	struct ether_multi *enm;
   1382 	struct ether_multistep step;
   1383 	struct ethercom *ec = &sc->sc_ec;
   1384 	uint32_t hashes[2] = { 0, 0 };
   1385 	uint32_t ffilt, h;
   1386 	int mcnt;
   1387 
   1388 	KASSERT(mutex_owned(sc->sc_mcast_lock));
   1389 
   1390 	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
   1391 
   1392 	if (sc->sc_if_flags & IFF_PROMISC) {
   1393 		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
   1394 		goto special_filter;
   1395 	}
   1396 
   1397 	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);
   1398 
   1399 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
   1400 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);
   1401 
   1402 	ETHER_LOCK(ec);
   1403 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   1404 	ETHER_FIRST_MULTI(step, ec, enm);
   1405 	mcnt = 0;
   1406 	while (enm != NULL) {
   1407 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   1408 		    ETHER_ADDR_LEN) != 0) {
   1409 			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
   1410 			ec->ec_flags |= ETHER_F_ALLMULTI;
   1411 			ETHER_UNLOCK(ec);
   1412 			goto special_filter;
   1413 		}
   1414 
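        		/*
        		 * h is the top 6 bits of the inverted big-endian CRC,
        		 * selecting one of 64 hash bits; e.g. (illustrative)
        		 * h == 0x2a sets bit 10 of hashes[1], the HTHIGH word.
        		 */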
   1415 		h = ~ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
   1416 		hashes[h >> 5] |= (1 << (h & 0x1f));
   1417 
   1418 		mcnt++;
   1419 		ETHER_NEXT_MULTI(step, enm);
   1420 	}
   1421 	ETHER_UNLOCK(ec);
   1422 
   1423 	if (mcnt)
   1424 		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
   1425 	else
   1426 		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;
   1427 
   1428 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
   1429 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
   1430 	    hashes[0]);
   1431 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
   1432 	    hashes[1]);
   1433 
   1434 #ifdef DWC_GMAC_DEBUG
   1435 	dwc_gmac_dump_ffilt(sc, ffilt);
   1436 #endif
   1437 	return;
   1438 
   1439 special_filter:
   1440 #ifdef DWC_GMAC_DEBUG
   1441 	dwc_gmac_dump_ffilt(sc, ffilt);
   1442 #endif
   1443 	/* no per-address hashing: all-ones hash table for ALLMULTI/PROMISC */
   1444 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
   1445 	    ffilt);
   1446 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
   1447 	    0xffffffff);
   1448 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
   1449 	    0xffffffff);
   1450 }
   1451 
   1452 int
   1453 dwc_gmac_intr(struct dwc_gmac_softc *sc)
   1454 {
   1455 	uint32_t status, dma_status;
   1456 	int rv = 0;
   1457 
   1458 	mutex_enter(sc->sc_intr_lock);
   1459 	if (sc->sc_stopping) {
   1460 		mutex_exit(sc->sc_intr_lock);
   1461 		return 0;
   1462 	}
   1463 
   1464 	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
   1465 	if (status & AWIN_GMAC_MII_IRQ) {
   1466 		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1467 		    AWIN_GMAC_MII_STATUS);
   1468 		rv = 1;
   1469 		mii_pollstat(&sc->sc_mii);
   1470 	}
   1471 
   1472 	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
   1473 	    AWIN_GMAC_DMA_STATUS);
   1474 
   1475 	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
   1476 		rv = 1;
   1477 
   1478 	if (dma_status & GMAC_DMA_INT_TIE)
   1479 		dwc_gmac_tx_intr(sc);
   1480 
   1481 	if (dma_status & GMAC_DMA_INT_RIE)
   1482 		dwc_gmac_rx_intr(sc);
   1483 
   1484 	/*
   1485 	 * Check error conditions
   1486 	 */
   1487 	if (dma_status & GMAC_DMA_INT_ERRORS) {
   1488 		if_statinc(&sc->sc_ec.ec_if, if_oerrors);
   1489 #ifdef DWC_GMAC_DEBUG
   1490 		dwc_dump_and_abort(sc, "interrupt error condition");
   1491 #endif
   1492 	}
   1493 
   1494 	rnd_add_uint32(&sc->rnd_source, dma_status);
   1495 
   1496 	/* ack interrupts; the DMA status bits are write-one-to-clear */
   1497 	if (dma_status)
   1498 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
   1499 		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);
   1500 
   1501 	/*
   1502 	 * Get more packets
   1503 	 */
   1504 	if (rv)
   1505 		if_schedule_deferred_start(&sc->sc_ec.ec_if);
   1506 
   1507 	mutex_exit(sc->sc_intr_lock);
   1508 
   1509 	return rv;
   1510 }
   1511 
   1512 static void
   1513 dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
   1514 {
   1515 
   1516 	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
   1517 }
   1518 
   1519 static int
   1520 dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
   1521 {
   1522 
   1523 	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
   1524 }
   1525 
   1526 static void
   1527 dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
   1528 {
   1529 	uint32_t cntl = le32toh(desc->ddesc_cntl1);
   1530 
   1531 	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
   1532 		__SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
   1533 }
   1534 
   1535 static uint32_t
   1536 dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
   1537 {
   1538 
   1539 	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
   1540 }
   1541 
   1542 static void
   1543 dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
   1544 {
   1545 
   1546 	desc->ddesc_status0 = 0;
   1547 	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
   1548 }
   1549 
   1550 static void
   1551 dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
   1552 {
   1553 	uint32_t cntl = le32toh(desc->ddesc_cntl1);
   1554 
   1555 	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
   1556 }
   1557 
   1558 static void
   1559 dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
   1560 {
   1561 	uint32_t cntl = le32toh(desc->ddesc_cntl1);
   1562 
   1563 	desc->ddesc_cntl1 = htole32(cntl |
   1564 		DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
   1565 }
   1566 
   1567 static void
   1568 dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
   1569 {
   1570 
   1571 	desc->ddesc_status0 = 0;
   1572 	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN); /* bit shared with RX chain */
   1573 }
   1574 
   1575 static int
   1576 dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
        {
   1577 	return !!(le32toh(desc->ddesc_status0) &
   1578 		(DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
   1579 }
   1580 
static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
		__SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

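/*
 * Enhanced TX setup: unlike the standard format, the chain bit (TCH)
 * and the segment markers (FS/LS) live in TDES0, and IC on the last
 * fragment requests the completion interrupt.
 */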
static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

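/*
 * Enhanced RX setup: the chain bit (RCH) lives in RDES1; a frame is
 * bad if the error-summary (ES) or length-error (LE) bit is set.
 */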
static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_RDES0_ES | DDESC_RDES0_LE));
}

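/*
 * Debug-only helpers (DWC_GMAC_DEBUG): dump the DMA engine registers,
 * both descriptor rings, and the decoded status and frame-filter
 * registers; dwc_dump_and_abort() prints all of the above and panics.
 */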
#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, " status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int en.: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, " cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, " cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur txb: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rxb: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc);

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0), AWGE_TX_RING_COUNT * descsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (size_t i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%3zu (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr + i * descsz,
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc);

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    RX_DESC_OFFSET(0), AWGE_RX_RING_COUNT * descsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (size_t i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		char buf[200];

		if (!sc->sc_descm->rx_is_owned_by_dev(desc)) {
			/* decode the RX descriptor status word */
			snprintb(buf, sizeof(buf),
			    "\177\20"
			    "b\x1e"	"daff\0"
			    "f\x10\xe"	"frlen\0"
			    "b\x0f"	"error\0"
			    "b\x0e"	"rxtrunc\0"	/* descriptor error? */
			    "b\x0d"	"saff\0"
			    "b\x0c"	"giantframe\0"	/* length error? */
			    "b\x0b"	"damaged\0"
			    "b\x0a"	"vlan\0"
			    "b\x09"	"first\0"
			    "b\x08"	"last\0"
			    "b\x07"	"giant\0"
			    "b\x06"	"collision\0"
			    "b\x05"	"ether\0"
			    "b\x04"	"watchdog\0"
			    "b\x03"	"miierror\0"
			    "b\x02"	"dribbling\0"
			    "b\x01"	"crc\0"
			    "\0", le32toh(desc->ddesc_status0));
		}

		aprint_normal("#%3zu (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x %s\n",
		    i, sc->sc_rxq.r_physaddr + i * descsz,
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next),
		    sc->sc_descm->rx_is_owned_by_dev(desc) ? "" : buf);
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf),
	    "\177\20"
	    "b\x1c"	"GPI\0"
	    "b\x1b"	"GMC\0"
	    "b\x1a"	"GLI\0"
	    "f\x17\x3"	"EB\0"
	    "f\x14\x3"	"TPS\0"
	    "f\x11\x3"	"RPS\0"
	    "b\x10"	"NI\0"
	    "b\x0f"	"AI\0"
	    "b\x0e"	"ER\0"
	    "b\x0d"	"FB\0"
	    "b\x0a"	"ET\0"
	    "b\x09"	"RW\0"
	    "b\x08"	"RS\0"
	    "b\x07"	"RU\0"
	    "b\x06"	"RI\0"
	    "b\x05"	"UN\0"
	    "b\x04"	"OV\0"
	    "b\x03"	"TJ\0"
	    "b\x02"	"TU\0"
	    "b\x01"	"TS\0"
	    "b\x00"	"TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

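/*
 * Dump the complete MAC/DMA state, then panic with the given message.
 */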
static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif
