/* $NetBSD: dwc_gmac.c,v 1.56 2019/01/22 03:42:26 msaitoh Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation seems not to be available; the marketing product
 * page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.56 2019/01/22 03:42:26 msaitoh Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
			 uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static int dwc_gmac_init_locked(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_stop_locked(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static void dwc_gmac_start_locked(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t	bitrev32(uint32_t x);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

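/*
 * Descriptor accessors: cores that advertise the enhanced descriptor
 * format in the DMA HW feature register (see dwc_gmac_attach below) lay
 * out the length and status fields differently from the standard format,
 * so all descriptor manipulation is dispatched through one of these two
 * method tables.
 */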
static const struct dwc_gmac_desc_methods desc_methods_standard = {
	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_std_set_len,
	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_std_set_len,
	.rx_get_len = dwc_gmac_desc_std_get_len,
	.rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_enh_set_len,
	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_enh_set_len,
	.rx_get_len = dwc_gmac_desc_enh_get_len,
	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
};


#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define	RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
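/*
 * The RX descriptors occupy the first AWGE_RX_RING_COUNT slots of the
 * shared descriptor allocation and the TX descriptors follow them, hence
 * the extra offset in TX_DESC_OFFSET.  The wrap macros rely on the ring
 * sizes being powers of two; for illustration, with a 32-entry TX ring,
 * TX_NEXT(31) == 0.
 */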

#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

#ifdef NET_MPSAFE
#define DWCGMAC_MPSAFE	1
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi, ver, hwft;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int rv;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup before
		 * resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

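		/*
		 * The ADDR0 registers hold the station address in
		 * little-endian byte order: e.g., maclo == 0x44332211 and
		 * machi == 0x00006655 correspond to 11:22:33:44:55:66.
		 */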
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return ENXIO;	/* not much to clean up, we have not attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	hwft = 0;
	if (ver >= 0x35) {
		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_HWFEATURES);
		aprint_normal_dev(sc->sc_dev,
		    "HW feature mask: %x\n", hwft);
	}
	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
		aprint_normal_dev(sc->sc_dev,
		    "Using enhanced descriptor format\n");
		sc->sc_descm = &desc_methods_enhanced;
	} else {
		sc->sc_descm = &desc_methods_standard;
	}

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready; attach the interface.
	 */
	rv = if_initialize(ifp);
	if (rv != 0)
		goto fail_2;
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return 0;

fail_2:
	ifmedia_removeall(&mii->mii_media);
	mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
	mutex_destroy(&sc->sc_txq.t_mtx);
	mutex_destroy(&sc->sc_rxq.r_mtx);
	mutex_obj_free(sc->sc_lock);
fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);

	return ENXIO;
}

static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
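
	/*
	 * Trigger a software reset, then poll for the core to clear the
	 * reset bit: 3000 iterations at 10us apiece bounds the wait at
	 * roughly 30ms.
	 */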
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t hi, lo;

	hi = enaddr[4] | (enaddr[5] << 8);
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

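	/* Poll the MDIO busy flag for at most 1000 * 10us = 10ms. */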
	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, AWGE_MAX_PACKET);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, AWGE_MAX_PACKET);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the following descriptors to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last one we want to sync */
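	/*
	 * For illustration, with a 32-descriptor ring (the ring size is
	 * assumed here purely as an example), start == 30 and end == 2
	 * syncs descriptors 30-31 first and then 0-1.
	 */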
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
		    ops);
	}
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
			AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start the RX/TX part; unless threshold DMA mode is forced,
	 * run both directions in store-and-forward mode.
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

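	/*
	 * Bail out if the mapped chain does not fit into the remaining
	 * descriptors; the caller then sets IFF_OACTIVE and the packet
	 * is retried once dwc_gmac_tx_intr() has reclaimed ring space.
	 */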
	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done, so the device never sees a
		 * partially set up chain.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	int change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

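	/*
	 * In MPSAFE builds the splnet section below only covers
	 * ether_ioctl; otherwise it extends to the end of this function.
	 */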
	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i + 1 does not need to be a valid descriptor index;
		 * this notation is only used to sync the single TX
		 * descriptor i.
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		ifp->if_opackets++;
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    le32toh(desc->ddesc_status0));
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up; if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, AWGE_MAX_PACKET);
		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
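/*
 * Examples: bitrev32(0x00000001) == 0x80000000 and
 * bitrev32(0x0000000f) == 0xf0000000.
 */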
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

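		/*
		 * The hash is the bit-reversed, complemented little-endian
		 * CRC32 of the address; its top six bits select one of the
		 * 64 filter bits (0-31 in HTLOW, 32-63 in HTHIGH).
		 */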
		h = bitrev32(
			~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

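	/* NIE and AIE are the normal/abnormal interrupt summary bits. */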
	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}

static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
		__SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl |
		DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}

static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
		__SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_RDES0_ES | DDESC_RDES0_LE));
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif