/* $NetBSD: dwc_gmac.c,v 1.55 2018/10/08 17:09:31 martin Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.55 2018/10/08 17:09:31 martin Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
			 uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static int dwc_gmac_init_locked(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_stop_locked(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static void dwc_gmac_start_locked(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t	bitrev32(uint32_t x);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

static const struct dwc_gmac_desc_methods desc_methods_standard = {
	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_std_set_len,
	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_std_set_len,
	.rx_get_len = dwc_gmac_desc_std_get_len,
	.rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_enh_set_len,
	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_enh_set_len,
	.rx_get_len = dwc_gmac_desc_enh_get_len,
	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
};
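
/*
 * dwc_gmac_attach() below selects one of the two method tables based on
 * the GMAC_DMA_FEAT_ENHANCED_DESC bit in the DMA HWFEATURES register;
 * all later descriptor manipulation goes through sc->sc_descm, so the
 * rest of the driver is independent of the descriptor format.
 */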


#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
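
/*
 * All descriptors live in one contiguous DMA area, RX ring first and TX
 * ring directly after it (see dwc_gmac_alloc_dma_rings()), which is why
 * TX_DESC_OFFSET() skips AWGE_RX_RING_COUNT descriptors.  TX_NEXT() and
 * RX_NEXT() wrap by masking, so the ring counts must be powers of two.
 */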



#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE|	\
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

#ifdef NET_MPSAFE
#define DWCGMAC_MPSAFE	1
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi, ver, hwft;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int rv;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}
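
		/*
		 * ADDR0LO holds bytes 0-3 of the MAC address and ADDR0HI
		 * holds bytes 4-5, least significant byte first (compare
		 * dwc_gmac_write_hwaddr() below, which is the inverse).
		 */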
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return ENXIO;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	hwft = 0;
	if (ver >= 0x35) {
		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_HWFEATURES);
		aprint_normal_dev(sc->sc_dev,
		    "HW feature mask: %x\n", hwft);
	}
	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
		aprint_normal_dev(sc->sc_dev,
		    "Using enhanced descriptor format\n");
		sc->sc_descm = &desc_methods_enhanced;
	} else {
		sc->sc_descm = &desc_methods_standard;
	}

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach the interface
	 */
	rv = if_initialize(ifp);
	if (rv != 0)
		goto fail_2;
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return 0;

fail_2:
	ifmedia_removeall(&mii->mii_media);
	mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
	mutex_destroy(&sc->sc_txq.t_mtx);
	mutex_destroy(&sc->sc_rxq.r_mtx);
	mutex_obj_free(sc->sc_lock);
fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);

	return ENXIO;
}



static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
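	/* Poll for the reset bit to self-clear; up to ~30 ms (3000 * 10 us). */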
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t hi, lo;

	hi = enaddr[4] | (enaddr[5] << 8);
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, AWGE_MAX_PACKET);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, AWGE_MAX_PACKET);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;


	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last one we want to sync */
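	/*
	 * Example (assuming a 32-descriptor TX ring): start=30, end=2
	 * takes the wrap-around path below, syncing descriptors 30 and
	 * 31 first, then descriptors 0 and 1.
	 */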
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
		    ops);
	}
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
			AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	int change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i+1 does not need to be a valid descriptor; this is
		 * just a convention used to sync a single TX
		 * descriptor (i).
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		ifp->if_opackets++;
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    le32toh(desc->ddesc_status0));
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one up;
		 * if that fails, drop the packet and reuse the
		 * existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, AWGE_MAX_PACKET);
		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
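/*
 * For example, bitrev32(0x00000001) == 0x80000000 and
 * bitrev32(0x000000ff) == 0xff000000.
 */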
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

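		/*
		 * h is the top 6 bits of the bit-reversed complement of
		 * the little-endian CRC32 of the address: h >> 5 selects
		 * hashes[0] (HTLOW) or hashes[1] (HTHIGH), and h & 0x1f
		 * the bit within that 32-bit register.
		 */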
		h = bitrev32(
			~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}

static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
		__SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl |
		DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}

static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
		__SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_RDES0_ES | DDESC_RDES0_LE));
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif