/* $NetBSD: dwc_gmac.c,v 1.29 2014/12/07 02:23:14 jmcneill Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product documents can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.29 2014/12/07 02:23:14 jmcneill Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
			 uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t	bitrev32(uint32_t x);

#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
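
/*
 * Both rings live in a single DMA allocation: the RX descriptors
 * come first, so TX_DESC_OFFSET() skips AWGE_RX_RING_COUNT entries.
 * The *_NEXT() macros rely on the ring counts being powers of two.
 */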

#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE|	\
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

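/*
 * Attach the core: determine the MAC address, reset the chip,
 * allocate the descriptor rings and hook the interface up to
 * ifnet and MII.  The bus front end is expected to have filled
 * in sc_dev, sc_bst/sc_bsh and sc_dmat before calling this.
 */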
void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int s;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address (unicast, locally administered) */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, we haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);

	/*
	 * Enable interrupts
	 */
	s = splnet();
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	splx(s);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}

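/*
 * Soft-reset the core through the DMA bus mode register and wait
 * for the self-clearing reset bit (up to 3000 * 10us = 30ms).
 */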
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

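/*
 * MDIO access: write the PHY/register/clock-divider selection with
 * GMAC_MII_BUSY set, then poll until the hardware clears the busy
 * bit (up to 1000 * 10us = 10ms).  Note that a timed-out read
 * silently returns 0; the MII layer gets no error indication.
 */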
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

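/*
 * The RX descriptors are used in chained mode (DDESC_CNTL_RXCHAIN):
 * each descriptor's ddesc_next points at the next one, with RX_NEXT()
 * wrapping the last entry back to the first, and every descriptor is
 * handed to the device with one mbuf cluster pre-loaded.
 */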
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the remaining ones to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last one we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
			AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

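/*
 * ifnet if_init callback: program the DMA bus mode, the frame
 * filter and the ring base addresses, then start the RX/TX DMA
 * engines in store-and-forward mode.
 */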
static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

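/*
 * ifnet if_start callback: drain the send queue into the TX ring
 * and kick the DMA engine through the TX poll-demand register.
 */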
static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

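/*
 * ifnet if_stop callback: halt the RX/TX DMA engines, flush the
 * TX FIFO and return both rings to their initial state.
 */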
static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
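 * as a chain of descriptors (DDESC_CNTL_TXCHAIN), one per DMA
 * segment.  Ownership of the first descriptor is handed to the
 * device only after all the others are set up, so the chip can
 * never see a half-built chain.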
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueueing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	/* Pass the first descriptor to the device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	if ((change & IFF_PROMISC) != 0)
		dwc_gmac_setmulti(sc);
	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		error = 0;
		if ((cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) &&
		    (ifp->if_flags & IFF_RUNNING)) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			dwc_gmac_setmulti(sc);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
	return error;
}

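/*
 * Handle transmit-complete interrupts: reclaim descriptors from
 * t_next up to the first one still owned by the device and free
 * the mbufs that finished transmitting.
 */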
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t flags;
	int i;

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
	    i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		desc = &sc->sc_txq.t_desc[i];
		/*
		 * i+1 does not need to be a valid descriptor index;
		 * dwc_gmac_txdesc_sync() only uses it as an end marker,
		 * so this syncs just the single descriptor i.
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		flags = le32toh(desc->ddesc_status);

		if (flags & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;
		sc->sc_ec.ec_if.if_opackets++;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
	}
}

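/*
 * Handle receive interrupts: walk the RX ring from r_cur, pass
 * completed frames up the stack and re-arm each descriptor with
 * a fresh mbuf cluster (or the old one if allocation fails).
 */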
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up; if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_flags |= M_HASFCS;

		bpf_mtap(ifp, m);
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

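/*
 * Program the 64-bit multicast hash filter.  The hash index is the
 * top 6 bits of the bit-reversed CRC-32 of the station address:
 * bit 5 selects HTHIGH or HTLOW, bits 0-4 the bit within that
 * register.  Address ranges and IFF_PROMISC fall back to an
 * all-ones filter.
 */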
static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt, s;

	s = splnet();

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

		h = bitrev32(
			~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

	splx(s);

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
}

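/*
 * Shared interrupt handler, called from the bus front end.
 * Returns nonzero if the interrupt was ours.
 */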
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		sc->sc_ec.ec_if.if_start(&sc->sc_ec.ec_if);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif