/* $NetBSD: dwc_gmac.c,v 1.30 2015/01/05 21:37:07 martin Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.30 2015/01/05 21:37:07 martin Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
			 uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t	bitrev32(uint32_t x);

#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define	RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
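
/*
 * The ring counts must be powers of two for the TX_NEXT/RX_NEXT masks
 * to wrap correctly, e.g. with 32 descriptors TX_NEXT(31) == 0.
 * All descriptors live in one DMA area: the RX ring comes first, the
 * TX ring directly after it, hence the AWGE_RX_RING_COUNT bias in
 * TX_DESC_OFFSET().
 */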
#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE|	\
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int s;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
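			/*
			 * 0xf2 in the first octet makes the address
			 * locally administered (U/L set) and unicast
			 * (I/G clear); octets 2-5 are random.
			 */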
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, we have not attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);

	/*
	 * Enable interrupts
	 */
	s = splnet();
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	splx(s);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
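	/*
	 * The core clears GMAC_BUSMODE_RESET when the soft reset has
	 * completed; poll for that, for at most 3000 * 10us = 30ms.
	 */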
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

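	/*
	 * The write of GMAC_MII_BUSY above starts the MDIO transaction;
	 * the hardware clears the bit when MIIDATA is valid.  Poll for
	 * at most 1000 * 10us = 10ms.
	 */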
	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}
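
	/*
	 * All descriptors use "chained" mode (DDESC_CNTL_RXCHAIN):
	 * each one names its successor via ddesc_next, and the last
	 * one points back at the first, so the DMA engine follows
	 * the chain instead of assuming adjacency.
	 */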

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the following descriptors to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

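/*
 * Sync the half-open range [start, end) of tx descriptors.  The ring
 * is used circularly, so the range may wrap past the last descriptor;
 * in that case the sync is split in two, e.g. in a 32-entry ring
 * start=30/end=2 syncs descriptors 30-31 and then 0-1.
 */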
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
			AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
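	 * in store-and-forward mode: the DMA engine only starts moving
	 * a frame once it is completely buffered in the FIFO, which
	 * avoids FIFO underruns at the cost of some latency.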
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
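/*
 * The chain is DMA-mapped as-is, one descriptor per mapped segment.
 * Ownership of the first descriptor is handed to the device only
 * after all the other descriptors are set up, so the DMA engine
 * cannot pick up a half-built chain.
 */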
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	if ((change & IFF_PROMISC) != 0)
		dwc_gmac_setmulti(sc);
	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		error = 0;
		if ((cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) &&
		    (ifp->if_flags & IFF_RUNNING)) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			dwc_gmac_setmulti(sc);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t flags;
	int i;

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
	    i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		desc = &sc->sc_txq.t_desc[i];
		/*
		 * i+1 does not need to be a valid descriptor; passing
		 * it merely tells dwc_gmac_txdesc_sync() to sync the
		 * single tx descriptor (i).
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		flags = le32toh(desc->ddesc_status);

		if (flags & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;
		sc->sc_ec.ec_if.if_opackets++;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
	}
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one up;
		 * if that fails, drop the packet and reuse the
		 * existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_flags |= M_HASFCS;

		bpf_mtap(ifp, m);
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt, s;

	s = splnet();

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

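		/*
		 * The hash table index is the upper 6 bits of the
		 * bit-reversed CRC32 of the address: bit 5 selects
		 * HTHIGH vs. HTLOW, bits 0-4 the bit within that
		 * register.  (This is the usual DesignWare hash
		 * scheme; with no real documentation available it
		 * is inferred from working implementations.)
		 */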
		h = bitrev32(
			~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

	splx(s);

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
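
	/*
	 * The DMA status bits are write-one-to-clear (as far as the
	 * undocumented hardware can be trusted), so the handled bits
	 * are written back below to acknowledge the interrupt.
	 */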

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		sc->sc_ec.ec_if.if_start(&sc->sc_ec.ec_if);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif