/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.6 2014/09/14 11:00:52 martin Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
			 uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);

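/*
 * All descriptors (the Rx ring followed by the Tx ring) live in a
 * single DMA allocation; these macros compute a descriptor's byte
 * offset within that block.
 */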
#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))

#define	RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, we have not attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Enable interrupts
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}


static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
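	/*
	 * Set the software reset bit and busy-wait until the hardware
	 * clears it again, which signals that the reset has completed.
	 */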
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

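	/*
	 * The transfer was started by setting GMAC_MII_BUSY above;
	 * poll until the hardware clears the bit again, then pick up
	 * the result from the data register.
	 */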
	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
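	/*
	 * Load the data register first; the write to the address
	 * register (with GMAC_MII_BUSY set) starts the transfer.
	 */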
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = (i+1) & (AWGE_RX_RING_COUNT-1);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK));
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

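	/*
	 * Flush the initialized descriptors out to memory and hand
	 * the base address of the ring to the DMA engine.
	 */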
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK));
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the following descriptors to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	/* we may arrive here on the attach failure path */
	if (sc->sc_dma_ring_map == NULL)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	if (sc->sc_rxq.r_desc != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
		    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
	}
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
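		/*
		 * Link each descriptor to its successor; the last one
		 * wraps back to the start, forming a ring.
		 */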
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *((i+1) & (AWGE_TX_RING_COUNT-1)));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last one we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		/* XXX */
		break;
	case IFM_1000_T:
		/* XXX */
		break;
	}
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Set up DMA base addresses for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE,
		    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE,
		    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_TXSTART);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

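	/*
	 * Halt both DMA engines and flush the TX FIFO before
	 * tearing down the rings.
	 */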
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

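	/*
	 * Fill one chained descriptor per DMA segment; the first one
	 * is marked TXFIRST, the last one TXLAST.
	 */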
	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXINT|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST;
		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		sc->sc_txq.t_queued++;

		sc->sc_txq.t_cur = (sc->sc_txq.t_cur + 1)
		    & (AWGE_TX_RING_COUNT-1);
	}

	/* Pass the first descriptor to the device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	/* struct dwc_gmac_softc *sc = ifp->if_softc; */
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			/* setmulti */;
		break;
	}

	splx(s);

	return error;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
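	/*
	 * The otherwise unused read of the MII status register below
	 * acknowledges the MII interrupt.
	 */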
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	/* XXX temporary bring-up instrumentation, to be removed */
	printf("%s: INTR status: %08x, DMA status: %08x\n",
	    device_xname(sc->sc_dev), status, dma_status);

	static size_t cnt = 0;
	if (++cnt > 20)
		panic("enough now");

	return 1;
}