dwc_gmac.c revision 1.4
/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product documents can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.4 2014/09/09 10:06:47 martin Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
			 uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);

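/*
 * Both rings live in a single DMA area: the AWGE_RX_RING_COUNT RX
 * descriptors come first, followed by the TX descriptors, so TX byte
 * offsets are biased by the RX ring size (see dwc_gmac_alloc_dma_rings).
 */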
#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))

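/*
 * Attach the GMAC core: pick up (or read back) the MAC address,
 * soft-reset the chip, allocate the descriptor rings, and register
 * the network interface and its PHYs.
 *
 * A bus frontend is expected to have mapped the registers and hooked
 * up the interrupt before calling in; roughly (a sketch, the local
 * variable names are hypothetical):
 *
 *	sc->sc_bst = bst;
 *	sc->sc_bsh = bsh;
 *	sc->sc_dmat = dmat;
 *	dwc_gmac_attach(sc, NULL, mii_clk_div);
 *
 * where mii_clk_div is a 3-bit MDC clock divider code (masked with 7
 * below) and a NULL 'ep' means "keep the address already programmed
 * into the address filter".
 */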
void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint8_t *ep, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	/*
	 * If the frontend did not pass in a pre-configured Ethernet MAC
	 * address, try to read one from the current filter setup,
	 * before resetting the chip.
	 */
	if (ep == NULL) {
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI);
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
		ep = enaddr;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, ep);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(ep));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, ep);

	/*
	 * Enable interrupts
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}
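/*
 * Soft-reset the core: set the reset bit in the DMA busmode register
 * and poll until the hardware clears it again, allowing up to
 * 3000 * 10us = 30ms before giving up.
 */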
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

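/*
 * Program the primary unicast address filter: the first four address
 * bytes go into the low register, the remaining two into the high one.
 */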
static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

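/*
 * MDIO access: encode PHY and register number (plus the MDC clock
 * divider) into the MII address register, set the busy bit, and poll
 * for up to 1000 * 10us = 10ms until the hardware clears it again;
 * for a read the result is then found in the MII data register.
 * sc_mdio_lock serializes accesses to the MDIO state machine.
 */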
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t miiaddr;
	size_t cnt;
	int rv = 0;

	miiaddr = ((phy << GMAC_MII_PHY_SHIFT) & GMAC_MII_PHY_MASK)
		| ((reg << GMAC_MII_REG_SHIFT) & GMAC_MII_REG_MASK);

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR,
	    miiaddr | GMAC_MII_BUSY | (sc->sc_mii_clk << 2));

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t miiaddr;
	size_t cnt;

	miiaddr = ((phy << GMAC_MII_PHY_SHIFT) & GMAC_MII_PHY_MASK)
		| ((reg << GMAC_MII_REG_SHIFT) & GMAC_MII_REG_MASK)
		| GMAC_MII_BUSY | GMAC_MII_WRITE | (sc->sc_mii_clk << 2);

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR,
	    miiaddr);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

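/*
 * Build the RX ring: one mbuf cluster per descriptor, each descriptor
 * pointing at its buffer and chained to the next (wrapping at the
 * end), and all of them handed to the device up front.
 */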
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize =
	    AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = i < (AWGE_RX_RING_COUNT-1) ? i+1 : 0;
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    (AWGE_MAX_PACKET & DDESC_CNTL_SIZE1MASK)
		    << DDESC_CNTL_SIZE1SHIFT);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	/* flush the CPU-written descriptors before the device reads them */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);
	/* bus_space_write_4 handles bus endianness, no htole32 here */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    (AWGE_MAX_PACKET & DDESC_CNTL_SIZE1MASK)
		    << DDESC_CNTL_SIZE1SHIFT);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
}

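/*
 * Allocate one physically contiguous DMA area for all descriptors and
 * split it between the RX and TX rings, so a single map covers every
 * descriptor sync.
 */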
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the remaining ones to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			/* sync the buffer map itself, not the desc ring */
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

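/*
 * Prepare the TX ring: create one DMA map per slot and pre-chain the
 * descriptors' next pointers into a ring; buffers are attached later,
 * in dwc_gmac_queue().
 */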
static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		/* chain to the next descriptor, wrapping at the end */
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *((i+1) & (AWGE_TX_RING_COUNT-1)));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

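/*
 * Sync the TX descriptors in [start, end); when the range wraps
 * around the end of the ring this takes two bus_dmamap_sync calls.
 */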
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		/* XXX */
		break;
	case IFM_1000_T:
		/* XXX */
		break;
	}
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Set up DMA pointer for the RX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

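/*
 * if_start callback: pull packets off the send queue into the TX ring
 * until it is full, then sync the touched descriptors and point the
 * DMA engine at the first newly filled one.
 */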
static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;	/* first descriptor we will fill */
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/*
		 * Packets have been queued, kick it off.  Note that
		 * t_queued is a count, not a ring index, so the sync and
		 * the kick address must use the saved ring position.
		 */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
		    sc->sc_txq.t_physaddr
		        + start*sizeof(struct dwc_gmac_dev_dmadesc));
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t status, flags, len;
	int error, i, first;

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	flags = DDESC_STATUS_TXINT|DDESC_STATUS_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = (map->dm_segs[i].ds_len & DDESC_CNTL_SIZE1MASK)
		    << DDESC_CNTL_SIZE1SHIFT;
		desc->ddesc_cntl = htole32(len);
		status = flags;
		desc->ddesc_status = htole32(status);
		sc->sc_txq.t_queued++;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		flags |= DDESC_STATUS_OWNEDBYDEV;

		sc->sc_txq.t_cur = (sc->sc_txq.t_cur + 1)
		    & (AWGE_TX_RING_COUNT-1);
	}

	/* mark the last segment of the packet */
	desc->ddesc_status |= htole32(DDESC_STATUS_TXLAST);

	/*
	 * Finalize the first descriptor and only now pass ownership of
	 * the whole chain to the device; OR-ing keeps TXLAST intact in
	 * the single-segment case, where first == last.
	 */
	sc->sc_txq.t_desc[first].ddesc_status |=
	    htole32(DDESC_STATUS_TXFIRST|DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	// struct dwc_gmac_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		error = 0;
		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
			if (ifp->if_flags & IFF_RUNNING) {
				/* XXX: multicast filter update missing */
			}
		}
		break;
	}

	splx(s);

	return error;
}

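/*
 * Interrupt handler; so far it only acks MII events, while RX and TX
 * completion handling still needs to be filled in.
 */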
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	/* XXX: temporary debug scaffolding until RX/TX handling exists */
	printf("%s: INTR status: %08x, DMA status: %08x\n",
	    device_xname(sc->sc_dev), status, dma_status);

	static size_t cnt = 0;
	if (++cnt > 20)
		panic("enough now");

	return 1;
}