/* $NetBSD: dwc_gmac.c,v 1.28.2.8 2016/07/09 20:25:02 skrll Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.28.2.8 2016/07/09 20:25:02 skrll Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
			 uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static int dwc_gmac_init_locked(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_stop_locked(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static void dwc_gmac_start_locked(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t	bitrev32(uint32_t x);

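/*
 * Both rings live in one contiguous DMA area: the AWGE_RX_RING_COUNT
 * RX descriptors come first, immediately followed by the TX descriptors.
 * The *_DESC_OFFSET macros translate a ring index into a byte offset
 * within that area; the *_NEXT macros advance an index with wrap-around
 * and therefore rely on the ring counts being powers of two.
 */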
#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))

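/*
 * GMAC_DEF_DMA_INT_MASK is the set of DMA interrupts enabled at attach
 * time; GMAC_DMA_INT_ERRORS collects the conditions that the interrupt
 * handler accounts as output errors.
 */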
#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE|	\
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

#ifdef NET_MPSAFE
#define DWCGMAC_MPSAFE	1
#endif

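/*
 * Attach the core: determine the MAC address (device property, current
 * filter contents, or a randomly generated fallback), reset the chip,
 * allocate the DMA rings, hook up MII/PHY and the network interface,
 * and finally enable interrupts.
 */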
void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup before
		 * resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* no valid MAC address set, fake one */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, we haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach the interface
	 */
	if_initialize(ifp);
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}

static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

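/*
 * MDIO access: the PHY number, register number and clock divider are
 * programmed into the MIIADDR register together with the BUSY bit
 * (plus WRITE for writes); we then poll, for up to 10ms, until the
 * hardware clears BUSY before touching MIIDATA.
 */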
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

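/*
 * Populate the RX ring: every slot gets its own DMA map and an mbuf
 * cluster, the descriptors are chained via ddesc_next (RXCHAIN mode)
 * and handed to the device by setting DDESC_STATUS_OWNEDBYDEV.
 */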
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

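/*
 * Allocate one physically contiguous, coherent chunk of memory that
 * holds all RX and TX descriptors, mapped by a single DMA map; the
 * two rings are then carved out of it back to back.
 */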
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the remaining descriptors to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

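/*
 * Media status change callback: reprogram AWIN_GMAC_MAC_CONF for the
 * speed and duplex the PHY negotiated, and update the pause frame
 * settings in AWIN_GMAC_MAC_FLOWCTRL accordingly.
 */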
static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
			AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len, status;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

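	/*
	 * Each DMA segment gets its own chained descriptor.  TXFIRST is
	 * set only on the first one; the device is given ownership of
	 * the followers as we go, while the first descriptor is handed
	 * over last so the chip cannot start on a half-built chain.
	 */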
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	status = 0;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		desc->ddesc_status = htole32(status);
		status |= DDESC_STATUS_OWNEDBYDEV;

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	desc->ddesc_cntl |= htole32(DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass the first descriptor to the device */
	sc->sc_txq.t_desc[first].ddesc_status =
	    htole32(DDESC_STATUS_OWNEDBYDEV);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	if ((change & IFF_PROMISC) != 0) {
		mutex_enter(sc->sc_lock);
		dwc_gmac_setmulti(sc);
		mutex_exit(sc->sc_lock);
	}
	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

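/*
 * Reclaim transmitted packets: walk the ring from t_next, stop at the
 * first descriptor the device still owns, unload and free everything
 * that completed, and account the freed segments in t_queued.
 */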
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t status;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i+1 does not need to be a valid descriptor index;
		 * passing it merely makes txdesc_sync cover the single
		 * tx descriptor (i).
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		ifp->if_opackets++;
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

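/*
 * Receive path: for every descriptor returned by the device we first
 * try to allocate a fresh mbuf cluster; only if that succeeds is the
 * filled buffer passed up the stack and replaced in the ring.  On any
 * failure the packet is dropped and the old buffer recycled, so the
 * ring never runs out of buffers.
 */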
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up; if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		ifp->if_ipackets++;

		mutex_exit(&sc->sc_rxq.r_mtx);

		bpf_mtap(ifp, m);
		if_percpuq_enqueue(sc->sc_ipq, m);

		mutex_enter(&sc->sc_rxq.r_mtx);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

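	/*
	 * The hash filter index is the top 6 bits of the bit-reversed
	 * one's complement CRC32 of the multicast address: bit 5 picks
	 * the HTHIGH/HTLOW register (hashes[1]/hashes[0]), the low 5
	 * bits select the bit within it.  Address ranges cannot be
	 * expressed this way, so they force ALLMULTI instead.
	 */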
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

		h = bitrev32(
			~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

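/*
 * Interrupt handler, exported to the bus-specific front-ends:
 * dispatches MII, TX completion and RX events, acknowledges what was
 * handled and returns nonzero if the interrupt was ours.
 */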
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupts by writing the handled status bits back */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		sc->sc_ec.ec_if.if_start(&sc->sc_ec.ec_if);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif