      1 /* $NetBSD: if_gmc.c,v 1.10 2019/05/23 13:10:50 msaitoh Exp $ */
      2 /*-
      3  * Copyright (c) 2008 The NetBSD Foundation, Inc.
      4  * All rights reserved.
      5  *
      6  * This code is derived from software contributed to The NetBSD Foundation
      7  * by Matt Thomas <matt (at) 3am-software.com>
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     20  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     21  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     28  * POSSIBILITY OF SUCH DAMAGE.
     29  */
     30 
     31 #include <sys/param.h>
     32 #include <sys/callout.h>
     33 #include <sys/device.h>
     34 #include <sys/ioctl.h>
     35 #include <sys/kernel.h>
     36 #include <sys/kmem.h>
     37 #include <sys/mbuf.h>
     38 
     39 #include <sys/bus.h>
     40 #include <machine/intr.h>
     41 
     42 #include <arm/gemini/gemini_reg.h>
     43 #include <arm/gemini/gemini_gmacreg.h>
     44 #include <arm/gemini/gemini_gmacvar.h>
     45 
     46 #include <net/if.h>
     47 #include <net/if_ether.h>
     48 #include <net/if_dl.h>
     49 
     50 __KERNEL_RCSID(0, "$NetBSD: if_gmc.c,v 1.10 2019/05/23 13:10:50 msaitoh Exp $");
     51 
     52 #define	MAX_TXSEG	32
     53 
     54 struct gmc_softc {
     55 	device_t sc_dev;
     56 	struct gmac_softc *sc_psc;
     57 	struct gmc_softc *sc_sibling;
     58 	bus_dma_tag_t sc_dmat;
     59 	bus_space_tag_t sc_iot;
     60 	bus_space_handle_t sc_ioh;
     61 	bus_space_handle_t sc_dma_ioh;
     62 	bus_space_handle_t sc_gmac_ioh;
     63 	struct ethercom sc_ec;
     64 	struct mii_data sc_mii;
     65 	void *sc_ih;
     66 	bool sc_port1;
     67 	uint8_t sc_phy;
     68 	gmac_hwqueue_t *sc_rxq;
     69 	gmac_hwqueue_t *sc_txq[6];
     70 	callout_t sc_mii_ch;
     71 
     72 	uint32_t sc_gmac_status;
     73 	uint32_t sc_gmac_sta_add[3];
     74 	uint32_t sc_gmac_mcast_filter[2];
     75 	uint32_t sc_gmac_rx_filter;
     76 	uint32_t sc_gmac_config[2];
     77 	uint32_t sc_dmavr;
     78 
     79 	uint32_t sc_int_mask[5];
     80 	uint32_t sc_int_enabled[5];
     81 };
     82 
     83 #define	sc_if	sc_ec.ec_if
     84 
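         /*
          * Queue one mbuf chain on a hardware transmit queue: fix up any
          * odd (unaligned) segment addresses, load the chain for DMA and
          * fill in one descriptor per DMA segment.  Returns false (so the
          * caller can retry later) if no DMA map or descriptor space is
          * available; returns true once the packet has been consumed.
          */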
     85 static bool
     86 gmc_txqueue(struct gmc_softc *sc, gmac_hwqueue_t *hwq, struct mbuf *m)
     87 {
     88 	bus_dmamap_t map;
     89 	uint32_t desc0, desc1, desc3;
     90 	struct mbuf *last_m, *m0;
     91 	size_t count, i;
     92 	int error;
     93 	gmac_desc_t *d;
     94 
     95 	KASSERT(hwq != NULL);
     96 
     97 	map = gmac_mapcache_get(hwq->hwq_hqm->hqm_mc);
     98 	if (map == NULL)
     99 		return false;
    100 
    101 	for (last_m = NULL, m0 = m, count = 0;
    102 	     m0 != NULL;
    103 	     last_m = m0, m0 = m0->m_next) {
    104 		vaddr_t addr = (uintptr_t)m0->m_data;
    105 		if (m0->m_len == 0)
    106 			continue;
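         		/*
         		 * The DMA engine appears to require 16-bit aligned
         		 * buffer addresses (see the KASSERT on ds_addr below),
         		 * so shuffle bytes until odd-aligned data becomes even.
         		 */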
    107 		if (addr & 1) {
    108 			if (last_m != NULL && M_TRAILINGSPACE(last_m) > 0) {
     109 				last_m->m_data[last_m->m_len++] = *m0->m_data++;
     110 				m0->m_len--;
    111 			} else if (M_TRAILINGSPACE(m0) > 0) {
    112 				memmove(m0->m_data + 1, m0->m_data, m0->m_len);
    113 				m0->m_data++;
    114 			} else if (M_LEADINGSPACE(m0) > 0) {
    115 				memmove(m0->m_data - 1, m0->m_data, m0->m_len);
    116 				m0->m_data--;
    117 			} else {
    118 				panic("gmc_txqueue: odd addr %p", m0->m_data);
    119 			}
    120 		}
     121 		count += ((addr & PGOFSET) + m0->m_len + PGOFSET) >> PGSHIFT;
    122 	}
    123 
    124 	gmac_hwqueue_sync(hwq);
    125 	if (hwq->hwq_free <= count) {
    126 		gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
    127 		return false;
    128 	}
    129 
    130 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
    131 	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
    132 	if (error) {
    133 		aprint_error_dev(sc->sc_dev, "ifstart: load failed: %d\n",
    134 		    error);
    135 		gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
    136 		m_freem(m);
    137 		sc->sc_if.if_oerrors++;
    138 		return true;
    139 	}
    140 	KASSERT(map->dm_nsegs > 0);
    141 
    142 	/*
    143 	 * Sync the mbuf contents to memory/cache.
    144 	 */
    145 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
    146 		BUS_DMASYNC_PREWRITE);
    147 
     148 	/*
     149 	 * Load the descriptors; SOF on the first, EOF/EOFIE on the last.
     150 	 */
    151 	desc0 = map->dm_nsegs << 16;
    152 	desc1 = m->m_pkthdr.len;
    153 	desc3 = DESC3_SOF;
    154 	i = 0;
    155 	d = NULL;
    156 	do {
    157 #if 0
    158 		if (i > 0)
    159 			aprint_debug_dev(sc->sc_dev,
    160 			    "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
    161 			    i-1, d, d->d_desc0, d->d_desc1,
    162 			    d->d_bufaddr, d->d_desc3);
    163 #endif
    164 		d = gmac_hwqueue_desc(hwq, i);
    165 		KASSERT(map->dm_segs[i].ds_len > 0);
    166 		KASSERT((map->dm_segs[i].ds_addr & 1) == 0);
    167 		d->d_desc0 = htole32(map->dm_segs[i].ds_len | desc0);
    168 		d->d_desc1 = htole32(desc1);
    169 		d->d_bufaddr = htole32(map->dm_segs[i].ds_addr);
    170 		d->d_desc3 = htole32(desc3);
    171 		desc3 = 0;
    172 	} while (++i < map->dm_nsegs);
    173 
    174 	d->d_desc3 |= htole32(DESC3_EOF | DESC3_EOFIE);
    175 #if 0
    176 	aprint_debug_dev(sc->sc_dev,
    177 	    "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
    178 	    i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
    179 #endif
    180 	M_SETCTX(m, map);
    181 	IF_ENQUEUE(&hwq->hwq_ifq, m);
    182 	/*
    183 	 * Last descriptor has been marked.  Give them to the h/w.
    184 	 * This will sync for us.
    185 	 */
    186 	gmac_hwqueue_produce(hwq, map->dm_nsegs);
    187 #if 0
    188 	aprint_debug_dev(sc->sc_dev,
    189 	    "gmac_txqueue: *%zu@%p=%#x/%#x/%#x/%#x\n",
    190 	    i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
    191 #endif
    192 	return true;
    193 }
    194 
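         /*
          * Program the station address, the 64-bit multicast hash filter
          * (the CRC32 of each multicast address selects one of 64 bits) and
          * the RX filter mode, touching the registers only when the cached
          * values actually change.
          */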
    195 static void
    196 gmc_filter_change(struct gmc_softc *sc)
    197 {
    198 	struct ether_multi *enm;
    199 	struct ether_multistep step;
    200 	uint32_t mhash[2];
    201 	uint32_t new0, new1, new2;
     202 	const uint8_t * const eaddr = (const uint8_t *)CLLADDR(sc->sc_if.if_sadl);
    203 
    204 	new0 = eaddr[0] | ((eaddr[1] | (eaddr[2] | (eaddr[3] << 8)) << 8) << 8);
    205 	new1 = eaddr[4] | (eaddr[5] << 8);
    206 	new2 = 0;
    207 	if (sc->sc_gmac_sta_add[0] != new0
    208 	    || sc->sc_gmac_sta_add[1] != new1
    209 	    || sc->sc_gmac_sta_add[2] != new2) {
    210 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD0,
    211 		    new0);
    212 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD1,
    213 		    new1);
    214 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD2,
    215 		    new2);
    216 		sc->sc_gmac_sta_add[0] = new0;
    217 		sc->sc_gmac_sta_add[1] = new1;
    218 		sc->sc_gmac_sta_add[2] = new2;
    219 	}
    220 
    221 	mhash[0] = 0;
    222 	mhash[1] = 0;
    223 	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
    224 	while (enm != NULL) {
    225 		size_t i;
    226 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
    227 			mhash[0] = mhash[1] = 0xffffffff;
    228 			break;
    229 		}
    230 		i = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
    231 		mhash[(i >> 5) & 1] |= 1 << (i & 31);
    232 		ETHER_NEXT_MULTI(step, enm);
    233 	}
    234 
    235 	if (sc->sc_gmac_mcast_filter[0] != mhash[0]
    236 	    || sc->sc_gmac_mcast_filter[1] != mhash[1]) {
    237 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
    238 		    GMAC_MCAST_FILTER0, mhash[0]);
    239 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
    240 		    GMAC_MCAST_FILTER1, mhash[1]);
    241 		sc->sc_gmac_mcast_filter[0] = mhash[0];
    242 		sc->sc_gmac_mcast_filter[1] = mhash[1];
    243 	}
    244 
    245 	new0 = sc->sc_gmac_rx_filter & ~RXFILTER_PROMISC;
    246 	new0 |= RXFILTER_BROADCAST | RXFILTER_UNICAST | RXFILTER_MULTICAST;
    247 	if (sc->sc_if.if_flags & IFF_PROMISC)
    248 		new0 |= RXFILTER_PROMISC;
    249 
    250 	if (new0 != sc->sc_gmac_rx_filter) {
    251 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_RX_FILTER,
    252 		    new0);
    253 		sc->sc_gmac_rx_filter = new0;
    254 	}
    255 }
    256 
    257 static void
    258 gmc_mii_tick(void *arg)
    259 {
    260 	struct gmc_softc * const sc = arg;
    261 	struct gmac_softc * const psc = sc->sc_psc;
    262 	int s = splnet();
    263 
    264 	/*
    265 	 * If we had to increase the number of receive mbufs due to fifo
     266 	 * overflows, we need a way to decrease them.  So for every second in
     267 	 * which we receive no more than MIN_RXMAPS packets, we decrement
    268 	 * swfree_min until it returns to MIN_RXMAPS.
    269 	 */
    270 	if (psc->sc_rxpkts_per_sec <= MIN_RXMAPS
    271 	    && psc->sc_swfree_min > MIN_RXMAPS) {
    272 		psc->sc_swfree_min--;
    273 		gmac_swfree_min_update(psc);
    274 	}
    275 	/*
    276 	 * If only one GMAC is running or this is port0, reset the count.
    277 	 */
    278 	if (psc->sc_running != 3 || !sc->sc_port1)
    279 		psc->sc_rxpkts_per_sec = 0;
    280 
    281 	mii_tick(&sc->sc_mii);
    282 	if (sc->sc_if.if_flags & IFF_RUNNING)
    283 		callout_schedule(&sc->sc_mii_ch, hz);
    284 
    285 	splx(s);
    286 }
    287 
    288 static int
    289 gmc_mediachange(struct ifnet *ifp)
    290 {
    291 	struct gmc_softc * const sc = ifp->if_softc;
    292 
    293 	if ((ifp->if_flags & IFF_UP) == 0)
    294 		return 0;
    295 
    296 	return mii_mediachg(&sc->sc_mii);
    297 }
    298 
    299 static void
    300 gmc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
    301 {
    302 	struct gmc_softc * const sc = ifp->if_softc;
    303 
    304 	mii_pollstat(&sc->sc_mii);
    305 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
    306 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
    307 }
    308 
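         /*
          * Reflect the negotiated media (PHY mode, speed, duplex, link
          * state) into the GMAC_STATUS register when the PHY status changes.
          */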
    309 static void
    310 gmc_mii_statchg(struct ifnet *ifp)
    311 {
    312 	struct gmc_softc * const sc = ifp->if_softc;
    313 	uint32_t gmac_status;
    314 
    315 	gmac_status = sc->sc_gmac_status;
    316 
    317 	gmac_status &= ~STATUS_PHYMODE_MASK;
    318 	gmac_status |= STATUS_PHYMODE_RGMII_A;
    319 
    320 	gmac_status &= ~STATUS_SPEED_MASK;
    321 	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T) {
    322 		gmac_status |= STATUS_SPEED_1000M;
    323 	} else if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX) {
    324 		gmac_status |= STATUS_SPEED_100M;
    325 	} else {
    326 		gmac_status |= STATUS_SPEED_10M;
    327 	}
    328 
    329 	if (sc->sc_mii.mii_media_active & IFM_FDX)
    330 		gmac_status |= STATUS_DUPLEX_FULL;
    331 	else
    332 		gmac_status &= ~STATUS_DUPLEX_FULL;
    333 
    334 	if (sc->sc_mii.mii_media_status & IFM_ACTIVE)
    335 		gmac_status |= STATUS_LINK_ON;
    336 	else
    337 		gmac_status &= ~STATUS_LINK_ON;
    338 
    339 	if (sc->sc_gmac_status != gmac_status) {
    340 		aprint_debug_dev(sc->sc_dev,
    341 		    "status change old=%#x new=%#x active=%#x\n",
    342 		    sc->sc_gmac_status, gmac_status,
    343 		    sc->sc_mii.mii_media_active);
    344 		sc->sc_gmac_status = gmac_status;
    345 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STATUS,
    346 		    sc->sc_gmac_status);
    347 	}
    348 
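         	/*
         	 * XXX Magic write to PHY register 0x18; presumably a
         	 * board-specific PHY tweak (assumption -- not documented here).
         	 */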
    349 	(*sc->sc_mii.mii_writereg)(sc->sc_dev, sc->sc_phy, 0x0018, 0x0041);
    350 }
    351 
    352 static int
    353 gmc_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
    354 {
    355 	struct gmc_softc * const sc = ifp->if_softc;
    356 	int s;
    357 	int error;
    358 	s = splnet();
    359 
    360 	switch (cmd) {
    361 	default:
    362 		error = ether_ioctl(ifp, cmd, data);
    363 		if (error == ENETRESET) {
    364 			if (ifp->if_flags & IFF_RUNNING) {
    365 				/*
    366 				 * If the interface is running, we have to
    367 				 * update its multicast filter.
    368 				 */
    369 				gmc_filter_change(sc);
    370 			}
    371 			error = 0;
    372 		}
    373 	}
    374 
    375 	splx(s);
    376 	return error;
    377 }
    378 
    379 static void
    380 gmc_ifstart(struct ifnet *ifp)
    381 {
    382 	struct gmc_softc * const sc = ifp->if_softc;
    383 
    384 #if 0
    385 	if ((sc->sc_gmac_status & STATUS_LINK_ON) == 0)
    386 		return;
    387 #endif
    388 	if ((ifp->if_flags & IFF_RUNNING) == 0)
    389 		return;
    390 
    391 	for (;;) {
    392 		struct mbuf *m;
    393 		IF_DEQUEUE(&ifp->if_snd, m);
    394 		if (m == NULL)
    395 			break;
    396 		if (!gmc_txqueue(sc, sc->sc_txq[0], m)) {
    397 			IF_PREPEND(&ifp->if_snd, m);
    398 			ifp->if_flags |= IFF_OACTIVE;
    399 			break;
    400 		}
    401 	}
    402 }
    403 
    404 static void
    405 gmc_ifstop(struct ifnet *ifp, int disable)
    406 {
    407 	struct gmc_softc * const sc = ifp->if_softc;
    408 	struct gmac_softc * const psc = sc->sc_psc;
    409 
    410 	psc->sc_running &= ~(sc->sc_port1 ? 2 : 1);
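         	/*
         	 * Withdraw this port's interrupt enables from the shared set,
         	 * but keep the shared SW free-queue-empty interrupt on until
         	 * neither port is running (it is cleared just below then).
         	 */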
    411 	psc->sc_int_enabled[0] &= ~sc->sc_int_enabled[0];
    412 	psc->sc_int_enabled[1] &= ~sc->sc_int_enabled[1];
    413 	psc->sc_int_enabled[2] &= ~sc->sc_int_enabled[2];
    414 	psc->sc_int_enabled[3] &= ~sc->sc_int_enabled[3];
    415 	psc->sc_int_enabled[4] &= ~sc->sc_int_enabled[4] | INT4_SW_FREEQ_EMPTY;
    416 	if (psc->sc_running == 0) {
    417 		psc->sc_int_enabled[4] &= ~INT4_SW_FREEQ_EMPTY;
    418 		KASSERT(psc->sc_int_enabled[0] == 0);
    419 		KASSERT(psc->sc_int_enabled[1] == 0);
    420 		KASSERT(psc->sc_int_enabled[2] == 0);
    421 		KASSERT(psc->sc_int_enabled[3] == 0);
    422 		KASSERT(psc->sc_int_enabled[4] == 0);
    423 	} else if (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0)
    424 			== sc->sc_port1) {
    425 		psc->sc_int_select[4] &= ~INT4_SW_FREEQ_EMPTY;
    426 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
    427 		    psc->sc_int_select[4]);
    428 	}
    429 	gmac_intr_update(psc);
    430 	if (disable) {
    431 #if 0
    432 		if (psc->sc_running == 0) {
    433 			gmac_mapcache_destroy(&psc->sc_txmaps);
    434 			gmac_mapcache_destroy(&psc->sc_rxmaps);
    435 		}
    436 #endif
    437 	}
    438 }
    439 
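         /*
          * Bring the interface up: fill the shared DMA map caches, create
          * the RX and TX hardware queues on first use, program the RX
          * filter, DMA and MAC configuration, and enable this port's
          * interrupts in the shared controller state.
          */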
    440 static int
    441 gmc_ifinit(struct ifnet *ifp)
    442 {
    443 	struct gmc_softc * const sc = ifp->if_softc;
    444 	struct gmac_softc * const psc = sc->sc_psc;
    445 	uint32_t new, mask;
    446 
    447 	gmac_mapcache_fill(psc->sc_rxmaps, MIN_RXMAPS);
    448 	gmac_mapcache_fill(psc->sc_txmaps, MIN_TXMAPS);
    449 
    450 	if (sc->sc_rxq == NULL) {
    451 		gmac_hwqmem_t *hqm;
    452 		hqm = gmac_hwqmem_create(psc->sc_rxmaps, 16, /*RXQ_NDESCS,*/ 1,
    453 		   HQM_CONSUMER | HQM_RX);
    454 		sc->sc_rxq = gmac_hwqueue_create(hqm, sc->sc_iot,
    455 		    sc->sc_ioh, GMAC_DEF_RXQn_RWPTR(sc->sc_port1),
    456 		    GMAC_DEF_RXQn_BASE(sc->sc_port1), 0);
    457 		if (sc->sc_rxq == NULL) {
    458 			gmac_hwqmem_destroy(hqm);
    459 			goto failed;
    460 		}
    461 		sc->sc_rxq->hwq_ifp = ifp;
    462 		sc->sc_rxq->hwq_producer = psc->sc_swfreeq;
    463 	}
    464 
    465 	if (sc->sc_txq[0] == NULL) {
    466 		gmac_hwqueue_t *hwq, *last_hwq;
    467 		gmac_hwqmem_t *hqm;
    468 		size_t i;
    469 
    470 		hqm = gmac_hwqmem_create(psc->sc_txmaps, TXQ_NDESCS, 6,
    471 		   HQM_PRODUCER | HQM_TX);
    472 		KASSERT(hqm != NULL);
    473 		for (i = 0; i < __arraycount(sc->sc_txq); i++) {
    474 			sc->sc_txq[i] = gmac_hwqueue_create(hqm, sc->sc_iot,
    475 			    sc->sc_dma_ioh, GMAC_SW_TX_Qn_RWPTR(i),
    476 			    GMAC_SW_TX_Q_BASE, i);
    477 			if (sc->sc_txq[i] == NULL) {
    478 				if (i == 0)
    479 					gmac_hwqmem_destroy(hqm);
    480 				goto failed;
    481 			}
    482 			sc->sc_txq[i]->hwq_ifp = ifp;
    483 
    484 			last_hwq = NULL;
    485 			SLIST_FOREACH(hwq, &psc->sc_hwfreeq->hwq_producers,
    486 			    hwq_link) {
    487 				if (sc->sc_txq[i]->hwq_qoff < hwq->hwq_qoff)
    488 					break;
    489 				last_hwq = hwq;
    490 			}
    491 			if (last_hwq == NULL)
    492 				SLIST_INSERT_HEAD(
    493 				    &psc->sc_hwfreeq->hwq_producers,
    494 				    sc->sc_txq[i], hwq_link);
    495 			else
    496 				SLIST_INSERT_AFTER(last_hwq, sc->sc_txq[i],
    497 				    hwq_link);
    498 		}
    499 	}
    500 
    501 	gmc_filter_change(sc);
    502 
    503 	mask = DMAVR_LOOPBACK | DMAVR_DROP_SMALL_ACK | DMAVR_EXTRABYTES_MASK
    504 	    | DMAVR_RXBURSTSIZE_MASK | DMAVR_RXBUSWIDTH_MASK
    505 	    | DMAVR_TXBURSTSIZE_MASK | DMAVR_TXBUSWIDTH_MASK;
    506 	new = DMAVR_RXDMA_ENABLE | DMAVR_TXDMA_ENABLE
    507 	    | DMAVR_EXTRABYTES(2)
    508 	    | DMAVR_RXBURSTSIZE(DMAVR_BURSTSIZE_32W)
    509 	    | DMAVR_RXBUSWIDTH(DMAVR_BUSWIDTH_32BITS)
    510 	    | DMAVR_TXBURSTSIZE(DMAVR_BURSTSIZE_32W)
    511 	    | DMAVR_TXBUSWIDTH(DMAVR_BUSWIDTH_32BITS);
    512 	new |= sc->sc_dmavr & ~mask;
    513 	if (sc->sc_dmavr != new) {
    514 		sc->sc_dmavr = new;
    515 		bus_space_write_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR,
    516 		    sc->sc_dmavr);
    517 		aprint_debug_dev(sc->sc_dev, "gmc_ifinit: dmavr=%#x/%#x\n",
    518 		    sc->sc_dmavr,
    519 		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR));
    520 	}
    521 
    522 	mask = CONFIG0_MAXLEN_MASK | CONFIG0_TX_DISABLE | CONFIG0_RX_DISABLE
    523 	    | CONFIG0_LOOPBACK |/*CONFIG0_SIM_TEST|*/CONFIG0_INVERSE_RXC_RGMII
    524 	    | CONFIG0_RGMII_INBAND_STATUS_ENABLE;
    525 	new = CONFIG0_MAXLEN(CONFIG0_MAXLEN_1536) | CONFIG0_R_LATCHED_MMII;
    526 	new |= (sc->sc_gmac_config[0] & ~mask);
    527 	if (sc->sc_gmac_config[0] != new) {
    528 		sc->sc_gmac_config[0] = new;
    529 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0,
    530 		    sc->sc_gmac_config[0]);
    531 		aprint_debug_dev(sc->sc_dev, "gmc_ifinit: config0=%#x/%#x\n",
    532 		    sc->sc_gmac_config[0],
    533 		    bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0));
    534 	}
    535 
    536 	psc->sc_rxpkts_per_sec +=
    537 	    gmac_rxproduce(psc->sc_swfreeq, psc->sc_swfree_min);
    538 
    539 	/*
    540 	 * If we will be the only active interface, make sure the sw freeq
     541 	 * interrupt gets routed to us.
    542 	 */
    543 	if (psc->sc_running == 0
    544 	    && (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0) != sc->sc_port1)) {
    545 		psc->sc_int_select[4] ^= INT4_SW_FREEQ_EMPTY;
    546 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
    547 		    psc->sc_int_select[4]);
    548 	}
    549 	sc->sc_int_enabled[0] = sc->sc_int_mask[0]
    550 	    & (INT0_TXDERR|INT0_TXPERR|INT0_RXDERR|INT0_RXPERR|INT0_SWTXQ_EOF);
    551 	sc->sc_int_enabled[1] = sc->sc_int_mask[1] & INT1_DEF_RXQ_EOF;
    552 	sc->sc_int_enabled[4] = INT4_SW_FREEQ_EMPTY | (sc->sc_int_mask[4]
    553 	    & (INT4_TX_FAIL | INT4_MIB_HEMIWRAP | INT4_RX_FIFO_OVRN
    554 	       | INT4_RGMII_STSCHG));
    555 
    556 	psc->sc_int_enabled[0] |= sc->sc_int_enabled[0];
    557 	psc->sc_int_enabled[1] |= sc->sc_int_enabled[1];
    558 	psc->sc_int_enabled[4] |= sc->sc_int_enabled[4];
    559 
    560 	gmac_intr_update(psc);
    561 
    562 	if ((ifp->if_flags & IFF_RUNNING) == 0)
    563 		mii_tick(&sc->sc_mii);
    564 
    565 	ifp->if_flags |= IFF_RUNNING;
    566 	psc->sc_running |= (sc->sc_port1 ? 2 : 1);
    567 
    568 	callout_schedule(&sc->sc_mii_ch, hz);
    569 
    570 	return 0;
    571 
    572 failed:
    573 	gmc_ifstop(ifp, true);
    574 	return ENOMEM;
    575 }
    576 
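         /*
          * Interrupt handler: service transmit end-of-frame, refill the
          * software free queue when it runs empty, consume the default RX
          * queue and acknowledge the miscellaneous INT4 conditions.
          */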
    577 static int
    578 gmc_intr(void *arg)
    579 {
    580 	struct gmc_softc * const sc = arg;
    581 	uint32_t int0_status, int1_status, int4_status;
    582 	uint32_t status;
    583 	bool do_ifstart = false;
    584 	int rv = 0;
    585 
    586 	aprint_debug_dev(sc->sc_dev, "gmac_intr: entry\n");
    587 
    588 	int0_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
    589 	    GMAC_INT0_STATUS);
    590 	int1_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
    591 	    GMAC_INT1_STATUS);
    592 	int4_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
    593 	    GMAC_INT4_STATUS);
    594 
    595 	aprint_debug_dev(sc->sc_dev, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
    596 	    int0_status, int1_status,
    597 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
    598 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
    599 	    int4_status);
    600 
    601 #if 0
    602 	aprint_debug_dev(sc->sc_dev, "gmac_intr: mask=%#x/%#x/%#x/%#x/%#x\n",
    603 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_MASK),
    604 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_MASK),
    605 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_MASK),
    606 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_MASK),
    607 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK));
    608 #endif
    609 
    610 	status = int0_status & sc->sc_int_mask[0];
    611 	if (status & (INT0_TXDERR | INT0_TXPERR)) {
    612 		aprint_error_dev(sc->sc_dev,
    613 		    "transmit%s%s error: %#x %08x bufaddr %#x\n",
    614 		    status & INT0_TXDERR ? " data" : "",
    615 		    status & INT0_TXPERR ? " protocol" : "",
    616 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
    617 		    GMAC_DMA_TX_CUR_DESC),
    618 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
    619 		    GMAC_SW_TX_Q0_RWPTR),
    620 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
    621 		    GMAC_DMA_TX_DESC2));
    622 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS,
    623 		    status & (INT0_TXDERR | INT0_TXPERR));
    624 		Debugger();
    625 	}
    626 	if (status & (INT0_RXDERR | INT0_RXPERR)) {
    627 		aprint_error_dev(sc->sc_dev,
    628 		    "receive%s%s error: %#x %#x=%#x/%#x/%#x/%#x\n",
    629 		    status & INT0_RXDERR ? " data" : "",
    630 		    status & INT0_RXPERR ? " protocol" : "",
    631 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
    632 		    GMAC_DMA_RX_CUR_DESC),
    633 		bus_space_read_4(sc->sc_iot, sc->sc_ioh,
    634 		    GMAC_SWFREEQ_RWPTR),
    635 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
    636 		    GMAC_DMA_RX_DESC0),
    637 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
    638 		    GMAC_DMA_RX_DESC1),
    639 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
    640 		    GMAC_DMA_RX_DESC2),
    641 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
    642 		    GMAC_DMA_RX_DESC3));
    643 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS,
    644 		    status & (INT0_RXDERR | INT0_RXPERR));
     645 		Debugger();
    646 	}
    647 	if (status & INT0_SWTXQ_EOF) {
    648 		status &= INT0_SWTXQ_EOF;
    649 		for (int i = 0; status && i < __arraycount(sc->sc_txq); i++) {
    650 			if (status & INT0_SWTXQn_EOF(i)) {
    651 				gmac_hwqueue_sync(sc->sc_txq[i]);
    652 				bus_space_write_4(sc->sc_iot, sc->sc_ioh,
    653 				    GMAC_INT0_STATUS,
    654 				    sc->sc_int_mask[0] & (INT0_SWTXQn_EOF(i)
    655 					| INT0_SWTXQn_FIN(i)));
    656 				status &= ~INT0_SWTXQn_EOF(i);
    657 			}
    658 		}
    659 		do_ifstart = true;
    660 		rv = 1;
    661 	}
    662 
    663 	if (int4_status & INT4_SW_FREEQ_EMPTY) {
    664 		struct gmac_softc * const psc = sc->sc_psc;
    665 		psc->sc_rxpkts_per_sec +=
    666 		    gmac_rxproduce(psc->sc_swfreeq, psc->sc_swfree_min);
    667 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS,
     668 		    int4_status & INT4_SW_FREEQ_EMPTY);
    669 		rv = 1;
    670 	}
    671 
    672 	status = int1_status & sc->sc_int_mask[1];
    673 	if (status & INT1_DEF_RXQ_EOF) {
    674 		struct gmac_softc * const psc = sc->sc_psc;
    675 		psc->sc_rxpkts_per_sec +=
    676 		    gmac_hwqueue_consume(sc->sc_rxq, psc->sc_swfree_min);
    677 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS,
    678 		    status & INT1_DEF_RXQ_EOF);
    679 		rv = 1;
    680 	}
    681 
    682 	status = int4_status & sc->sc_int_enabled[4];
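         	/*
         	 * The remaining INT4 conditions are acknowledged below but,
         	 * apart from RX FIFO overruns and RGMII status changes, are
         	 * not otherwise acted upon yet.
         	 */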
    683 	if (status & INT4_TX_FAIL) {
    684 	}
    685 	if (status & INT4_MIB_HEMIWRAP) {
    686 	}
    687 	if (status & INT4_RX_XON) {
    688 	}
    689 	if (status & INT4_RX_XOFF) {
    690 	}
    691 	if (status & INT4_TX_XON) {
    692 	}
    693 	if (status & INT4_TX_XOFF) {
    694 	}
    695 	if (status & INT4_RX_FIFO_OVRN) {
    696 #if 0
    697 		if (sc->sc_psc->sc_swfree_min < MAX_RXMAPS) {
    698 			sc->sc_psc->sc_swfree_min++;
    699 			gmac_swfree_min_update(psc);
    700 		}
    701 #endif
    702 		sc->sc_if.if_ierrors++;
    703 	}
    704 	if (status & INT4_RGMII_STSCHG) {
    705 		mii_pollstat(&sc->sc_mii);
    706 	}
    707 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS, status);
    708 
    709 	if (do_ifstart)
    710 		if_schedule_deferred_start(&sc->sc_if);
    711 
    712 	aprint_debug_dev(sc->sc_dev, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
    713 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS),
    714 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS),
    715 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
    716 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
    717 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS));
    718 	aprint_debug_dev(sc->sc_dev, "gmac_intr: exit rv=%d\n", rv);
    719 	return rv;
    720 }
    721 
    722 static int
    723 gmc_match(device_t parent, cfdata_t cf, void *aux)
    724 {
    725 	struct gmac_softc *psc = device_private(parent);
    726 	struct gmac_attach_args *gma = aux;
    727 
    728 	if ((unsigned int)gma->gma_phy > 31)
    729 		return 0;
    730 	if ((unsigned int)gma->gma_port > 1)
    731 		return 0;
    732 	if (gma->gma_intr < 1 || gma->gma_intr > 2)
    733 		return 0;
    734 
    735 	if (psc->sc_ports & (1 << gma->gma_port))
    736 		return 0;
    737 
    738 	return 1;
    739 }
    740 
    741 static void
    742 gmc_attach(device_t parent, device_t self, void *aux)
    743 {
    744 	struct gmac_softc * const psc = device_private(parent);
    745 	struct gmc_softc * const sc = device_private(self);
    746 	struct gmac_attach_args *gma = aux;
    747 	struct ifnet * const ifp = &sc->sc_if;
    748 	struct mii_data * const mii = &sc->sc_mii;
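         	/*
         	 * XXX Hard-coded fallback station addresses; a production
         	 * design would presumably take these from board configuration
         	 * or an EEPROM (assumption).
         	 */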
    749 	static const char eaddrs[2][6] = {
    750 		"\x0\x52\xc3\x11\x22\x33",
    751 		"\x0\x52\xc3\x44\x55\x66",
    752 	};
    753 
    754 	psc->sc_ports |= 1 << gma->gma_port;
    755 	sc->sc_port1 = (gma->gma_port == 1);
    756 	sc->sc_phy = gma->gma_phy;
    757 
    758 	sc->sc_dev = self;
    759 	sc->sc_psc = psc;
    760 	sc->sc_iot = psc->sc_iot;
    761 	sc->sc_ioh = psc->sc_ioh;
    762 	sc->sc_dmat = psc->sc_dmat;
    763 
    764 	bus_space_subregion(sc->sc_iot, sc->sc_ioh,
    765 	    GMAC_PORTn_DMA_OFFSET(gma->gma_port), GMAC_PORTn_DMA_SIZE,
    766 	    &sc->sc_dma_ioh);
    767 	bus_space_subregion(sc->sc_iot, sc->sc_ioh,
    768 	    GMAC_PORTn_GMAC_OFFSET(gma->gma_port), GMAC_PORTn_GMAC_SIZE,
    769 	    &sc->sc_gmac_ioh);
    770 	aprint_normal("\n");
    771 	aprint_naive("\n");
    772 
    773 	strlcpy(ifp->if_xname, device_xname(self), sizeof(ifp->if_xname));
    774 	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
    775 	ifp->if_softc = sc;
    776 	ifp->if_ioctl = gmc_ifioctl;
    777 	ifp->if_stop  = gmc_ifstop;
    778 	ifp->if_start = gmc_ifstart;
    779 	ifp->if_init  = gmc_ifinit;
    780 
    781 	IFQ_SET_READY(&ifp->if_snd);
    782 
    783 	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
    784 	sc->sc_ec.ec_mii = mii;
    785 
    786 	mii->mii_ifp = ifp;
    787 	mii->mii_statchg = gmc_mii_statchg;
    788 	mii->mii_readreg = gma->gma_mii_readreg;
    789 	mii->mii_writereg = gma->gma_mii_writereg;
    790 
    791 	ifmedia_init(&mii->mii_media, 0, gmc_mediachange, gmc_mediastatus);
    792 
    793 	if_attach(ifp);
    794 	if_deferred_start_init(ifp, NULL);
    795 	ether_ifattach(ifp, eaddrs[gma->gma_port]);
    796 	mii_attach(sc->sc_dev, mii, 0xffffffff,
    797 	    gma->gma_phy, MII_OFFSET_ANY, 0);
    798 
    799 	if (LIST_EMPTY(&mii->mii_phys)) {
    800 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
    801 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
    802 	} else {
    803 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
    804 //		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX);
    805 	}
    806 
    807 	sc->sc_gmac_status = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
    808 	    GMAC_STATUS);
    809 	sc->sc_gmac_sta_add[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
    810 	    GMAC_STA_ADD0);
    811 	sc->sc_gmac_sta_add[1] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
    812 	    GMAC_STA_ADD1);
    813 	sc->sc_gmac_sta_add[2] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
    814 	    GMAC_STA_ADD2);
    815 	sc->sc_gmac_mcast_filter[0] = bus_space_read_4(sc->sc_iot,
    816 	    sc->sc_gmac_ioh, GMAC_MCAST_FILTER0);
    817 	sc->sc_gmac_mcast_filter[1] = bus_space_read_4(sc->sc_iot,
    818 	    sc->sc_gmac_ioh, GMAC_MCAST_FILTER1);
    819 	sc->sc_gmac_rx_filter = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
    820 	    GMAC_RX_FILTER);
    821 	sc->sc_gmac_config[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
    822 	    GMAC_CONFIG0);
    823 	sc->sc_dmavr = bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR);
    824 
    825 	/* sc->sc_int_enabled is already zeroed */
    826 	sc->sc_int_mask[0] = (sc->sc_port1 ? INT0_GMAC1 : INT0_GMAC0);
    827 	sc->sc_int_mask[1] = (sc->sc_port1 ? INT1_GMAC1 : INT1_GMAC0);
    828 	sc->sc_int_mask[2] = (sc->sc_port1 ? INT2_GMAC1 : INT2_GMAC0);
    829 	sc->sc_int_mask[3] = (sc->sc_port1 ? INT3_GMAC1 : INT3_GMAC0);
    830 	sc->sc_int_mask[4] = (sc->sc_port1 ? INT4_GMAC1 : INT4_GMAC0);
    831 
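         	/*
         	 * Only port 0 establishes the interrupt handler here;
         	 * presumably port 1 is serviced elsewhere or is not yet
         	 * supported (assumption -- not documented in this file).
         	 */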
    832 	if (!sc->sc_port1) {
     833 		sc->sc_ih = intr_establish(gma->gma_intr, IPL_NET,
     834 		    IST_LEVEL_HIGH, gmc_intr, sc);
     835 		KASSERT(sc->sc_ih != NULL);
     836 	}
    837 
    838 	callout_init(&sc->sc_mii_ch, 0);
    839 	callout_setfunc(&sc->sc_mii_ch, gmc_mii_tick, sc);
    840 
    841 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
    842 	     ether_sprintf(CLLADDR(sc->sc_if.if_sadl)));
    843 }
    844 
    845 CFATTACH_DECL_NEW(gmc, sizeof(struct gmc_softc),
    846     gmc_match, gmc_attach, NULL, NULL);
    847