/* $NetBSD: if_gmc.c,v 1.11 2019/05/28 07:41:46 msaitoh Exp $ */
/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt (at) 3am-software.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <arm/gemini/gemini_reg.h>
#include <arm/gemini/gemini_gmacreg.h>
#include <arm/gemini/gemini_gmacvar.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

__KERNEL_RCSID(0, "$NetBSD: if_gmc.c,v 1.11 2019/05/28 07:41:46 msaitoh Exp $");

#define	MAX_TXSEG	32

struct gmc_softc {
	device_t sc_dev;
	struct gmac_softc *sc_psc;
	struct gmc_softc *sc_sibling;
	bus_dma_tag_t sc_dmat;
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_space_handle_t sc_dma_ioh;
	bus_space_handle_t sc_gmac_ioh;
	struct ethercom sc_ec;
	struct mii_data sc_mii;
	void *sc_ih;
	bool sc_port1;
	uint8_t sc_phy;
	gmac_hwqueue_t *sc_rxq;
	gmac_hwqueue_t *sc_txq[6];
	callout_t sc_mii_ch;

	uint32_t sc_gmac_status;
	uint32_t sc_gmac_sta_add[3];
	uint32_t sc_gmac_mcast_filter[2];
	uint32_t sc_gmac_rx_filter;
	uint32_t sc_gmac_config[2];
	uint32_t sc_dmavr;

	uint32_t sc_int_mask[5];
	uint32_t sc_int_enabled[5];
};

#define	sc_if	sc_ec.ec_if

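/*
 * gmc_txqueue:
 *	Queue a single packet on the given software transmit queue.
 *	The mbuf chain is nudged onto even addresses (the DMA engine
 *	apparently requires 16-bit aligned buffers), loaded into a DMA
 *	map from the map cache and described by a chain of transmit
 *	descriptors that is then handed to the hardware.  Returns false
 *	when descriptors or DMA maps are exhausted so the caller can
 *	requeue the packet; returns true if the packet was queued (or
 *	dropped because the DMA load failed).
 */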
static bool
gmc_txqueue(struct gmc_softc *sc, gmac_hwqueue_t *hwq, struct mbuf *m)
{
	bus_dmamap_t map;
	uint32_t desc0, desc1, desc3;
	struct mbuf *last_m, *m0;
	size_t count, i;
	int error;
	gmac_desc_t *d;

	KASSERT(hwq != NULL);

	map = gmac_mapcache_get(hwq->hwq_hqm->hqm_mc);
	if (map == NULL)
		return false;

	for (last_m = NULL, m0 = m, count = 0;
	     m0 != NULL;
	     last_m = m0, m0 = m0->m_next) {
		vaddr_t addr = (uintptr_t)m0->m_data;
		if (m0->m_len == 0)
			continue;
		if (addr & 1) {
			if (last_m != NULL && M_TRAILINGSPACE(last_m) > 0) {
				last_m->m_data[last_m->m_len++] = *m0->m_data++;
				m0->m_len--;
			} else if (M_TRAILINGSPACE(m0) > 0) {
				memmove(m0->m_data + 1, m0->m_data, m0->m_len);
				m0->m_data++;
			} else if (M_LEADINGSPACE(m0) > 0) {
				memmove(m0->m_data - 1, m0->m_data, m0->m_len);
				m0->m_data--;
			} else {
				panic("gmc_txqueue: odd addr %p", m0->m_data);
			}
		}
		count += ((addr & PGOFSET) + m0->m_len + PGOFSET) >> PGSHIFT;
	}

	gmac_hwqueue_sync(hwq);
	if (hwq->hwq_free <= count) {
		gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
		return false;
	}

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "ifstart: load failed: %d\n",
		    error);
		gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
		m_freem(m);
		sc->sc_if.if_oerrors++;
		return true;
	}
	KASSERT(map->dm_nsegs > 0);

	/*
	 * Sync the mbuf contents to memory/cache.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		BUS_DMASYNC_PREWRITE);

	/*
	 * Now we need to load the descriptors...
	 */
	desc0 = map->dm_nsegs << 16;
	desc1 = m->m_pkthdr.len;
	desc3 = DESC3_SOF;
	i = 0;
	d = NULL;
	do {
#if 0
		if (i > 0)
			aprint_debug_dev(sc->sc_dev,
			    "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
			    i-1, d, d->d_desc0, d->d_desc1,
			    d->d_bufaddr, d->d_desc3);
#endif
		d = gmac_hwqueue_desc(hwq, i);
		KASSERT(map->dm_segs[i].ds_len > 0);
		KASSERT((map->dm_segs[i].ds_addr & 1) == 0);
		d->d_desc0 = htole32(map->dm_segs[i].ds_len | desc0);
		d->d_desc1 = htole32(desc1);
		d->d_bufaddr = htole32(map->dm_segs[i].ds_addr);
		d->d_desc3 = htole32(desc3);
		desc3 = 0;
	} while (++i < map->dm_nsegs);

	d->d_desc3 |= htole32(DESC3_EOF | DESC3_EOFIE);
#if 0
	aprint_debug_dev(sc->sc_dev,
	    "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
	    i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
#endif
	M_SETCTX(m, map);
	IF_ENQUEUE(&hwq->hwq_ifq, m);
	/*
	 * Last descriptor has been marked.  Give them to the h/w.
	 * This will sync for us.
	 */
	gmac_hwqueue_produce(hwq, map->dm_nsegs);
#if 0
	aprint_debug_dev(sc->sc_dev,
	    "gmac_txqueue: *%zu@%p=%#x/%#x/%#x/%#x\n",
	    i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
#endif
	return true;
}

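/*
 * gmc_filter_change:
 *	Program the station address, the 64-bit multicast hash filter
 *	(falling back to all-ones when a multicast range is present)
 *	and the receive filter flags, writing each register only when
 *	its cached value actually changes.
 */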
static void
gmc_filter_change(struct gmc_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t mhash[2];
	uint32_t new0, new1, new2;
	const char * const eaddr = CLLADDR(sc->sc_if.if_sadl);

	new0 = eaddr[0] | ((eaddr[1] | (eaddr[2] | (eaddr[3] << 8)) << 8) << 8);
	new1 = eaddr[4] | (eaddr[5] << 8);
	new2 = 0;
	if (sc->sc_gmac_sta_add[0] != new0
	    || sc->sc_gmac_sta_add[1] != new1
	    || sc->sc_gmac_sta_add[2] != new2) {
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD0,
		    new0);
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD1,
		    new1);
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD2,
		    new2);
		sc->sc_gmac_sta_add[0] = new0;
		sc->sc_gmac_sta_add[1] = new1;
		sc->sc_gmac_sta_add[2] = new2;
	}

	mhash[0] = 0;
	mhash[1] = 0;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		size_t i;
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			mhash[0] = mhash[1] = 0xffffffff;
			break;
		}
		i = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
		mhash[(i >> 5) & 1] |= 1 << (i & 31);
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	if (sc->sc_gmac_mcast_filter[0] != mhash[0]
	    || sc->sc_gmac_mcast_filter[1] != mhash[1]) {
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
		    GMAC_MCAST_FILTER0, mhash[0]);
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
		    GMAC_MCAST_FILTER1, mhash[1]);
		sc->sc_gmac_mcast_filter[0] = mhash[0];
		sc->sc_gmac_mcast_filter[1] = mhash[1];
	}

	new0 = sc->sc_gmac_rx_filter & ~RXFILTER_PROMISC;
	new0 |= RXFILTER_BROADCAST | RXFILTER_UNICAST | RXFILTER_MULTICAST;
	if (sc->sc_if.if_flags & IFF_PROMISC)
		new0 |= RXFILTER_PROMISC;

	if (new0 != sc->sc_gmac_rx_filter) {
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_RX_FILTER,
		    new0);
		sc->sc_gmac_rx_filter = new0;
	}
}

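/*
 * gmc_mii_tick:
 *	Once-a-second callout: shrink the receive buffer reserve back
 *	towards MIN_RXMAPS when traffic is light, reset the per-second
 *	receive packet counter and run the periodic MII link check via
 *	mii_tick().
 */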
static void
gmc_mii_tick(void *arg)
{
	struct gmc_softc * const sc = arg;
	struct gmac_softc * const psc = sc->sc_psc;
	int s = splnet();

	/*
	 * If we had to increase the number of receive mbufs due to fifo
	 * overflows, we need a way to decrease them again.  So for every
	 * second in which we receive MIN_RXMAPS or fewer packets, we
	 * decrement swfree_min until it returns to MIN_RXMAPS.
	 */
	if (psc->sc_rxpkts_per_sec <= MIN_RXMAPS
	    && psc->sc_swfree_min > MIN_RXMAPS) {
		psc->sc_swfree_min--;
		gmac_swfree_min_update(psc);
	}
	/*
	 * If only one GMAC is running or this is port0, reset the count.
	 */
	if (psc->sc_running != 3 || !sc->sc_port1)
		psc->sc_rxpkts_per_sec = 0;

	mii_tick(&sc->sc_mii);
	if (sc->sc_if.if_flags & IFF_RUNNING)
		callout_schedule(&sc->sc_mii_ch, hz);

	splx(s);
}

static int
gmc_mediachange(struct ifnet *ifp)
{
	struct gmc_softc * const sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	return mii_mediachg(&sc->sc_mii);
}

static void
gmc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gmc_softc * const sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

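/*
 * gmc_mii_statchg:
 *	MII status change callback: fold the negotiated speed, duplex
 *	and link state into the cached GMAC status word and write it
 *	back to the GMAC_STATUS register when it changes.
 */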
static void
gmc_mii_statchg(struct ifnet *ifp)
{
	struct gmc_softc * const sc = ifp->if_softc;
	uint32_t gmac_status;

	gmac_status = sc->sc_gmac_status;

	gmac_status &= ~STATUS_PHYMODE_MASK;
	gmac_status |= STATUS_PHYMODE_RGMII_A;

	gmac_status &= ~STATUS_SPEED_MASK;
	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T) {
		gmac_status |= STATUS_SPEED_1000M;
	} else if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX) {
		gmac_status |= STATUS_SPEED_100M;
	} else {
		gmac_status |= STATUS_SPEED_10M;
	}

	if (sc->sc_mii.mii_media_active & IFM_FDX)
		gmac_status |= STATUS_DUPLEX_FULL;
	else
		gmac_status &= ~STATUS_DUPLEX_FULL;

	if (sc->sc_mii.mii_media_status & IFM_ACTIVE)
		gmac_status |= STATUS_LINK_ON;
	else
		gmac_status &= ~STATUS_LINK_ON;

	if (sc->sc_gmac_status != gmac_status) {
		aprint_debug_dev(sc->sc_dev,
		    "status change old=%#x new=%#x active=%#x\n",
		    sc->sc_gmac_status, gmac_status,
		    sc->sc_mii.mii_media_active);
		sc->sc_gmac_status = gmac_status;
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STATUS,
		    sc->sc_gmac_status);
	}

	(*sc->sc_mii.mii_writereg)(sc->sc_dev, sc->sc_phy, 0x0018, 0x0041);
}

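/*
 * gmc_ifioctl:
 *	Interface ioctl handler; everything is handed to ether_ioctl()
 *	and an ENETRESET result is turned into a receive filter reload
 *	on a running interface.
 */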
static int
gmc_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct gmc_softc * const sc = ifp->if_softc;
	int s;
	int error;
	s = splnet();

	switch (cmd) {
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING) {
				/*
				 * If the interface is running, we have to
				 * update its multicast filter.
				 */
				gmc_filter_change(sc);
			}
			error = 0;
		}
	}

	splx(s);
	return error;
}

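/*
 * gmc_ifstart:
 *	Drain the interface send queue into software transmit queue 0;
 *	if a packet cannot be queued it is put back and the interface
 *	is marked OACTIVE until the hardware catches up.
 */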
    383 gmc_ifstart(struct ifnet *ifp)
    384 {
    385 	struct gmc_softc * const sc = ifp->if_softc;
    386 
    387 #if 0
    388 	if ((sc->sc_gmac_status & STATUS_LINK_ON) == 0)
    389 		return;
    390 #endif
    391 	if ((ifp->if_flags & IFF_RUNNING) == 0)
    392 		return;
    393 
    394 	for (;;) {
    395 		struct mbuf *m;
    396 		IF_DEQUEUE(&ifp->if_snd, m);
    397 		if (m == NULL)
    398 			break;
    399 		if (!gmc_txqueue(sc, sc->sc_txq[0], m)) {
    400 			IF_PREPEND(&ifp->if_snd, m);
    401 			ifp->if_flags |= IFF_OACTIVE;
    402 			break;
    403 		}
    404 	}
    405 }
    406 
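/*
 * gmc_ifstop:
 *	Stop this port: withdraw its interrupt enables from the shared
 *	GMAC core, keep the sw freeq empty interrupt alive while the
 *	other port is still running, and reroute that interrupt away
 *	from this port if it was selected for it.
 */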
static void
gmc_ifstop(struct ifnet *ifp, int disable)
{
	struct gmc_softc * const sc = ifp->if_softc;
	struct gmac_softc * const psc = sc->sc_psc;

	psc->sc_running &= ~(sc->sc_port1 ? 2 : 1);
	psc->sc_int_enabled[0] &= ~sc->sc_int_enabled[0];
	psc->sc_int_enabled[1] &= ~sc->sc_int_enabled[1];
	psc->sc_int_enabled[2] &= ~sc->sc_int_enabled[2];
	psc->sc_int_enabled[3] &= ~sc->sc_int_enabled[3];
	psc->sc_int_enabled[4] &= ~sc->sc_int_enabled[4] | INT4_SW_FREEQ_EMPTY;
	if (psc->sc_running == 0) {
		psc->sc_int_enabled[4] &= ~INT4_SW_FREEQ_EMPTY;
		KASSERT(psc->sc_int_enabled[0] == 0);
		KASSERT(psc->sc_int_enabled[1] == 0);
		KASSERT(psc->sc_int_enabled[2] == 0);
		KASSERT(psc->sc_int_enabled[3] == 0);
		KASSERT(psc->sc_int_enabled[4] == 0);
	} else if (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0)
			== sc->sc_port1) {
		psc->sc_int_select[4] &= ~INT4_SW_FREEQ_EMPTY;
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
		    psc->sc_int_select[4]);
	}
	gmac_intr_update(psc);
	if (disable) {
#if 0
		if (psc->sc_running == 0) {
			gmac_mapcache_destroy(&psc->sc_txmaps);
			gmac_mapcache_destroy(&psc->sc_rxmaps);
		}
#endif
	}
}

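/*
 * gmc_ifinit:
 *	Bring the port up: fill the DMA map caches and create the
 *	receive and software transmit queues on first use, program the
 *	rx filter, DMA parameters and MAC config, route the sw freeq
 *	empty interrupt to this port if it is the only one running,
 *	enable its interrupts and start the MII tick callout.
 */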
static int
gmc_ifinit(struct ifnet *ifp)
{
	struct gmc_softc * const sc = ifp->if_softc;
	struct gmac_softc * const psc = sc->sc_psc;
	uint32_t new, mask;

	gmac_mapcache_fill(psc->sc_rxmaps, MIN_RXMAPS);
	gmac_mapcache_fill(psc->sc_txmaps, MIN_TXMAPS);

	if (sc->sc_rxq == NULL) {
		gmac_hwqmem_t *hqm;
		hqm = gmac_hwqmem_create(psc->sc_rxmaps, 16, /*RXQ_NDESCS,*/ 1,
		   HQM_CONSUMER | HQM_RX);
		sc->sc_rxq = gmac_hwqueue_create(hqm, sc->sc_iot,
		    sc->sc_ioh, GMAC_DEF_RXQn_RWPTR(sc->sc_port1),
		    GMAC_DEF_RXQn_BASE(sc->sc_port1), 0);
		if (sc->sc_rxq == NULL) {
			gmac_hwqmem_destroy(hqm);
			goto failed;
		}
		sc->sc_rxq->hwq_ifp = ifp;
		sc->sc_rxq->hwq_producer = psc->sc_swfreeq;
	}

	if (sc->sc_txq[0] == NULL) {
		gmac_hwqueue_t *hwq, *last_hwq;
		gmac_hwqmem_t *hqm;
		size_t i;

		hqm = gmac_hwqmem_create(psc->sc_txmaps, TXQ_NDESCS, 6,
		   HQM_PRODUCER | HQM_TX);
		KASSERT(hqm != NULL);
		for (i = 0; i < __arraycount(sc->sc_txq); i++) {
			sc->sc_txq[i] = gmac_hwqueue_create(hqm, sc->sc_iot,
			    sc->sc_dma_ioh, GMAC_SW_TX_Qn_RWPTR(i),
			    GMAC_SW_TX_Q_BASE, i);
			if (sc->sc_txq[i] == NULL) {
				if (i == 0)
					gmac_hwqmem_destroy(hqm);
				goto failed;
			}
			sc->sc_txq[i]->hwq_ifp = ifp;

			last_hwq = NULL;
			SLIST_FOREACH(hwq, &psc->sc_hwfreeq->hwq_producers,
			    hwq_link) {
				if (sc->sc_txq[i]->hwq_qoff < hwq->hwq_qoff)
					break;
				last_hwq = hwq;
			}
			if (last_hwq == NULL)
				SLIST_INSERT_HEAD(
				    &psc->sc_hwfreeq->hwq_producers,
				    sc->sc_txq[i], hwq_link);
			else
				SLIST_INSERT_AFTER(last_hwq, sc->sc_txq[i],
				    hwq_link);
		}
	}

	gmc_filter_change(sc);

	mask = DMAVR_LOOPBACK | DMAVR_DROP_SMALL_ACK | DMAVR_EXTRABYTES_MASK
	    | DMAVR_RXBURSTSIZE_MASK | DMAVR_RXBUSWIDTH_MASK
	    | DMAVR_TXBURSTSIZE_MASK | DMAVR_TXBUSWIDTH_MASK;
	new = DMAVR_RXDMA_ENABLE | DMAVR_TXDMA_ENABLE
	    | DMAVR_EXTRABYTES(2)
	    | DMAVR_RXBURSTSIZE(DMAVR_BURSTSIZE_32W)
	    | DMAVR_RXBUSWIDTH(DMAVR_BUSWIDTH_32BITS)
	    | DMAVR_TXBURSTSIZE(DMAVR_BURSTSIZE_32W)
	    | DMAVR_TXBUSWIDTH(DMAVR_BUSWIDTH_32BITS);
	new |= sc->sc_dmavr & ~mask;
	if (sc->sc_dmavr != new) {
		sc->sc_dmavr = new;
		bus_space_write_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR,
		    sc->sc_dmavr);
		aprint_debug_dev(sc->sc_dev, "gmc_ifinit: dmavr=%#x/%#x\n",
		    sc->sc_dmavr,
		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR));
	}

	mask = CONFIG0_MAXLEN_MASK | CONFIG0_TX_DISABLE | CONFIG0_RX_DISABLE
	    | CONFIG0_LOOPBACK |/*CONFIG0_SIM_TEST|*/CONFIG0_INVERSE_RXC_RGMII
	    | CONFIG0_RGMII_INBAND_STATUS_ENABLE;
	new = CONFIG0_MAXLEN(CONFIG0_MAXLEN_1536) | CONFIG0_R_LATCHED_MMII;
	new |= (sc->sc_gmac_config[0] & ~mask);
	if (sc->sc_gmac_config[0] != new) {
		sc->sc_gmac_config[0] = new;
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0,
		    sc->sc_gmac_config[0]);
		aprint_debug_dev(sc->sc_dev, "gmc_ifinit: config0=%#x/%#x\n",
		    sc->sc_gmac_config[0],
		    bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0));
	}

	psc->sc_rxpkts_per_sec +=
	    gmac_rxproduce(psc->sc_swfreeq, psc->sc_swfree_min);

	/*
	 * If we will be the only active interface, make sure the sw freeq
	 * interrupt gets routed to us.
	 */
	if (psc->sc_running == 0
	    && (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0) != sc->sc_port1)) {
		psc->sc_int_select[4] ^= INT4_SW_FREEQ_EMPTY;
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
		    psc->sc_int_select[4]);
	}
	sc->sc_int_enabled[0] = sc->sc_int_mask[0]
	    & (INT0_TXDERR|INT0_TXPERR|INT0_RXDERR|INT0_RXPERR|INT0_SWTXQ_EOF);
	sc->sc_int_enabled[1] = sc->sc_int_mask[1] & INT1_DEF_RXQ_EOF;
	sc->sc_int_enabled[4] = INT4_SW_FREEQ_EMPTY | (sc->sc_int_mask[4]
	    & (INT4_TX_FAIL | INT4_MIB_HEMIWRAP | INT4_RX_FIFO_OVRN
	       | INT4_RGMII_STSCHG));

	psc->sc_int_enabled[0] |= sc->sc_int_enabled[0];
	psc->sc_int_enabled[1] |= sc->sc_int_enabled[1];
	psc->sc_int_enabled[4] |= sc->sc_int_enabled[4];

	gmac_intr_update(psc);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		mii_tick(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	psc->sc_running |= (sc->sc_port1 ? 2 : 1);

	callout_schedule(&sc->sc_mii_ch, hz);

	return 0;

failed:
	gmc_ifstop(ifp, true);
	return ENOMEM;
}

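/*
 * gmc_intr:
 *	Interrupt handler: report transmit/receive DMA errors, reclaim
 *	completed transmit descriptors, replenish the software free
 *	queue, consume received packets from the default rx queue and
 *	handle the miscellaneous INT4 status bits, acking each source
 *	as it is serviced.
 */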
static int
gmc_intr(void *arg)
{
	struct gmc_softc * const sc = arg;
	uint32_t int0_status, int1_status, int4_status;
	uint32_t status;
	bool do_ifstart = false;
	int rv = 0;

	aprint_debug_dev(sc->sc_dev, "gmac_intr: entry\n");

	int0_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    GMAC_INT0_STATUS);
	int1_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    GMAC_INT1_STATUS);
	int4_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    GMAC_INT4_STATUS);

	aprint_debug_dev(sc->sc_dev, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
	    int0_status, int1_status,
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
	    int4_status);

#if 0
	aprint_debug_dev(sc->sc_dev, "gmac_intr: mask=%#x/%#x/%#x/%#x/%#x\n",
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_MASK),
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_MASK),
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_MASK),
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_MASK),
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK));
#endif

	status = int0_status & sc->sc_int_mask[0];
	if (status & (INT0_TXDERR | INT0_TXPERR)) {
		aprint_error_dev(sc->sc_dev,
		    "transmit%s%s error: %#x %08x bufaddr %#x\n",
		    status & INT0_TXDERR ? " data" : "",
		    status & INT0_TXPERR ? " protocol" : "",
		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
		    GMAC_DMA_TX_CUR_DESC),
		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
		    GMAC_SW_TX_Q0_RWPTR),
		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
		    GMAC_DMA_TX_DESC2));
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS,
		    status & (INT0_TXDERR | INT0_TXPERR));
		Debugger();
	}
	if (status & (INT0_RXDERR | INT0_RXPERR)) {
		aprint_error_dev(sc->sc_dev,
		    "receive%s%s error: %#x %#x=%#x/%#x/%#x/%#x\n",
		    status & INT0_RXDERR ? " data" : "",
		    status & INT0_RXPERR ? " protocol" : "",
		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
		    GMAC_DMA_RX_CUR_DESC),
		bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    GMAC_SWFREEQ_RWPTR),
		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
		    GMAC_DMA_RX_DESC0),
		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
		    GMAC_DMA_RX_DESC1),
		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
		    GMAC_DMA_RX_DESC2),
		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
		    GMAC_DMA_RX_DESC3));
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS,
		    status & (INT0_RXDERR | INT0_RXPERR));
		Debugger();
	}
	if (status & INT0_SWTXQ_EOF) {
		status &= INT0_SWTXQ_EOF;
		for (int i = 0; status && i < __arraycount(sc->sc_txq); i++) {
			if (status & INT0_SWTXQn_EOF(i)) {
				gmac_hwqueue_sync(sc->sc_txq[i]);
				bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				    GMAC_INT0_STATUS,
				    sc->sc_int_mask[0] & (INT0_SWTXQn_EOF(i)
					| INT0_SWTXQn_FIN(i)));
				status &= ~INT0_SWTXQn_EOF(i);
			}
		}
		do_ifstart = true;
		rv = 1;
	}

	if (int4_status & INT4_SW_FREEQ_EMPTY) {
		struct gmac_softc * const psc = sc->sc_psc;
		psc->sc_rxpkts_per_sec +=
		    gmac_rxproduce(psc->sc_swfreeq, psc->sc_swfree_min);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS,
		    int4_status & INT4_SW_FREEQ_EMPTY);
		rv = 1;
	}

	status = int1_status & sc->sc_int_mask[1];
	if (status & INT1_DEF_RXQ_EOF) {
		struct gmac_softc * const psc = sc->sc_psc;
		psc->sc_rxpkts_per_sec +=
		    gmac_hwqueue_consume(sc->sc_rxq, psc->sc_swfree_min);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS,
		    status & INT1_DEF_RXQ_EOF);
		rv = 1;
	}

	status = int4_status & sc->sc_int_enabled[4];
	if (status & INT4_TX_FAIL) {
	}
	if (status & INT4_MIB_HEMIWRAP) {
	}
	if (status & INT4_RX_XON) {
	}
	if (status & INT4_RX_XOFF) {
	}
	if (status & INT4_TX_XON) {
	}
	if (status & INT4_TX_XOFF) {
	}
	if (status & INT4_RX_FIFO_OVRN) {
#if 0
		if (sc->sc_psc->sc_swfree_min < MAX_RXMAPS) {
			sc->sc_psc->sc_swfree_min++;
			gmac_swfree_min_update(psc);
		}
#endif
		sc->sc_if.if_ierrors++;
	}
	if (status & INT4_RGMII_STSCHG) {
		mii_pollstat(&sc->sc_mii);
	}
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS, status);

	if (do_ifstart)
		if_schedule_deferred_start(&sc->sc_if);

	aprint_debug_dev(sc->sc_dev, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS),
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS),
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS));
	aprint_debug_dev(sc->sc_dev, "gmac_intr: exit rv=%d\n", rv);
	return rv;
}

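/*
 * gmc_match:
 *	Accept the child only for a sane PHY address, port number and
 *	interrupt, and only if that GMAC port has not been attached yet.
 */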
static int
gmc_match(device_t parent, cfdata_t cf, void *aux)
{
	struct gmac_softc *psc = device_private(parent);
	struct gmac_attach_args *gma = aux;

	if ((unsigned int)gma->gma_phy > 31)
		return 0;
	if ((unsigned int)gma->gma_port > 1)
		return 0;
	if (gma->gma_intr < 1 || gma->gma_intr > 2)
		return 0;

	if (psc->sc_ports & (1 << gma->gma_port))
		return 0;

	return 1;
}

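/*
 * gmc_attach:
 *	Attach one GMAC port: map its DMA and MAC register subregions,
 *	set up the ifnet and MII glue, attach the interface with a
 *	hard-coded Ethernet address, snapshot the current hardware
 *	register state, and establish the interrupt (port 0 only in
 *	this revision) and the MII tick callout.
 */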
static void
gmc_attach(device_t parent, device_t self, void *aux)
{
	struct gmac_softc * const psc = device_private(parent);
	struct gmc_softc * const sc = device_private(self);
	struct gmac_attach_args *gma = aux;
	struct ifnet * const ifp = &sc->sc_if;
	struct mii_data * const mii = &sc->sc_mii;
	static const char eaddrs[2][6] = {
		"\x0\x52\xc3\x11\x22\x33",
		"\x0\x52\xc3\x44\x55\x66",
	};

	psc->sc_ports |= 1 << gma->gma_port;
	sc->sc_port1 = (gma->gma_port == 1);
	sc->sc_phy = gma->gma_phy;

	sc->sc_dev = self;
	sc->sc_psc = psc;
	sc->sc_iot = psc->sc_iot;
	sc->sc_ioh = psc->sc_ioh;
	sc->sc_dmat = psc->sc_dmat;

	bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    GMAC_PORTn_DMA_OFFSET(gma->gma_port), GMAC_PORTn_DMA_SIZE,
	    &sc->sc_dma_ioh);
	bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    GMAC_PORTn_GMAC_OFFSET(gma->gma_port), GMAC_PORTn_GMAC_SIZE,
	    &sc->sc_gmac_ioh);
	aprint_normal("\n");
	aprint_naive("\n");

	strlcpy(ifp->if_xname, device_xname(self), sizeof(ifp->if_xname));
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_softc = sc;
	ifp->if_ioctl = gmc_ifioctl;
	ifp->if_stop  = gmc_ifstop;
	ifp->if_start = gmc_ifstart;
	ifp->if_init  = gmc_ifinit;

	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
	sc->sc_ec.ec_mii = mii;

	mii->mii_ifp = ifp;
	mii->mii_statchg = gmc_mii_statchg;
	mii->mii_readreg = gma->gma_mii_readreg;
	mii->mii_writereg = gma->gma_mii_writereg;

	ifmedia_init(&mii->mii_media, 0, gmc_mediachange, gmc_mediastatus);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, eaddrs[gma->gma_port]);
	mii_attach(sc->sc_dev, mii, 0xffffffff,
	    gma->gma_phy, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
//		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX);
	}

	sc->sc_gmac_status = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STATUS);
	sc->sc_gmac_sta_add[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STA_ADD0);
	sc->sc_gmac_sta_add[1] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STA_ADD1);
	sc->sc_gmac_sta_add[2] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STA_ADD2);
	sc->sc_gmac_mcast_filter[0] = bus_space_read_4(sc->sc_iot,
	    sc->sc_gmac_ioh, GMAC_MCAST_FILTER0);
	sc->sc_gmac_mcast_filter[1] = bus_space_read_4(sc->sc_iot,
	    sc->sc_gmac_ioh, GMAC_MCAST_FILTER1);
	sc->sc_gmac_rx_filter = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_RX_FILTER);
	sc->sc_gmac_config[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_CONFIG0);
	sc->sc_dmavr = bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR);

	/* sc->sc_int_enabled is already zeroed */
	sc->sc_int_mask[0] = (sc->sc_port1 ? INT0_GMAC1 : INT0_GMAC0);
	sc->sc_int_mask[1] = (sc->sc_port1 ? INT1_GMAC1 : INT1_GMAC0);
	sc->sc_int_mask[2] = (sc->sc_port1 ? INT2_GMAC1 : INT2_GMAC0);
	sc->sc_int_mask[3] = (sc->sc_port1 ? INT3_GMAC1 : INT3_GMAC0);
	sc->sc_int_mask[4] = (sc->sc_port1 ? INT4_GMAC1 : INT4_GMAC0);

	if (!sc->sc_port1) {
		sc->sc_ih = intr_establish(gma->gma_intr, IPL_NET,
		    IST_LEVEL_HIGH, gmc_intr, sc);
		KASSERT(sc->sc_ih != NULL);
	}

	callout_init(&sc->sc_mii_ch, 0);
	callout_setfunc(&sc->sc_mii_ch, gmc_mii_tick, sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	     ether_sprintf(CLLADDR(sc->sc_if.if_sadl)));
}

CFATTACH_DECL_NEW(gmc, sizeof(struct gmc_softc),
    gmc_match, gmc_attach, NULL, NULL);