/* $NetBSD: if_gmc.c,v 1.2 2008/12/15 04:44:27 matt Exp $ */
/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/gemini/gemini_reg.h>
#include <arm/gemini/gemini_gmacreg.h>
#include <arm/gemini/gemini_gmacvar.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

__KERNEL_RCSID(0, "$NetBSD: if_gmc.c,v 1.2 2008/12/15 04:44:27 matt Exp $");

#define	MAX_TXSEG	32

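/*
 * Per-port softc for a Gemini GMAC port.  Each gmc instance shares the
 * parent gmac_softc (sc_psc) with its sibling port and keeps shadow
 * copies of the registers it programs so that hardware writes are only
 * issued when a value actually changes.
 */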
struct gmc_softc {
	device_t sc_dev;
	struct gmac_softc *sc_psc;
	struct gmc_softc *sc_sibling;
	bus_dma_tag_t sc_dmat;
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_space_handle_t sc_dma_ioh;
	bus_space_handle_t sc_gmac_ioh;
	struct ethercom sc_ec;
	struct mii_data sc_mii;
	void *sc_ih;
	bool sc_port1;
	gmac_hwqueue_t *sc_rxq;
	gmac_hwqueue_t *sc_txq[6];
	callout_t sc_mii_ch;

	uint32_t sc_gmac_status;
	uint32_t sc_gmac_sta_add[3];
	uint32_t sc_gmac_mcast_filter[2];
	uint32_t sc_gmac_rx_filter;
	uint32_t sc_gmac_config[2];
	uint32_t sc_dmavr;

	uint32_t sc_int_mask[5];
	uint32_t sc_int_enabled[5];
};

#define	sc_if	sc_ec.ec_if

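/*
 * Queue an mbuf chain on a software transmit queue.  The DMA engine
 * requires 16-bit aligned buffers, so odd-aligned mbufs are fixed up
 * first.  Returns false if no DMA map or descriptor space is available
 * so the caller can requeue the packet; returns true once the packet
 * has been handed to the hardware (or dropped on a map-load error).
 */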
static bool
gmc_txqueue(struct gmc_softc *sc, gmac_hwqueue_t *hwq, struct mbuf *m)
{
	bus_dmamap_t map;
	uint32_t desc1, desc3;
	struct mbuf *last_m, *m0;
	size_t count, i;
	int error;
	gmac_desc_t *d;

	KASSERT(hwq != NULL);

	map = gmac_mapcache_get(hwq->hwq_hqm->hqm_mc);
	if (map == NULL)
		return false;

	for (last_m = NULL, m0 = m, count = 0;
	     m0 != NULL;
	     last_m = m0, m0 = m0->m_next) {
		vaddr_t addr = (uintptr_t)m0->m_data;
		if (m0->m_len == 0)
			continue;
		if (addr & 1) {
			if (last_m != NULL && M_TRAILINGSPACE(last_m) > 0) {
				/* steal the odd leading byte of this mbuf */
				last_m->m_data[last_m->m_len++] = *m0->m_data++;
				m0->m_len--;
			} else if (M_TRAILINGSPACE(m0) > 0) {
				memmove(m0->m_data + 1, m0->m_data, m0->m_len);
				m0->m_data++;
			} else if (M_LEADINGSPACE(m0) > 0) {
				memmove(m0->m_data - 1, m0->m_data, m0->m_len);
				m0->m_data--;
			} else {
				panic("gmc_txqueue: odd addr %p", m0->m_data);
			}
		}
		count += ((addr & PGOFSET) + m0->m_len + PGOFSET) >> PGSHIFT;
	}

	gmac_hwqueue_sync(hwq);
	if (hwq->hwq_free <= count) {
		gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
		return false;
	}

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "ifstart: load failed: %d\n",
		    error);
		gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
		m_freem(m);
		sc->sc_if.if_oerrors++;
		return true;
	}
	KASSERT(map->dm_nsegs > 0);

	/*
	 * Sync the mbuf contents to memory/cache.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Now we need to load the descriptors...
	 */
	desc1 = m->m_pkthdr.len;
	desc3 = DESC3_SOF;
	i = 0;
	d = NULL;
	do {
		if (i > 0)
			aprint_normal_dev(sc->sc_dev,
			    "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
			    i-1, d, d->d_desc0, d->d_desc1,
			    d->d_bufaddr, d->d_desc3);
		d = gmac_hwqueue_desc(hwq, i);
		KASSERT(map->dm_segs[i].ds_len > 0);
		KASSERT((map->dm_segs[i].ds_addr & 1) == 0);
		d->d_desc0 = htole32(map->dm_segs[i].ds_len);
		d->d_desc1 = htole32(desc1);
		d->d_bufaddr = htole32(map->dm_segs[i].ds_addr);
		d->d_desc3 = htole32(desc3);
		desc3 = 0;
	} while (++i < map->dm_nsegs);

	d->d_desc3 |= htole32(DESC3_EOF|DESC3_EOFIE);
	aprint_normal_dev(sc->sc_dev,
	    "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
	    i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
	M_SETCTX(m, map);
	IF_ENQUEUE(&hwq->hwq_ifq, m);
	/*
	 * Last descriptor has been marked.  Give them to the h/w.
	 * This will sync for us.
	 */
	gmac_hwqueue_produce(hwq, map->dm_nsegs);
	aprint_normal_dev(sc->sc_dev,
	    "gmac_txqueue: *%zu@%p=%#x/%#x/%#x/%#x\n",
	    i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
	return true;
}

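/*
 * Top up the software free queue with receive mbuf clusters until the
 * number of posted buffers reaches the parent's sc_swfree_min target.
 */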
static void
gmc_rxproduce(struct gmc_softc *sc)
{
	struct gmac_softc * const psc = sc->sc_psc;
	gmac_hwqueue_t * const hwq = psc->sc_swfreeq;
	gmac_hwqmem_t * const hqm = hwq->hwq_hqm;
	size_t i;

	for (i = 0;
	     hwq->hwq_size - hwq->hwq_free - 1 + i < psc->sc_swfree_min; i++) {
		bus_dmamap_t map;
		gmac_desc_t *d;
		struct mbuf *m;
		int error;

		map = gmac_mapcache_get(hqm->hqm_mc);
		if (map == NULL)
			break;

		KASSERT(map->dm_mapsize == 0);

		m = m_get(MT_DATA, M_DONTWAIT);
		if (m == NULL) {
			gmac_mapcache_put(hqm->hqm_mc, map);
			break;
		}

		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			gmac_mapcache_put(hqm->hqm_mc, map);
			break;
		}
		error = bus_dmamap_load(hqm->hqm_dmat, map, m->m_data,
		    MCLBYTES, NULL, BUS_DMA_READ|BUS_DMA_NOWAIT);
		if (error) {
			m_free(m);
			gmac_mapcache_put(hqm->hqm_mc, map);
			aprint_error_dev(sc->sc_dev,
			    "map %p(%zu): can't map rx mbuf(%p) wptr=%zu: %d\n",
			    map, map->_dm_size, m,
			    (hwq->hwq_wptr + i) & (hwq->hwq_size - 1),
			    error);
			Debugger();
			break;
		}
		bus_dmamap_sync(hqm->hqm_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		m->m_len = 0;
		M_SETCTX(m, map);
		d = gmac_hwqueue_desc(hwq, i);
		d->d_desc0   = htole32(map->dm_segs->ds_len);
		d->d_bufaddr = htole32(map->dm_segs->ds_addr);
		IF_ENQUEUE(&hwq->hwq_ifq, m);
		sc->sc_psc->sc_rxpkts_per_sec++;
	}

	if (i)
		gmac_hwqueue_produce(hwq, i);
}

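/*
 * Reprogram the station address, multicast hash, and RX filter from the
 * current interface state, touching only the registers whose shadowed
 * values have changed.
 */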
static void
gmc_filter_change(struct gmc_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t mhash[2];
	uint32_t new0, new1, new2;
	const char * const eaddr = CLLADDR(sc->sc_if.if_sadl);

	new0 = eaddr[0] | ((eaddr[1] | (eaddr[2] | (eaddr[3] << 8)) << 8) << 8);
	new1 = eaddr[4] | (eaddr[5] << 8);
	new2 = 0;
	if (sc->sc_gmac_sta_add[0] != new0
	    || sc->sc_gmac_sta_add[1] != new1
	    || sc->sc_gmac_sta_add[2] != new2) {
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD0,
		    new0);
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD1,
		    new1);
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD2,
		    new2);
		sc->sc_gmac_sta_add[0] = new0;
		sc->sc_gmac_sta_add[1] = new1;
		sc->sc_gmac_sta_add[2] = new2;
	}

	mhash[0] = 0;
	mhash[1] = 0;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		size_t i;
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			mhash[0] = mhash[1] = 0xffffffff;
			break;
		}
		i = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
		mhash[(i >> 5) & 1] |= 1 << (i & 31);
		ETHER_NEXT_MULTI(step, enm);
	}

	if (sc->sc_gmac_mcast_filter[0] != mhash[0]
	    || sc->sc_gmac_mcast_filter[1] != mhash[1]) {
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
		    GMAC_MCAST_FILTER0, mhash[0]);
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
		    GMAC_MCAST_FILTER1, mhash[1]);
		sc->sc_gmac_mcast_filter[0] = mhash[0];
		sc->sc_gmac_mcast_filter[1] = mhash[1];
	}

	new0 = sc->sc_gmac_rx_filter & ~RXFILTER_PROMISC;
	new0 |= RXFILTER_BROADCAST | RXFILTER_UNICAST | RXFILTER_MULTICAST;
	if (sc->sc_if.if_flags & IFF_PROMISC)
		new0 |= RXFILTER_PROMISC;

	if (new0 != sc->sc_gmac_rx_filter) {
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_RX_FILTER,
		    new0);
		sc->sc_gmac_rx_filter = new0;
	}
}

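/*
 * Once-a-second callout: ratchet the receive buffer target back down
 * after fifo-overrun pressure subsides and poll the PHY via mii_tick().
 */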
static void
gmc_mii_tick(void *arg)
{
	struct gmc_softc * const sc = arg;
	struct gmac_softc * const psc = sc->sc_psc;
	int s = splnet();

	/*
	 * If we had to increase the number of receive mbufs due to fifo
	 * overflows, we need a way to decrease them again.  So for every
	 * second in which we receive MIN_RXMAPS or fewer packets, decrement
	 * swfree_min until it returns to MIN_RXMAPS.
	 */
	if (psc->sc_rxpkts_per_sec <= MIN_RXMAPS
	    && psc->sc_swfree_min > MIN_RXMAPS)
		psc->sc_swfree_min--;
	/*
	 * If only one GMAC is running or this is port0, reset the count.
	 */
	if (psc->sc_running != 3 || !sc->sc_port1)
		psc->sc_rxpkts_per_sec = 0;

	mii_tick(&sc->sc_mii);
	if (sc->sc_if.if_flags & IFF_RUNNING)
		callout_schedule(&sc->sc_mii_ch, hz);

	splx(s);
}

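/*
 * ifmedia change callback: push the requested media setting to the PHY
 * when the interface is up.
 */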
static int
gmc_mediachange(struct ifnet *ifp)
{
	struct gmc_softc * const sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	return mii_mediachg(&sc->sc_mii);
}

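/*
 * ifmedia status callback: report the current link status and active
 * media as seen by the MII layer.
 */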
static void
gmc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gmc_softc * const sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

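/*
 * MII status-change callback: translate the negotiated speed, duplex,
 * and link state into the GMAC STATUS register, writing it only when
 * the shadowed value changes.
 */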
static void
gmc_mii_statchg(device_t self)
{
	struct gmc_softc * const sc = device_private(self);
	uint32_t gmac_status;

	gmac_status = sc->sc_gmac_status;
	gmac_status &= ~STATUS_PHYMODE_MASK;
	gmac_status |= STATUS_PHYMODE_GMII;
	gmac_status &= ~STATUS_SPEED_MASK;
	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T) {
		gmac_status |= STATUS_SPEED_1000M;
	} else if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX) {
		gmac_status |= STATUS_SPEED_100M;
	} else {
		gmac_status |= STATUS_SPEED_10M;
	}

	if (sc->sc_mii.mii_media_active & IFM_FDX)
		gmac_status |= STATUS_DUPLEX_FULL;
	else
		gmac_status &= ~STATUS_DUPLEX_FULL;

	if (sc->sc_mii.mii_media_active & IFM_ACTIVE)
		gmac_status |= STATUS_LINK_ON;
	else
		gmac_status &= ~STATUS_LINK_ON;

	gmac_status |= STATUS_LINK_ON; /* XXX */

	if (sc->sc_gmac_status != gmac_status) {
		aprint_normal_dev(sc->sc_dev,
		    "status change old=%#x new=%#x active=%#x\n",
		    sc->sc_gmac_status, gmac_status,
		    sc->sc_mii.mii_media_active);
		sc->sc_gmac_status = gmac_status;
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STATUS,
		    sc->sc_gmac_status);
	}
}

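/*
 * ioctl handler: media ioctls go to ifmedia, everything else to
 * ether_ioctl(); on ENETRESET just refresh the receive filter if the
 * interface is running.
 */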
static int
gmc_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct gmc_softc * const sc = ifp->if_softc;
	struct ifreq * const ifr = data;
	int s;
	int error;
	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING) {
				/*
				 * If the interface is running, we have to
				 * update its multicast filter.
				 */
				gmc_filter_change(sc);
			}
			error = 0;
		}
	}

	splx(s);
	return error;
}

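/*
 * if_start: drain the interface send queue into the first software
 * transmit queue; if the queue fills, requeue the packet and mark the
 * interface OACTIVE.
 */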
static void
gmc_ifstart(struct ifnet *ifp)
{
	struct gmc_softc * const sc = ifp->if_softc;

	if ((sc->sc_gmac_status & STATUS_LINK_ON) == 0
	    || (ifp->if_flags & IFF_RUNNING) == 0)
		return;

	for (;;) {
		struct mbuf *m;
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		if (!gmc_txqueue(sc, sc->sc_txq[0], m)) {
			IF_PREPEND(&ifp->if_snd, m);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}
}

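/*
 * if_stop: withdraw this port's interrupt enables from the shared GMAC
 * core and, when it is the last running port, quiesce the software
 * free-queue interrupt as well.
 */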
static void
gmc_ifstop(struct ifnet *ifp, int disable)
{
	struct gmc_softc * const sc = ifp->if_softc;
	struct gmac_softc * const psc = sc->sc_psc;

	psc->sc_running &= ~(sc->sc_port1 ? 2 : 1);
	psc->sc_int_enabled[0] &= ~sc->sc_int_enabled[0];
	psc->sc_int_enabled[1] &= ~sc->sc_int_enabled[1];
	psc->sc_int_enabled[2] &= ~sc->sc_int_enabled[2];
	psc->sc_int_enabled[3] &= ~sc->sc_int_enabled[3];
	psc->sc_int_enabled[4] &= ~sc->sc_int_enabled[4] | INT4_SW_FREEQ_EMPTY;
	if (psc->sc_running == 0) {
		psc->sc_int_enabled[4] &= ~INT4_SW_FREEQ_EMPTY;
		KASSERT(psc->sc_int_enabled[0] == 0);
		KASSERT(psc->sc_int_enabled[1] == 0);
		KASSERT(psc->sc_int_enabled[2] == 0);
		KASSERT(psc->sc_int_enabled[3] == 0);
		KASSERT(psc->sc_int_enabled[4] == 0);
	} else if (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0)
			== sc->sc_port1) {
		psc->sc_int_select[4] &= ~INT4_SW_FREEQ_EMPTY;
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
		    psc->sc_int_select[4]);
	}
	gmac_intr_update(psc);
	if (disable) {
#if 0
		if (psc->sc_running == 0) {
			gmac_mapcache_destroy(&psc->sc_txmaps);
			gmac_mapcache_destroy(&psc->sc_rxmaps);
		}
#endif
	}
}

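/*
 * if_init: allocate the receive and transmit hardware queues on first
 * use, program the DMA and MAC configuration registers, prime the
 * software free queue, and enable this port's interrupts.
 */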
static int
gmc_ifinit(struct ifnet *ifp)
{
	struct gmc_softc * const sc = ifp->if_softc;
	struct gmac_softc * const psc = sc->sc_psc;
#if 1
	uint32_t new, mask;
#endif

	gmac_mapcache_fill(psc->sc_rxmaps, MIN_RXMAPS);
	gmac_mapcache_fill(psc->sc_txmaps, MIN_TXMAPS);

	if (sc->sc_rxq == NULL) {
		gmac_hwqmem_t *hqm;
		hqm = gmac_hwqmem_create(psc->sc_rxmaps, RXQ_NDESCS, 1,
		   HQM_CONSUMER|HQM_RX);
		sc->sc_rxq = gmac_hwqueue_create(hqm, sc->sc_iot,
		    sc->sc_ioh, GMAC_DEF_RXQn_RWPTR(sc->sc_port1),
		    GMAC_DEF_RXQn_BASE(sc->sc_port1), 0);
		if (sc->sc_rxq == NULL) {
			gmac_hwqmem_destroy(hqm);
			goto failed;
		}
		sc->sc_rxq->hwq_ifp = ifp;
		sc->sc_rxq->hwq_producer = psc->sc_swfreeq;
	}

	if (sc->sc_txq[0] == NULL) {
		gmac_hwqueue_t *hwq, *last_hwq;
		gmac_hwqmem_t *hqm;
		size_t i;

		hqm = gmac_hwqmem_create(psc->sc_txmaps, TXQ_NDESCS, 6,
		   HQM_PRODUCER|HQM_TX);
		KASSERT(hqm != NULL);
		for (i = 0; i < __arraycount(sc->sc_txq); i++) {
			sc->sc_txq[i] = gmac_hwqueue_create(hqm, sc->sc_iot,
			    sc->sc_dma_ioh, GMAC_SW_TX_Qn_RWPTR(i),
			    GMAC_SW_TX_Q_BASE, i);
			if (sc->sc_txq[i] == NULL) {
				if (i == 0)
					gmac_hwqmem_destroy(hqm);
				goto failed;
			}
			sc->sc_txq[i]->hwq_ifp = ifp;

			last_hwq = NULL;
			SLIST_FOREACH(hwq, &psc->sc_hwfreeq->hwq_producers,
			    hwq_link) {
				if (sc->sc_txq[i]->hwq_qoff < hwq->hwq_qoff)
					break;
				last_hwq = hwq;
			}
			if (last_hwq == NULL)
				SLIST_INSERT_HEAD(
				    &psc->sc_hwfreeq->hwq_producers,
				    sc->sc_txq[i], hwq_link);
			else
				SLIST_INSERT_AFTER(last_hwq, sc->sc_txq[i],
				    hwq_link);
		}
	}

	gmc_filter_change(sc);

#if 1
	mask = DMAVR_LOOPBACK|DMAVR_DROP_SMALL_ACK|DMAVR_EXTRABYTES_MASK
	    |DMAVR_RXBURSTSIZE_MASK|DMAVR_RXBUSWIDTH_MASK
	    |DMAVR_TXBURSTSIZE_MASK|DMAVR_TXBUSWIDTH_MASK;
	new = /* DMAVR_RXDMA_ENABLE| */ DMAVR_TXDMA_ENABLE
	    |DMAVR_EXTRABYTES(2)
	    |DMAVR_RXBURSTSIZE(DMAVR_BURSTSIZE_32W)
	    |DMAVR_RXBUSWIDTH(DMAVR_BUSWIDTH_32BITS)
	    |DMAVR_TXBURSTSIZE(DMAVR_BURSTSIZE_32W)
	    |DMAVR_TXBUSWIDTH(DMAVR_BUSWIDTH_32BITS);
	new |= sc->sc_dmavr & ~mask;
	if (sc->sc_dmavr != new) {
		sc->sc_dmavr = new;
		bus_space_write_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR,
		    sc->sc_dmavr);
		aprint_normal_dev(sc->sc_dev, "gmc_ifinit: dmavr=%#x/%#x\n",
		    sc->sc_dmavr,
		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR));
	}

	mask = CONFIG0_MAXLEN_MASK|CONFIG0_TX_DISABLE/*|CONFIG0_RX_DISABLE*/
	    |CONFIG0_LOOPBACK|CONFIG0_SIM_TEST|CONFIG0_INVERSE_RXC_RGMII
	    |CONFIG0_R_LATCHED_MMII|CONFIG0_RGMII_INBAND_STATUS_ENABLE;
	new = CONFIG0_MAXLEN(CONFIG0_MAXLEN_1536);
	new |= (sc->sc_gmac_config[0] & ~mask);
	if (sc->sc_gmac_config[0] != new) {
		sc->sc_gmac_config[0] = new;
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0,
		    sc->sc_gmac_config[0]);
		aprint_normal_dev(sc->sc_dev, "gmc_ifinit: config0=%#x/%#x\n",
		    sc->sc_gmac_config[0],
		    bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
			GMAC_CONFIG0));
	}

	gmc_rxproduce(sc);

	/*
	 * If we will be the only active interface, make sure the sw freeq
	 * interrupt gets routed to us.
	 */
	if (psc->sc_running == 0
	    && (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0)
		!= sc->sc_port1)) {
		psc->sc_int_select[4] ^= INT4_SW_FREEQ_EMPTY;
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
		    psc->sc_int_select[4]);
	}
	sc->sc_int_enabled[0] = sc->sc_int_mask[0]
	    & (INT0_TXDERR|INT0_TXPERR|INT0_RXDERR|INT0_RXPERR|INT0_SWTXQ_EOF);
	sc->sc_int_enabled[1] = sc->sc_int_mask[1] & INT1_DEF_RXQ_EOF;
	sc->sc_int_enabled[4] = INT4_SW_FREEQ_EMPTY | (sc->sc_int_mask[4]
	    & (INT4_TX_FAIL|INT4_MIB_HEMIWRAP|INT4_RX_FIFO_OVRN
	       |INT4_RGMII_STSCHG));

	psc->sc_int_enabled[0] |= sc->sc_int_enabled[0];
	psc->sc_int_enabled[1] |= sc->sc_int_enabled[1];
	psc->sc_int_enabled[4] |= sc->sc_int_enabled[4];

	gmac_intr_update(psc);
#endif

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		mii_tick(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	psc->sc_running |= (sc->sc_port1 ? 2 : 1);

	callout_schedule(&sc->sc_mii_ch, hz);

	return 0;

failed:
	gmc_ifstop(ifp, true);
	return ENOMEM;
}

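/*
 * Interrupt handler: acknowledge and service the per-port interrupt
 * causes (transmit/receive errors, transmit EOF, default RX queue EOF,
 * software free-queue empty, fifo overrun, RGMII status change) and
 * kick the transmit path again once descriptors have been reclaimed.
 */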
static int
gmc_intr(void *arg)
{
	struct gmc_softc * const sc = arg;
	uint32_t int0_status, int1_status, int4_status;
	uint32_t status;
	bool do_ifstart = false;
	int rv = 0;

	int0_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    GMAC_INT0_STATUS);
	int1_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    GMAC_INT1_STATUS);
	int4_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    GMAC_INT4_STATUS);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS,
	    int0_status & sc->sc_int_enabled[0]);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS,
	    int1_status & sc->sc_int_enabled[1]);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS,
	    int4_status & sc->sc_int_enabled[4]);

	aprint_normal_dev(sc->sc_dev, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
	    int0_status, int1_status,
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
	    int4_status);

	aprint_normal_dev(sc->sc_dev, "gmac_intr: mask=%#x/%#x/%#x/%#x/%#x\n",
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_MASK),
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_MASK),
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_MASK),
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_MASK),
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK));

	status = int0_status & sc->sc_int_mask[0];
	if (status & (INT0_TXDERR|INT0_TXPERR)) {
		aprint_error_dev(sc->sc_dev,
		    "transmit%s%s error: %#x %#x bufaddr %#x\n",
		    status & INT0_TXDERR ? " data" : "",
		    status & INT0_TXPERR ? " protocol" : "",
		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
			GMAC_DMA_TX_CUR_DESC),
		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
			GMAC_SW_TX_Q0_RWPTR),
		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
			GMAC_DMA_TX_DESC2));
		Debugger();
	}
	if (status & (INT0_RXDERR|INT0_RXPERR)) {
		aprint_error_dev(sc->sc_dev,
		    "receive%s%s error: %#x %#x bufaddr %#x\n",
		    status & INT0_RXDERR ? " data" : "",
		    status & INT0_RXPERR ? " protocol" : "",
		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
			GMAC_DMA_RX_CUR_DESC),
		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			GMAC_DEF_RXQn_RWPTR(sc->sc_port1)),
		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
			GMAC_DMA_RX_DESC2));
		Debugger();
	}
	if (status & INT0_SWTXQ_EOF) {
		status &= INT0_SWTXQ_EOF;
		for (int i = 0; status && i < __arraycount(sc->sc_txq); i++) {
			if (status & INT0_SWTXQn_EOF(i)) {
				gmac_hwqueue_sync(sc->sc_txq[i]);
				status &= ~INT0_SWTXQn_EOF(i);
			}
		}
#if 0
		/*
		 * If we got an EOF, that means something wound up in the
		 * hardware freeq, so go reclaim it.
		 */
//		gmac_hwqueue_consume(sc->sc_psc->sc_hwfreeq);
#endif
		do_ifstart = true;
		rv = 1;
	}

	status = int1_status & sc->sc_int_mask[1];
	if (status & INT1_DEF_RXQ_EOF) {
		gmac_hwqueue_consume(sc->sc_rxq);
		rv = 1;
	}

	if (int4_status & INT4_SW_FREEQ_EMPTY) {
		gmc_rxproduce(sc);
		rv = 1;
	}

	status = int4_status & sc->sc_int_enabled[4];
	if (status & INT4_TX_FAIL) {
	}
	if (status & INT4_MIB_HEMIWRAP) {
	}
	if (status & INT4_RX_XON) {
	}
	if (status & INT4_RX_XOFF) {
	}
	if (status & INT4_TX_XON) {
	}
	if (status & INT4_TX_XOFF) {
	}
	if (status & INT4_RX_FIFO_OVRN) {
		if (sc->sc_psc->sc_swfree_min < MAX_RXMAPS)
			sc->sc_psc->sc_swfree_min++;
		sc->sc_if.if_ierrors++;
	}
	if (status & INT4_RGMII_STSCHG) {
		mii_tick(&sc->sc_mii);
	}

	if (do_ifstart)
		gmc_ifstart(&sc->sc_if);

	aprint_normal_dev(sc->sc_dev, "gmac_intr: done\n");
	return rv;
}

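/*
 * Match a GMAC port instance: sanity-check the PHY address, port number,
 * and interrupt, and reject ports that have already attached.
 */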
static int
gmc_match(device_t parent, cfdata_t cf, void *aux)
{
	struct gmac_softc *psc = device_private(parent);
	struct gmac_attach_args *gma = aux;

	if ((unsigned int)gma->gma_phy > 31)
		return 0;
	if ((unsigned int)gma->gma_port > 1)
		return 0;
	if (gma->gma_intr < 1 || gma->gma_intr > 2)
		return 0;

	if (psc->sc_ports & (1 << gma->gma_port))
		return 0;

	return 1;
}

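/*
 * Attach a GMAC port: map its DMA and MAC register subregions, set up
 * the ifnet and MII, snapshot the current hardware register state into
 * the shadow copies, and establish the interrupt (port 0 only).
 */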
static void
gmc_attach(device_t parent, device_t self, void *aux)
{
	struct gmac_softc * const psc = device_private(parent);
	struct gmc_softc * const sc = device_private(self);
	struct gmac_attach_args *gma = aux;
	struct ifnet * const ifp = &sc->sc_if;
	static const char eaddrs[2][6] = {
		"\x0\x52\xc3\x11\x22\x33",
		"\x0\x52\xc3\x44\x55\x66",
	};

	psc->sc_ports |= 1 << gma->gma_port;
	sc->sc_port1 = (gma->gma_port == 1);

	sc->sc_dev = self;
	sc->sc_psc = psc;
	sc->sc_iot = psc->sc_iot;
	sc->sc_ioh = psc->sc_ioh;
	sc->sc_dmat = psc->sc_dmat;

	bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    GMAC_PORTn_DMA_OFFSET(gma->gma_port), GMAC_PORTn_DMA_SIZE,
	    &sc->sc_dma_ioh);
	bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    GMAC_PORTn_GMAC_OFFSET(gma->gma_port), GMAC_PORTn_GMAC_SIZE,
	    &sc->sc_gmac_ioh);
	aprint_normal("\n");
	aprint_naive("\n");

	strlcpy(ifp->if_xname, device_xname(self), sizeof(ifp->if_xname));
	ifp->if_flags = IFF_SIMPLEX|IFF_MULTICAST|IFF_BROADCAST;
	ifp->if_softc = sc;
	ifp->if_ioctl = gmc_ifioctl;
	ifp->if_stop  = gmc_ifstop;
	ifp->if_start = gmc_ifstart;
	ifp->if_init  = gmc_ifinit;

	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
	sc->sc_ec.ec_mii = &sc->sc_mii;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_statchg = gmc_mii_statchg;
	sc->sc_mii.mii_readreg = gma->gma_mii_readreg;
	sc->sc_mii.mii_writereg = gma->gma_mii_writereg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, gmc_mediachange,
	   gmc_mediastatus);

	if_attach(ifp);
	ether_ifattach(ifp, eaddrs[gma->gma_port]);
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
	    gma->gma_phy, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_1000_T|IFM_FDX);
	}

	sc->sc_gmac_status = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STATUS);
	sc->sc_gmac_sta_add[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STA_ADD0);
	sc->sc_gmac_sta_add[1] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STA_ADD1);
	sc->sc_gmac_sta_add[2] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STA_ADD2);
	sc->sc_gmac_mcast_filter[0] = bus_space_read_4(sc->sc_iot,
	    sc->sc_gmac_ioh, GMAC_MCAST_FILTER0);
	sc->sc_gmac_mcast_filter[1] = bus_space_read_4(sc->sc_iot,
	    sc->sc_gmac_ioh, GMAC_MCAST_FILTER1);
	sc->sc_gmac_rx_filter = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_RX_FILTER);
	sc->sc_gmac_config[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_CONFIG0);
	sc->sc_dmavr = bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR);

	/* sc->sc_int_enabled is already zeroed */
	sc->sc_int_mask[0] = (sc->sc_port1 ? INT0_GMAC1 : INT0_GMAC0);
	sc->sc_int_mask[1] = (sc->sc_port1 ? INT1_GMAC1 : INT1_GMAC0);
	sc->sc_int_mask[2] = (sc->sc_port1 ? INT2_GMAC1 : INT2_GMAC0);
	sc->sc_int_mask[3] = (sc->sc_port1 ? INT3_GMAC1 : INT3_GMAC0);
	sc->sc_int_mask[4] = (sc->sc_port1 ? INT4_GMAC1 : INT4_GMAC0);

	if (!sc->sc_port1) {
		sc->sc_ih = intr_establish(gma->gma_intr, IPL_NET,
		    IST_LEVEL_HIGH, gmc_intr, sc);
		KASSERT(sc->sc_ih != NULL);
	}

	callout_init(&sc->sc_mii_ch, 0);
	callout_setfunc(&sc->sc_mii_ch, gmc_mii_tick, sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	     ether_sprintf(CLLADDR(sc->sc_if.if_sadl)));
}

CFATTACH_DECL_NEW(gmc, sizeof(struct gmc_softc),
    gmc_match, gmc_attach, NULL, NULL);