/*	$NetBSD: if_mvgbe.c,v 1.45.2.2 2017/03/20 06:57:29 pgoyette Exp $	*/
/*
 * Copyright (c) 2007, 2008, 2013 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_mvgbe.c,v 1.45.2.2 2017/03/20 06:57:29 pgoyette Exp $");

#include "opt_multiprocessor.h"

#if defined MULTIPROCESSOR
#warning Queue Management Method 'Counters' is not supported. Please use mvxpe instead of this driver.
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>
#include <dev/marvell/mvgbereg.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <net/bpf.h>
#include <sys/rndsource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include "locators.h"

/* #define MVGBE_DEBUG 3 */
#ifdef MVGBE_DEBUG
#define DPRINTF(x)	if (mvgbe_debug) printf x
#define DPRINTFN(n,x)	if (mvgbe_debug >= (n)) printf x
int mvgbe_debug = MVGBE_DEBUG;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif


#define MVGBE_READ(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define MVGBE_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define MVGBE_READ_FILTER(sc, reg, val, c) \
	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c))
#define MVGBE_WRITE_FILTER(sc, reg, val, c) \
	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c))

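/*
 * Link state is read through sc_linkup, which mvgbe_attach() points at the
 * per-port status register appropriate to the chip generation (MVGBE_PS0
 * with MVGBE_PS0_LINKUP on ports reporting version >= 0x10, MVGBE_PS with
 * MVGBE_PS_LINKUP otherwise).
 */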
#define MVGBE_LINKUP_READ(sc) \
    bus_space_read_4((sc)->sc_iot, (sc)->sc_linkup.ioh, 0)
#define MVGBE_IS_LINKUP(sc)	(MVGBE_LINKUP_READ(sc) & (sc)->sc_linkup.bit)

#define MVGBE_TX_RING_CNT	256
#define MVGBE_TX_RING_MSK	(MVGBE_TX_RING_CNT - 1)
#define MVGBE_TX_RING_NEXT(x)	(((x) + 1) & MVGBE_TX_RING_MSK)
#define MVGBE_RX_RING_CNT	256
#define MVGBE_RX_RING_MSK	(MVGBE_RX_RING_CNT - 1)
#define MVGBE_RX_RING_NEXT(x)	(((x) + 1) & MVGBE_RX_RING_MSK)

CTASSERT(MVGBE_TX_RING_CNT > 1 && MVGBE_TX_RING_NEXT(MVGBE_TX_RING_CNT) ==
	(MVGBE_TX_RING_CNT + 1) % MVGBE_TX_RING_CNT);
CTASSERT(MVGBE_RX_RING_CNT > 1 && MVGBE_RX_RING_NEXT(MVGBE_RX_RING_CNT) ==
	(MVGBE_RX_RING_CNT + 1) % MVGBE_RX_RING_CNT);
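/*
 * The ring sizes must be powers of two so the _NEXT macros can wrap the
 * ring index with a simple mask; the CTASSERTs above verify that the mask
 * is equivalent to the modulo computation.
 */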

#define MVGBE_JSLOTS		384	/* XXXX */
#define MVGBE_JLEN \
    ((MVGBE_MRU + MVGBE_HWHEADER_SIZE + MVGBE_RXBUF_ALIGN - 1) & \
    ~MVGBE_RXBUF_MASK)
#define MVGBE_NTXSEG		30
#define MVGBE_JPAGESZ		PAGE_SIZE
#define MVGBE_RESID \
    (MVGBE_JPAGESZ - (MVGBE_JLEN * MVGBE_JSLOTS) % MVGBE_JPAGESZ)
#define MVGBE_JMEM \
    ((MVGBE_JLEN * MVGBE_JSLOTS) + MVGBE_RESID)
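/*
 * The jumbo pool is MVGBE_JSLOTS buffers of MVGBE_JLEN bytes each (the MRU
 * plus the hardware header, rounded up to the RX buffer alignment), padded
 * with MVGBE_RESID so the whole MVGBE_JMEM region is a multiple of the
 * page size.
 */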

#define MVGBE_TX_RING_ADDR(sc, i)		\
    ((sc)->sc_ring_map->dm_segs[0].ds_addr +	\
			offsetof(struct mvgbe_ring_data, mvgbe_tx_ring[(i)]))

#define MVGBE_RX_RING_ADDR(sc, i)		\
    ((sc)->sc_ring_map->dm_segs[0].ds_addr +	\
			offsetof(struct mvgbe_ring_data, mvgbe_rx_ring[(i)]))

#define MVGBE_CDOFF(x)		offsetof(struct mvgbe_ring_data, x)
#define MVGBE_CDTXOFF(x)	MVGBE_CDOFF(mvgbe_tx_ring[(x)])
#define MVGBE_CDRXOFF(x)	MVGBE_CDOFF(mvgbe_rx_ring[(x)])
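/*
 * The CD*OFF macros translate a descriptor index into its byte offset
 * within the single DMA-mapped mvgbe_ring_data block; the _RING_ADDR
 * macros add the bus address of that mapping, and the CD*SYNC macros
 * below use the offsets to sync individual descriptors.
 */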

#define MVGBE_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
	const int __descsize = sizeof(struct mvgbe_tx_desc);		\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > MVGBE_TX_RING_CNT) {				\
		bus_dmamap_sync((sc)->sc_dmat,				\
		    (sc)->sc_ring_map, MVGBE_CDTXOFF(__x),		\
		    __descsize * (MVGBE_TX_RING_CNT - __x), (ops));	\
		__n -= (MVGBE_TX_RING_CNT - __x);			\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_ring_map,		\
	    MVGBE_CDTXOFF((__x)), __descsize * __n, (ops));		\
} while (0 /*CONSTCOND*/)

#define MVGBE_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_ring_map,		\
	    MVGBE_CDRXOFF((x)), sizeof(struct mvgbe_rx_desc), (ops));	\
} while (/*CONSTCOND*/0)

#define MVGBE_IPGINTTX_DEFAULT	768
#define MVGBE_IPGINTRX_DEFAULT	768
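/*
 * Default IPG (inter-packet gap) interrupt coalescing values, loaded by
 * mvgbe_init() on chips with FLAGS_IPG1/FLAGS_IPG2 and adjustable at run
 * time through the sysctl handlers declared below.
 */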

#ifdef MVGBE_EVENT_COUNTERS
#define	MVGBE_EVCNT_INCR(ev)		(ev)->ev_count++
#define	MVGBE_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	MVGBE_EVCNT_INCR(ev)		/* nothing */
#define	MVGBE_EVCNT_ADD(ev, val)	/* nothing */
#endif

struct mvgbe_jpool_entry {
	int slot;
	LIST_ENTRY(mvgbe_jpool_entry) jpool_entries;
};

struct mvgbe_chain {
	void *mvgbe_desc;
	struct mbuf *mvgbe_mbuf;
	struct mvgbe_chain *mvgbe_next;
};

struct mvgbe_txmap_entry {
	bus_dmamap_t dmamap;
	SIMPLEQ_ENTRY(mvgbe_txmap_entry) link;
};

struct mvgbe_chain_data {
	struct mvgbe_chain mvgbe_tx_chain[MVGBE_TX_RING_CNT];
	struct mvgbe_txmap_entry *mvgbe_tx_map[MVGBE_TX_RING_CNT];
	int mvgbe_tx_prod;
	int mvgbe_tx_cons;
	int mvgbe_tx_cnt;

	struct mvgbe_chain mvgbe_rx_chain[MVGBE_RX_RING_CNT];
	bus_dmamap_t mvgbe_rx_map[MVGBE_RX_RING_CNT];
	bus_dmamap_t mvgbe_rx_jumbo_map;
	int mvgbe_rx_prod;
	int mvgbe_rx_cons;
	int mvgbe_rx_cnt;

	/* Stick the jumbo mem management stuff here too. */
	void *mvgbe_jslots[MVGBE_JSLOTS];
	void *mvgbe_jumbo_buf;
};

struct mvgbe_ring_data {
	struct mvgbe_tx_desc mvgbe_tx_ring[MVGBE_TX_RING_CNT];
	struct mvgbe_rx_desc mvgbe_rx_ring[MVGBE_RX_RING_CNT];
};

struct mvgbec_softc {
	device_t sc_dev;

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;

	kmutex_t sc_mtx;

	int sc_flags;
};

struct mvgbe_softc {
	device_t sc_dev;
	int sc_port;
	uint32_t sc_version;

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_space_handle_t sc_dafh;	/* dest address filter handle */
	bus_dma_tag_t sc_dmat;

	struct ethercom sc_ethercom;
	struct mii_data sc_mii;
	u_int8_t sc_enaddr[ETHER_ADDR_LEN];	/* station addr */

	callout_t sc_tick_ch;		/* tick callout */

	struct mvgbe_chain_data sc_cdata;
	struct mvgbe_ring_data *sc_rdata;
	bus_dmamap_t sc_ring_map;
	int sc_if_flags;
	unsigned int sc_ipginttx;
	unsigned int sc_ipgintrx;
	int sc_wdogsoft;

	LIST_HEAD(__mvgbe_jfreehead, mvgbe_jpool_entry) sc_jfree_listhead;
	LIST_HEAD(__mvgbe_jinusehead, mvgbe_jpool_entry) sc_jinuse_listhead;
	SIMPLEQ_HEAD(__mvgbe_txmaphead, mvgbe_txmap_entry) sc_txmap_head;

	struct {
		bus_space_handle_t ioh;
		uint32_t bit;
	} sc_linkup;
	uint32_t sc_cmdsts_opts;

	krndsource_t sc_rnd_source;
	struct sysctllog *mvgbe_clog;
#ifdef MVGBE_EVENT_COUNTERS
	struct evcnt sc_ev_rxoverrun;
	struct evcnt sc_ev_wdogsoft;
#endif
};


/* Gigabit Ethernet Unit Global part functions */

static int mvgbec_match(device_t, struct cfdata *, void *);
static void mvgbec_attach(device_t, device_t, void *);

static int mvgbec_print(void *, const char *);
static int mvgbec_search(device_t, cfdata_t, const int *, void *);

/* MII functions */
static int mvgbec_miibus_readreg(device_t, int, int);
static void mvgbec_miibus_writereg(device_t, int, int, int);
static void mvgbec_miibus_statchg(struct ifnet *);

static void mvgbec_wininit(struct mvgbec_softc *, enum marvell_tags *);

/* Gigabit Ethernet Port part functions */

static int mvgbe_match(device_t, struct cfdata *, void *);
static void mvgbe_attach(device_t, device_t, void *);

static void mvgbe_tick(void *);
static int mvgbe_intr(void *);

static void mvgbe_start(struct ifnet *);
static int mvgbe_ioctl(struct ifnet *, u_long, void *);
static int mvgbe_init(struct ifnet *);
static void mvgbe_stop(struct ifnet *, int);
static void mvgbe_watchdog(struct ifnet *);

static int mvgbe_ifflags_cb(struct ethercom *);

static int mvgbe_mediachange(struct ifnet *);
static void mvgbe_mediastatus(struct ifnet *, struct ifmediareq *);

static int mvgbe_init_rx_ring(struct mvgbe_softc *);
static int mvgbe_init_tx_ring(struct mvgbe_softc *);
static int mvgbe_newbuf(struct mvgbe_softc *, int, struct mbuf *, bus_dmamap_t);
static int mvgbe_alloc_jumbo_mem(struct mvgbe_softc *);
static void *mvgbe_jalloc(struct mvgbe_softc *);
static void mvgbe_jfree(struct mbuf *, void *, size_t, void *);
static int mvgbe_encap(struct mvgbe_softc *, struct mbuf *, uint32_t *);
static void mvgbe_rxeof(struct mvgbe_softc *);
static void mvgbe_txeof(struct mvgbe_softc *);
static uint8_t mvgbe_crc8(const uint8_t *, size_t);
static void mvgbe_filter_setup(struct mvgbe_softc *);
#ifdef MVGBE_DEBUG
static void mvgbe_dump_txdesc(struct mvgbe_tx_desc *, int);
#endif
static int mvgbe_ipginttx(struct mvgbec_softc *, struct mvgbe_softc *,
    unsigned int);
static int mvgbe_ipgintrx(struct mvgbec_softc *, struct mvgbe_softc *,
    unsigned int);
static void sysctl_mvgbe_init(struct mvgbe_softc *);
static int mvgbe_sysctl_ipginttx(SYSCTLFN_PROTO);
static int mvgbe_sysctl_ipgintrx(SYSCTLFN_PROTO);

CFATTACH_DECL_NEW(mvgbec_gt, sizeof(struct mvgbec_softc),
    mvgbec_match, mvgbec_attach, NULL, NULL);
CFATTACH_DECL_NEW(mvgbec_mbus, sizeof(struct mvgbec_softc),
    mvgbec_match, mvgbec_attach, NULL, NULL);

CFATTACH_DECL_NEW(mvgbe, sizeof(struct mvgbe_softc),
    mvgbe_match, mvgbe_attach, NULL, NULL);

device_t mvgbec0 = NULL;
static int mvgbe_root_num;

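/*
 * One entry per SoC model/unit pair: irqs[] lists the per-port interrupt
 * numbers and flags carries the chip-specific quirks that mvgbec_attach()
 * copies into sc_flags.
 */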
struct mvgbe_port {
	int model;
	int unit;
	int ports;
	int irqs[3];
	int flags;
#define FLAGS_FIX_TQTB	(1 << 0)
#define FLAGS_FIX_MTU	(1 << 1)
#define	FLAGS_IPG1	(1 << 2)
#define	FLAGS_IPG2	(1 << 3)
#define	FLAGS_HAS_PV	(1 << 4)	/* Has Port Version Register */
} mvgbe_ports[] = {
	{ MARVELL_DISCOVERY_II,		0, 3, { 32, 33, 34 }, 0 },
	{ MARVELL_DISCOVERY_III,	0, 3, { 32, 33, 34 }, 0 },
#if 0
	{ MARVELL_DISCOVERY_LT,		0, ?, { }, 0 },
	{ MARVELL_DISCOVERY_V,		0, ?, { }, 0 },
	{ MARVELL_DISCOVERY_VI,		0, ?, { }, 0 },
#endif
	{ MARVELL_ORION_1_88F5082,	0, 1, { 21 }, FLAGS_FIX_MTU },
	{ MARVELL_ORION_1_88F5180N,	0, 1, { 21 }, FLAGS_FIX_MTU },
	{ MARVELL_ORION_1_88F5181,	0, 1, { 21 }, FLAGS_FIX_MTU | FLAGS_IPG1 },
	{ MARVELL_ORION_1_88F5182,	0, 1, { 21 }, FLAGS_FIX_MTU | FLAGS_IPG1 },
	{ MARVELL_ORION_2_88F5281,	0, 1, { 21 }, FLAGS_FIX_MTU | FLAGS_IPG1 },
	{ MARVELL_ORION_1_88F6082,	0, 1, { 21 }, FLAGS_FIX_MTU },
	{ MARVELL_ORION_1_88W8660,	0, 1, { 21 }, FLAGS_FIX_MTU },

	{ MARVELL_KIRKWOOD_88F6180,	0, 1, { 11 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_KIRKWOOD_88F6192,	0, 1, { 11 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_KIRKWOOD_88F6192,	1, 1, { 15 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_KIRKWOOD_88F6281,	0, 1, { 11 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_KIRKWOOD_88F6281,	1, 1, { 15 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_KIRKWOOD_88F6282,	0, 1, { 11 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_KIRKWOOD_88F6282,	1, 1, { 15 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },

	{ MARVELL_MV78XX0_MV78100,	0, 1, { 40 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_MV78XX0_MV78100,	1, 1, { 44 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_MV78XX0_MV78200,	0, 1, { 40 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_MV78XX0_MV78200,	1, 1, { 44 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_MV78XX0_MV78200,	2, 1, { 48 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_MV78XX0_MV78200,	3, 1, { 52 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },

	{ MARVELL_DOVE_88AP510,		0, 1, { 29 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },

	{ MARVELL_ARMADAXP_MV78130,	0, 1, { 66 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78130,	1, 1, { 70 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78130,	2, 1, { 74 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78160,	0, 1, { 66 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78160,	1, 1, { 70 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78160,	2, 1, { 74 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78160,	3, 1, { 78 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78230,	0, 1, { 66 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78230,	1, 1, { 70 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78230,	2, 1, { 74 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78260,	0, 1, { 66 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78260,	1, 1, { 70 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78260,	2, 1, { 74 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78260,	3, 1, { 78 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78460,	0, 1, { 66 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78460,	1, 1, { 70 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78460,	2, 1, { 74 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADAXP_MV78460,	3, 1, { 78 }, FLAGS_HAS_PV },

	{ MARVELL_ARMADA370_MV6707,	0, 1, { 66 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADA370_MV6707,	1, 1, { 70 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADA370_MV6710,	0, 1, { 66 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADA370_MV6710,	1, 1, { 70 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADA370_MV6W11,	0, 1, { 66 }, FLAGS_HAS_PV },
	{ MARVELL_ARMADA370_MV6W11,	1, 1, { 70 }, FLAGS_HAS_PV },
};


/* ARGSUSED */
static int
mvgbec_match(device_t parent, cfdata_t match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	int i;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;
	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
		return 0;

	for (i = 0; i < __arraycount(mvgbe_ports); i++)
		if (mva->mva_model == mvgbe_ports[i].model) {
			mva->mva_size = MVGBE_SIZE;
			return 1;
		}
	return 0;
}

/* ARGSUSED */
static void
mvgbec_attach(device_t parent, device_t self, void *aux)
{
	struct mvgbec_softc *csc = device_private(self);
	struct marvell_attach_args *mva = aux, gbea;
	struct mvgbe_softc *port;
	struct mii_softc *mii;
	device_t child;
	uint32_t phyaddr;
	int i, j;

	aprint_naive("\n");
	aprint_normal(": Marvell Gigabit Ethernet Controller\n");

	csc->sc_dev = self;
	csc->sc_iot = mva->mva_iot;
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
	    mva->mva_size, &csc->sc_ioh)) {
		aprint_error_dev(self, "Cannot map registers\n");
		return;
	}

	if (mvgbec0 == NULL)
		mvgbec0 = self;

	phyaddr = 0;
	MVGBE_WRITE(csc, MVGBE_PHYADDR, phyaddr);

	mutex_init(&csc->sc_mtx, MUTEX_DEFAULT, IPL_NET);

	/* Disable and clear Gigabit Ethernet Unit interrupts */
	MVGBE_WRITE(csc, MVGBE_EUIM, 0);
	MVGBE_WRITE(csc, MVGBE_EUIC, 0);

	mvgbec_wininit(csc, mva->mva_tags);

	memset(&gbea, 0, sizeof(gbea));
	for (i = 0; i < __arraycount(mvgbe_ports); i++) {
		if (mvgbe_ports[i].model != mva->mva_model ||
		    mvgbe_ports[i].unit != mva->mva_unit)
			continue;

		csc->sc_flags = mvgbe_ports[i].flags;

		for (j = 0; j < mvgbe_ports[i].ports; j++) {
			gbea.mva_name = "mvgbe";
			gbea.mva_model = mva->mva_model;
			gbea.mva_iot = csc->sc_iot;
			gbea.mva_ioh = csc->sc_ioh;
			gbea.mva_unit = j;
			gbea.mva_dmat = mva->mva_dmat;
			gbea.mva_irq = mvgbe_ports[i].irqs[j];
			child = config_found_sm_loc(csc->sc_dev, "mvgbec", NULL,
			    &gbea, mvgbec_print, mvgbec_search);
			if (child) {
				port = device_private(child);
				mii  = LIST_FIRST(&port->sc_mii.mii_phys);
				if (mii != NULL)
					phyaddr |= MVGBE_PHYADDR_PHYAD(j,
					    mii->mii_phy);
			}
		}
		break;
	}
	MVGBE_WRITE(csc, MVGBE_PHYADDR, phyaddr);
}

static int
mvgbec_print(void *aux, const char *pnp)
{
	struct marvell_attach_args *gbea = aux;

	if (pnp)
		aprint_normal("%s at %s port %d",
		    gbea->mva_name, pnp, gbea->mva_unit);
	else {
		if (gbea->mva_unit != MVGBECCF_PORT_DEFAULT)
			aprint_normal(" port %d", gbea->mva_unit);
		if (gbea->mva_irq != MVGBECCF_IRQ_DEFAULT)
			aprint_normal(" irq %d", gbea->mva_irq);
	}
	return UNCONF;
}

/* ARGSUSED */
static int
mvgbec_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
{
	struct marvell_attach_args *gbea = aux;

	if (cf->cf_loc[MVGBECCF_PORT] == gbea->mva_unit &&
	    cf->cf_loc[MVGBECCF_IRQ] != MVGBECCF_IRQ_DEFAULT)
		gbea->mva_irq = cf->cf_loc[MVGBECCF_IRQ];

	return config_match(parent, cf, aux);
}

static int
mvgbec_miibus_readreg(device_t dev, int phy, int reg)
{
	struct mvgbe_softc *sc = device_private(dev);
	struct mvgbec_softc *csc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t smi, val;
	int i;

	if (mvgbec0 == NULL) {
		aprint_error_ifnet(ifp, "SMI mvgbec0 not found\n");
		return -1;
	}
	csc = device_private(mvgbec0);

	mutex_enter(&csc->sc_mtx);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY))
			break;
	}
	if (i == MVGBE_PHY_TIMEOUT) {
		aprint_error_ifnet(ifp, "SMI busy timeout\n");
		mutex_exit(&csc->sc_mtx);
		return -1;
	}

	smi =
	    MVGBE_SMI_PHYAD(phy) | MVGBE_SMI_REGAD(reg) | MVGBE_SMI_OPCODE_READ;
	MVGBE_WRITE(csc, MVGBE_SMI, smi);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		smi = MVGBE_READ(csc, MVGBE_SMI);
		if (smi & MVGBE_SMI_READVALID)
			break;
	}

	mutex_exit(&csc->sc_mtx);

	DPRINTFN(9, ("mvgbec_miibus_readreg: i=%d, timeout=%d\n",
	    i, MVGBE_PHY_TIMEOUT));

	val = smi & MVGBE_SMI_DATA_MASK;

	DPRINTFN(9, ("mvgbec_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
	    phy, reg, val));

	return val;
}

static void
mvgbec_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct mvgbe_softc *sc = device_private(dev);
	struct mvgbec_softc *csc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t smi;
	int i;

	if (mvgbec0 == NULL) {
		aprint_error_ifnet(ifp, "SMI mvgbec0 not found\n");
		return;
	}
	csc = device_private(mvgbec0);

	DPRINTFN(9, ("mvgbec_miibus_writereg phy=%d reg=%#x val=%#x\n",
	     phy, reg, val));

	mutex_enter(&csc->sc_mtx);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY))
			break;
	}
	if (i == MVGBE_PHY_TIMEOUT) {
		aprint_error_ifnet(ifp, "SMI busy timeout\n");
		mutex_exit(&csc->sc_mtx);
		return;
	}

	smi = MVGBE_SMI_PHYAD(phy) | MVGBE_SMI_REGAD(reg) |
	    MVGBE_SMI_OPCODE_WRITE | (val & MVGBE_SMI_DATA_MASK);
	MVGBE_WRITE(csc, MVGBE_SMI, smi);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY))
			break;
	}

	mutex_exit(&csc->sc_mtx);

	if (i == MVGBE_PHY_TIMEOUT)
		aprint_error_ifnet(ifp, "phy write timed out\n");
}

static void
mvgbec_miibus_statchg(struct ifnet *ifp)
{

	/* nothing to do */
}


static void
mvgbec_wininit(struct mvgbec_softc *sc, enum marvell_tags *tags)
{
	device_t pdev = device_parent(sc->sc_dev);
	uint64_t base;
	uint32_t en, ac, size;
	int window, target, attr, rv, i;

	/* First disable all address decode windows */
	en = MVGBE_BARE_EN_MASK;
	MVGBE_WRITE(sc, MVGBE_BARE, en);

	ac = 0;
	for (window = 0, i = 0;
	    tags[i] != MARVELL_TAG_UNDEFINED && window < MVGBE_NWINDOW; i++) {
		rv = marvell_winparams_by_tag(pdev, tags[i],
		    &target, &attr, &base, &size);
		if (rv != 0 || size == 0)
			continue;

		if (base > 0xffffffffULL) {
			if (window >= MVGBE_NREMAP) {
				aprint_error_dev(sc->sc_dev,
				    "can't remap window %d\n", window);
				continue;
			}
			MVGBE_WRITE(sc, MVGBE_HA(window),
			    (base >> 32) & 0xffffffff);
		}

		MVGBE_WRITE(sc, MVGBE_BASEADDR(window),
		    MVGBE_BASEADDR_TARGET(target)	|
		    MVGBE_BASEADDR_ATTR(attr)		|
		    MVGBE_BASEADDR_BASE(base));
		MVGBE_WRITE(sc, MVGBE_S(window), MVGBE_S_SIZE(size));

		en &= ~(1 << window);
		/* set full access (r/w) */
		ac |= MVGBE_EPAP_EPAR(window, MVGBE_EPAP_AC_FA);
		window++;
	}
	/* allow access to the decode windows */
	MVGBE_WRITE(sc, MVGBE_EPAP, ac);

	MVGBE_WRITE(sc, MVGBE_BARE, en);
}


/* ARGSUSED */
static int
mvgbe_match(device_t parent, cfdata_t match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	uint32_t pbase, maddrh, maddrl;
	prop_dictionary_t dict;

	dict = device_properties(parent);
	if (dict) {
		if (prop_dictionary_get(dict, "mac-address"))
			return 1;
	}

	pbase = MVGBE_PORTR_BASE + mva->mva_unit * MVGBE_PORTR_SIZE;
	maddrh =
	    bus_space_read_4(mva->mva_iot, mva->mva_ioh, pbase + MVGBE_MACAH);
	maddrl =
	    bus_space_read_4(mva->mva_iot, mva->mva_ioh, pbase + MVGBE_MACAL);
	if ((maddrh | maddrl) == 0)
		return 0;

	return 1;
}

/* ARGSUSED */
static void
mvgbe_attach(device_t parent, device_t self, void *aux)
{
	struct mvgbec_softc *csc = device_private(parent);
	struct mvgbe_softc *sc = device_private(self);
	struct marvell_attach_args *mva = aux;
	struct mvgbe_txmap_entry *entry;
	prop_dictionary_t dict;
	prop_data_t enaddrp;
	struct ifnet *ifp;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg, i;
	uint32_t maddrh, maddrl;
	uint8_t enaddr[ETHER_ADDR_LEN];
	void *kva;

	aprint_naive("\n");
	aprint_normal("\n");

	dict = device_properties(parent);
	if (dict)
		enaddrp = prop_dictionary_get(dict, "mac-address");
	else
		enaddrp = NULL;

	sc->sc_dev = self;
	sc->sc_port = mva->mva_unit;
	sc->sc_iot = mva->mva_iot;
	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, mvgbe_tick, sc);
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    MVGBE_PORTR_BASE + mva->mva_unit * MVGBE_PORTR_SIZE,
	    MVGBE_PORTR_SIZE, &sc->sc_ioh)) {
		aprint_error_dev(self, "Cannot map registers\n");
		return;
	}
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    MVGBE_PORTDAFR_BASE + mva->mva_unit * MVGBE_PORTDAFR_SIZE,
	    MVGBE_PORTDAFR_SIZE, &sc->sc_dafh)) {
		aprint_error_dev(self,
		    "Cannot map destination address filter registers\n");
		return;
	}
	sc->sc_dmat = mva->mva_dmat;

	if (csc->sc_flags & FLAGS_HAS_PV) {
		/* GbE port has Port Version register. */
		sc->sc_version = MVGBE_READ(sc, MVGBE_PV);
		aprint_normal_dev(self, "Port Version 0x%x\n", sc->sc_version);
	}

	if (sc->sc_version >= 0x10) {
		/*
		 * Armada XP
		 */

		if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
		    MVGBE_PS0, sizeof(uint32_t), &sc->sc_linkup.ioh)) {
			aprint_error_dev(self, "Cannot map linkup register\n");
			return;
		}
		sc->sc_linkup.bit = MVGBE_PS0_LINKUP;
		csc->sc_flags |= FLAGS_IPG2;
	} else {
		if (bus_space_subregion(mva->mva_iot, sc->sc_ioh,
		    MVGBE_PS, sizeof(uint32_t), &sc->sc_linkup.ioh)) {
			aprint_error_dev(self, "Cannot map linkup register\n");
			return;
		}
		sc->sc_linkup.bit = MVGBE_PS_LINKUP;
	}

	if (enaddrp) {
		memcpy(enaddr, prop_data_data_nocopy(enaddrp), ETHER_ADDR_LEN);
		maddrh  = enaddr[0] << 24;
		maddrh |= enaddr[1] << 16;
		maddrh |= enaddr[2] << 8;
		maddrh |= enaddr[3];
		maddrl  = enaddr[4] << 8;
		maddrl |= enaddr[5];
		MVGBE_WRITE(sc, MVGBE_MACAH, maddrh);
		MVGBE_WRITE(sc, MVGBE_MACAL, maddrl);
	}

	maddrh = MVGBE_READ(sc, MVGBE_MACAH);
	maddrl = MVGBE_READ(sc, MVGBE_MACAL);
	sc->sc_enaddr[0] = maddrh >> 24;
	sc->sc_enaddr[1] = maddrh >> 16;
	sc->sc_enaddr[2] = maddrh >> 8;
	sc->sc_enaddr[3] = maddrh >> 0;
	sc->sc_enaddr[4] = maddrl >> 8;
	sc->sc_enaddr[5] = maddrl >> 0;
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/* clear all ethernet port interrupts */
	MVGBE_WRITE(sc, MVGBE_IC, 0);
	MVGBE_WRITE(sc, MVGBE_ICE, 0);

	marvell_intr_establish(mva->mva_irq, IPL_NET, mvgbe_intr, sc);

	/* Allocate the descriptor queues. */
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct mvgbe_ring_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't alloc rx buffers\n");
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct mvgbe_ring_data), &kva, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't map dma buffers (%lu bytes)\n",
		    (u_long)sizeof(struct mvgbe_ring_data));
		goto fail1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct mvgbe_ring_data), 1,
	    sizeof(struct mvgbe_ring_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_ring_map)) {
		aprint_error_dev(self, "can't create dma map\n");
		goto fail2;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_ring_map, kva,
	    sizeof(struct mvgbe_ring_data), NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't load dma map\n");
		goto fail3;
	}
	for (i = 0; i < MVGBE_RX_RING_CNT; i++)
		sc->sc_cdata.mvgbe_rx_chain[i].mvgbe_mbuf = NULL;

	SIMPLEQ_INIT(&sc->sc_txmap_head);
	for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
		sc->sc_cdata.mvgbe_tx_chain[i].mvgbe_mbuf = NULL;

		if (bus_dmamap_create(sc->sc_dmat,
		    MVGBE_JLEN, MVGBE_NTXSEG, MVGBE_JLEN, 0,
		    BUS_DMA_NOWAIT, &dmamap)) {
			aprint_error_dev(self, "Can't create TX dmamap\n");
			goto fail4;
		}

		entry = kmem_alloc(sizeof(*entry), KM_SLEEP);
		if (!entry) {
			aprint_error_dev(self, "Can't alloc txmap entry\n");
			bus_dmamap_destroy(sc->sc_dmat, dmamap);
			goto fail4;
		}
		entry->dmamap = dmamap;
		SIMPLEQ_INSERT_HEAD(&sc->sc_txmap_head, entry, link);
	}

	sc->sc_rdata = (struct mvgbe_ring_data *)kva;
	memset(sc->sc_rdata, 0, sizeof(struct mvgbe_ring_data));

	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * Ethernet frames.
	 */
	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	/* Try to allocate memory for jumbo buffers. */
	if (mvgbe_alloc_jumbo_mem(sc)) {
		aprint_error_dev(self, "jumbo buffer allocation failed\n");
		goto fail4;
	}

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = mvgbe_start;
	ifp->if_ioctl = mvgbe_ioctl;
	ifp->if_init = mvgbe_init;
	ifp->if_stop = mvgbe_stop;
	ifp->if_watchdog = mvgbe_watchdog;
	/*
	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
	 */
	sc->sc_ethercom.ec_if.if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	/*
	 * But, IPv6 packets in the stream can cause incorrect TCPv4 Tx sums.
	 */
	sc->sc_ethercom.ec_if.if_capabilities &= ~IFCAP_CSUM_TCPv4_Tx;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(MVGBE_TX_RING_CNT - 1, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);
	strcpy(ifp->if_xname, device_xname(sc->sc_dev));

	mvgbe_stop(ifp, 0);

	/*
	 * Do MII setup.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mvgbec_miibus_readreg;
	sc->sc_mii.mii_writereg = mvgbec_miibus_writereg;
	sc->sc_mii.mii_statchg = mvgbec_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    mvgbe_mediachange, mvgbe_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff,
	    MII_PHY_ANY, parent == mvgbec0 ? 0 : 1, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);

	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, mvgbe_ifflags_cb);

	sysctl_mvgbe_init(sc);
#ifdef MVGBE_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_rxoverrun, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rxoverrun");
	evcnt_attach_dynamic(&sc->sc_ev_wdogsoft, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "wdogsoft");
#endif
	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	return;

fail4:
	while ((entry = SIMPLEQ_FIRST(&sc->sc_txmap_head)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txmap_head, link);
		bus_dmamap_destroy(sc->sc_dmat, entry->dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_ring_map);
fail3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_ring_map);
fail2:
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct mvgbe_ring_data));
fail1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return;
}

static int
mvgbe_ipginttx(struct mvgbec_softc *csc, struct mvgbe_softc *sc,
    unsigned int ipginttx)
{
	uint32_t reg;
	reg = MVGBE_READ(sc, MVGBE_PTFUT);

	if (csc->sc_flags & FLAGS_IPG2) {
		if (ipginttx > MVGBE_PTFUT_IPGINTTX_V2_MAX)
			return -1;
		reg &= ~MVGBE_PTFUT_IPGINTTX_V2_MASK;
		reg |= MVGBE_PTFUT_IPGINTTX_V2(ipginttx);
	} else if (csc->sc_flags & FLAGS_IPG1) {
		if (ipginttx > MVGBE_PTFUT_IPGINTTX_V1_MAX)
			return -1;
		reg &= ~MVGBE_PTFUT_IPGINTTX_V1_MASK;
		reg |= MVGBE_PTFUT_IPGINTTX_V1(ipginttx);
	}
	MVGBE_WRITE(sc, MVGBE_PTFUT, reg);

	return 0;
}

static int
mvgbe_ipgintrx(struct mvgbec_softc *csc, struct mvgbe_softc *sc,
    unsigned int ipgintrx)
{
	uint32_t reg;
	reg = MVGBE_READ(sc, MVGBE_SDC);

	if (csc->sc_flags & FLAGS_IPG2) {
		if (ipgintrx > MVGBE_SDC_IPGINTRX_V2_MAX)
			return -1;
		reg &= ~MVGBE_SDC_IPGINTRX_V2_MASK;
		reg |= MVGBE_SDC_IPGINTRX_V2(ipgintrx);
	} else if (csc->sc_flags & FLAGS_IPG1) {
		if (ipgintrx > MVGBE_SDC_IPGINTRX_V1_MAX)
			return -1;
		reg &= ~MVGBE_SDC_IPGINTRX_V1_MASK;
		reg |= MVGBE_SDC_IPGINTRX_V1(ipgintrx);
	}
	MVGBE_WRITE(sc, MVGBE_SDC, reg);

	return 0;
}

static void
mvgbe_tick(void *arg)
{
	struct mvgbe_softc *sc = arg;
	struct mii_data *mii = &sc->sc_mii;
	int s;

	s = splnet();
	mii_tick(mii);
	/* Need more work */
	MVGBE_EVCNT_ADD(&sc->sc_ev_rxoverrun, MVGBE_READ(sc, MVGBE_POFC));
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

static int
mvgbe_intr(void *arg)
{
	struct mvgbe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t ic, ice, datum = 0;
	int claimed = 0;

	for (;;) {
		ice = MVGBE_READ(sc, MVGBE_ICE);
		ic = MVGBE_READ(sc, MVGBE_IC);

		DPRINTFN(3, ("mvgbe_intr: ic=%#x, ice=%#x\n", ic, ice));
		if (ic == 0 && ice == 0)
			break;

		datum = datum ^ ic ^ ice;

		MVGBE_WRITE(sc, MVGBE_IC, ~ic);
		MVGBE_WRITE(sc, MVGBE_ICE, ~ice);

		claimed = 1;

		if (!(ifp->if_flags & IFF_RUNNING))
			break;

		if (ice & MVGBE_ICE_LINKCHG) {
			if (MVGBE_IS_LINKUP(sc)) {
				/* Enable port RX and TX. */
				MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_ENQ(0));
				MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ(0));
			} else {
				MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_DISQ(0));
				MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_DISQ(0));
			}

			/* Notify the mii layer of the link change */
			mii_pollstat(&sc->sc_mii);
		}

		if (ic & (MVGBE_IC_RXBUF | MVGBE_IC_RXERROR))
			mvgbe_rxeof(sc);

		if (ice & (MVGBE_ICE_TXBUF_MASK | MVGBE_ICE_TXERR_MASK))
			mvgbe_txeof(sc);
	}

	if_schedule_deferred_start(ifp);

	rnd_add_uint32(&sc->sc_rnd_source, datum);

	return claimed;
}

static void
mvgbe_start(struct ifnet *ifp)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	uint32_t idx = sc->sc_cdata.mvgbe_tx_prod;
	int pkts = 0;

	DPRINTFN(3, ("mvgbe_start (idx %d, tx_chain[idx] %p)\n", idx,
	    sc->sc_cdata.mvgbe_tx_chain[idx].mvgbe_mbuf));

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;
	/* If Link is DOWN, can't start TX */
	if (!MVGBE_IS_LINKUP(sc))
		return;

	while (sc->sc_cdata.mvgbe_tx_chain[idx].mvgbe_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (mvgbe_encap(sc, m_head, &idx)) {
			if (sc->sc_cdata.mvgbe_tx_cnt > 0)
				ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m_head);
	}
	if (pkts == 0)
		return;

	/* Transmit at Queue 0 */
	if (idx != sc->sc_cdata.mvgbe_tx_prod) {
		sc->sc_cdata.mvgbe_tx_prod = idx;
		MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ(0));

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 1;
		sc->sc_wdogsoft = 1;
	}
}

static int
mvgbe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		DPRINTFN(2, ("mvgbe_ioctl MEDIA\n"));
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		DPRINTFN(2, ("mvgbe_ioctl ETHER\n"));
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING) {
				mvgbe_filter_setup(sc);
			}
			error = 0;
		}
		break;
	}

	splx(s);

	return error;
}

static int
mvgbe_init(struct ifnet *ifp)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct mvgbec_softc *csc = device_private(device_parent(sc->sc_dev));
	struct mii_data *mii = &sc->sc_mii;
	uint32_t reg;
	int i;

	DPRINTFN(2, ("mvgbe_init\n"));

	/* Cancel pending I/O and free all RX/TX buffers. */
	mvgbe_stop(ifp, 0);

	/* clear all ethernet port interrupts */
	MVGBE_WRITE(sc, MVGBE_IC, 0);
	MVGBE_WRITE(sc, MVGBE_ICE, 0);

	/* Init TX/RX descriptors */
	if (mvgbe_init_tx_ring(sc) == ENOBUFS) {
		aprint_error_ifnet(ifp,
		    "initialization failed: no memory for tx buffers\n");
		return ENOBUFS;
	}
	if (mvgbe_init_rx_ring(sc) == ENOBUFS) {
		aprint_error_ifnet(ifp,
		    "initialization failed: no memory for rx buffers\n");
		return ENOBUFS;
	}

	if ((csc->sc_flags & FLAGS_IPG1) || (csc->sc_flags & FLAGS_IPG2)) {
		sc->sc_ipginttx = MVGBE_IPGINTTX_DEFAULT;
		sc->sc_ipgintrx = MVGBE_IPGINTRX_DEFAULT;
	}
	if (csc->sc_flags & FLAGS_FIX_MTU)
		MVGBE_WRITE(sc, MVGBE_MTU, 0);	/* hw reset value is wrong */
	if (sc->sc_version >= 0x10) {
		MVGBE_WRITE(csc, MVGBE_PANC,
		    MVGBE_PANC_FORCELINKPASS	|
		    MVGBE_PANC_INBANDANBYPASSEN	|
		    MVGBE_PANC_SETMIISPEED	|
		    MVGBE_PANC_SETGMIISPEED	|
		    MVGBE_PANC_ANSPEEDEN	|
		    MVGBE_PANC_SETFCEN		|
		    MVGBE_PANC_PAUSEADV		|
		    MVGBE_PANC_SETFULLDX	|
		    MVGBE_PANC_ANDUPLEXEN	|
		    MVGBE_PANC_RESERVED);
		MVGBE_WRITE(csc, MVGBE_PMACC0,
		    MVGBE_PMACC0_RESERVED |
		    MVGBE_PMACC0_FRAMESIZELIMIT(1600));
		reg = MVGBE_READ(csc, MVGBE_PMACC2);
		reg &= MVGBE_PMACC2_PCSEN;	/* keep PCSEN bit */
		MVGBE_WRITE(csc, MVGBE_PMACC2,
		    reg | MVGBE_PMACC2_RESERVED | MVGBE_PMACC2_RGMIIEN);

		MVGBE_WRITE(sc, MVGBE_PXCX,
		    MVGBE_READ(sc, MVGBE_PXCX) & ~MVGBE_PXCX_TXCRCDIS);

#ifndef MULTIPROCESSOR
		MVGBE_WRITE(sc, MVGBE_PACC, MVGVE_PACC_ACCELERATIONMODE_BM);
#else
		MVGBE_WRITE(sc, MVGBE_PACC, MVGVE_PACC_ACCELERATIONMODE_EDM);
#endif
	} else {
		MVGBE_WRITE(sc, MVGBE_PSC,
		    MVGBE_PSC_ANFC |		/* Enable Auto-Neg Flow Ctrl */
		    MVGBE_PSC_RESERVED |	/* Must be set to 1 */
		    MVGBE_PSC_FLFAIL |		/* Do NOT Force Link Fail */
		    MVGBE_PSC_MRU(MVGBE_PSC_MRU_9022) | /* we want 9k */
		    MVGBE_PSC_SETFULLDX);	/* Set_FullDx */
		/* XXXX: mvgbe(4) always uses RGMII. */
		MVGBE_WRITE(sc, MVGBE_PSC1,
		    MVGBE_READ(sc, MVGBE_PSC1) | MVGBE_PSC1_RGMIIEN);
		/* XXXX: Also always use Weighted Round-Robin Priority Mode */
		MVGBE_WRITE(sc, MVGBE_TQFPC, MVGBE_TQFPC_EN(0));

		sc->sc_cmdsts_opts = MVGBE_TX_GENERATE_CRC;
	}

	MVGBE_WRITE(sc, MVGBE_CRDP(0), MVGBE_RX_RING_ADDR(sc, 0));
	MVGBE_WRITE(sc, MVGBE_TCQDP, MVGBE_TX_RING_ADDR(sc, 0));

	if (csc->sc_flags & FLAGS_FIX_TQTB) {
		/*
		 * Queue 0 (offset 0x72700) must be programmed to 0x3fffffff.
		 * And offset 0x72704 must be programmed to 0x03ffffff.
		 * Queues 1 through 7 must be programmed to 0x0.
		 */
		MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(0), 0x3fffffff);
		MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(0), 0x03ffffff);
		for (i = 1; i < 8; i++) {
			MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(i), 0x0);
			MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(i), 0x0);
		}
	} else if (sc->sc_version < 0x10)
		for (i = 1; i < 8; i++) {
			MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(i), 0x3fffffff);
			MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(i), 0xffff7fff);
			MVGBE_WRITE(sc, MVGBE_TQAC(i), 0xfc0000ff);
		}

	MVGBE_WRITE(sc, MVGBE_PXC, MVGBE_PXC_RXCS);
	MVGBE_WRITE(sc, MVGBE_PXCX, 0);

	/* Set SDC register except IPGINT bits */
	MVGBE_WRITE(sc, MVGBE_SDC,
	    MVGBE_SDC_RXBSZ_16_64BITWORDS |
#if BYTE_ORDER == LITTLE_ENDIAN
	    MVGBE_SDC_BLMR |	/* Big/Little Endian Receive Mode: No swap */
	    MVGBE_SDC_BLMT |	/* Big/Little Endian Transmit Mode: No swap */
#endif
	    MVGBE_SDC_TXBSZ_16_64BITWORDS);
	/* And then set IPGINT bits */
	mvgbe_ipgintrx(csc, sc, sc->sc_ipgintrx);

	/* Tx side */
	MVGBE_WRITE(sc, MVGBE_PTFUT, 0);
	mvgbe_ipginttx(csc, sc, sc->sc_ipginttx);

	mvgbe_filter_setup(sc);

	mii_mediachg(mii);

	/* Enable port */
	if (sc->sc_version >= 0x10) {
		reg = MVGBE_READ(csc, MVGBE_PMACC0);
		MVGBE_WRITE(csc, MVGBE_PMACC0, reg | MVGBE_PMACC0_PORTEN);
	} else {
		reg = MVGBE_READ(sc, MVGBE_PSC);
		MVGBE_WRITE(sc, MVGBE_PSC, reg | MVGBE_PSC_PORTEN);
	}

	/* If Link is UP, Start RX and TX traffic */
	if (MVGBE_IS_LINKUP(sc)) {
		/* Enable port RX/TX. */
		MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_ENQ(0));
		MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ(0));
	}

	/* Enable interrupt masks */
	MVGBE_WRITE(sc, MVGBE_PIM,
	    MVGBE_IC_RXBUF |
	    MVGBE_IC_EXTEND |
	    MVGBE_IC_RXBUFQ_MASK |
	    MVGBE_IC_RXERROR |
	    MVGBE_IC_RXERRQ_MASK);
	MVGBE_WRITE(sc, MVGBE_PEIM,
	    MVGBE_ICE_TXBUF_MASK |
	    MVGBE_ICE_TXERR_MASK |
	    MVGBE_ICE_LINKCHG);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

/* ARGSUSED */
static void
mvgbe_stop(struct ifnet *ifp, int disable)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct mvgbec_softc *csc = device_private(device_parent(sc->sc_dev));
	struct mvgbe_chain_data *cdata = &sc->sc_cdata;
	uint32_t reg, txinprog, txfifoemp;
	int i, cnt;

	DPRINTFN(2, ("mvgbe_stop\n"));

	callout_stop(&sc->sc_tick_ch);

	/* Stop Rx port activity. Check port Rx activity. */
	reg = MVGBE_READ(sc, MVGBE_RQC);
	if (reg & MVGBE_RQC_ENQ_MASK)
		/* Issue stop command for active channels only */
		MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_DISQ_DISABLE(reg));

	/* Stop Tx port activity. Check port Tx activity. */
	if (MVGBE_READ(sc, MVGBE_TQC) & MVGBE_TQC_ENQ(0))
		MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_DISQ(0));

	/* Force link down */
	if (sc->sc_version >= 0x10) {
		reg = MVGBE_READ(csc, MVGBE_PANC);
		MVGBE_WRITE(csc, MVGBE_PANC, reg | MVGBE_PANC_FORCELINKFAIL);

		txinprog = MVGBE_PS_TXINPROG_(0);
		txfifoemp = MVGBE_PS_TXFIFOEMP_(0);
	} else {
		reg = MVGBE_READ(sc, MVGBE_PSC);
		MVGBE_WRITE(sc, MVGBE_PSC, reg & ~MVGBE_PSC_FLFAIL);

		txinprog = MVGBE_PS_TXINPROG;
		txfifoemp = MVGBE_PS_TXFIFOEMP;
	}

#define RX_DISABLE_TIMEOUT          0x1000000
#define TX_FIFO_EMPTY_TIMEOUT       0x1000000
	/* Wait for all Rx activity to terminate. */
	cnt = 0;
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			aprint_error_ifnet(ifp,
			    "timeout for RX stopped. rqc 0x%x\n", reg);
			break;
		}
		cnt++;

		/*
		 * Check the Receive Queue Command register to verify
		 * that all Rx queues are stopped.
		 */
		reg = MVGBE_READ(sc, MVGBE_RQC);
	} while (reg & 0xff);

	/* Double check to verify that TX FIFO is empty */
	cnt = 0;
	while (1) {
		do {
			if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
				aprint_error_ifnet(ifp,
				    "timeout for TX FIFO empty. status 0x%x\n",
				    reg);
				break;
			}
			cnt++;

			reg = MVGBE_READ(sc, MVGBE_PS);
		} while (!(reg & txfifoemp) || reg & txinprog);

		if (cnt >= TX_FIFO_EMPTY_TIMEOUT)
			break;

		/* Double check */
		reg = MVGBE_READ(sc, MVGBE_PS);
		if (reg & txfifoemp && !(reg & txinprog))
			break;
		else
			aprint_error_ifnet(ifp,
			    "TX FIFO empty double check failed."
			    " %d loops, status 0x%x\n", cnt, reg);
	}

	/* Reset the Enable bit */
	if (sc->sc_version >= 0x10) {
		reg = MVGBE_READ(csc, MVGBE_PMACC0);
		MVGBE_WRITE(csc, MVGBE_PMACC0, reg & ~MVGBE_PMACC0_PORTEN);
	} else {
		reg = MVGBE_READ(sc, MVGBE_PSC);
		MVGBE_WRITE(sc, MVGBE_PSC, reg & ~MVGBE_PSC_PORTEN);
	}

	/*
	 * Disable and clear interrupts
	 * 0) controller interrupt
	 * 1) port interrupt cause
	 * 2) port interrupt mask
	 */
	MVGBE_WRITE(csc, MVGBE_EUIM, 0);
	MVGBE_WRITE(csc, MVGBE_EUIC, 0);
	MVGBE_WRITE(sc, MVGBE_IC, 0);
	MVGBE_WRITE(sc, MVGBE_ICE, 0);
	MVGBE_WRITE(sc, MVGBE_PIM, 0);
	MVGBE_WRITE(sc, MVGBE_PEIM, 0);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
		if (cdata->mvgbe_rx_chain[i].mvgbe_mbuf != NULL) {
			m_freem(cdata->mvgbe_rx_chain[i].mvgbe_mbuf);
			cdata->mvgbe_rx_chain[i].mvgbe_mbuf = NULL;
		}
	}
	for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
		if (cdata->mvgbe_tx_chain[i].mvgbe_mbuf != NULL) {
			m_freem(cdata->mvgbe_tx_chain[i].mvgbe_mbuf);
			cdata->mvgbe_tx_chain[i].mvgbe_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

static void
mvgbe_watchdog(struct ifnet *ifp)
{
	struct mvgbe_softc *sc = ifp->if_softc;

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	mvgbe_txeof(sc);
	if (sc->sc_cdata.mvgbe_tx_cnt != 0) {
		if (sc->sc_wdogsoft) {
			/*
			 * There is a race condition between the CPU and the
			 * DMA engine: when the DMA engine encounters the end
			 * of the queue, it clears the MVGBE_TQC_ENQ bit.
			 */
   1456 			MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ(0));
   1457 			ifp->if_timer = 5;
   1458 			sc->sc_wdogsoft = 0;
   1459 			MVGBE_EVCNT_INCR(&sc->sc_ev_wdogsoft);
   1460 		} else {
   1461 			aprint_error_ifnet(ifp, "watchdog timeout\n");
   1462 
   1463 			ifp->if_oerrors++;
   1464 
   1465 			mvgbe_init(ifp);
   1466 		}
   1467 	}
   1468 }
   1469 
   1470 static int
   1471 mvgbe_ifflags_cb(struct ethercom *ec)
   1472 {
   1473 	struct ifnet *ifp = &ec->ec_if;
   1474 	struct mvgbe_softc *sc = ifp->if_softc;
   1475 	int change = ifp->if_flags ^ sc->sc_if_flags;
   1476 
   1477 	if (change != 0)
   1478 		sc->sc_if_flags = ifp->if_flags;
   1479 
   1480 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
   1481 		return ENETRESET;
   1482 
   1483 	if ((change & IFF_PROMISC) != 0)
   1484 		mvgbe_filter_setup(sc);
   1485 
   1486 	return 0;
   1487 }
   1488 
   1489 /*
   1490  * Set media options.
   1491  */
   1492 static int
   1493 mvgbe_mediachange(struct ifnet *ifp)
   1494 {
   1495 	return ether_mediachange(ifp);
   1496 }
   1497 
   1498 /*
   1499  * Report current media status.
   1500  */
   1501 static void
   1502 mvgbe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   1503 {
   1504 	ether_mediastatus(ifp, ifmr);
   1505 }
   1506 
   1507 
   1508 static int
   1509 mvgbe_init_rx_ring(struct mvgbe_softc *sc)
   1510 {
   1511 	struct mvgbe_chain_data *cd = &sc->sc_cdata;
   1512 	struct mvgbe_ring_data *rd = sc->sc_rdata;
   1513 	int i;
   1514 
   1515 	memset(rd->mvgbe_rx_ring, 0,
   1516 	    sizeof(struct mvgbe_rx_desc) * MVGBE_RX_RING_CNT);
   1517 
   1518 	for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
   1519 		cd->mvgbe_rx_chain[i].mvgbe_desc =
   1520 		    &rd->mvgbe_rx_ring[i];
   1521 		if (i == MVGBE_RX_RING_CNT - 1) {
   1522 			cd->mvgbe_rx_chain[i].mvgbe_next =
   1523 			    &cd->mvgbe_rx_chain[0];
   1524 			rd->mvgbe_rx_ring[i].nextdescptr =
   1525 			    MVGBE_RX_RING_ADDR(sc, 0);
   1526 		} else {
   1527 			cd->mvgbe_rx_chain[i].mvgbe_next =
   1528 			    &cd->mvgbe_rx_chain[i + 1];
   1529 			rd->mvgbe_rx_ring[i].nextdescptr =
   1530 			    MVGBE_RX_RING_ADDR(sc, i + 1);
   1531 		}
   1532 	}
   1533 
   1534 	for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
   1535 		if (mvgbe_newbuf(sc, i, NULL,
   1536 		    sc->sc_cdata.mvgbe_rx_jumbo_map) == ENOBUFS) {
   1537 			aprint_error_ifnet(&sc->sc_ethercom.ec_if,
   1538 			    "failed alloc of %dth mbuf\n", i);
   1539 			return ENOBUFS;
   1540 		}
   1541 	}
   1542 	sc->sc_cdata.mvgbe_rx_prod = 0;
   1543 	sc->sc_cdata.mvgbe_rx_cons = 0;
   1544 
   1545 	return 0;
   1546 }
   1547 
   1548 static int
   1549 mvgbe_init_tx_ring(struct mvgbe_softc *sc)
   1550 {
   1551 	struct mvgbe_chain_data *cd = &sc->sc_cdata;
   1552 	struct mvgbe_ring_data *rd = sc->sc_rdata;
   1553 	int i;
   1554 
   1555 	memset(sc->sc_rdata->mvgbe_tx_ring, 0,
   1556 	    sizeof(struct mvgbe_tx_desc) * MVGBE_TX_RING_CNT);
   1557 
   1558 	for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
   1559 		cd->mvgbe_tx_chain[i].mvgbe_desc =
   1560 		    &rd->mvgbe_tx_ring[i];
   1561 		if (i == MVGBE_TX_RING_CNT - 1) {
   1562 			cd->mvgbe_tx_chain[i].mvgbe_next =
   1563 			    &cd->mvgbe_tx_chain[0];
   1564 			rd->mvgbe_tx_ring[i].nextdescptr =
   1565 			    MVGBE_TX_RING_ADDR(sc, 0);
   1566 		} else {
   1567 			cd->mvgbe_tx_chain[i].mvgbe_next =
   1568 			    &cd->mvgbe_tx_chain[i + 1];
   1569 			rd->mvgbe_tx_ring[i].nextdescptr =
   1570 			    MVGBE_TX_RING_ADDR(sc, i + 1);
   1571 		}
   1572 		rd->mvgbe_tx_ring[i].cmdsts = MVGBE_BUFFER_OWNED_BY_HOST;
   1573 	}
   1574 
   1575 	sc->sc_cdata.mvgbe_tx_prod = 0;
   1576 	sc->sc_cdata.mvgbe_tx_cons = 0;
   1577 	sc->sc_cdata.mvgbe_tx_cnt = 0;
   1578 
   1579 	MVGBE_CDTXSYNC(sc, 0, MVGBE_TX_RING_CNT,
   1580 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1581 
   1582 	return 0;
   1583 }
   1584 
   1585 static int
   1586 mvgbe_newbuf(struct mvgbe_softc *sc, int i, struct mbuf *m,
   1587 		bus_dmamap_t dmamap)
   1588 {
   1589 	struct mbuf *m_new = NULL;
   1590 	struct mvgbe_chain *c;
   1591 	struct mvgbe_rx_desc *r;
   1592 	int align;
   1593 	vaddr_t offset;
   1594 
   1595 	if (m == NULL) {
   1596 		void *buf = NULL;
   1597 
   1598 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
   1599 		if (m_new == NULL) {
   1600 			aprint_error_ifnet(&sc->sc_ethercom.ec_if,
   1601 			    "no memory for rx list -- packet dropped!\n");
   1602 			return ENOBUFS;
   1603 		}
   1604 
   1605 		/* Allocate the jumbo buffer */
   1606 		buf = mvgbe_jalloc(sc);
   1607 		if (buf == NULL) {
   1608 			m_freem(m_new);
   1609 			DPRINTFN(1, ("%s jumbo allocation failed -- packet "
   1610 			    "dropped!\n", sc->sc_ethercom.ec_if.if_xname));
   1611 			return ENOBUFS;
   1612 		}
   1613 
   1614 		/* Attach the buffer to the mbuf */
   1615 		m_new->m_len = m_new->m_pkthdr.len = MVGBE_JLEN;
   1616 		MEXTADD(m_new, buf, MVGBE_JLEN, 0, mvgbe_jfree, sc);
   1617 	} else {
   1618 		/*
   1619 		 * We're re-using a previously allocated mbuf;
   1620 		 * be sure to re-init pointers and lengths to
   1621 		 * default values.
   1622 		 */
   1623 		m_new = m;
   1624 		m_new->m_len = m_new->m_pkthdr.len = MVGBE_JLEN;
   1625 		m_new->m_data = m_new->m_ext.ext_buf;
   1626 	}
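	/*
	 * The RX engine expects buffers aligned to MVGBE_RXBUF_ALIGN;
	 * nudge m_data forward to the next boundary if necessary.
	 */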
   1627 	align = (u_long)m_new->m_data & MVGBE_RXBUF_MASK;
   1628 	if (align != 0) {
   1629 		DPRINTFN(1,("align = %d\n", align));
		m_adj(m_new, MVGBE_RXBUF_ALIGN - align);
   1631 	}
   1632 
   1633 	c = &sc->sc_cdata.mvgbe_rx_chain[i];
   1634 	r = c->mvgbe_desc;
   1635 	c->mvgbe_mbuf = m_new;
   1636 	offset = (vaddr_t)m_new->m_data - (vaddr_t)sc->sc_cdata.mvgbe_jumbo_buf;
   1637 	r->bufptr = dmamap->dm_segs[0].ds_addr + offset;
   1638 	r->bufsize = MVGBE_JLEN & ~MVGBE_RXBUF_MASK;
   1639 	r->cmdsts = MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_ENABLE_INTERRUPT;
   1640 
   1641 	/* Invalidate RX buffer */
   1642 	bus_dmamap_sync(sc->sc_dmat, dmamap, offset, r->bufsize,
   1643 	    BUS_DMASYNC_PREREAD);
   1644 
   1645 	MVGBE_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1646 
   1647 	return 0;
   1648 }
   1649 
   1650 /*
   1651  * Memory management for jumbo frames.
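 *
 * A single bus_dmamem allocation of MVGBE_JMEM bytes is carved into
 * MVGBE_JSLOTS buffers of MVGBE_JLEN (9K) bytes each.  Free and in-use
 * buffers are tracked on two lists of interchangeable jpool entries;
 * buffers are lent to receive mbufs via MEXTADD() and returned to the
 * free list by the mvgbe_jfree() destructor.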
   1652  */
   1653 
   1654 static int
   1655 mvgbe_alloc_jumbo_mem(struct mvgbe_softc *sc)
   1656 {
   1657 	char *ptr, *kva;
   1658 	bus_dma_segment_t seg;
   1659 	int i, rseg, state, error;
   1660 	struct mvgbe_jpool_entry *entry;
   1661 
   1662 	state = error = 0;
   1663 
   1664 	/* Grab a big chunk o' storage. */
   1665 	if (bus_dmamem_alloc(sc->sc_dmat, MVGBE_JMEM, PAGE_SIZE, 0,
   1666 	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
   1667 		aprint_error_dev(sc->sc_dev, "can't alloc rx buffers\n");
   1668 		return ENOBUFS;
   1669 	}
   1670 
   1671 	state = 1;
   1672 	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, MVGBE_JMEM,
   1673 	    (void **)&kva, BUS_DMA_NOWAIT)) {
   1674 		aprint_error_dev(sc->sc_dev,
   1675 		    "can't map dma buffers (%d bytes)\n", MVGBE_JMEM);
   1676 		error = ENOBUFS;
   1677 		goto out;
   1678 	}
   1679 
   1680 	state = 2;
   1681 	if (bus_dmamap_create(sc->sc_dmat, MVGBE_JMEM, 1, MVGBE_JMEM, 0,
   1682 	    BUS_DMA_NOWAIT, &sc->sc_cdata.mvgbe_rx_jumbo_map)) {
   1683 		aprint_error_dev(sc->sc_dev, "can't create dma map\n");
   1684 		error = ENOBUFS;
   1685 		goto out;
   1686 	}
   1687 
   1688 	state = 3;
   1689 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_cdata.mvgbe_rx_jumbo_map,
   1690 	    kva, MVGBE_JMEM, NULL, BUS_DMA_NOWAIT)) {
   1691 		aprint_error_dev(sc->sc_dev, "can't load dma map\n");
   1692 		error = ENOBUFS;
   1693 		goto out;
   1694 	}
   1695 
   1696 	state = 4;
   1697 	sc->sc_cdata.mvgbe_jumbo_buf = (void *)kva;
   1698 	DPRINTFN(1,("mvgbe_jumbo_buf = %p\n", sc->sc_cdata.mvgbe_jumbo_buf));
   1699 
   1700 	LIST_INIT(&sc->sc_jfree_listhead);
   1701 	LIST_INIT(&sc->sc_jinuse_listhead);
   1702 
   1703 	/*
   1704 	 * Now divide it up into 9K pieces and save the addresses
   1705 	 * in an array.
   1706 	 */
   1707 	ptr = sc->sc_cdata.mvgbe_jumbo_buf;
   1708 	for (i = 0; i < MVGBE_JSLOTS; i++) {
   1709 		sc->sc_cdata.mvgbe_jslots[i] = ptr;
   1710 		ptr += MVGBE_JLEN;
   1711 		entry = kmem_alloc(sizeof(struct mvgbe_jpool_entry), KM_SLEEP);
   1712 		if (entry == NULL) {
   1713 			aprint_error_dev(sc->sc_dev,
   1714 			    "no memory for jumbo buffer queue!\n");
   1715 			error = ENOBUFS;
   1716 			goto out;
   1717 		}
   1718 		entry->slot = i;
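		/*
		 * Entry 0 is seeded onto the in-use list rather than the
		 * free list.  The jpool entries are interchangeable
		 * tokens: mvgbe_jfree() simply retags the first in-use
		 * entry with the slot of the buffer being returned.
		 */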
   1719 		if (i)
   1720 			LIST_INSERT_HEAD(&sc->sc_jfree_listhead, entry,
   1721 			    jpool_entries);
   1722 		else
   1723 			LIST_INSERT_HEAD(&sc->sc_jinuse_listhead, entry,
   1724 			    jpool_entries);
   1725 	}
   1726 out:
   1727 	if (error != 0) {
   1728 		switch (state) {
   1729 		case 4:
   1730 			bus_dmamap_unload(sc->sc_dmat,
   1731 			    sc->sc_cdata.mvgbe_rx_jumbo_map);
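			/* FALLTHROUGH */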
   1732 		case 3:
   1733 			bus_dmamap_destroy(sc->sc_dmat,
   1734 			    sc->sc_cdata.mvgbe_rx_jumbo_map);
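			/* FALLTHROUGH */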
   1735 		case 2:
   1736 			bus_dmamem_unmap(sc->sc_dmat, kva, MVGBE_JMEM);
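			/* FALLTHROUGH */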
   1737 		case 1:
   1738 			bus_dmamem_free(sc->sc_dmat, &seg, rseg);
   1739 			break;
   1740 		default:
   1741 			break;
   1742 		}
   1743 	}
   1744 
   1745 	return error;
   1746 }
   1747 
   1748 /*
   1749  * Allocate a jumbo buffer.
   1750  */
   1751 static void *
   1752 mvgbe_jalloc(struct mvgbe_softc *sc)
   1753 {
   1754 	struct mvgbe_jpool_entry *entry;
   1755 
   1756 	entry = LIST_FIRST(&sc->sc_jfree_listhead);
   1757 
   1758 	if (entry == NULL)
   1759 		return NULL;
   1760 
   1761 	LIST_REMOVE(entry, jpool_entries);
   1762 	LIST_INSERT_HEAD(&sc->sc_jinuse_listhead, entry, jpool_entries);
   1763 	return sc->sc_cdata.mvgbe_jslots[entry->slot];
   1764 }
   1765 
   1766 /*
   1767  * Release a jumbo buffer.
   1768  */
   1769 static void
   1770 mvgbe_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
   1771 {
   1772 	struct mvgbe_jpool_entry *entry;
   1773 	struct mvgbe_softc *sc;
   1774 	int i, s;
   1775 
   1776 	/* Extract the softc struct pointer. */
   1777 	sc = (struct mvgbe_softc *)arg;
   1778 
   1779 	if (sc == NULL)
   1780 		panic("%s: can't find softc pointer!", __func__);
   1781 
   1782 	/* calculate the slot this buffer belongs to */
   1783 
   1784 	i = ((vaddr_t)buf - (vaddr_t)sc->sc_cdata.mvgbe_jumbo_buf) / MVGBE_JLEN;
   1785 
   1786 	if ((i < 0) || (i >= MVGBE_JSLOTS))
   1787 		panic("%s: asked to free buffer that we don't manage!",
   1788 		    __func__);
   1789 
   1790 	s = splvm();
   1791 	entry = LIST_FIRST(&sc->sc_jinuse_listhead);
   1792 	if (entry == NULL)
   1793 		panic("%s: buffer not in use!", __func__);
   1794 	entry->slot = i;
   1795 	LIST_REMOVE(entry, jpool_entries);
   1796 	LIST_INSERT_HEAD(&sc->sc_jfree_listhead, entry, jpool_entries);
   1797 
   1798 	if (__predict_true(m != NULL))
   1799 		pool_cache_put(mb_cache, m);
   1800 	splx(s);
   1801 }
   1802 
   1803 static int
   1804 mvgbe_encap(struct mvgbe_softc *sc, struct mbuf *m_head,
   1805 	      uint32_t *txidx)
   1806 {
   1807 	struct mvgbe_tx_desc *f = NULL;
   1808 	struct mvgbe_txmap_entry *entry;
   1809 	bus_dma_segment_t *txseg;
   1810 	bus_dmamap_t txmap;
   1811 	uint32_t first, current, last, cmdsts;
   1812 	int m_csumflags, i;
   1813 	bool needs_defrag = false;
   1814 
   1815 	DPRINTFN(3, ("mvgbe_encap\n"));
   1816 
   1817 	entry = SIMPLEQ_FIRST(&sc->sc_txmap_head);
   1818 	if (entry == NULL) {
   1819 		DPRINTFN(2, ("mvgbe_encap: no txmap available\n"));
   1820 		return ENOBUFS;
   1821 	}
   1822 	txmap = entry->dmamap;
   1823 
   1824 	first = current = last = *txidx;
   1825 
   1826 	/*
   1827 	 * Preserve m_pkthdr.csum_flags here since m_head might be
   1828 	 * updated by m_defrag()
   1829 	 */
   1830 	m_csumflags = m_head->m_pkthdr.csum_flags;
   1831 
   1832 do_defrag:
	if (__predict_false(needs_defrag)) {
   1834 		/* A small unaligned segment was detected. */
   1835 		struct mbuf *m_new;
   1836 		m_new = m_defrag(m_head, M_DONTWAIT);
   1837 		if (m_new == NULL)
   1838 			return EFBIG;
   1839 		m_head = m_new;
   1840 	}
   1841 
   1842 	/*
   1843 	 * Start packing the mbufs in this chain into
   1844 	 * the fragment pointers. Stop when we run out
   1845 	 * of fragments or hit the end of the mbuf chain.
   1846 	 */
   1847 	if (bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m_head, BUS_DMA_NOWAIT)) {
   1848 		DPRINTFN(1, ("mvgbe_encap: dmamap failed\n"));
   1849 		return ENOBUFS;
   1850 	}
   1851 
   1852 	txseg = txmap->dm_segs;
   1853 
	if (__predict_true(!needs_defrag)) {
		/*
		 * Detect a rarely encountered DMA limitation: the
		 * controller cannot handle segments of 1 to 8 bytes
		 * that are not 8-byte aligned.  If one is found,
		 * defragment the chain and try again.
		 */
   1858 		for (i = 0; i < txmap->dm_nsegs; i++) {
   1859 			if (((txseg[i].ds_addr & 7) != 0) &&
   1860 			    (txseg[i].ds_len <= 8) &&
   1861 			    (txseg[i].ds_len >= 1)
   1862 			    ) {
   1863 				txseg = NULL;
   1864 				bus_dmamap_unload(sc->sc_dmat, txmap);
   1865 				needs_defrag = true;
   1866 				goto do_defrag;
   1867 			}
   1868 		}
   1869 	}
   1870 
   1871 	/* Sync the DMA map. */
   1872 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
   1873 	    BUS_DMASYNC_PREWRITE);
   1874 
   1875 	if (sc->sc_cdata.mvgbe_tx_cnt + txmap->dm_nsegs >=
   1876 	    MVGBE_TX_RING_CNT) {
   1877 		DPRINTFN(2, ("mvgbe_encap: too few descriptors free\n"));
   1878 		bus_dmamap_unload(sc->sc_dmat, txmap);
   1879 		return ENOBUFS;
   1880 	}
   1883 	DPRINTFN(2, ("mvgbe_encap: dm_nsegs=%d\n", txmap->dm_nsegs));
   1884 
   1885 	for (i = 0; i < txmap->dm_nsegs; i++) {
   1886 		f = &sc->sc_rdata->mvgbe_tx_ring[current];
   1887 		f->bufptr = txseg[i].ds_addr;
   1888 		f->bytecnt = txseg[i].ds_len;
   1889 		if (i != 0)
   1890 			f->cmdsts = MVGBE_BUFFER_OWNED_BY_DMA;
   1891 		last = current;
   1892 		current = MVGBE_TX_RING_NEXT(current);
   1893 	}
   1894 
   1895 	cmdsts = sc->sc_cmdsts_opts;
   1896 	if (m_csumflags & M_CSUM_IPv4)
   1897 		cmdsts |= MVGBE_TX_GENERATE_IP_CHKSUM;
   1898 	if (m_csumflags & M_CSUM_TCPv4)
   1899 		cmdsts |=
   1900 		    MVGBE_TX_GENERATE_L4_CHKSUM | MVGBE_TX_L4_TYPE_TCP;
   1901 	if (m_csumflags & M_CSUM_UDPv4)
   1902 		cmdsts |=
   1903 		    MVGBE_TX_GENERATE_L4_CHKSUM | MVGBE_TX_L4_TYPE_UDP;
   1904 	if (m_csumflags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
   1905 		const int iphdr_unitlen = sizeof(struct ip) / sizeof(uint32_t);
   1906 
   1907 		cmdsts |= MVGBE_TX_IP_NO_FRAG |
   1908 		    MVGBE_TX_IP_HEADER_LEN(iphdr_unitlen);	/* unit is 4B */
   1909 	}
   1910 	if (txmap->dm_nsegs == 1)
   1911 		f->cmdsts = cmdsts		|
   1912 		    MVGBE_TX_ENABLE_INTERRUPT	|
   1913 		    MVGBE_TX_ZERO_PADDING	|
   1914 		    MVGBE_TX_FIRST_DESC		|
   1915 		    MVGBE_TX_LAST_DESC;
   1916 	else {
   1917 		f = &sc->sc_rdata->mvgbe_tx_ring[first];
   1918 		f->cmdsts = cmdsts | MVGBE_TX_FIRST_DESC;
   1919 
   1920 		f = &sc->sc_rdata->mvgbe_tx_ring[last];
   1921 		f->cmdsts =
   1922 		    MVGBE_BUFFER_OWNED_BY_DMA	|
   1923 		    MVGBE_TX_ENABLE_INTERRUPT	|
   1924 		    MVGBE_TX_ZERO_PADDING	|
   1925 		    MVGBE_TX_LAST_DESC;
   1926 
   1927 		/* Sync descriptors except first */
   1928 		MVGBE_CDTXSYNC(sc,
   1929 		    (MVGBE_TX_RING_CNT - 1 == *txidx) ? 0 : (*txidx) + 1,
   1930 		    txmap->dm_nsegs - 1,
   1931 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1932 	}
   1933 
   1934 	sc->sc_cdata.mvgbe_tx_chain[last].mvgbe_mbuf = m_head;
   1935 	SIMPLEQ_REMOVE_HEAD(&sc->sc_txmap_head, link);
   1936 	sc->sc_cdata.mvgbe_tx_map[last] = entry;
   1937 
	/*
	 * Finally, hand the first descriptor to the DMA engine and sync
	 * it; deferring this until the rest of the chain is set up
	 * ensures the hardware never sees a half-built packet.
	 */
   1939 	sc->sc_rdata->mvgbe_tx_ring[first].cmdsts |=
   1940 	    MVGBE_BUFFER_OWNED_BY_DMA;
   1941 	MVGBE_CDTXSYNC(sc, *txidx, 1,
   1942 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1943 
   1944 	sc->sc_cdata.mvgbe_tx_cnt += i;
   1945 	*txidx = current;
   1946 
   1947 	DPRINTFN(3, ("mvgbe_encap: completed successfully\n"));
   1948 
   1949 	return 0;
   1950 }
   1951 
   1952 static void
   1953 mvgbe_rxeof(struct mvgbe_softc *sc)
   1954 {
   1955 	struct mvgbe_chain_data *cdata = &sc->sc_cdata;
   1956 	struct mvgbe_rx_desc *cur_rx;
   1957 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1958 	struct mbuf *m;
   1959 	bus_dmamap_t dmamap;
   1960 	uint32_t rxstat;
   1961 	uint16_t bufsize;
   1962 	int idx, cur, total_len;
   1963 
   1964 	idx = sc->sc_cdata.mvgbe_rx_prod;
   1965 
   1966 	DPRINTFN(3, ("mvgbe_rxeof %d\n", idx));
   1967 
   1968 	for (;;) {
   1969 		cur = idx;
   1970 
   1971 		/* Sync the descriptor */
   1972 		MVGBE_CDRXSYNC(sc, idx,
   1973 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1974 
   1975 		cur_rx = &sc->sc_rdata->mvgbe_rx_ring[idx];
   1976 
   1977 		if ((cur_rx->cmdsts & MVGBE_BUFFER_OWNED_MASK) ==
   1978 		    MVGBE_BUFFER_OWNED_BY_DMA) {
   1979 			/* Invalidate the descriptor -- it's not ready yet */
   1980 			MVGBE_CDRXSYNC(sc, idx, BUS_DMASYNC_PREREAD);
   1981 			sc->sc_cdata.mvgbe_rx_prod = idx;
   1982 			break;
   1983 		}
   1984 #ifdef DIAGNOSTIC
   1985 		if ((cur_rx->cmdsts &
   1986 		    (MVGBE_RX_LAST_DESC | MVGBE_RX_FIRST_DESC)) !=
   1987 		    (MVGBE_RX_LAST_DESC | MVGBE_RX_FIRST_DESC))
   1988 			panic(
   1989 			    "mvgbe_rxeof: buffer size is smaller than packet");
   1990 #endif
   1991 
   1992 		dmamap = sc->sc_cdata.mvgbe_rx_jumbo_map;
   1993 
   1994 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   1995 		    BUS_DMASYNC_POSTREAD);
   1996 
   1997 		m = cdata->mvgbe_rx_chain[idx].mvgbe_mbuf;
   1998 		cdata->mvgbe_rx_chain[idx].mvgbe_mbuf = NULL;
   1999 		total_len = cur_rx->bytecnt - ETHER_CRC_LEN;
   2000 		rxstat = cur_rx->cmdsts;
   2001 		bufsize = cur_rx->bufsize;
   2002 
   2003 		cdata->mvgbe_rx_map[idx] = NULL;
   2004 
   2005 		idx = MVGBE_RX_RING_NEXT(idx);
   2006 
   2007 		if (rxstat & MVGBE_ERROR_SUMMARY) {
   2008 #if 0
   2009 			int err = rxstat & MVGBE_RX_ERROR_CODE_MASK;
   2010 
   2011 			if (err == MVGBE_RX_CRC_ERROR)
   2012 				ifp->if_ierrors++;
   2013 			if (err == MVGBE_RX_OVERRUN_ERROR)
   2014 				ifp->if_ierrors++;
   2015 			if (err == MVGBE_RX_MAX_FRAME_LEN_ERROR)
   2016 				ifp->if_ierrors++;
   2017 			if (err == MVGBE_RX_RESOURCE_ERROR)
   2018 				ifp->if_ierrors++;
   2019 #else
   2020 			ifp->if_ierrors++;
   2021 #endif
   2022 			mvgbe_newbuf(sc, cur, m, dmamap);
   2023 			continue;
   2024 		}
   2025 
   2026 		if (rxstat & MVGBE_RX_IP_FRAME_TYPE) {
   2027 			int flgs = 0;
   2028 
   2029 			/* Check IPv4 header checksum */
   2030 			flgs |= M_CSUM_IPv4;
   2031 			if (!(rxstat & MVGBE_RX_IP_HEADER_OK))
   2032 				flgs |= M_CSUM_IPv4_BAD;
   2033 			else if ((bufsize & MVGBE_RX_IP_FRAGMENT) == 0) {
   2034 				/*
   2035 				 * Check TCPv4/UDPv4 checksum for
   2036 				 * non-fragmented packet only.
   2037 				 *
				 * It seems that sometimes the
				 * MVGBE_RX_L4_CHECKSUM_OK bit is 0 even
				 * when the checksum is correct and the
				 * packet is not fragmented, so we don't
				 * set M_CSUM_TCP_UDP_BAD even if the
				 * csum bit is 0.
   2043 				 */
   2044 
   2045 				if (((rxstat & MVGBE_RX_L4_TYPE_MASK) ==
   2046 					MVGBE_RX_L4_TYPE_TCP) &&
   2047 				    ((rxstat & MVGBE_RX_L4_CHECKSUM_OK) != 0))
   2048 					flgs |= M_CSUM_TCPv4;
   2049 				else if (((rxstat & MVGBE_RX_L4_TYPE_MASK) ==
   2050 					MVGBE_RX_L4_TYPE_UDP) &&
   2051 				    ((rxstat & MVGBE_RX_L4_CHECKSUM_OK) != 0))
   2052 					flgs |= M_CSUM_UDPv4;
   2053 			}
   2054 			m->m_pkthdr.csum_flags = flgs;
   2055 		}
   2056 
   2057 		/*
   2058 		 * Try to allocate a new jumbo buffer. If that
   2059 		 * fails, copy the packet to mbufs and put the
   2060 		 * jumbo buffer back in the ring so it can be
   2061 		 * re-used. If allocating mbufs fails, then we
   2062 		 * have to drop the packet.
   2063 		 */
   2064 		if (mvgbe_newbuf(sc, cur, NULL, dmamap) == ENOBUFS) {
   2065 			struct mbuf *m0;
   2066 
   2067 			m0 = m_devget(mtod(m, char *), total_len, 0, ifp, NULL);
   2068 			mvgbe_newbuf(sc, cur, m, dmamap);
   2069 			if (m0 == NULL) {
   2070 				aprint_error_ifnet(ifp,
   2071 				    "no receive buffers available --"
   2072 				    " packet dropped!\n");
   2073 				ifp->if_ierrors++;
   2074 				continue;
   2075 			}
   2076 			m = m0;
   2077 		} else {
   2078 			m_set_rcvif(m, ifp);
   2079 			m->m_pkthdr.len = m->m_len = total_len;
   2080 		}
   2081 
		/* Strip the 2-byte hardware header prepended by the chip */
		m_adj(m, MVGBE_HWHEADER_SIZE);
   2084 
   2085 		/* pass it on. */
   2086 		if_percpuq_enqueue(ifp->if_percpuq, m);
   2087 	}
   2088 }
   2089 
   2090 static void
   2091 mvgbe_txeof(struct mvgbe_softc *sc)
   2092 {
   2093 	struct mvgbe_chain_data *cdata = &sc->sc_cdata;
   2094 	struct mvgbe_tx_desc *cur_tx;
   2095 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2096 	struct mvgbe_txmap_entry *entry;
   2097 	int idx;
   2098 
   2099 	DPRINTFN(3, ("mvgbe_txeof\n"));
   2100 
   2101 	/*
   2102 	 * Go through our tx ring and free mbufs for those
   2103 	 * frames that have been sent.
   2104 	 */
   2105 	idx = cdata->mvgbe_tx_cons;
   2106 	while (idx != cdata->mvgbe_tx_prod) {
   2107 		MVGBE_CDTXSYNC(sc, idx, 1,
   2108 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2109 
   2110 		cur_tx = &sc->sc_rdata->mvgbe_tx_ring[idx];
   2111 #ifdef MVGBE_DEBUG
   2112 		if (mvgbe_debug >= 3)
   2113 			mvgbe_dump_txdesc(cur_tx, idx);
   2114 #endif
   2115 		if ((cur_tx->cmdsts & MVGBE_BUFFER_OWNED_MASK) ==
   2116 		    MVGBE_BUFFER_OWNED_BY_DMA) {
   2117 			MVGBE_CDTXSYNC(sc, idx, 1, BUS_DMASYNC_PREREAD);
   2118 			break;
   2119 		}
   2120 		if (cur_tx->cmdsts & MVGBE_TX_LAST_DESC)
   2121 			ifp->if_opackets++;
   2122 		if (cur_tx->cmdsts & MVGBE_ERROR_SUMMARY) {
   2123 			int err = cur_tx->cmdsts & MVGBE_TX_ERROR_CODE_MASK;
   2124 
   2125 			if (err == MVGBE_TX_LATE_COLLISION_ERROR)
   2126 				ifp->if_collisions++;
   2127 			if (err == MVGBE_TX_UNDERRUN_ERROR)
   2128 				ifp->if_oerrors++;
   2129 			if (err == MVGBE_TX_EXCESSIVE_COLLISION_ERRO)
   2130 				ifp->if_collisions++;
   2131 		}
   2132 		if (cdata->mvgbe_tx_chain[idx].mvgbe_mbuf != NULL) {
   2133 			entry = cdata->mvgbe_tx_map[idx];
   2134 
   2135 			m_freem(cdata->mvgbe_tx_chain[idx].mvgbe_mbuf);
   2136 			cdata->mvgbe_tx_chain[idx].mvgbe_mbuf = NULL;
   2137 
   2138 			bus_dmamap_sync(sc->sc_dmat, entry->dmamap, 0,
   2139 			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   2140 
   2141 			bus_dmamap_unload(sc->sc_dmat, entry->dmamap);
   2142 			SIMPLEQ_INSERT_TAIL(&sc->sc_txmap_head, entry, link);
   2143 			cdata->mvgbe_tx_map[idx] = NULL;
   2144 		}
   2145 		cdata->mvgbe_tx_cnt--;
   2146 		idx = MVGBE_TX_RING_NEXT(idx);
   2147 	}
   2148 	if (cdata->mvgbe_tx_cnt == 0)
   2149 		ifp->if_timer = 0;
   2150 
   2151 	if (cdata->mvgbe_tx_cnt < MVGBE_TX_RING_CNT - 2)
   2152 		ifp->if_flags &= ~IFF_OACTIVE;
   2153 
   2154 	cdata->mvgbe_tx_cons = idx;
   2155 }
   2156 
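/*
 * CRC-8 with polynomial x^8 + x^2 + x + 1 (0x07), MSB first, zero
 * initial value; used by mvgbe_filter_setup() below to hash a
 * multicast address into the 256-entry "other multicast" table.
 * Worked example: for a single 0x01 input byte the seven leading zero
 * bits leave crc at 0 and the final 1 bit folds in the polynomial, so
 * mvgbe_crc8((const uint8_t *)"\x01", 1) == 0x07.
 */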
   2157 static uint8_t
   2158 mvgbe_crc8(const uint8_t *data, size_t size)
   2159 {
   2160 	int bit;
   2161 	uint8_t byte;
   2162 	uint8_t crc = 0;
   2163 	const uint8_t poly = 0x07;
   2164 
	while (size--)
		for (byte = *data++, bit = NBBY - 1; bit >= 0; bit--)
			crc = (crc << 1) ^
			    ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);
   2168 
   2169 	return crc;
   2170 }
   2171 
   2172 CTASSERT(MVGBE_NDFSMT == MVGBE_NDFOMT);
   2173 
   2174 static void
   2175 mvgbe_filter_setup(struct mvgbe_softc *sc)
   2176 {
   2177 	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2179 	struct ether_multi *enm;
   2180 	struct ether_multistep step;
   2181 	uint32_t dfut[MVGBE_NDFUT], dfsmt[MVGBE_NDFSMT], dfomt[MVGBE_NDFOMT];
   2182 	uint32_t pxc;
   2183 	int i;
   2184 	const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};
   2185 
   2186 	memset(dfut, 0, sizeof(dfut));
   2187 	memset(dfsmt, 0, sizeof(dfsmt));
   2188 	memset(dfomt, 0, sizeof(dfomt));
   2189 
   2190 	if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
   2191 		goto allmulti;
   2192 	}
   2193 
   2194 	ETHER_FIRST_MULTI(step, ec, enm);
   2195 	while (enm != NULL) {
   2196 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   2197 			/* ranges are complex and somewhat rare */
   2198 			goto allmulti;
   2199 		}
		/*
		 * The chip filters IPv4 multicast in 224.0.0.0/24
		 * (MAC 01:00:5e:00:00:xx) through the special multicast
		 * table, indexed by the last octet of the address.
		 */
   2201 		if (memcmp(enm->enm_addrlo, special, 5) == 0) {
   2202 			i = enm->enm_addrlo[5];
   2203 			dfsmt[i>>2] |=
   2204 			    MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
   2205 		} else {
   2206 			i = mvgbe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
   2207 			dfomt[i>>2] |=
   2208 			    MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
   2209 		}
   2210 
   2211 		ETHER_NEXT_MULTI(step, enm);
   2212 	}
   2213 	goto set;
   2214 
   2215 allmulti:
   2216 	if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
   2217 		for (i = 0; i < MVGBE_NDFSMT; i++) {
   2218 			dfsmt[i] = dfomt[i] =
   2219 			    MVGBE_DF(0, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
   2220 			    MVGBE_DF(1, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
   2221 			    MVGBE_DF(2, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
   2222 			    MVGBE_DF(3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
   2223 		}
   2224 	}
   2225 
   2226 set:
   2227 	pxc = MVGBE_READ(sc, MVGBE_PXC);
   2228 	pxc &= ~MVGBE_PXC_UPM;
   2229 	pxc |= MVGBE_PXC_RB | MVGBE_PXC_RBIP | MVGBE_PXC_RBARP;
   2230 	if (ifp->if_flags & IFF_BROADCAST) {
   2231 		pxc &= ~(MVGBE_PXC_RB | MVGBE_PXC_RBIP | MVGBE_PXC_RBARP);
   2232 	}
   2233 	if (ifp->if_flags & IFF_PROMISC) {
   2234 		pxc |= MVGBE_PXC_UPM;
   2235 	}
   2236 	MVGBE_WRITE(sc, MVGBE_PXC, pxc);
   2237 
   2238 	/* Set Destination Address Filter Unicast Table */
   2239 	if (ifp->if_flags & IFF_PROMISC) {
   2240 		/* pass all unicast addresses */
   2241 		for (i = 0; i < MVGBE_NDFUT; i++) {
   2242 			dfut[i] =
   2243 			    MVGBE_DF(0, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
   2244 			    MVGBE_DF(1, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
   2245 			    MVGBE_DF(2, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
   2246 			    MVGBE_DF(3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
   2247 		}
   2248 	} else {
   2249 		i = sc->sc_enaddr[5] & 0xf;		/* last nibble */
   2250 		dfut[i>>2] = MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
   2251 	}
   2252 	MVGBE_WRITE_FILTER(sc, MVGBE_DFUT, dfut, MVGBE_NDFUT);
   2253 
   2254 	/* Set Destination Address Filter Multicast Tables */
   2255 	MVGBE_WRITE_FILTER(sc, MVGBE_DFSMT, dfsmt, MVGBE_NDFSMT);
   2256 	MVGBE_WRITE_FILTER(sc, MVGBE_DFOMT, dfomt, MVGBE_NDFOMT);
   2257 }
   2258 
   2259 #ifdef MVGBE_DEBUG
   2260 static void
   2261 mvgbe_dump_txdesc(struct mvgbe_tx_desc *desc, int idx)
   2262 {
   2263 #define DESC_PRINT(X)					\
   2264 	if (X)						\
   2265 		printf("txdesc[%d]." #X "=%#x\n", idx, X);
   2266 
   2267 #if BYTE_ORDER == BIG_ENDIAN
   2268        DESC_PRINT(desc->bytecnt);
   2269        DESC_PRINT(desc->l4ichk);
   2270        DESC_PRINT(desc->cmdsts);
   2271        DESC_PRINT(desc->nextdescptr);
   2272        DESC_PRINT(desc->bufptr);
   2273 #else	/* LITTLE_ENDIAN */
   2274        DESC_PRINT(desc->cmdsts);
   2275        DESC_PRINT(desc->l4ichk);
   2276        DESC_PRINT(desc->bytecnt);
   2277        DESC_PRINT(desc->bufptr);
   2278        DESC_PRINT(desc->nextdescptr);
   2279 #endif
   2280 #undef DESC_PRINT
   2281 }
   2282 #endif
   2283 
   2284 SYSCTL_SETUP(sysctl_mvgbe, "sysctl mvgbe subtree setup")
   2285 {
   2286 	int rc;
   2287 	const struct sysctlnode *node;
   2288 
   2289 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
   2290 	    0, CTLTYPE_NODE, "mvgbe",
   2291 	    SYSCTL_DESCR("mvgbe interface controls"),
   2292 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
   2293 		goto err;
   2294 	}
   2295 
   2296 	mvgbe_root_num = node->sysctl_num;
   2297 	return;
   2298 
   2299 err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
   2301 }
   2302 
   2303 static void
   2304 sysctl_mvgbe_init(struct mvgbe_softc *sc)
   2305 {
   2306 	const struct sysctlnode *node;
   2307 	int mvgbe_nodenum;
   2308 
   2309 	if (sysctl_createv(&sc->mvgbe_clog, 0, NULL, &node,
   2310 		0, CTLTYPE_NODE, device_xname(sc->sc_dev),
   2311 		SYSCTL_DESCR("mvgbe per-controller controls"),
   2312 		NULL, 0, NULL, 0, CTL_HW, mvgbe_root_num, CTL_CREATE,
   2313 		CTL_EOL) != 0) {
   2314 		aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
   2315 		return;
   2316 	}
   2317 	mvgbe_nodenum = node->sysctl_num;
   2318 
   2319 	/* interrupt moderation sysctls */
   2320 	if (sysctl_createv(&sc->mvgbe_clog, 0, NULL, &node,
   2321 		CTLFLAG_READWRITE, CTLTYPE_INT, "ipginttx",
   2322 		SYSCTL_DESCR("mvgbe TX interrupt moderation timer"),
   2323 		mvgbe_sysctl_ipginttx, 0, (void *)sc,
   2324 		0, CTL_HW, mvgbe_root_num, mvgbe_nodenum, CTL_CREATE,
   2325 		CTL_EOL) != 0) {
   2326 		aprint_normal_dev(sc->sc_dev,
   2327 		    "couldn't create ipginttx sysctl node\n");
   2328 	}
   2329 	if (sysctl_createv(&sc->mvgbe_clog, 0, NULL, &node,
   2330 		CTLFLAG_READWRITE, CTLTYPE_INT, "ipgintrx",
   2331 		SYSCTL_DESCR("mvgbe RX interrupt moderation timer"),
   2332 		mvgbe_sysctl_ipgintrx, 0, (void *)sc,
   2333 		0, CTL_HW, mvgbe_root_num, mvgbe_nodenum, CTL_CREATE,
   2334 		CTL_EOL) != 0) {
   2335 		aprint_normal_dev(sc->sc_dev,
		    "couldn't create ipgintrx sysctl node\n");
   2337 	}
   2338 }
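
/*
 * Example (a sketch; "mvgbe0" is just a typical instance name): with
 * the nodes created above, the interrupt moderation timers can be
 * tuned from userland:
 *
 *	sysctl -w hw.mvgbe.mvgbe0.ipginttx=<ticks>
 *	sysctl -w hw.mvgbe.mvgbe0.ipgintrx=<ticks>
 */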
   2339 
   2340 static int
   2341 mvgbe_sysctl_ipginttx(SYSCTLFN_ARGS)
   2342 {
   2343 	int error;
   2344 	unsigned int t;
   2345 	struct sysctlnode node;
   2346 	struct mvgbec_softc *csc;
   2347 	struct mvgbe_softc *sc;
   2348 
   2349 	node = *rnode;
   2350 	sc = node.sysctl_data;
   2351 	csc = device_private(device_parent(sc->sc_dev));
   2352 	t = sc->sc_ipginttx;
   2353 	node.sysctl_data = &t;
   2354 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2355 	if (error || newp == NULL)
   2356 		return error;
   2357 
   2358 	if (mvgbe_ipginttx(csc, sc, t) < 0)
   2359 		return EINVAL;
   2360 	/*
   2361 	 * update the softc with sysctl-changed value, and mark
   2362 	 * for hardware update
   2363 	 */
   2364 	sc->sc_ipginttx = t;
   2365 
   2366 	return 0;
   2367 }
   2368 
   2369 static int
   2370 mvgbe_sysctl_ipgintrx(SYSCTLFN_ARGS)
   2371 {
   2372 	int error;
   2373 	unsigned int t;
   2374 	struct sysctlnode node;
   2375 	struct mvgbec_softc *csc;
   2376 	struct mvgbe_softc *sc;
   2377 
   2378 	node = *rnode;
   2379 	sc = node.sysctl_data;
   2380 	csc = device_private(device_parent(sc->sc_dev));
   2381 	t = sc->sc_ipgintrx;
   2382 	node.sysctl_data = &t;
   2383 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2384 	if (error || newp == NULL)
   2385 		return error;
   2386 
   2387 	if (mvgbe_ipgintrx(csc, sc, t) < 0)
   2388 		return EINVAL;
   2389 	/*
   2390 	 * update the softc with sysctl-changed value, and mark
   2391 	 * for hardware update
   2392 	 */
   2393 	sc->sc_ipgintrx = t;
   2394 
   2395 	return 0;
   2396 }
   2397