/*      $NetBSD: if_xge.c,v 1.1.2.2 2005/09/13 20:59:37 tron Exp $ */

/*
 * Copyright (c) 2004, SUNET, Swedish University Computer Network.
 * All rights reserved.
 *
 * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      SUNET, Swedish University Computer Network.
 * 4. The name of SUNET may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL SUNET
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the S2io Xframe Ten Gigabit Ethernet controller.
 *
 * TODO (in no specific order):
 *	HW VLAN support.
 *	IPv6 HW cksum.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_xge.c,v 1.1.2.2 2005/09/13 20:59:37 tron Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <sys/lock.h>
#include <sys/proc.h>

#include <dev/pci/if_xgereg.h>

/*
 * Some tunable constants, tune with care!
 */
#define RX_MODE		RX_MODE_1  /* Receive mode (buffer usage, see below) */
#define NRXDESCS	1016	   /* # of receive descriptors (requested) */
#define NTXDESCS	8192	   /* Number of transmit descriptors */
#define NTXFRAGS	100	   /* Max fragments per packet */
#define XGE_EVENT_COUNTERS	   /* Instrumentation */

/*
 * Receive buffer modes; 1, 3 or 5 buffers.
 */
#define RX_MODE_1 1
#define RX_MODE_3 3
#define RX_MODE_5 5

/*
 * Use clever macros to avoid a bunch of #ifdef's.
 */
#define XCONCAT3(x,y,z) x ## y ## z
#define CONCAT3(x,y,z) XCONCAT3(x,y,z)
#define NDESC_BUFMODE CONCAT3(NDESC_,RX_MODE,BUFMODE)
#define rxd_4k CONCAT3(rxd,RX_MODE,_4k)
#define rxdesc ___CONCAT(rxd,RX_MODE)

#define NEXTTX(x)	(((x)+1) % NTXDESCS)
#define NRXFRAGS	RX_MODE /* hardware imposed frags */
#define NRXPAGES	((NRXDESCS/NDESC_BUFMODE)+1)
#define NRXREAL		(NRXPAGES*NDESC_BUFMODE)
#define RXMAPSZ		(NRXPAGES*PAGE_SIZE)
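
/*
 * Example of how the concatenation macros expand: with RX_MODE set to
 * RX_MODE_1 (i.e. 1), CONCAT3 first substitutes its arguments and then
 * pastes them, so
 *
 *	NDESC_BUFMODE -> NDESC_1BUFMODE
 *	rxd_4k        -> rxd1_4k
 *	rxdesc        -> rxd1
 *
 * which select the 1-buffer descriptor layout declared in if_xgereg.h.
 * The two-level XCONCAT3/CONCAT3 dance is needed so that RX_MODE is
 * expanded to its numeric value before the ## paste happens.
 */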

#ifdef XGE_EVENT_COUNTERS
#define XGE_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define XGE_EVCNT_INCR(ev)	/* nothing */
#endif

/*
 * Magic values to work around the MAC address not being read correctly.
 * They come from the Linux driver.
 */
static uint64_t fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
};


struct xge_softc {
	struct device sc_dev;
	struct ethercom sc_ethercom;
#define sc_if sc_ethercom.ec_if
	bus_dma_tag_t sc_dmat;
	bus_space_tag_t sc_st;
	bus_space_handle_t sc_sh;
	bus_space_tag_t sc_txt;
	bus_space_handle_t sc_txh;
	void *sc_ih;

	struct ifmedia xena_media;
	pcireg_t sc_pciregs[16];

	/* Transmit structures */
	struct txd *sc_txd[NTXDESCS];	/* transmit frags array */
	bus_addr_t sc_txdp[NTXDESCS];	/* bus address of transmit frags */
	bus_dmamap_t sc_txm[NTXDESCS];	/* transmit frags map */
	struct mbuf *sc_txb[NTXDESCS];	/* transmit mbuf pointer */
	int sc_nexttx, sc_lasttx;
	bus_dmamap_t sc_txmap;		/* transmit descriptor map */

	/* Receive data */
	bus_dmamap_t sc_rxmap;		/* receive descriptor map */
	struct rxd_4k *sc_rxd_4k[NRXPAGES]; /* receive desc pages */
	bus_dmamap_t sc_rxm[NRXREAL];	/* receive buffer map */
	struct mbuf *sc_rxb[NRXREAL];	/* mbufs on receive descriptors */
	int sc_nextrx;			/* next descriptor to check */

#ifdef XGE_EVENT_COUNTERS
	struct evcnt sc_intr;	/* # of interrupts */
	struct evcnt sc_txintr;	/* # of transmit interrupts */
	struct evcnt sc_rxintr;	/* # of receive interrupts */
	struct evcnt sc_txqe;	/* # of xmit intrs when board queue empty */
#endif
};

static int xge_match(struct device *parent, struct cfdata *cf, void *aux);
static void xge_attach(struct device *parent, struct device *self, void *aux);
static int xge_alloc_txmem(struct xge_softc *);
static int xge_alloc_rxmem(struct xge_softc *);
static void xge_start(struct ifnet *);
static void xge_stop(struct ifnet *, int);
static int xge_add_rxbuf(struct xge_softc *, int);
static void xge_mcast_filter(struct xge_softc *sc);
static int xge_setup_xgxs(struct xge_softc *sc);
static int xge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int xge_init(struct ifnet *ifp);
static void xge_ifmedia_status(struct ifnet *, struct ifmediareq *);
static int xge_xgmii_mediachange(struct ifnet *);
static int xge_intr(void *);

/*
 * Helpers to address registers.
 */
#define PIF_WCSR(csr, val)	pif_wcsr(sc, csr, val)
#define PIF_RCSR(csr)		pif_rcsr(sc, csr)
#define TXP_WCSR(csr, val)	txp_wcsr(sc, csr, val)
#define PIF_WKEY(csr, val)	pif_wkey(sc, csr, val)

static inline void
pif_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;
	bus_space_write_4(sc->sc_st, sc->sc_sh, csr, lval);
	bus_space_write_4(sc->sc_st, sc->sc_sh, csr+4, hval);
}
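
/*
 * Note: the Xframe CSRs are 64 bits wide but are accessed here as two
 * 32-bit bus_space operations, low word at csr and high word at csr+4,
 * so the driver also works on hosts that lack 64-bit bus_space access.
 * The byte swapper programmed in xge_attach() (SWAPPER_CTRL) is what
 * makes this come out right on little-endian hosts.
 */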

static inline uint64_t
pif_rcsr(struct xge_softc *sc, bus_size_t csr)
{
	uint64_t val, val2;
	val = bus_space_read_4(sc->sc_st, sc->sc_sh, csr);
	val2 = bus_space_read_4(sc->sc_st, sc->sc_sh, csr+4);
	val |= (val2 << 32);
	return val;
}

static inline void
txp_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;
	bus_space_write_4(sc->sc_txt, sc->sc_txh, csr, lval);
	bus_space_write_4(sc->sc_txt, sc->sc_txh, csr+4, hval);
}


static inline void
pif_wkey(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;
	PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
	bus_space_write_4(sc->sc_st, sc->sc_sh, csr, lval);
	PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
	bus_space_write_4(sc->sc_st, sc->sc_sh, csr+4, hval);
}
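
/*
 * pif_wkey() differs from pif_wcsr() in that it re-arms RMAC_CFG_KEY
 * with RMAC_KEY_VALUE before each 32-bit half; the key write appears to
 * unlock only a single access to the protected MAC configuration
 * registers, so both halves must be preceded by it.
 */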


CFATTACH_DECL(xge, sizeof(struct xge_softc),
    xge_match, xge_attach, NULL, NULL);

#define XNAME sc->sc_dev.dv_xname

#define XGE_RXSYNC(desc, what) \
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, \
	(desc/NDESC_BUFMODE) * XGE_PAGE + sizeof(struct rxdesc) * \
	(desc%NDESC_BUFMODE), sizeof(struct rxdesc), what)
#define XGE_RXD(desc)	&sc->sc_rxd_4k[desc/NDESC_BUFMODE]-> \
	r4_rxd[desc%NDESC_BUFMODE]
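
/*
 * Worked example, assuming the 1-buffer mode defaults: descriptors are
 * packed NDESC_BUFMODE to a 4k page, so descriptor 300 lives on page
 * 300 / NDESC_BUFMODE at slot 300 % NDESC_BUFMODE within that page's
 * r4_rxd[] array, and XGE_RXSYNC() syncs exactly that one struct rxdesc
 * inside the big sc_rxmap DMA map.
 */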

/*
 * Non-tunable constants.
 */
#define XGE_MAX_MTU		9600
#define XGE_IP_MAXPACKET	65535	/* same as IP_MAXPACKET */

static int
xge_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_S2IO &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_S2IO_XFRAME)
		return (1);

	return (0);
}

void
xge_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct xge_softc *sc;
	struct ifnet *ifp;
	pcireg_t memtype;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	pci_chipset_tag_t pc = pa->pa_pc;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint64_t val;
	int i;

	sc = (struct xge_softc *)self;

	sc->sc_dmat = pa->pa_dmat;

	/* Get BAR0 address */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_PIF_BAR);
	if (pci_mapreg_map(pa, XGE_PIF_BAR, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, 0, 0)) {
		aprint_error("%s: unable to map PIF BAR registers\n", XNAME);
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_TXP_BAR);
	if (pci_mapreg_map(pa, XGE_TXP_BAR, memtype, 0,
	    &sc->sc_txt, &sc->sc_txh, 0, 0)) {
		aprint_error("%s: unable to map TXP BAR registers\n", XNAME);
		return;
	}

	/* Save PCI config space */
	for (i = 0; i < 64; i += 4)
		sc->sc_pciregs[i/4] = pci_conf_read(pa->pa_pc, pa->pa_tag, i);

#if BYTE_ORDER == LITTLE_ENDIAN
	val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
	val &= ~(TxF_R_SE|RxF_W_SE);
	PIF_WCSR(SWAPPER_CTRL, val);
	PIF_WCSR(SWAPPER_CTRL, val);
#elif BYTE_ORDER == BIG_ENDIAN
	/* do nothing */
#else
#error bad endianness!
#endif
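
	/*
	 * On little-endian hosts this enables byte swapping for all
	 * accesses except transmit-FIFO reads and receive-FIFO writes
	 * (TxF_R_SE and RxF_W_SE are cleared).  The double write is
	 * presumably needed because the first one is issued while the
	 * swapper is still in its reset state.  The SWAPPER_MAGIC
	 * readback below verifies the result.
	 */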

	if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
		printf("%s: failed configuring endian, %llx != %llx!\n",
		    XNAME, (unsigned long long)val, SWAPPER_MAGIC);
		return;
	}

	/*
	 * The MAC address may read back as all FF's, which is not good.
	 * Work around it by writing some magic values to GPIO_CONTROL and
	 * forcing a chip reset so that the serial EEPROM is read in again.
	 */
	for (i = 0; i < sizeof(fix_mac)/sizeof(fix_mac[0]); i++) {
		PIF_WCSR(GPIO_CONTROL, fix_mac[i]);
		PIF_RCSR(GPIO_CONTROL);
	}

	/*
	 * Reset the chip and restore the PCI registers.
	 */
	PIF_WCSR(SW_RESET, 0xa5a5a50000000000ULL);
	DELAY(500000);
	for (i = 0; i < 64; i += 4)
		pci_conf_write(pa->pa_pc, pa->pa_tag, i, sc->sc_pciregs[i/4]);

	/*
	 * Restore the byte order registers.
	 */
#if BYTE_ORDER == LITTLE_ENDIAN
	val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
	val &= ~(TxF_R_SE|RxF_W_SE);
	PIF_WCSR(SWAPPER_CTRL, val);
	PIF_WCSR(SWAPPER_CTRL, val);
#elif BYTE_ORDER == BIG_ENDIAN
	/* do nothing */
#else
#error bad endianness!
#endif

	if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
		printf("%s: failed configuring endian2, %llx != %llx!\n",
		    XNAME, (unsigned long long)val, SWAPPER_MAGIC);
		return;
	}

	/*
	 * XGXS initialization.
	 */
	/* 29, reset */
	PIF_WCSR(SW_RESET, 0);
	DELAY(500000);

	/* 30, configure XGXS transceiver */
	xge_setup_xgxs(sc);

	/* 33, program MAC address (not needed here) */
	/* Get ethernet address */
	PIF_WCSR(RMAC_ADDR_CMD_MEM,
	    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(0));
	while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
		;
	val = PIF_RCSR(RMAC_ADDR_DATA0_MEM);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = (uint8_t)(val >> (56 - (8*i)));
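
	/*
	 * The station address sits in the top six bytes of the 64-bit
	 * RMAC_ADDR_DATA0_MEM word.  For example, a register value of
	 * 0x0011223344550000 yields enaddr[] = 00:11:22:33:44:55, since
	 * byte i is extracted from bit position 56 - 8*i.
	 */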

	/*
	 * Get memory for transmit descriptor lists.
	 */
	if (xge_alloc_txmem(sc)) {
		printf("%s: failed allocating txmem.\n", XNAME);
		return;
	}

	/* 9 and 10 - set FIFO number/prio */
	PIF_WCSR(TX_FIFO_P0, TX_FIFO_LEN0(NTXDESCS));
	PIF_WCSR(TX_FIFO_P1, 0ULL);
	PIF_WCSR(TX_FIFO_P2, 0ULL);
	PIF_WCSR(TX_FIFO_P3, 0ULL);

	/* 11, XXX set round-robin prio? */

	/* 12, enable transmit FIFO */
	val = PIF_RCSR(TX_FIFO_P0);
	val |= TX_FIFO_ENABLE;
	PIF_WCSR(TX_FIFO_P0, val);

	/* 13, disable some error checks */
	PIF_WCSR(TX_PA_CFG,
	    TX_PA_CFG_IFR|TX_PA_CFG_ISO|TX_PA_CFG_ILC|TX_PA_CFG_ILE);

	/*
	 * Create transmit DMA maps.
	 * Make them large for TSO.
	 */
	for (i = 0; i < NTXDESCS; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XGE_IP_MAXPACKET,
		    NTXFRAGS, MCLBYTES, 0, 0, &sc->sc_txm[i])) {
			printf("%s: cannot create TX DMA maps\n", XNAME);
			return;
		}
	}

	sc->sc_lasttx = NTXDESCS-1;
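
	/*
	 * Ring invariant: sc_nexttx is the first free slot and sc_lasttx
	 * is the slot most recently handed back by the chip, so starting
	 * with sc_lasttx == NTXDESCS-1 and sc_nexttx == 0 marks an empty
	 * ring, and sc_nexttx == sc_lasttx is treated as full in
	 * xge_start().
	 */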

	/*
	 * RxDMA initialization.
	 * Only use one out of 8 possible receive queues.
	 */
	if (xge_alloc_rxmem(sc)) {	/* allocate rx descriptor memory */
		printf("%s: failed allocating rxmem\n", XNAME);
		return;
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < NRXREAL; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_MTU,
		    NRXFRAGS, MCLBYTES, 0, 0, &sc->sc_rxm[i])) {
			printf("%s: cannot create RX DMA maps\n", XNAME);
			return;
		}
	}

	/* allocate mbufs to receive descriptors */
	for (i = 0; i < NRXREAL; i++)
		if (xge_add_rxbuf(sc, i))
			panic("out of mbufs too early");

	/* 14, setup receive ring priority */
	PIF_WCSR(RX_QUEUE_PRIORITY, 0ULL); /* only use one ring */

	/* 15, setup receive ring round-robin calendar */
	PIF_WCSR(RX_W_ROUND_ROBIN_0, 0ULL); /* only use one ring */
	PIF_WCSR(RX_W_ROUND_ROBIN_1, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_2, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_3, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_4, 0ULL);

	/* 16, write receive ring start address */
	PIF_WCSR(PRC_RXD0_0, (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr);
	/* PRC_RXD0_[1-7] are not used */

	/* 17, Setup alarm registers */
	PIF_WCSR(PRC_ALARM_ACTION, 0ULL); /* Default everything to retry */

	/* 18, init receive ring controller */
#if RX_MODE == RX_MODE_1
	val = RING_MODE_1;
#elif RX_MODE == RX_MODE_3
	val = RING_MODE_3;
#else /* RX_MODE == RX_MODE_5 */
	val = RING_MODE_5;
#endif
	PIF_WCSR(PRC_CTRL_0, RC_IN_SVC|val);
	/* leave 1-7 disabled */
	/* XXXX snoop configuration? */

	/* 19, set chip memory assigned to the queue */
	PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 64)); /* all 64M to queue 0 */

	/* 20, setup RLDRAM parameters */
	/* do not touch it for now */

	/* 21, setup pause frame thresholds */
	/* do not touch the defaults */
	/* XXX - must 0xff be written as stated in the manual? */

	/* 22, configure RED */
	/* we do not want to drop packets, so ignore */

	/* 23, initiate RLDRAM */
	val = PIF_RCSR(MC_RLDRAM_MRS);
	val |= MC_QUEUE_SIZE_ENABLE|MC_RLDRAM_MRS_ENABLE;
	PIF_WCSR(MC_RLDRAM_MRS, val);
	DELAY(1000);

	/*
	 * Setup interrupt policies.
	 */
	/* 40, Transmit interrupts */
	PIF_WCSR(TTI_DATA1_MEM, TX_TIMER_VAL(0x1ff) | TX_TIMER_AC |
	    TX_URNG_A(5) | TX_URNG_B(20) | TX_URNG_C(48));
	PIF_WCSR(TTI_DATA2_MEM,
	    TX_UFC_A(25) | TX_UFC_B(64) | TX_UFC_C(128) | TX_UFC_D(512));
	PIF_WCSR(TTI_COMMAND_MEM, TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE);
	while (PIF_RCSR(TTI_COMMAND_MEM) & TTI_CMD_MEM_STROBE)
		;

	/* 41, Receive interrupts */
	PIF_WCSR(RTI_DATA1_MEM, RX_TIMER_VAL(0x800) | RX_TIMER_AC |
	    RX_URNG_A(5) | RX_URNG_B(20) | RX_URNG_C(50));
	PIF_WCSR(RTI_DATA2_MEM,
	    RX_UFC_A(64) | RX_UFC_B(128) | RX_UFC_C(256) | RX_UFC_D(512));
	PIF_WCSR(RTI_COMMAND_MEM, RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE);
	while (PIF_RCSR(RTI_COMMAND_MEM) & RTI_CMD_MEM_STROBE)
		;
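
	/*
	 * The TTI/RTI blocks implement interrupt moderation: an interrupt
	 * fires when the timer (TX_TIMER_VAL/RX_TIMER_VAL) expires or when
	 * enough frames have accumulated, where the frame-count thresholds
	 * (UFC_A..D) are selected by the current link utilization ranges
	 * (URNG_A..C).  These particular values are inherited tuning
	 * constants; adjust with care.
	 */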

	/*
	 * Setup media stuff.
	 */
	ifmedia_init(&sc->xena_media, IFM_IMASK, xge_xgmii_mediachange,
	    xge_ifmedia_status);
	ifmedia_add(&sc->xena_media, IFM_ETHER|IFM_10G_LR, 0, NULL);
	ifmedia_set(&sc->xena_media, IFM_ETHER|IFM_10G_LR);

	aprint_normal("%s: Ethernet address %s\n", XNAME,
	    ether_sprintf(enaddr));

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_baudrate = 10000000000LL;
	ifp->if_init = xge_init;
	ifp->if_stop = xge_stop;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xge_ioctl;
	ifp->if_start = xge_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(NTXDESCS - 1, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Offloading capabilities.
	 */
	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_JUMBO_MTU | ETHERCAP_VLAN_MTU;
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 | IFCAP_TSOv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Setup interrupt vector before initializing.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	if ((sc->sc_ih =
	    pci_intr_establish(pc, ih, IPL_NET, xge_intr, sc)) == NULL) {
		aprint_error("%s: unable to establish interrupt at %s\n",
		    sc->sc_dev.dv_xname, intrstr ? intrstr : "<unknown>");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

#ifdef XGE_EVENT_COUNTERS
	evcnt_attach_dynamic(&sc->sc_intr, EVCNT_TYPE_MISC,
	    NULL, XNAME, "intr");
	evcnt_attach_dynamic(&sc->sc_txintr, EVCNT_TYPE_MISC,
	    NULL, XNAME, "txintr");
	evcnt_attach_dynamic(&sc->sc_rxintr, EVCNT_TYPE_MISC,
	    NULL, XNAME, "rxintr");
	evcnt_attach_dynamic(&sc->sc_txqe, EVCNT_TYPE_MISC,
	    NULL, XNAME, "txqe");
#endif
}

void
xge_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t reg;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER|IFM_10G_LR;

	reg = PIF_RCSR(ADAPTER_STATUS);
	if ((reg & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
		ifmr->ifm_status |= IFM_ACTIVE;
}

int
xge_xgmii_mediachange(struct ifnet *ifp)
{
	return 0;
}

static void
xge_enable(struct xge_softc *sc)
{
	uint64_t val;

	/* 2, enable adapter */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= ADAPTER_EN;
	PIF_WCSR(ADAPTER_CONTROL, val);

	/* 3, light the card enable led */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= LED_ON;
	PIF_WCSR(ADAPTER_CONTROL, val);
	printf("%s: link up\n", XNAME);
}

int
xge_init(struct ifnet *ifp)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t val;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	/* 31+32, setup MAC config */
	PIF_WKEY(MAC_CFG, TMAC_EN|RMAC_EN|TMAC_APPEND_PAD|RMAC_STRIP_FCS|
	    RMAC_BCAST_EN|RMAC_DISCARD_PFRM|RMAC_PROM_EN);

	DELAY(1000);

	/* 54, ensure that the adapter is 'quiescent' */
	val = PIF_RCSR(ADAPTER_STATUS);
	if ((val & QUIESCENT) != QUIESCENT) {
		char buf[200];
		printf("%s: adapter not quiescent, aborting\n", XNAME);
		val = (val & QUIESCENT) ^ QUIESCENT;
		bitmask_snprintf(val, QUIESCENT_BMSK, buf, sizeof buf);
		printf("%s: ADAPTER_STATUS missing bits %s\n", XNAME, buf);
		return 1;
	}

	/* 56, enable the transmit laser */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= EOI_TX_ON;
	PIF_WCSR(ADAPTER_CONTROL, val);

	xge_enable(sc);
	/*
	 * Enable all interrupts
	 */
	PIF_WCSR(TX_TRAFFIC_MASK, 0);
	PIF_WCSR(RX_TRAFFIC_MASK, 0);
	PIF_WCSR(GENERAL_INT_MASK, 0);
	PIF_WCSR(TXPIC_INT_MASK, 0);
	PIF_WCSR(RXPIC_INT_MASK, 0);
	PIF_WCSR(MAC_INT_MASK, MAC_TMAC_INT); /* only from RMAC */
	PIF_WCSR(MAC_RMAC_ERR_MASK, ~RMAC_LINK_STATE_CHANGE_INT);

	/* Done... */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
xge_stop(struct ifnet *ifp, int disable)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t val;

	val = PIF_RCSR(ADAPTER_CONTROL);
	val &= ~ADAPTER_EN;
	PIF_WCSR(ADAPTER_CONTROL, val);

	while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
		;
}

int
xge_intr(void *pv)
{
	struct xge_softc *sc = pv;
	struct txd *txd;
	struct ifnet *ifp = &sc->sc_if;
	bus_dmamap_t dmp;
	uint64_t val;
	int i, lasttx, plen;

	val = PIF_RCSR(GENERAL_INT_STATUS);
	if (val == 0)
		return 0; /* no interrupt here */

	XGE_EVCNT_INCR(&sc->sc_intr);

	PIF_WCSR(GENERAL_INT_STATUS, val);

	if ((val = PIF_RCSR(MAC_RMAC_ERR_REG)) & RMAC_LINK_STATE_CHANGE_INT) {
		/* Wait for quiescence */
		printf("%s: link down\n", XNAME);
		while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
			;
		PIF_WCSR(MAC_RMAC_ERR_REG, RMAC_LINK_STATE_CHANGE_INT);

		val = PIF_RCSR(ADAPTER_STATUS);
		if ((val & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
			xge_enable(sc); /* Only if link restored */
	}

	if ((val = PIF_RCSR(TX_TRAFFIC_INT))) {
		XGE_EVCNT_INCR(&sc->sc_txintr);
		PIF_WCSR(TX_TRAFFIC_INT, val); /* clear interrupt bits */
	}
	/*
	 * Collect sent packets.
	 */
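	/*
	 * The chip owns a descriptor while TXD_CTL1_OWN is set and clears
	 * the bit when the frame has been sent, so the walk below stops
	 * at the first descriptor that is still owned by hardware.
	 */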
	lasttx = sc->sc_lasttx;
	while ((i = NEXTTX(sc->sc_lasttx)) != sc->sc_nexttx) {
		txd = sc->sc_txd[i];
		dmp = sc->sc_txm[i];

		bus_dmamap_sync(sc->sc_dmat, dmp, 0,
		    dmp->dm_mapsize,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		if (txd->txd_control1 & TXD_CTL1_OWN) {
			bus_dmamap_sync(sc->sc_dmat, dmp, 0,
			    dmp->dm_mapsize, BUS_DMASYNC_PREREAD);
			break;
		}
		bus_dmamap_unload(sc->sc_dmat, dmp);
		m_freem(sc->sc_txb[i]);
		ifp->if_opackets++;
		sc->sc_lasttx = i;
	}
	if (i == sc->sc_nexttx) {
		XGE_EVCNT_INCR(&sc->sc_txqe);
	}

	if (sc->sc_lasttx != lasttx)
		ifp->if_flags &= ~IFF_OACTIVE;

	xge_start(ifp); /* Try to get more packets on the wire */

	if ((val = PIF_RCSR(RX_TRAFFIC_INT))) {
		XGE_EVCNT_INCR(&sc->sc_rxintr);
		PIF_WCSR(RX_TRAFFIC_INT, val); /* clear interrupt bits */
	}

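	/*
	 * Receive side: descriptors are handed to the chip with
	 * RXD_CTL1_OWN set (see xge_add_rxbuf()) and the chip clears the
	 * bit once it has filled in a packet, so the loop below runs
	 * until it finds a descriptor that is still owned by hardware.
	 */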
	for (;;) {
		struct rxdesc *rxd;
		struct mbuf *m;

		XGE_RXSYNC(sc->sc_nextrx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxd = XGE_RXD(sc->sc_nextrx);
		if (rxd->rxd_control1 & RXD_CTL1_OWN) {
			XGE_RXSYNC(sc->sc_nextrx, BUS_DMASYNC_PREREAD);
			break;
		}

		/* got a packet */
		m = sc->sc_rxb[sc->sc_nextrx];
#if RX_MODE == RX_MODE_1
		plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
#elif RX_MODE == RX_MODE_3
#error Fix rxmodes in xge_intr
#elif RX_MODE == RX_MODE_5
		plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
		plen += m->m_next->m_len = RXD_CTL2_BUF1SIZ(rxd->rxd_control2);
		plen += m->m_next->m_next->m_len =
		    RXD_CTL2_BUF2SIZ(rxd->rxd_control2);
		plen += m->m_next->m_next->m_next->m_len =
		    RXD_CTL3_BUF3SIZ(rxd->rxd_control3);
		plen += m->m_next->m_next->m_next->m_next->m_len =
		    RXD_CTL3_BUF4SIZ(rxd->rxd_control3);
#endif
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = plen;

		val = rxd->rxd_control1;

		if (xge_add_rxbuf(sc, sc->sc_nextrx)) {
			/* Failed, recycle this mbuf */
#if RX_MODE == RX_MODE_1
			rxd->rxd_control2 = RXD_MKCTL2(MCLBYTES, 0, 0);
			rxd->rxd_control1 = RXD_CTL1_OWN;
#elif RX_MODE == RX_MODE_3
#elif RX_MODE == RX_MODE_5
#endif
			XGE_RXSYNC(sc->sc_nextrx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			ifp->if_ierrors++;
			break;
		}

		ifp->if_ipackets++;

		if (RXD_CTL1_PROTOS(val) & (RXD_CTL1_P_IPv4|RXD_CTL1_P_IPv6)) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (RXD_CTL1_L3CSUM(val) != 0xffff)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_TCP) {
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_TCPv6;
			if (RXD_CTL1_L4CSUM(val) != 0xffff)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_UDP) {
			m->m_pkthdr.csum_flags |= M_CSUM_UDPv4|M_CSUM_UDPv6;
			if (RXD_CTL1_L4CSUM(val) != 0xffff)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		(*ifp->if_input)(ifp, m);

		if (++sc->sc_nextrx == NRXREAL)
			sc->sc_nextrx = 0;
	}

	return 0;
}

int
xge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct xge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > XGE_MAX_MTU) {
			error = EINVAL;
		} else {
			PIF_WCSR(RMAC_MAX_PYLD_LEN,
			    RMAC_PYLD_LEN(ifr->ifr_mtu));
			ifp->if_mtu = ifr->ifr_mtu;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->xena_media, cmd);
		break;

	default:
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/* Change multicast list */
			xge_mcast_filter(sc);
			error = 0;
		}
		break;
	}

	splx(s);
	return (error);
}

void
xge_mcast_filter(struct xge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, numaddr = 1; /* first slot used for card unicast address */
	uint64_t val;

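	/*
	 * The RMAC address CAM is programmed one entry at a time: the
	 * 48-bit MAC address is placed left-aligned in the 64-bit
	 * RMAC_ADDR_DATA0_MEM word (hence the << 16 below), DATA1 appears
	 * to act as a per-bit mask (all-ones for an exact match), and the
	 * entry is committed by strobing RMAC_ADDR_CMD_MEM and polling
	 * until the strobe bit clears.
	 */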
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/* Address ranges can't be filtered; use allmulti */
			goto allmulti;
		}
		if (numaddr == MAX_MCAST_ADDR)
			goto allmulti;
		for (val = 0, i = 0; i < ETHER_ADDR_LEN; i++) {
			val <<= 8;
			val |= enm->enm_addrlo[i];
		}
		PIF_WCSR(RMAC_ADDR_DATA0_MEM, val << 16);
		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(numaddr));
		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
			;
		numaddr++;
		ETHER_NEXT_MULTI(step, enm);
	}
	/* set the remaining entries to the broadcast address */
	for (i = numaddr; i < MAX_MCAST_ADDR; i++) {
		PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0xffffffffffff0000ULL);
		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(i));
		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
			;
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	return;

allmulti:
	/* Just receive everything with the multicast bit set */
	ifp->if_flags |= IFF_ALLMULTI;
	PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0x8000000000000000ULL);
	PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xF000000000000000ULL);
	PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
	    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(1));
	while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
		;
}

void
xge_start(struct ifnet *ifp)
{
	struct xge_softc *sc = ifp->if_softc;
	struct txd *txd = NULL; /* XXX - gcc */
	bus_dmamap_t dmp;
	struct mbuf *m;
	uint64_t par, lcr;
	int nexttx = 0, ntxd, error, i;

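	/*
	 * Transmit path overview: each packet is DMA-loaded into the
	 * descriptor slot at sc_nexttx, one struct txd per DMA segment.
	 * The first txd gets TXD_CTL1_OWN|TXD_CTL1_GCF plus any
	 * checksum/TSO flags, the last txd gets TXD_CTL1_GCL, and the
	 * finished list is posted to the chip through the TXDL_PAR (bus
	 * address) and TXDL_LCR (descriptor count and flags) registers.
	 */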
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	par = lcr = 0;
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;	/* out of packets */

		if (sc->sc_nexttx == sc->sc_lasttx)
			break;	/* No more space */

		nexttx = sc->sc_nexttx;
		dmp = sc->sc_txm[nexttx];

		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmp, m,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0) {
			printf("%s: bus_dmamap_load_mbuf error %d\n",
			    XNAME, error);
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m);

		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		txd = sc->sc_txd[nexttx];
		sc->sc_txb[nexttx] = m;
		for (i = 0; i < dmp->dm_nsegs; i++) {
			if (dmp->dm_segs[i].ds_len == 0)
				continue;
			txd->txd_control1 = dmp->dm_segs[i].ds_len;
			txd->txd_control2 = 0;
			txd->txd_bufaddr = dmp->dm_segs[i].ds_addr;
			txd++;
		}
		ntxd = txd - sc->sc_txd[nexttx] - 1;
		txd = sc->sc_txd[nexttx];
		txd->txd_control1 |= TXD_CTL1_OWN|TXD_CTL1_GCF;
		txd->txd_control2 = TXD_CTL2_UTIL;
		if (m->m_pkthdr.csum_flags & M_CSUM_TSOv4) {
			txd->txd_control1 |= TXD_CTL1_MSS(m->m_pkthdr.segsz);
			txd->txd_control1 |= TXD_CTL1_LSO;
		}

		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
			txd->txd_control2 |= TXD_CTL2_CIPv4;
		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
			txd->txd_control2 |= TXD_CTL2_CTCP;
		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
			txd->txd_control2 |= TXD_CTL2_CUDP;
		txd[ntxd].txd_control1 |= TXD_CTL1_GCL;

		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		par = sc->sc_txdp[nexttx];
		lcr = TXDL_NUMTXD(ntxd) | TXDL_LGC_FIRST | TXDL_LGC_LAST;
		if (m->m_pkthdr.csum_flags & M_CSUM_TSOv4)
			lcr |= TXDL_SFF;
		TXP_WCSR(TXDL_PAR, par);
		TXP_WCSR(TXDL_LCR, lcr);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		sc->sc_nexttx = NEXTTX(nexttx);
	}
}

/*
 * Allocate DMA memory for transmit descriptor fragments.
 * Only one map is used for all descriptors.
 */
int
xge_alloc_txmem(struct xge_softc *sc)
{
	struct txd *txp;
	bus_dma_segment_t seg;
	bus_addr_t txdp;
	caddr_t kva;
	int i, rseg, state;

#define TXMAPSZ (NTXDESCS*NTXFRAGS*sizeof(struct txd))
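	/*
	 * Layout sketch: the single TXMAPSZ allocation is carved into
	 * NTXDESCS slots of NTXFRAGS contiguous struct txd each, so
	 * sc_txd[i] points at slot i's first fragment descriptor and
	 * sc_txdp[i] is the matching bus address handed to the chip in
	 * xge_start().
	 */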
	state = 0;
	if (bus_dmamem_alloc(sc->sc_dmat, TXMAPSZ, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))
		goto err;
	state++;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, TXMAPSZ, &kva,
	    BUS_DMA_NOWAIT))
		goto err;

	state++;
	if (bus_dmamap_create(sc->sc_dmat, TXMAPSZ, 1, TXMAPSZ, 0,
	    BUS_DMA_NOWAIT, &sc->sc_txmap))
		goto err;
	state++;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_txmap,
	    kva, TXMAPSZ, NULL, BUS_DMA_NOWAIT))
		goto err;

	/* setup transmit array pointers */
	txp = (struct txd *)kva;
	txdp = seg.ds_addr;
	for (txp = (struct txd *)kva, i = 0; i < NTXDESCS; i++) {
		sc->sc_txd[i] = txp;
		sc->sc_txdp[i] = txdp;
		txp += NTXFRAGS;
		txdp += (NTXFRAGS * sizeof(struct txd));
	}

	return 0;

err:
	if (state > 2)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, kva, TXMAPSZ);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return ENOBUFS;
}

/*
 * Allocate DMA memory for the receive descriptors.
 * Only one map is used for all descriptors;
 * the receive descriptor pages are linked together here.
 */
int
xge_alloc_rxmem(struct xge_softc *sc)
{
	struct rxd_4k *rxpp;
	bus_dma_segment_t seg;
	caddr_t kva;
	int i, rseg, state;

	/* sanity check */
	if (sizeof(struct rxd_4k) != XGE_PAGE) {
		printf("bad compiler struct alignment, %d != %d\n",
		    (int)sizeof(struct rxd_4k), XGE_PAGE);
		return EINVAL;
	}

	state = 0;
	if (bus_dmamem_alloc(sc->sc_dmat, RXMAPSZ, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))
		goto err;
	state++;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, RXMAPSZ, &kva,
	    BUS_DMA_NOWAIT))
		goto err;

	state++;
	if (bus_dmamap_create(sc->sc_dmat, RXMAPSZ, 1, RXMAPSZ, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rxmap))
		goto err;
	state++;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap,
	    kva, RXMAPSZ, NULL, BUS_DMA_NOWAIT))
		goto err;

	/* setup receive page link pointers */
	for (rxpp = (struct rxd_4k *)kva, i = 0; i < NRXPAGES; i++, rxpp++) {
		sc->sc_rxd_4k[i] = rxpp;
		rxpp->r4_next = (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr +
		    (i*sizeof(struct rxd_4k)) + sizeof(struct rxd_4k);
	}
	sc->sc_rxd_4k[NRXPAGES-1]->r4_next =
	    (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr;

	return 0;

err:
	if (state > 2)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, kva, RXMAPSZ);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return ENOBUFS;
}


/*
 * Add a new mbuf chain to descriptor id.
 */
int
xge_add_rxbuf(struct xge_softc *sc, int id)
{
	struct rxdesc *rxd;
	struct mbuf *m[5];
	int page, desc, error;
#if RX_MODE == RX_MODE_5
	int i;
#endif

	page = id/NDESC_BUFMODE;
	desc = id%NDESC_BUFMODE;

	rxd = &sc->sc_rxd_4k[page]->r4_rxd[desc];

	/*
	 * Allocate mbufs.
	 * In 5-buffer mode, five mbufs and two clusters are used:
	 * the hardware puts the (ethernet, ip, tcp/udp) headers in
	 * their own buffers and the clusters are only used for data.
	 */
#if RX_MODE == RX_MODE_1
	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
	if (m[0] == NULL)
		return ENOBUFS;
	MCLGET(m[0], M_DONTWAIT);
	if ((m[0]->m_flags & M_EXT) == 0) {
		m_freem(m[0]);
		return ENOBUFS;
	}
	m[0]->m_len = m[0]->m_pkthdr.len = m[0]->m_ext.ext_size;
#elif RX_MODE == RX_MODE_3
#error missing rxmode 3.
#elif RX_MODE == RX_MODE_5
	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
	for (i = 1; i < 5; i++) {
		MGET(m[i], M_DONTWAIT, MT_DATA);
	}
	if (m[3])
		MCLGET(m[3], M_DONTWAIT);
	if (m[4])
		MCLGET(m[4], M_DONTWAIT);
	if (!m[0] || !m[1] || !m[2] || !m[3] || !m[4] ||
	    ((m[3]->m_flags & M_EXT) == 0) || ((m[4]->m_flags & M_EXT) == 0)) {
		/* Out of something */
		for (i = 0; i < 5; i++)
			if (m[i] != NULL)
				m_free(m[i]);
		return ENOBUFS;
	}
	/* Link'em together */
	m[0]->m_next = m[1];
	m[1]->m_next = m[2];
	m[2]->m_next = m[3];
	m[3]->m_next = m[4];
#else
#error bad mode RX_MODE
#endif

	if (sc->sc_rxb[id])
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxm[id]);
	sc->sc_rxb[id] = m[0];

	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rxm[id], m[0],
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error)
		return error;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxm[id], 0,
	    sc->sc_rxm[id]->dm_mapsize, BUS_DMASYNC_PREREAD);

#if RX_MODE == RX_MODE_1
	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, 0, 0);
	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
	rxd->rxd_control1 = RXD_CTL1_OWN;
#elif RX_MODE == RX_MODE_3
#elif RX_MODE == RX_MODE_5
	rxd->rxd_control3 = RXD_MKCTL3(0, m[3]->m_len, m[4]->m_len);
	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, m[1]->m_len, m[2]->m_len);
	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
	rxd->rxd_buf1 = (uint64_t)sc->sc_rxm[id]->dm_segs[1].ds_addr;
	rxd->rxd_buf2 = (uint64_t)sc->sc_rxm[id]->dm_segs[2].ds_addr;
	rxd->rxd_buf3 = (uint64_t)sc->sc_rxm[id]->dm_segs[3].ds_addr;
	rxd->rxd_buf4 = (uint64_t)sc->sc_rxm[id]->dm_segs[4].ds_addr;
	rxd->rxd_control1 = RXD_CTL1_OWN;
#endif

	XGE_RXSYNC(id, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return 0;
}

/*
 * These magic numbers come from the FreeBSD driver.
 */
int
xge_setup_xgxs(struct xge_softc *sc)
{
	/* The magic numbers are described in the user's guide */

	/* Writing to MDIO 0x8000 (Global Config 0) */
	PIF_WCSR(DTX_CONTROL, 0x8000051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515000000E0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515D93500E4ULL); DELAY(50);

	/* Writing to MDIO 0x8000 (Global Config 1) */
	PIF_WCSR(DTX_CONTROL, 0x8001051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515001e00e4ULL); DELAY(50);

	/* Reset the Gigablaze */
	PIF_WCSR(DTX_CONTROL, 0x8002051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515000000E0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515F21000E4ULL); DELAY(50);

	/* read the pole settings */
	PIF_WCSR(DTX_CONTROL, 0x8000051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515000000ecULL); DELAY(50);

	PIF_WCSR(DTX_CONTROL, 0x8001051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515000000ecULL); DELAY(50);

	PIF_WCSR(DTX_CONTROL, 0x8002051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515000000ecULL); DELAY(50);

	/* Workaround for TX Lane XAUI initialization error.
	   Read Xpak PHY register 24 for XAUI lane status */
	PIF_WCSR(DTX_CONTROL, 0x0018040000000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00180400000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00180400000000ecULL); DELAY(50);

	/*
	 * Reading MDIO_CONTROL back as 0x1804001c0F001c means the TxLanes
	 * were already in sync.
	 * Reading it back as 0x1804000c0x001c means some TxLanes are not
	 * in sync, where x is a 4-bit value identifying the out-of-sync
	 * lanes.
	 */
#if 0
	val = PIF_RCSR(MDIO_CONTROL);
	if (val != 0x1804001c0F001cULL) {
		printf("%s: MDIO_CONTROL: %llx != %llx\n",
		    XNAME, val, 0x1804001c0F001cULL);
		return 1;
	}
#endif

	/* Set and remove the DTE XS INTLoopBackN */
	PIF_WCSR(DTX_CONTROL, 0x0000051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515604000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515604000e4ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515204000e4ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515204000ecULL); DELAY(50);

#if 0
	/* Reading the DTX control register should be 0x5152040001c */
	val = PIF_RCSR(DTX_CONTROL);
	if (val != 0x5152040001cULL) {
		printf("%s: DTX_CONTROL: %llx != %llx\n",
		    XNAME, val, 0x5152040001cULL);
		return 1;
	}
#endif

	PIF_WCSR(MDIO_CONTROL, 0x0018040000000000ULL); DELAY(50);
	PIF_WCSR(MDIO_CONTROL, 0x00180400000000e0ULL); DELAY(50);
	PIF_WCSR(MDIO_CONTROL, 0x00180400000000ecULL); DELAY(50);

#if 0
	/* Reading the MDIO control should be 0x1804001c0f001c */
	val = PIF_RCSR(MDIO_CONTROL);
	if (val != 0x1804001c0f001cULL) {
		printf("%s: MDIO_CONTROL2: %llx != %llx\n",
		    XNAME, val, 0x1804001c0f001cULL);
		return 1;
	}
#endif
	return 0;
}