Home | History | Annotate | Line # | Download | only in pci
if_wm.c revision 1.6
      1 /*	$NetBSD: if_wm.c,v 1.6 2002/05/08 21:22:20 thorpej Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*
     39  * Device driver for the Intel i82542 (``Wiseman''), i82543 (``Livengood''),
     40  * and i82544 (``Cordova'') Gigabit Ethernet chips.
     41  *
     42  * TODO (in order of importance):
     43  *
     44  *	- Fix hw VLAN assist.
     45  *
     46  *	- Make GMII work on the Livengood.
     47  *
     48  *	- Fix out-bound IP header checksums.
     49  *
     50  *	- Fix UDP checksums.
     51  *
     52  *	- Jumbo frames -- requires changes to network stack due to
     53  *	  lame buffer length handling on chip.
     54  *
     55  * ...and, of course, performance tuning.
     56  */
     57 
     58 #include "bpfilter.h"
     59 
     60 #include <sys/param.h>
     61 #include <sys/systm.h>
     62 #include <sys/callout.h>
     63 #include <sys/mbuf.h>
     64 #include <sys/malloc.h>
     65 #include <sys/kernel.h>
     66 #include <sys/socket.h>
     67 #include <sys/ioctl.h>
     68 #include <sys/errno.h>
     69 #include <sys/device.h>
     70 #include <sys/queue.h>
     71 
     72 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
     73 
     74 #include <net/if.h>
     75 #include <net/if_dl.h>
     76 #include <net/if_media.h>
     77 #include <net/if_ether.h>
     78 
     79 #if NBPFILTER > 0
     80 #include <net/bpf.h>
     81 #endif
     82 
     83 #include <netinet/in.h>			/* XXX for struct ip */
     84 #include <netinet/in_systm.h>		/* XXX for struct ip */
     85 #include <netinet/ip.h>			/* XXX for struct ip */
     86 
     87 #include <machine/bus.h>
     88 #include <machine/intr.h>
     89 #include <machine/endian.h>
     90 
     91 #include <dev/mii/mii.h>
     92 #include <dev/mii/miivar.h>
     93 #include <dev/mii/mii_bitbang.h>
     94 
     95 #include <dev/pci/pcireg.h>
     96 #include <dev/pci/pcivar.h>
     97 #include <dev/pci/pcidevs.h>
     98 
     99 #include <dev/pci/if_wmreg.h>
    100 
/*
 * Optional debug instrumentation.  wm_debug is a bit mask selecting
 * which classes of DPRINTF() messages are printed; all of this is
 * compiled away unless the kernel is built with WM_DEBUG defined.
 */
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01	/* link state events */
#define	WM_DEBUG_TX		0x02	/* transmit path */
#define	WM_DEBUG_RX		0x04	/* receive path */
#define	WM_DEBUG_GMII		0x08	/* GMII (PHY) register access */
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

/* Print the parenthesized printf argument list y if class x is enabled. */
#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
    112 
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 32 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16	/* max DMA segments per packet */
#define	WM_IFQUEUELEN		256	/* if_snd queue depth we advertise */
#define	WM_TXQUEUELEN		32	/* Tx jobs managed at once */
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_NTXDESC		256	/* hardware Tx ring size (errata cap) */
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)	/* next ring slot */
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK) /* next Tx job */

/*
 * The interrupt mitigation feature of the Wiseman is pretty cool -- as
 * long as you're transmitting, you don't have to take an interrupt at
 * all.  However, we force an interrupt to happen every N + 1 packets
 * in order to kick us in a reasonable amount of time when we run out
 * of descriptors.
 */
#define	WM_TXINTR_MASK		7

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 128 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 25 jumbo packets.
 */
#define	WM_NRXDESC		128
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)	/* next Rx slot */
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)	/* previous Rx slot */
    148 
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

/* Byte offset of member x within the DMA'd control-data clump. */
#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])	/* Tx descriptor x */
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])	/* Rx descriptor x */
    169 
/*
 * Software state for transmit jobs.  One of these tracks each packet
 * queued to the hardware, recording its mbuf chain and the span of
 * hardware descriptors it occupies.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};
    180 
/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
    190 
/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */
	int	sc_txwin;		/* Tx descriptors since last Tx int */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_tcmd;		/* cached Tx cksum cmd */
	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum start */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum start */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;		/* dropping rest of current frame?
					   NOTE(review): set in Rx path, not
					   visible here -- confirm in wm_rxintr */
	int	sc_rxlen;		/* length of current Rx chain; zeroed
					   by WM_RXCHAIN_RESET() */
	struct mbuf *sc_rxhead;		/* head of current Rx mbuf chain */
	struct mbuf *sc_rxtail;		/* last mbuf linked onto the chain */
	struct mbuf **sc_rxtailp;	/* where to link the next Rx mbuf */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */
};
    289 
/*
 * WM_RXCHAIN_RESET:  Start a new, empty Rx mbuf chain.  The tail
 * pointer is aimed back at sc_rxhead so WM_RXCHAIN_LINK() can append
 * without special-casing the first mbuf.
 */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/*
 * WM_RXCHAIN_LINK:  Append mbuf m to the current Rx chain and advance
 * the tail pointer to m's m_next field.
 */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
    302 
/* sc_type: chip generation, in increasing order of capability. */
#define	WM_T_WISEMAN_2_0	0	/* Wiseman (i82542) 2.0 (really old) */
#define	WM_T_WISEMAN_2_1	1	/* Wiseman (i82542) 2.1+ (old) */
#define	WM_T_LIVENGOOD		2	/* Livengood (i82543) */
#define	WM_T_CORDOVA		3	/* Cordova (i82544) */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */

/* Bump an event counter; compiled away unless WM_EVENT_COUNTERS. */
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

/* 32-bit access to the chip's memory-mapped registers. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

/* Bus (DMA) address of Tx/Rx descriptor x within the control data. */
#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
    325 
/*
 * WM_CDTXSYNC:  bus_dmamap_sync() n Tx descriptors starting at ring
 * index x.  A span that would run off the end of the ring is synced
 * as two operations: end of the ring first, then the wrapped remainder
 * from index 0.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/*
 * WM_CDRXSYNC:  bus_dmamap_sync() the single Rx descriptor at index x.
 */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
    352 
/*
 * WM_INIT_RXDESC:  (Re)initialize Rx descriptor x to point at the mbuf
 * held in its software state, sync it, and hand it to the chip by
 * writing x to the Rx Descriptor Tail register.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't accept packets longer than the standard	\
	 * Ethernet MTU, without incurring a big penalty to copy every	\
	 * incoming packet to a new, suitably aligned buffer.		\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
    390 
/* ifnet entry points */
void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

/* shutdown hook */
void	wm_shutdown(void *);

/* chip reset, buffer, EEPROM, and timer utilities */
void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

/* receive filter setup */
void	wm_set_filter(struct wm_softc *);

/* interrupt service */
int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

/* TBI (1000BASE-X) media handling */
void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

/* GMII (1000BASE-T) media handling */
void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_livengood_readreg(struct device *, int, int);
void	wm_gmii_livengood_writereg(struct device *, int, int, int);

int	wm_gmii_cordova_readreg(struct device *, int, int);
void	wm_gmii_cordova_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

/* autoconfiguration */
int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);
    435 
/*
 * Tunable.  NOTE(review): its use is in the Rx path, which is not
 * visible in this chunk -- presumably nonzero means small received
 * packets are copied into a fresh mbuf; confirm against wm_rxintr.
 */
int	wm_copy_small = 0;

/* Autoconfiguration attachment glue. */
struct cfattach wm_ca = {
	sizeof(struct wm_softc), wm_match, wm_attach,
};
    441 
/*
 * Devices supported by this driver.  The table is terminated by an
 * entry whose wmp_name is NULL (see wm_lookup()).
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;	/* PCI vendor ID */
	pci_product_id_t	wmp_product;	/* PCI product ID */
	const char		*wmp_name;	/* device description */
	int			wmp_type;	/* WM_T_* chip type */
	int			wmp_flags;	/* WMP_F_* media flags */
#define	WMP_F_1000X		0x01	/* 1000BASE-X (fiber/TBI) */
#define	WMP_F_1000T		0x02	/* 1000BASE-T (copper/GMII) */
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_WISEMAN_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_FIBER,
	  "Intel i82543 1000BASE-X Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_SC,
	  "Intel i82543-SC 1000BASE-X Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_COPPER,
	  "Intel i82543 1000BASE-T Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544_XT,
	  "Intel i82544 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544_XF,
	  "Intel i82544 1000BASE-X Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_64,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};
    490 
#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
/* Event counter names, one per possible Tx DMA segment count (1..16). */
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */
    514 
    515 static const struct wm_product *
    516 wm_lookup(const struct pci_attach_args *pa)
    517 {
    518 	const struct wm_product *wmp;
    519 
    520 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
    521 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
    522 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
    523 			return (wmp);
    524 	}
    525 	return (NULL);
    526 }
    527 
    528 int
    529 wm_match(struct device *parent, struct cfdata *cf, void *aux)
    530 {
    531 	struct pci_attach_args *pa = aux;
    532 
    533 	if (wm_lookup(pa) != NULL)
    534 		return (1);
    535 
    536 	return (0);
    537 }
    538 
    539 void
    540 wm_attach(struct device *parent, struct device *self, void *aux)
    541 {
    542 	struct wm_softc *sc = (void *) self;
    543 	struct pci_attach_args *pa = aux;
    544 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    545 	pci_chipset_tag_t pc = pa->pa_pc;
    546 	pci_intr_handle_t ih;
    547 	const char *intrstr = NULL;
    548 	bus_space_tag_t memt;
    549 	bus_space_handle_t memh;
    550 	bus_dma_segment_t seg;
    551 	int memh_valid;
    552 	int i, rseg, error;
    553 	const struct wm_product *wmp;
    554 	uint8_t enaddr[ETHER_ADDR_LEN];
    555 	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
    556 	pcireg_t preg, memtype;
    557 	int pmreg;
    558 
    559 	callout_init(&sc->sc_tick_ch);
    560 
    561 	wmp = wm_lookup(pa);
    562 	if (wmp == NULL) {
    563 		printf("\n");
    564 		panic("wm_attach: impossible");
    565 	}
    566 
    567 	sc->sc_dmat = pa->pa_dmat;
    568 
    569 	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
    570 	printf(": %s, rev. %d\n", wmp->wmp_name, preg);
    571 
    572 	sc->sc_type = wmp->wmp_type;
    573 	if (sc->sc_type < WM_T_LIVENGOOD) {
    574 		if (preg < 2) {
    575 			printf("%s: Wiseman must be at least rev. 2\n",
    576 			    sc->sc_dev.dv_xname);
    577 			return;
    578 		}
    579 		if (preg < 3)
    580 			sc->sc_type = WM_T_WISEMAN_2_0;
    581 	}
    582 
    583 	/*
    584 	 * Map the device.
    585 	 */
    586 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
    587 	switch (memtype) {
    588 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
    589 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
    590 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
    591 		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
    592 		break;
    593 	default:
    594 		memh_valid = 0;
    595 	}
    596 
    597 	if (memh_valid) {
    598 		sc->sc_st = memt;
    599 		sc->sc_sh = memh;
    600 	} else {
    601 		printf("%s: unable to map device registers\n",
    602 		    sc->sc_dev.dv_xname);
    603 		return;
    604 	}
    605 
    606 	/* Enable bus mastering.  Disable MWI on the Wiseman 2.0. */
    607 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    608 	preg |= PCI_COMMAND_MASTER_ENABLE;
    609 	if (sc->sc_type < WM_T_WISEMAN_2_1)
    610 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
    611 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
    612 
    613 	/* Get it out of power save mode, if needed. */
    614 	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
    615 		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
    616 		if (preg == 3) {
    617 			/*
    618 			 * The card has lost all configuration data in
    619 			 * this state, so punt.
    620 			 */
    621 			printf("%s: unable to wake from power state D3\n",
    622 			    sc->sc_dev.dv_xname);
    623 			return;
    624 		}
    625 		if (preg != 0) {
    626 			printf("%s: waking up from power state D%d\n",
    627 			    sc->sc_dev.dv_xname, preg);
    628 			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
    629 		}
    630 	}
    631 
    632 	/*
    633 	 * Map and establish our interrupt.
    634 	 */
    635 	if (pci_intr_map(pa, &ih)) {
    636 		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
    637 		return;
    638 	}
    639 	intrstr = pci_intr_string(pc, ih);
    640 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
    641 	if (sc->sc_ih == NULL) {
    642 		printf("%s: unable to establish interrupt",
    643 		    sc->sc_dev.dv_xname);
    644 		if (intrstr != NULL)
    645 			printf(" at %s", intrstr);
    646 		printf("\n");
    647 		return;
    648 	}
    649 	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
    650 
    651 	/*
    652 	 * Allocate the control data structures, and create and load the
    653 	 * DMA map for it.
    654 	 */
    655 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
    656 	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
    657 	    0)) != 0) {
    658 		printf("%s: unable to allocate control data, error = %d\n",
    659 		    sc->sc_dev.dv_xname, error);
    660 		goto fail_0;
    661 	}
    662 
    663 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
    664 	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
    665 	    BUS_DMA_COHERENT)) != 0) {
    666 		printf("%s: unable to map control data, error = %d\n",
    667 		    sc->sc_dev.dv_xname, error);
    668 		goto fail_1;
    669 	}
    670 
    671 	if ((error = bus_dmamap_create(sc->sc_dmat,
    672 	    sizeof(struct wm_control_data), 1,
    673 	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
    674 		printf("%s: unable to create control data DMA map, "
    675 		    "error = %d\n", sc->sc_dev.dv_xname, error);
    676 		goto fail_2;
    677 	}
    678 
    679 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
    680 	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
    681 	    0)) != 0) {
    682 		printf("%s: unable to load control data DMA map, error = %d\n",
    683 		    sc->sc_dev.dv_xname, error);
    684 		goto fail_3;
    685 	}
    686 
    687 	/*
    688 	 * Create the transmit buffer DMA maps.
    689 	 */
    690 	for (i = 0; i < WM_TXQUEUELEN; i++) {
    691 		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
    692 		    WM_NTXSEGS, MCLBYTES, 0, 0,
    693 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
    694 			printf("%s: unable to create Tx DMA map %d, "
    695 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
    696 			goto fail_4;
    697 		}
    698 	}
    699 
    700 	/*
    701 	 * Create the receive buffer DMA maps.
    702 	 */
    703 	for (i = 0; i < WM_NRXDESC; i++) {
    704 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
    705 		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
    706 			printf("%s: unable to create Rx DMA map %d, "
    707 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
    708 			goto fail_5;
    709 		}
    710 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
    711 	}
    712 
    713 	/*
    714 	 * Reset the chip to a known state.
    715 	 */
    716 	wm_reset(sc);
    717 
    718 	/*
    719 	 * Read the Ethernet address from the EEPROM.
    720 	 */
    721 	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
    722 	    sizeof(myea) / sizeof(myea[0]), myea);
    723 	enaddr[0] = myea[0] & 0xff;
    724 	enaddr[1] = myea[0] >> 8;
    725 	enaddr[2] = myea[1] & 0xff;
    726 	enaddr[3] = myea[1] >> 8;
    727 	enaddr[4] = myea[2] & 0xff;
    728 	enaddr[5] = myea[2] >> 8;
    729 
    730 	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
    731 	    ether_sprintf(enaddr));
    732 
    733 	/*
    734 	 * Read the config info from the EEPROM, and set up various
    735 	 * bits in the control registers based on their contents.
    736 	 */
    737 	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
    738 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
    739 	if (sc->sc_type >= WM_T_CORDOVA)
    740 		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);
    741 
    742 	if (cfg1 & EEPROM_CFG1_ILOS)
    743 		sc->sc_ctrl |= CTRL_ILOS;
    744 	if (sc->sc_type >= WM_T_CORDOVA) {
    745 		sc->sc_ctrl |=
    746 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
    747 		    CTRL_SWDPIO_SHIFT;
    748 		sc->sc_ctrl |=
    749 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
    750 		    CTRL_SWDPINS_SHIFT;
    751 	} else {
    752 		sc->sc_ctrl |=
    753 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
    754 		    CTRL_SWDPIO_SHIFT;
    755 	}
    756 
    757 #if 0
    758 	if (sc->sc_type >= WM_T_CORDOVA) {
    759 		if (cfg1 & EEPROM_CFG1_IPS0)
    760 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
    761 		if (cfg1 & EEPROM_CFG1_IPS1)
    762 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
    763 		sc->sc_ctrl_ext |=
    764 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
    765 		    CTRL_EXT_SWDPIO_SHIFT;
    766 		sc->sc_ctrl_ext |=
    767 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
    768 		    CTRL_EXT_SWDPINS_SHIFT;
    769 	} else {
    770 		sc->sc_ctrl_ext |=
    771 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
    772 		    CTRL_EXT_SWDPIO_SHIFT;
    773 	}
    774 #endif
    775 
    776 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
    777 #if 0
    778 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
    779 #endif
    780 
    781 	/*
    782 	 * Set up some register offsets that are different between
    783 	 * the Wiseman and the Livengood and later chips.
    784 	 */
    785 	if (sc->sc_type < WM_T_LIVENGOOD) {
    786 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
    787 		sc->sc_tdt_reg = WMREG_OLD_TDT;
    788 	} else {
    789 		sc->sc_rdt_reg = WMREG_RDT;
    790 		sc->sc_tdt_reg = WMREG_TDT;
    791 	}
    792 
    793 	/*
    794 	 * Determine if we should use flow control.  We should
    795 	 * always use it, unless we're on a Wiseman < 2.1.
    796 	 */
    797 	if (sc->sc_type >= WM_T_WISEMAN_2_1)
    798 		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;
    799 
    800 	/*
    801 	 * Determine if we're TBI or GMII mode, and initialize the
    802 	 * media structures accordingly.
    803 	 */
    804 	if (sc->sc_type < WM_T_LIVENGOOD ||
    805 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
    806 		if (wmp->wmp_flags & WMP_F_1000T)
    807 			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
    808 			    "product!\n", sc->sc_dev.dv_xname);
    809 		wm_tbi_mediainit(sc);
    810 	} else {
    811 		if (wmp->wmp_flags & WMP_F_1000X)
    812 			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
    813 			    "product!\n", sc->sc_dev.dv_xname);
    814 		wm_gmii_mediainit(sc);
    815 	}
    816 
    817 	ifp = &sc->sc_ethercom.ec_if;
    818 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
    819 	ifp->if_softc = sc;
    820 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    821 	ifp->if_ioctl = wm_ioctl;
    822 	ifp->if_start = wm_start;
    823 	ifp->if_watchdog = wm_watchdog;
    824 	ifp->if_init = wm_init;
    825 	ifp->if_stop = wm_stop;
    826 	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
    827 	IFQ_SET_READY(&ifp->if_snd);
    828 
    829 	/*
    830 	 * If we're a Livengood or greater, we can support VLANs.
    831 	 */
    832 	if (sc->sc_type >= WM_T_LIVENGOOD)
    833 		sc->sc_ethercom.ec_capabilities |=
    834 		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
    835 
    836 	/*
    837 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
    839 	 */
    840 	if (sc->sc_type >= WM_T_LIVENGOOD)
    841 		ifp->if_capabilities |=
    842 		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
    843 
    844 	/*
    845 	 * Attach the interface.
    846 	 */
    847 	if_attach(ifp);
    848 	ether_ifattach(ifp, enaddr);
    849 
    850 #ifdef WM_EVENT_COUNTERS
    851 	/* Attach event counters. */
    852 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
    853 	    NULL, sc->sc_dev.dv_xname, "txsstall");
    854 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
    855 	    NULL, sc->sc_dev.dv_xname, "txdstall");
    856 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
    857 	    NULL, sc->sc_dev.dv_xname, "txdw");
    858 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
    859 	    NULL, sc->sc_dev.dv_xname, "txqe");
    860 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
    861 	    NULL, sc->sc_dev.dv_xname, "rxintr");
    862 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
    863 	    NULL, sc->sc_dev.dv_xname, "linkintr");
    864 
    865 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
    866 	    NULL, sc->sc_dev.dv_xname, "rxipsum");
    867 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
    868 	    NULL, sc->sc_dev.dv_xname, "rxtusum");
    869 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
    870 	    NULL, sc->sc_dev.dv_xname, "txipsum");
    871 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
    872 	    NULL, sc->sc_dev.dv_xname, "txtusum");
    873 
    874 	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
    875 	    NULL, sc->sc_dev.dv_xname, "txctx init");
    876 	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
    877 	    NULL, sc->sc_dev.dv_xname, "txctx hit");
    878 	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
    879 	    NULL, sc->sc_dev.dv_xname, "txctx miss");
    880 
    881 	for (i = 0; i < WM_NTXSEGS; i++)
    882 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
    883 		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
    884 
    885 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
    886 	    NULL, sc->sc_dev.dv_xname, "txdrop");
    887 
    888 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
    889 	    NULL, sc->sc_dev.dv_xname, "tu");
    890 #endif /* WM_EVENT_COUNTERS */
    891 
    892 	/*
    893 	 * Make sure the interface is shutdown during reboot.
    894 	 */
    895 	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
    896 	if (sc->sc_sdhook == NULL)
    897 		printf("%s: WARNING: unable to establish shutdown hook\n",
    898 		    sc->sc_dev.dv_xname);
    899 	return;
    900 
    901 	/*
    902 	 * Free any resources we've allocated during the failed attach
    903 	 * attempt.  Do this in reverse order and fall through.
    904 	 */
    905  fail_5:
    906 	for (i = 0; i < WM_NRXDESC; i++) {
    907 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
    908 			bus_dmamap_destroy(sc->sc_dmat,
    909 			    sc->sc_rxsoft[i].rxs_dmamap);
    910 	}
    911  fail_4:
    912 	for (i = 0; i < WM_TXQUEUELEN; i++) {
    913 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
    914 			bus_dmamap_destroy(sc->sc_dmat,
    915 			    sc->sc_txsoft[i].txs_dmamap);
    916 	}
    917 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
    918  fail_3:
    919 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
    920  fail_2:
    921 	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
    922 	    sizeof(struct wm_control_data));
    923  fail_1:
    924 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
    925  fail_0:
    926 	return;
    927 }
    928 
    929 /*
    930  * wm_shutdown:
    931  *
    932  *	Make sure the interface is stopped at reboot time.
    933  */
    934 void
    935 wm_shutdown(void *arg)
    936 {
    937 	struct wm_softc *sc = arg;
    938 
    939 	wm_stop(&sc->sc_ethercom.ec_if, 1);
    940 }
    941 
    942 /*
    943  * wm_tx_cksum:
    944  *
    945  *	Set up TCP/IP checksumming parameters for the
    946  *	specified packet.
    947  */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, tcmd = 0, ipcs, tucs;
	struct ip *ip;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/* XXX Assumes normal Ethernet encap. */
	offset = ETHER_HDR_LEN;

	/*
	 * XXX We require the IP header to be entirely within the first
	 * mbuf; rather than pulling it up, drop the packet.
	 */
	if (m0->m_len < (offset + sizeof(struct ip))) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;		/* IP header length in bytes */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		/*
		 * IPv4 header checksum offload: tell the chip where the
		 * IP header starts (IPCSS), where to store the checksum
		 * (IPCSO), and where the header ends (IPCSE).
		 */
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		tcmd |= htole32(WTX_TCPIP_CMD_IP);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else
		ipcs = 0;

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		/*
		 * TCP/UDP checksum offload; csum_data holds the offset
		 * of the checksum field within the TCP/UDP header.
		 * TUCSE of 0 means "checksum to end of packet".
		 */
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		tcmd |= htole32(WTX_TCPIP_CMD_TCP);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else
		tucs = 0;

	/*
	 * The chip caches the most recent context descriptor; if the
	 * parameters match what is already loaded, skip writing a new
	 * one (and the extra descriptor it would consume).
	 */
	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs &&
	    sc->sc_txctx_tcmd == tcmd) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		/* All-ones is the "never loaded" sentinel set by wm_init(). */
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff &&
		    sc->sc_txctx_tcmd == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		/*
		 * The context descriptor occupies a slot in the normal
		 * Tx ring, overlaid on a regular descriptor.
		 */
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C) | tcmd;
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;
		sc->sc_txctx_tcmd = tcmd;

		/* The context descriptor counts against this job. */
		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
		sc->sc_txwin++;
	}

	/*
	 * NOTE(review): *cmdp is returned in host byte order while
	 * *fieldsp is already little-endian; the caller ORs *cmdp into
	 * a htole32()'d cmdlen word — confirm this is intentional on
	 * big-endian hosts.  (WTC_DTYP_D: name as declared in the
	 * register header, despite the WTX_ prefix used elsewhere.)
	 */
	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}
   1036 
   1037 /*
   1038  * wm_start:		[ifnet interface function]
   1039  *
   1040  *	Start packet transmission on the interface.
   1041  */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0/*, *m*/;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	/* Nothing to do if we're not running or are flow-blocked. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors so we can
	 * tell at the end whether we queued anything (and need the
	 * watchdog armed).
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue (peek; dequeue on commit). */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree == 0) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: no free job descriptors\n",
				sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txsstall);
			break;
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.  The
			 * packet stays on the queue for a retry.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
					&cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				txs->txs_mbuf = NULL;
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize one transmit descriptor per DMA segment.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			sc->sc_txwin++;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RPS);
		if (sc->sc_txwin < (WM_NTXDESC * 2 / 3))
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_IDE);
		else
			sc->sc_txwin = 0;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*mtod(m, int *) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
   1291 
   1292 /*
   1293  * wm_watchdog:		[ifnet interface function]
   1294  *
   1295  *	Watchdog timer handler.
   1296  */
   1297 void
   1298 wm_watchdog(struct ifnet *ifp)
   1299 {
   1300 	struct wm_softc *sc = ifp->if_softc;
   1301 
   1302 	/*
   1303 	 * Since we're using delayed interrupts, sweep up
   1304 	 * before we report an error.
   1305 	 */
   1306 	wm_txintr(sc);
   1307 
   1308 	if (sc->sc_txfree != WM_NTXDESC) {
   1309 		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   1310 		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
   1311 		    sc->sc_txnext);
   1312 		ifp->if_oerrors++;
   1313 
   1314 		/* Reset the interface. */
   1315 		(void) wm_init(ifp);
   1316 	}
   1317 
   1318 	/* Try to get more packets going. */
   1319 	wm_start(ifp);
   1320 }
   1321 
   1322 /*
   1323  * wm_ioctl:		[ifnet interface function]
   1324  *
   1325  *	Handle control requests from the operator.
   1326  */
   1327 int
   1328 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
   1329 {
   1330 	struct wm_softc *sc = ifp->if_softc;
   1331 	struct ifreq *ifr = (struct ifreq *) data;
   1332 	int s, error;
   1333 
   1334 	s = splnet();
   1335 
   1336 	switch (cmd) {
   1337 	case SIOCSIFMEDIA:
   1338 	case SIOCGIFMEDIA:
   1339 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   1340 		break;
   1341 
   1342 	default:
   1343 		error = ether_ioctl(ifp, cmd, data);
   1344 		if (error == ENETRESET) {
   1345 			/*
   1346 			 * Multicast list has changed; set the hardware filter
   1347 			 * accordingly.
   1348 			 */
   1349 			wm_set_filter(sc);
   1350 			error = 0;
   1351 		}
   1352 		break;
   1353 	}
   1354 
   1355 	/* Try to get more packets going. */
   1356 	wm_start(ifp);
   1357 
   1358 	splx(s);
   1359 	return (error);
   1360 }
   1361 
   1362 /*
   1363  * wm_intr:
   1364  *
   1365  *	Interrupt service routine.
   1366  */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	/*
	 * Keep servicing causes until the ICR shows none of the
	 * interrupt bits we enabled, or an Rx overrun forces a
	 * reinitialization.
	 */
	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

		handled = 1;

		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
			wm_rxintr(sc);
		}

		if (icr & (ICR_TXDW|ICR_TXQE)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TDXW|TXQE interrupt\n",
			    sc->sc_dev.dv_xname));
#ifdef WM_EVENT_COUNTERS
			/* Count descriptor-written-back vs. queue-empty. */
			if (icr & ICR_TXDW)
				WM_EVCNT_INCR(&sc->sc_ev_txdw);
			else if (icr & ICR_TXQE)
				WM_EVCNT_INCR(&sc->sc_ev_txqe);
#endif
			wm_txintr(sc);
		}

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			/* Rx FIFO overrun; schedule a full reinit. */
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	/* Non-zero tells the dispatcher the interrupt was ours. */
	return (handled);
}
   1425 
   1426 /*
   1427  * wm_txintr:
   1428  *
   1429  *	Helper; handle transmit interrupts.
   1430  */
   1431 void
   1432 wm_txintr(struct wm_softc *sc)
   1433 {
   1434 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1435 	struct wm_txsoft *txs;
   1436 	uint8_t status;
   1437 	int i;
   1438 
   1439 	ifp->if_flags &= ~IFF_OACTIVE;
   1440 
   1441 	/*
   1442 	 * Go through the Tx list and free mbufs for those
   1443 	 * frams which have been transmitted.
   1444 	 */
   1445 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
   1446 	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
   1447 		txs = &sc->sc_txsoft[i];
   1448 
   1449 		DPRINTF(WM_DEBUG_TX,
   1450 		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
   1451 
   1452 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
   1453 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   1454 
   1455 		status = le32toh(sc->sc_txdescs[
   1456 		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
   1457 		if ((status & WTX_ST_DD) == 0)
   1458 			break;
   1459 
   1460 		DPRINTF(WM_DEBUG_TX,
   1461 		    ("%s: TX: job %d done: descs %d..%d\n",
   1462 		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
   1463 		    txs->txs_lastdesc));
   1464 
   1465 		/*
   1466 		 * XXX We should probably be using the statistics
   1467 		 * XXX registers, but I don't know if they exist
   1468 		 * XXX on chips before the Cordova.
   1469 		 */
   1470 
   1471 #ifdef WM_EVENT_COUNTERS
   1472 		if (status & WTX_ST_TU)
   1473 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   1474 #endif /* WM_EVENT_COUNTERS */
   1475 
   1476 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   1477 			ifp->if_oerrors++;
   1478 			if (status & WTX_ST_LC)
   1479 				printf("%s: late collision\n",
   1480 				    sc->sc_dev.dv_xname);
   1481 			else if (status & WTX_ST_EC) {
   1482 				ifp->if_collisions += 16;
   1483 				printf("%s: excessive collisions\n",
   1484 				    sc->sc_dev.dv_xname);
   1485 			}
   1486 		} else
   1487 			ifp->if_opackets++;
   1488 
   1489 		sc->sc_txfree += txs->txs_ndesc;
   1490 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   1491 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1492 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   1493 		m_freem(txs->txs_mbuf);
   1494 		txs->txs_mbuf = NULL;
   1495 	}
   1496 
   1497 	/* Update the dirty transmit buffer pointer. */
   1498 	sc->sc_txsdirty = i;
   1499 	DPRINTF(WM_DEBUG_TX,
   1500 	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
   1501 
   1502 	/*
   1503 	 * If there are no more pending transmissions, cancel the watchdog
   1504 	 * timer.
   1505 	 */
   1506 	if (sc->sc_txsfree == WM_TXQUEUELEN)
   1507 		ifp->if_timer = 0;
   1508 	if (sc->sc_txfree == WM_NTXDESC)
   1509 		sc->sc_txwin = 0;
   1510 }
   1511 
   1512 /*
   1513  * wm_rxintr:
   1514  *
   1515  *	Helper; handle receive interrupts.
   1516  */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	/*
	 * Walk the Rx ring from the last processed descriptor until
	 * we hit one the chip has not yet completed (DD clear).
	 */
	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			break;
		}

		/*
		 * When a previous buffer allocation failed mid-packet,
		 * sc_rxdiscard is set and we throw away descriptors
		 * until end-of-packet resets the state.
		 */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring, so the chip can
		 * keep receiving while we process this one.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		/* Append this buffer to the in-progress packet chain. */
		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now.  Terminate the
		 * chain, take it off the softc, and total up the length.
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct mbuf *vtag;

			vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*mtod(m, int *) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
			vtag->m_len = sizeof(int);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet, based on what
		 * the chip verified in hardware.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}
   1715 
   1716 /*
   1717  * wm_linkintr:
   1718  *
   1719  *	Helper; handle link interrupts.
   1720  */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/* TBI (fiber) path from here down. */

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/*
			 * Link came up; program the collision distance
			 * to match the negotiated duplex.
			 */
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		/* Reflect the new state on the link LED. */
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}
   1782 
   1783 /*
   1784  * wm_tick:
   1785  *
   1786  *	One second timer, used to check link status, sweep up
   1787  *	completed transmit jobs, etc.
   1788  */
   1789 void
   1790 wm_tick(void *arg)
   1791 {
   1792 	struct wm_softc *sc = arg;
   1793 	int s;
   1794 
   1795 	s = splnet();
   1796 
   1797 	if (sc->sc_flags & WM_F_HAS_MII)
   1798 		mii_tick(&sc->sc_mii);
   1799 	else
   1800 		wm_tbi_check_link(sc);
   1801 
   1802 	splx(s);
   1803 
   1804 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   1805 }
   1806 
   1807 /*
   1808  * wm_reset:
   1809  *
   1810  *	Reset the i82542 chip.
   1811  */
   1812 void
   1813 wm_reset(struct wm_softc *sc)
   1814 {
   1815 	int i;
   1816 
   1817 	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   1818 	delay(10000);
   1819 
   1820 	for (i = 0; i < 1000; i++) {
   1821 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
   1822 			return;
   1823 		delay(20);
   1824 	}
   1825 
   1826 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
   1827 		printf("%s: WARNING: reset failed to complete\n",
   1828 		    sc->sc_dev.dv_xname);
   1829 }
   1830 
/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 *
 *	Returns 0 on success, or an errno value if a receive buffer
 *	could not be allocated or DMA-mapped.
 */
int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Invalidate the cached transmit checksum context.  0xffffffff
	 * looks like a sentinel no real context matches, so the first
	 * packet will load a fresh context descriptor -- confirm
	 * against the transmit path.
	 */
	sc->sc_txctx_tcmd = 0xffffffff;
	sc->sc_txctx_ipcs = 0xffffffff;
	sc->sc_txctx_tucs = 0xffffffff;

	/*
	 * Program the transmit ring base, length, head, tail, and
	 * interrupt delay.  Pre-Livengood chips use a different
	 * (OLD_*) register layout for the same job.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 64);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, 0);
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 64);

		/* Descriptor prefetch/writeback thresholds. */
		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.  Old chips expose a second receive
	 * queue; we zero it out and use only queue 0.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 64 | RDTR_FPD);

		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, 0);
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 64 | RDTR_FPD);
	}
	/* Populate the receive ring, reusing already-attached mbufs. */
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);

		if (sc->sc_type < WM_T_LIVENGOOD) {
			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
		} else {
			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
		}
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
	}

#if 0 /* XXXJRT */
	/* Deal with VLAN enables. */
	if (sc->sc_ethercom.ec_nvlans != 0)
		sc->sc_ctrl |= CTRL_VME;
	else
#endif /* XXXJRT */
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters, mirroring the
	 * capabilities the administrator enabled on the interface.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
		reg |= RXCSUM_IPOFL;
	else
		reg &= ~RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
		reg |= RXCSUM_TUOFL;
	else
		reg &= ~RXCSUM_TUOFL;
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/*
	 * Set up the interrupt registers: mask everything, then
	 * enable only the events we service.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_TXQE | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

#if 0 /* XXXJRT */
	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the Cordova has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
   2051 
   2052 /*
   2053  * wm_rxdrain:
   2054  *
   2055  *	Drain the receive queue.
   2056  */
   2057 void
   2058 wm_rxdrain(struct wm_softc *sc)
   2059 {
   2060 	struct wm_rxsoft *rxs;
   2061 	int i;
   2062 
   2063 	for (i = 0; i < WM_NRXDESC; i++) {
   2064 		rxs = &sc->sc_rxsoft[i];
   2065 		if (rxs->rxs_mbuf != NULL) {
   2066 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   2067 			m_freem(rxs->rxs_mbuf);
   2068 			rxs->rxs_mbuf = NULL;
   2069 		}
   2070 	}
   2071 }
   2072 
   2073 /*
   2074  * wm_stop:		[ifnet interface function]
   2075  *
   2076  *	Stop transmission on the interface.
   2077  */
   2078 void
   2079 wm_stop(struct ifnet *ifp, int disable)
   2080 {
   2081 	struct wm_softc *sc = ifp->if_softc;
   2082 	struct wm_txsoft *txs;
   2083 	int i;
   2084 
   2085 	/* Stop the one second clock. */
   2086 	callout_stop(&sc->sc_tick_ch);
   2087 
   2088 	if (sc->sc_flags & WM_F_HAS_MII) {
   2089 		/* Down the MII. */
   2090 		mii_down(&sc->sc_mii);
   2091 	}
   2092 
   2093 	/* Stop the transmit and receive processes. */
   2094 	CSR_WRITE(sc, WMREG_TCTL, 0);
   2095 	CSR_WRITE(sc, WMREG_RCTL, 0);
   2096 
   2097 	/* Release any queued transmit buffers. */
   2098 	for (i = 0; i < WM_TXQUEUELEN; i++) {
   2099 		txs = &sc->sc_txsoft[i];
   2100 		if (txs->txs_mbuf != NULL) {
   2101 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   2102 			m_freem(txs->txs_mbuf);
   2103 			txs->txs_mbuf = NULL;
   2104 		}
   2105 	}
   2106 
   2107 	if (disable)
   2108 		wm_rxdrain(sc);
   2109 
   2110 	/* Mark the interface as down and cancel the watchdog timer. */
   2111 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   2112 	ifp->if_timer = 0;
   2113 }
   2114 
/*
 * wm_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 *
 *	Bit-bangs a Microwire-style protocol through the EECD
 *	register: data is shifted MSB-first on DI/DO, clocked by
 *	pulsing SK with CS held.  Reads `wordcnt' 16-bit words
 *	starting at word offset `word' into `data'.
 */
void
wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg;
	int i, x;

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		CSR_WRITE(sc, WMREG_EECD, EECD_CS);
		delay(2);

		/* Shift in the READ command, MSB first (3 bits). */
		for (x = 3; x > 0; x--) {
			reg = EECD_CS;
			if (UWIRE_OPC_READ & (1 << (x - 1)))
				reg |= EECD_DI;
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
			/* Pulse the clock with DI held steady. */
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Shift in address, MSB first (6 bits). */
		for (x = 6; x > 0; x--) {
			reg = EECD_CS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECD_DI;
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Shift out the data: sample DO after each clock pulse. */
		reg = EECD_CS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Clear CHIP SELECT. */
		CSR_WRITE(sc, WMREG_EECD, 0);
	}
}
   2173 
/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *
 *	Allocates a new cluster mbuf, loads it into the slot's DMA
 *	map (replacing any previous mapping), and re-initializes the
 *	hardware descriptor.  Returns 0 on success or ENOBUFS if an
 *	mbuf or cluster could not be allocated.
 */
int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	/* Attach a cluster; without one the mbuf is useless for rx. */
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Drop the old mapping before installing the new mbuf. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Map the whole cluster for device reads. */
	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		/*
		 * XXX XXX XXX -- a load failure here leaves the slot
		 * half torn down; recovering gracefully would require
		 * callers to cope, so for now we give up hard.
		 */
		panic("wm_add_rxbuf");	/* XXX XXX XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	WM_INIT_RXDESC(sc, idx);

	return (0);
}
   2217 
   2218 /*
   2219  * wm_set_ral:
   2220  *
   2221  *	Set an entery in the receive address list.
   2222  */
   2223 static void
   2224 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2225 {
   2226 	uint32_t ral_lo, ral_hi;
   2227 
   2228 	if (enaddr != NULL) {
   2229 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2230 		    (enaddr[3] << 24);
   2231 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   2232 		ral_hi |= RAL_AV;
   2233 	} else {
   2234 		ral_lo = 0;
   2235 		ral_hi = 0;
   2236 	}
   2237 
   2238 	if (sc->sc_type >= WM_T_CORDOVA) {
   2239 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2240 		    ral_lo);
   2241 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2242 		    ral_hi);
   2243 	} else {
   2244 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2245 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2246 	}
   2247 }
   2248 
   2249 /*
   2250  * wm_mchash:
   2251  *
   2252  *	Compute the hash of the multicast address for the 4096-bit
   2253  *	multicast filter.
   2254  */
   2255 static uint32_t
   2256 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2257 {
   2258 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2259 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2260 	uint32_t hash;
   2261 
   2262 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   2263 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   2264 
   2265 	return (hash & 0xfff);
   2266 }
   2267 
/*
 * wm_set_filter:
 *
 *	Set up the receive filter.
 *
 *	Programs the receive address list (station address in slot 0),
 *	the 4096-bit multicast hash table, and finally the RCTL
 *	register.  Falls back to accepting all multicast when in
 *	promiscuous mode or when a multicast address range is joined.
 */
void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i;

	/* The multicast table lives at a different offset on Cordova. */
	if (sc->sc_type >= WM_T_CORDOVA)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
	for (i = 1; i < WM_RAL_TABSIZE; i++)
		wm_set_ral(sc, NULL, i);

	/* Clear out the multicast table. */
	for (i = 0; i < WM_MC_TABSIZE; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* Split the 12-bit hash into word index and bit number. */
		reg = (hash >> 5) & 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/*
		 * XXX Hardware bug??  On Cordova, writing certain MTA
		 * words appears to require rewriting the preceding
		 * word afterward (`bit' is reused as scratch here).
		 */
		if (sc->sc_type == WM_T_CORDOVA && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
   2353 
/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X devices.
 *
 *	Sets the inter-packet gap for the chip generation, configures
 *	the link-LED and loss-of-signal SWD pins, and registers the
 *	supported 1000BASE-SX media with ifmedia (announcing them on
 *	the console as we go).
 */
void
wm_tbi_mediainit(struct wm_softc *sc)
{
	const char *sep = "";

	if (sc->sc_type < WM_T_LIVENGOOD)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

/* Announce a media type and add it; `d' is the ANAR data for it. */
#define	ADD(s, m, d)							\
do {									\
	printf("%s%s", sep, s);						\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	printf("%s: ", sc->sc_dev.dv_xname);
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	printf("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
   2400 
   2401 /*
   2402  * wm_tbi_mediastatus:	[ifmedia interface function]
   2403  *
   2404  *	Get the current interface media status on a 1000BASE-X device.
   2405  */
   2406 void
   2407 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   2408 {
   2409 	struct wm_softc *sc = ifp->if_softc;
   2410 
   2411 	ifmr->ifm_status = IFM_AVALID;
   2412 	ifmr->ifm_active = IFM_ETHER;
   2413 
   2414 	if (sc->sc_tbi_linkup == 0) {
   2415 		ifmr->ifm_active |= IFM_NONE;
   2416 		return;
   2417 	}
   2418 
   2419 	ifmr->ifm_status |= IFM_ACTIVE;
   2420 	ifmr->ifm_active |= IFM_1000_SX;
   2421 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   2422 		ifmr->ifm_active |= IFM_FDX;
   2423 }
   2424 
/*
 * wm_tbi_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-X device.
 *
 *	Programs the transmit config word (with pause bits mirroring
 *	the flow-control setup), restarts autonegotiation, and waits
 *	up to ~500ms for link if a signal is present.  Always returns 0.
 */
int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/* Advertise the selected abilities plus our pause capability. */
	sc->sc_txcw = ife->ifm_data;
	if (sc->sc_ctrl & CTRL_RFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
	if (sc->sc_ctrl & CTRL_TFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
	sc->sc_txcw |= TXCW_ANE;

	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	sc->sc_tbi_anstate = 0;

	/* SWD pin 1 is loss-of-signal (active high). */
	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < 50; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/* Link is up; fix the collision distance to match. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}
   2491 
   2492 /*
   2493  * wm_tbi_set_linkled:
   2494  *
   2495  *	Update the link LED on 1000BASE-X devices.
   2496  */
   2497 void
   2498 wm_tbi_set_linkled(struct wm_softc *sc)
   2499 {
   2500 
   2501 	if (sc->sc_tbi_linkup)
   2502 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   2503 	else
   2504 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   2505 
   2506 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2507 }
   2508 
/*
 * wm_tbi_check_link:
 *
 *	Check the link on 1000BASE-X devices.
 *
 *	Called from the one-second tick.  sc_tbi_anstate acts as a
 *	countdown that suppresses checking while autonegotiation
 *	settles (0 = disabled, >1 = still counting down).
 */
void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		/* Link up: refresh the collision distance for the duplex. */
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		sc->sc_tbi_linkup = 1;
	}

	wm_tbi_set_linkled(sc);
}
   2556 
/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.
 *
 *	Cordova and later have a dedicated PHY_RESET control bit;
 *	older chips wire the PHY reset to SWD pin 4 (active-low),
 *	which we toggle by hand.
 */
void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;

	if (sc->sc_type >= WM_T_CORDOVA) {
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		/* Pulse the pin high-low-high to reset the PHY. */
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
}
   2593 
/*
 * wm_gmii_mediainit:
 *
 *	Initialize media for use on 1000BASE-T devices.
 *
 *	Selects the MDIO access routines for the chip generation,
 *	resets and probes the PHY, and registers the discovered media
 *	with ifmedia (falling back to IFM_NONE when no PHY is found).
 */
void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 */
	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	/* Cordova has an MDIC register; older chips bit-bang MDIO. */
	if (sc->sc_type >= WM_T_CORDOVA) {
		sc->sc_mii.mii_readreg = wm_gmii_cordova_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_cordova_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_livengood_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_livengood_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY attached; expose only a "none" medium. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
   2641 
/*
 * wm_gmii_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-T device.
 *
 *	Polls the PHY and copies the freshly-updated status and
 *	active-media words into the ifmedia request.
 */
void
wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}
   2656 
   2657 /*
   2658  * wm_gmii_mediachange:	[ifmedia interface function]
   2659  *
   2660  *	Set hardware to newly-selected media on a 1000BASE-T device.
   2661  */
   2662 int
   2663 wm_gmii_mediachange(struct ifnet *ifp)
   2664 {
   2665 	struct wm_softc *sc = ifp->if_softc;
   2666 
   2667 	if (ifp->if_flags & IFF_UP)
   2668 		mii_mediachg(&sc->sc_mii);
   2669 	return (0);
   2670 }
   2671 
   2672 #define	MDI_IO		CTRL_SWDPIN(2)
   2673 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   2674 #define	MDI_CLK		CTRL_SWDPIN(3)
   2675 
/*
 * livengood_mii_sendbits:
 *
 *	Bit-bang the top `nbits' bits of `data' out the MDIO data pin,
 *	MSB first, pulsing the MDIO clock pin for each bit.  Both pins
 *	are SWD GPIOs on the CTRL register (see MDI_* above).
 */
static void
livengood_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Drive the data pin (host -> PHY) and own the clock. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		/* Set data, then clock it in with a high-low pulse. */
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}
   2698 
/*
 * livengood_mii_recvbits:
 *
 *	Bit-bang 16 bits in from the MDIO data pin, MSB first.  The
 *	data pin is switched to input (PHY -> host); an extra clock
 *	cycle is issued before (turnaround) and after the data.
 */
static uint32_t
livengood_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* Release the data pin to the PHY; keep driving the clock. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* Turnaround clock cycle before sampling data. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		/* Raise the clock, sample the data pin, lower the clock. */
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* Trailing idle clock cycle. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}
   2732 
   2733 #undef MDI_IO
   2734 #undef MDI_DIR
   2735 #undef MDI_CLK
   2736 
/*
 * wm_gmii_livengood_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII (Livengood version).
 *
 *	Bit-bangs an MII management frame: 32 ones of preamble, then
 *	start/read opcode with the PHY and register addresses, then
 *	shifts in the 16-bit result.
 */
int
wm_gmii_livengood_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int rv;

	/* Preamble: 32 consecutive ones. */
	livengood_mii_sendbits(sc, 0xffffffffU, 32);
	livengood_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	rv = livengood_mii_recvbits(sc) & 0xffff;

	DPRINTF(WM_DEBUG_GMII,
	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
	    sc->sc_dev.dv_xname, phy, reg, rv));

	return (rv);
}
   2759 
/*
 * wm_gmii_livengood_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII (Livengood version).
 *
 *	Bit-bangs the full 32-bit write frame (start, write opcode,
 *	PHY/register addresses, turnaround ACK, 16-bit value) after
 *	the 32-one preamble.
 */
void
wm_gmii_livengood_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;

	/* Preamble: 32 consecutive ones. */
	livengood_mii_sendbits(sc, 0xffffffffU, 32);
	livengood_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}
   2775 
/*
 * wm_gmii_cordova_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII.
 *
 *	Uses the MDIC register: issue the read command, then poll up
 *	to ~1ms for MDIC_READY.  Returns 0 on timeout, MDIC error, or
 *	an all-ones (0xffff) result.
 */
int
wm_gmii_cordova_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic;
	int i, rv;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		printf("%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		printf("%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		/* All-ones presumably means no device; report 0 instead. */
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}
   2816 
/*
 * wm_gmii_cordova_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII.
 *
 *	Uses the MDIC register: issue the write command, then poll up
 *	to ~1ms for MDIC_READY, complaining on timeout or error.
 */
void
wm_gmii_cordova_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic;
	int i;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		printf("%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		printf("%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}
   2846 
   2847 /*
   2848  * wm_gmii_statchg:	[mii interface function]
   2849  *
   2850  *	Callback from MII layer when media changes.
   2851  */
   2852 void
   2853 wm_gmii_statchg(struct device *self)
   2854 {
   2855 	struct wm_softc *sc = (void *) self;
   2856 
   2857 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   2858 
   2859 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   2860 		DPRINTF(WM_DEBUG_LINK,
   2861 		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
   2862 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   2863 	} else  {
   2864 		DPRINTF(WM_DEBUG_LINK,
   2865 		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
   2866 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   2867 	}
   2868 
   2869 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   2870 }
   2871