Home | History | Annotate | Line # | Download | only in pci
if_wm.c revision 1.56
      1 /*	$NetBSD: if_wm.c,v 1.56 2003/10/21 16:52:08 thorpej Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*
     39  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     40  *
     41  * TODO (in order of importance):
     42  *
     43  *	- Fix hw VLAN assist.
     44  *	- Figure out what to do with the i82545GM and i82546GB
     45  *	  SERDES controllers.
     46  */
     47 
     48 #include <sys/cdefs.h>
     49 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.56 2003/10/21 16:52:08 thorpej Exp $");
     50 
     51 #include "bpfilter.h"
     52 #include "rnd.h"
     53 
     54 #include <sys/param.h>
     55 #include <sys/systm.h>
     56 #include <sys/callout.h>
     57 #include <sys/mbuf.h>
     58 #include <sys/malloc.h>
     59 #include <sys/kernel.h>
     60 #include <sys/socket.h>
     61 #include <sys/ioctl.h>
     62 #include <sys/errno.h>
     63 #include <sys/device.h>
     64 #include <sys/queue.h>
     65 
     66 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
     67 
     68 #if NRND > 0
     69 #include <sys/rnd.h>
     70 #endif
     71 
     72 #include <net/if.h>
     73 #include <net/if_dl.h>
     74 #include <net/if_media.h>
     75 #include <net/if_ether.h>
     76 
     77 #if NBPFILTER > 0
     78 #include <net/bpf.h>
     79 #endif
     80 
     81 #include <netinet/in.h>			/* XXX for struct ip */
     82 #include <netinet/in_systm.h>		/* XXX for struct ip */
     83 #include <netinet/ip.h>			/* XXX for struct ip */
     84 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
     85 
     86 #include <machine/bus.h>
     87 #include <machine/intr.h>
     88 #include <machine/endian.h>
     89 
     90 #include <dev/mii/mii.h>
     91 #include <dev/mii/miivar.h>
     92 #include <dev/mii/mii_bitbang.h>
     93 
     94 #include <dev/pci/pcireg.h>
     95 #include <dev/pci/pcivar.h>
     96 #include <dev/pci/pcidevs.h>
     97 
     98 #include <dev/pci/if_wmreg.h>
     99 
#ifdef WM_DEBUG
/* Debug event classes; OR the interesting ones into wm_debug. */
#define	WM_DEBUG_LINK		0x01	/* link state changes */
#define	WM_DEBUG_TX		0x02	/* transmit path */
#define	WM_DEBUG_RX		0x04	/* receive path */
#define	WM_DEBUG_GMII		0x08	/* GMII/PHY access */
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

/*
 * DPRINTF(class, (fmt, args...)): print only when the class bit is
 * set in wm_debug.  Wrapped in do/while so the macro behaves as a
 * single statement and cannot capture a following "else".
 */
#define	DPRINTF(x, y)							\
do {									\
	if (wm_debug & (x))						\
		printf y;						\
} while (/*CONSTCOND*/0)
#else
#define	DPRINTF(x, y)	do { } while (/*CONSTCOND*/0)
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16	/* max DMA segments per packet */
#define	WM_IFQUEUELEN		256	/* if_snd queue depth */
#define	WM_TXQUEUELEN		64	/* Tx jobs managed at once (power of 2) */
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256	/* hardware Tx ring size (power of 2) */
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256	/* hardware Rx ring size (power of 2) */
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    139 
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors: the hardware Tx ring,
	 * WM_NTXDESC entries, contiguous.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors: the hardware Rx ring,
	 * WM_NRXDESC entries, immediately after the Tx ring
	 * in the same allocation.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

/* Byte offset of a descriptor within the control-data clump. */
#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
    160 
/*
 * Software state for transmit jobs.  One job tracks one packet
 * (mbuf chain) occupying a span of hardware Tx descriptors.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};
    171 
/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
    181 
/*
 * Chip variants supported by this driver, in rough chronological
 * order.  The order is significant: relational comparisons such as
 * "sc_type >= WM_T_82544" are used to gate per-generation features.
 */
typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;
    198 
/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag (memory-mapped CSRs) */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag (indirect access) */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see WM_F_* below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* 0 or 2; Rx payload alignment offset, see WM_INIT_RXDESC */
	int		sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures: the DMA-visible hardware
	 * descriptor rings (struct wm_control_data above).
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;		/* XXX: drop-rest-of-packet flag? confirm in wm_rxintr */
	int	sc_rxlen;		/* zeroed by WM_RXCHAIN_RESET */
	struct mbuf *sc_rxhead;		/* first mbuf of packet being built */
	struct mbuf *sc_rxtail;		/* last mbuf of packet being built */
	struct mbuf **sc_rxtailp;	/* where the next Rx mbuf is linked */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};
    308 
/*
 * WM_RXCHAIN_RESET: forget the mbuf chain being assembled for the
 * current Rx packet and prepare to start a new one.
 */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/*
 * WM_RXCHAIN_LINK: append mbuf (m) to the tail of the Rx chain
 * being assembled.
 */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
#define	WM_F_IOH_VALID		0x10	/* I/O handle is valid */
#define	WM_F_BUS64		0x20	/* bus is 64-bit */
#define	WM_F_PCIX		0x40	/* bus is PCI-X */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

/* 32-bit CSR access through the memory-mapped register window. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

/* Bus (DMA) address of Tx/Rx descriptor (x) within the control data. */
#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
    342 
/*
 * WM_CDTXSYNC: bus_dmamap_sync() (n) Tx descriptors starting at
 * index (x), splitting into two sync calls when the range wraps
 * past the end of the ring.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/*
 * WM_CDRXSYNC: bus_dmamap_sync() the single Rx descriptor at
 * index (x).
 */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
    369 
/*
 * WM_INIT_RXDESC: (re)initialize Rx descriptor (x) to point at its
 * wm_rxsoft's buffer, sync the descriptor to the device, and advance
 * the hardware Rx tail pointer (RDT) to hand it back to the chip.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 		\
		(sc)->sc_align_tweak);					\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
    405 
    406 static void	wm_start(struct ifnet *);
    407 static void	wm_watchdog(struct ifnet *);
    408 static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
    409 static int	wm_init(struct ifnet *);
    410 static void	wm_stop(struct ifnet *, int);
    411 
    412 static void	wm_shutdown(void *);
    413 
    414 static void	wm_reset(struct wm_softc *);
    415 static void	wm_rxdrain(struct wm_softc *);
    416 static int	wm_add_rxbuf(struct wm_softc *, int);
    417 static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
    418 static void	wm_tick(void *);
    419 
    420 static void	wm_set_filter(struct wm_softc *);
    421 
    422 static int	wm_intr(void *);
    423 static void	wm_txintr(struct wm_softc *);
    424 static void	wm_rxintr(struct wm_softc *);
    425 static void	wm_linkintr(struct wm_softc *, uint32_t);
    426 
    427 static void	wm_tbi_mediainit(struct wm_softc *);
    428 static int	wm_tbi_mediachange(struct ifnet *);
    429 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    430 
    431 static void	wm_tbi_set_linkled(struct wm_softc *);
    432 static void	wm_tbi_check_link(struct wm_softc *);
    433 
    434 static void	wm_gmii_reset(struct wm_softc *);
    435 
    436 static int	wm_gmii_i82543_readreg(struct device *, int, int);
    437 static void	wm_gmii_i82543_writereg(struct device *, int, int, int);
    438 
    439 static int	wm_gmii_i82544_readreg(struct device *, int, int);
    440 static void	wm_gmii_i82544_writereg(struct device *, int, int, int);
    441 
    442 static void	wm_gmii_statchg(struct device *);
    443 
    444 static void	wm_gmii_mediainit(struct wm_softc *);
    445 static int	wm_gmii_mediachange(struct ifnet *);
    446 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    447 
static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);

/* Autoconfiguration glue: register the "wm" attachment with our softc size. */
CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);
    453 
/*
 * Devices supported by this driver.  The table is scanned by
 * wm_lookup() and terminated by an entry with a NULL wmp_name.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;	/* PCI vendor ID */
	pci_product_id_t	wmp_product;	/* PCI product ID */
	const char		*wmp_name;	/* device description */
	wm_chip_type		wmp_type;	/* chip variant */
	int			wmp_flags;	/* media type */
#define	WMP_F_1000X		0x01	/* 1000BASE-X (fiber/TBI) */
#define	WMP_F_1000T		0x02	/* 1000BASE-T (copper) */
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
/* NOTE: WMP_F_SERDES is not defined anywhere; these entries stay disabled. */
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	/* Terminator. */
	{ 0,			0,
	  NULL,
	  0,			0 },
};
    562 
#ifdef WM_EVENT_COUNTERS
/* Names for the per-segment-count Tx event counters (sc_ev_txseg[]). */
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */
    586 
#if 0 /* Not currently used */
/*
 * wm_io_read:
 *
 *	Read a device register indirectly through the I/O BAR:
 *	write the register offset to I/O offset 0 (address window),
 *	then read the value back from I/O offset 4 (data window).
 */
static __inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
    596 
/*
 * wm_io_write:
 *
 *	Write a device register indirectly through the I/O BAR:
 *	the register offset goes to I/O offset 0 (address window),
 *	then the value to I/O offset 4 (data window).  Only used to
 *	work around bugs in some chip versions; normal access is via
 *	the memory-mapped CSR_READ/CSR_WRITE.
 */
static __inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
    604 
    605 static const struct wm_product *
    606 wm_lookup(const struct pci_attach_args *pa)
    607 {
    608 	const struct wm_product *wmp;
    609 
    610 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
    611 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
    612 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
    613 			return (wmp);
    614 	}
    615 	return (NULL);
    616 }
    617 
    618 static int
    619 wm_match(struct device *parent, struct cfdata *cf, void *aux)
    620 {
    621 	struct pci_attach_args *pa = aux;
    622 
    623 	if (wm_lookup(pa) != NULL)
    624 		return (1);
    625 
    626 	return (0);
    627 }
    628 
    629 static void
    630 wm_attach(struct device *parent, struct device *self, void *aux)
    631 {
    632 	struct wm_softc *sc = (void *) self;
    633 	struct pci_attach_args *pa = aux;
    634 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    635 	pci_chipset_tag_t pc = pa->pa_pc;
    636 	pci_intr_handle_t ih;
    637 	const char *intrstr = NULL;
    638 	const char *eetype;
    639 	bus_space_tag_t memt;
    640 	bus_space_handle_t memh;
    641 	bus_dma_segment_t seg;
    642 	int memh_valid;
    643 	int i, rseg, error;
    644 	const struct wm_product *wmp;
    645 	uint8_t enaddr[ETHER_ADDR_LEN];
    646 	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
    647 	pcireg_t preg, memtype;
    648 	uint32_t reg;
    649 	int pmreg;
    650 
    651 	callout_init(&sc->sc_tick_ch);
    652 
    653 	wmp = wm_lookup(pa);
    654 	if (wmp == NULL) {
    655 		printf("\n");
    656 		panic("wm_attach: impossible");
    657 	}
    658 
    659 	sc->sc_dmat = pa->pa_dmat;
    660 
    661 	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
    662 	aprint_naive(": Ethernet controller\n");
    663 	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);
    664 
    665 	sc->sc_type = wmp->wmp_type;
    666 	if (sc->sc_type < WM_T_82543) {
    667 		if (preg < 2) {
    668 			aprint_error("%s: i82542 must be at least rev. 2\n",
    669 			    sc->sc_dev.dv_xname);
    670 			return;
    671 		}
    672 		if (preg < 3)
    673 			sc->sc_type = WM_T_82542_2_0;
    674 	}
    675 
    676 	/*
	 * Map the device.  All devices support memory-mapped access,
    678 	 * and it is really required for normal operation.
    679 	 */
    680 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
    681 	switch (memtype) {
    682 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
    683 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
    684 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
    685 		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
    686 		break;
    687 	default:
    688 		memh_valid = 0;
    689 	}
    690 
    691 	if (memh_valid) {
    692 		sc->sc_st = memt;
    693 		sc->sc_sh = memh;
    694 	} else {
    695 		aprint_error("%s: unable to map device registers\n",
    696 		    sc->sc_dev.dv_xname);
    697 		return;
    698 	}
    699 
    700 	/*
    701 	 * In addition, i82544 and later support I/O mapped indirect
    702 	 * register access.  It is not desirable (nor supported in
    703 	 * this driver) to use it for normal operation, though it is
    704 	 * required to work around bugs in some chip versions.
    705 	 */
    706 	if (sc->sc_type >= WM_T_82544) {
    707 		/* First we have to find the I/O BAR. */
    708 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
    709 			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
    710 			    PCI_MAPREG_TYPE_IO)
    711 				break;
    712 		}
    713 		if (i == PCI_MAPREG_END)
    714 			aprint_error("%s: WARNING: unable to find I/O BAR\n",
    715 			    sc->sc_dev.dv_xname);
    716 		else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
    717 					0, &sc->sc_iot, &sc->sc_ioh,
    718 					NULL, NULL) == 0)
    719 			sc->sc_flags |= WM_F_IOH_VALID;
    720 		else
    721 			aprint_error("%s: WARNING: unable to map I/O space\n",
    722 			    sc->sc_dev.dv_xname);
    723 	}
    724 
    725 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
    726 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    727 	preg |= PCI_COMMAND_MASTER_ENABLE;
    728 	if (sc->sc_type < WM_T_82542_2_1)
    729 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
    730 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
    731 
    732 	/* Get it out of power save mode, if needed. */
    733 	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
    734 		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
    735 		    PCI_PMCSR_STATE_MASK;
    736 		if (preg == PCI_PMCSR_STATE_D3) {
    737 			/*
    738 			 * The card has lost all configuration data in
    739 			 * this state, so punt.
    740 			 */
    741 			aprint_error("%s: unable to wake from power state D3\n",
    742 			    sc->sc_dev.dv_xname);
    743 			return;
    744 		}
    745 		if (preg != PCI_PMCSR_STATE_D0) {
    746 			aprint_normal("%s: waking up from power state D%d\n",
    747 			    sc->sc_dev.dv_xname, preg);
    748 			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
    749 			    PCI_PMCSR_STATE_D0);
    750 		}
    751 	}
    752 
    753 	/*
    754 	 * Map and establish our interrupt.
    755 	 */
    756 	if (pci_intr_map(pa, &ih)) {
    757 		aprint_error("%s: unable to map interrupt\n",
    758 		    sc->sc_dev.dv_xname);
    759 		return;
    760 	}
    761 	intrstr = pci_intr_string(pc, ih);
    762 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
    763 	if (sc->sc_ih == NULL) {
    764 		aprint_error("%s: unable to establish interrupt",
    765 		    sc->sc_dev.dv_xname);
    766 		if (intrstr != NULL)
    767 			aprint_normal(" at %s", intrstr);
    768 		aprint_normal("\n");
    769 		return;
    770 	}
    771 	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
    772 
    773 	/*
    774 	 * Determine a few things about the bus we're connected to.
    775 	 */
    776 	if (sc->sc_type < WM_T_82543) {
    777 		/* We don't really know the bus characteristics here. */
    778 		sc->sc_bus_speed = 33;
    779 	} else  {
    780 		reg = CSR_READ(sc, WMREG_STATUS);
    781 		if (reg & STATUS_BUS64)
    782 			sc->sc_flags |= WM_F_BUS64;
    783 		if (sc->sc_type >= WM_T_82544 &&
    784 		    (reg & STATUS_PCIX_MODE) != 0) {
    785 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
    786 
    787 			sc->sc_flags |= WM_F_PCIX;
    788 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
    789 					       PCI_CAP_PCIX,
    790 					       &sc->sc_pcix_offset, NULL) == 0)
    791 				aprint_error("%s: unable to find PCIX "
    792 				    "capability\n", sc->sc_dev.dv_xname);
    793 			else if (sc->sc_type != WM_T_82545_3 &&
    794 				 sc->sc_type != WM_T_82546_3) {
    795 				/*
    796 				 * Work around a problem caused by the BIOS
    797 				 * setting the max memory read byte count
    798 				 * incorrectly.
    799 				 */
    800 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
    801 				    sc->sc_pcix_offset + PCI_PCIX_CMD);
    802 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
    803 				    sc->sc_pcix_offset + PCI_PCIX_STATUS);
    804 
    805 				bytecnt =
    806 				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
    807 				    PCI_PCIX_CMD_BYTECNT_SHIFT;
    808 				maxb =
    809 				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
    810 				    PCI_PCIX_STATUS_MAXB_SHIFT;
    811 				if (bytecnt > maxb) {
    812 					aprint_verbose("%s: resetting PCI-X "
    813 					    "MMRBC: %d -> %d\n",
    814 					    sc->sc_dev.dv_xname,
    815 					    512 << bytecnt, 512 << maxb);
    816 					pcix_cmd = (pcix_cmd &
    817 					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
    818 					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
    819 					pci_conf_write(pa->pa_pc, pa->pa_tag,
    820 					    sc->sc_pcix_offset + PCI_PCIX_CMD,
    821 					    pcix_cmd);
    822 				}
    823 			}
    824 		}
    825 		/*
    826 		 * The quad port adapter is special; it has a PCIX-PCIX
    827 		 * bridge on the board, and can run the secondary bus at
    828 		 * a higher speed.
    829 		 */
    830 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
    831 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
    832 								      : 66;
    833 		} else if (sc->sc_flags & WM_F_PCIX) {
    834 			switch (STATUS_PCIXSPD(reg)) {
    835 			case STATUS_PCIXSPD_50_66:
    836 				sc->sc_bus_speed = 66;
    837 				break;
    838 			case STATUS_PCIXSPD_66_100:
    839 				sc->sc_bus_speed = 100;
    840 				break;
    841 			case STATUS_PCIXSPD_100_133:
    842 				sc->sc_bus_speed = 133;
    843 				break;
    844 			default:
    845 				aprint_error(
    846 				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
    847 				    sc->sc_dev.dv_xname, STATUS_PCIXSPD(reg));
    848 				sc->sc_bus_speed = 66;
    849 			}
    850 		} else
    851 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
    852 		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
    853 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
    854 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
    855 	}
    856 
    857 	/*
    858 	 * Allocate the control data structures, and create and load the
    859 	 * DMA map for it.
    860 	 */
    861 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
    862 	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
    863 	    0)) != 0) {
    864 		aprint_error(
    865 		    "%s: unable to allocate control data, error = %d\n",
    866 		    sc->sc_dev.dv_xname, error);
    867 		goto fail_0;
    868 	}
    869 
    870 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
    871 	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
    872 	    0)) != 0) {
    873 		aprint_error("%s: unable to map control data, error = %d\n",
    874 		    sc->sc_dev.dv_xname, error);
    875 		goto fail_1;
    876 	}
    877 
    878 	if ((error = bus_dmamap_create(sc->sc_dmat,
    879 	    sizeof(struct wm_control_data), 1,
    880 	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
    881 		aprint_error("%s: unable to create control data DMA map, "
    882 		    "error = %d\n", sc->sc_dev.dv_xname, error);
    883 		goto fail_2;
    884 	}
    885 
    886 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
    887 	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
    888 	    0)) != 0) {
    889 		aprint_error(
    890 		    "%s: unable to load control data DMA map, error = %d\n",
    891 		    sc->sc_dev.dv_xname, error);
    892 		goto fail_3;
    893 	}
    894 
    895 	/*
    896 	 * Create the transmit buffer DMA maps.
    897 	 */
    898 	for (i = 0; i < WM_TXQUEUELEN; i++) {
    899 		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
    900 		    WM_NTXSEGS, MCLBYTES, 0, 0,
    901 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
    902 			aprint_error("%s: unable to create Tx DMA map %d, "
    903 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
    904 			goto fail_4;
    905 		}
    906 	}
    907 
    908 	/*
    909 	 * Create the receive buffer DMA maps.
    910 	 */
    911 	for (i = 0; i < WM_NRXDESC; i++) {
    912 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
    913 		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
    914 			aprint_error("%s: unable to create Rx DMA map %d, "
    915 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
    916 			goto fail_5;
    917 		}
    918 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
    919 	}
    920 
    921 	/*
    922 	 * Reset the chip to a known state.
    923 	 */
    924 	wm_reset(sc);
    925 
    926 	/*
    927 	 * Get some information about the EEPROM.
    928 	 */
    929 	eetype = "MicroWire";
    930 	if (sc->sc_type >= WM_T_82540)
    931 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
    932 	if (sc->sc_type <= WM_T_82544)
    933 		sc->sc_ee_addrbits = 6;
    934 	else if (sc->sc_type <= WM_T_82546_3) {
    935 		reg = CSR_READ(sc, WMREG_EECD);
    936 		if (reg & EECD_EE_SIZE)
    937 			sc->sc_ee_addrbits = 8;
    938 		else
    939 			sc->sc_ee_addrbits = 6;
    940 	}
    941 	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
    942 	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
    943 	    sc->sc_ee_addrbits, eetype);
    944 
    945 	/*
    946 	 * Read the Ethernet address from the EEPROM.
    947 	 */
    948 	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
    949 	    sizeof(myea) / sizeof(myea[0]), myea)) {
    950 		aprint_error("%s: unable to read Ethernet address\n",
    951 		    sc->sc_dev.dv_xname);
    952 		return;
    953 	}
    954 	enaddr[0] = myea[0] & 0xff;
    955 	enaddr[1] = myea[0] >> 8;
    956 	enaddr[2] = myea[1] & 0xff;
    957 	enaddr[3] = myea[1] >> 8;
    958 	enaddr[4] = myea[2] & 0xff;
    959 	enaddr[5] = myea[2] >> 8;
    960 
    961 	/*
    962 	 * Toggle the LSB of the MAC address on the second port
    963 	 * of the i82546.
    964 	 */
    965 	if (sc->sc_type == WM_T_82546) {
    966 		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
    967 			enaddr[5] ^= 1;
    968 	}
    969 
    970 	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
    971 	    ether_sprintf(enaddr));
    972 
    973 	/*
    974 	 * Read the config info from the EEPROM, and set up various
    975 	 * bits in the control registers based on their contents.
    976 	 */
    977 	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
    978 		aprint_error("%s: unable to read CFG1 from EEPROM\n",
    979 		    sc->sc_dev.dv_xname);
    980 		return;
    981 	}
    982 	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
    983 		aprint_error("%s: unable to read CFG2 from EEPROM\n",
    984 		    sc->sc_dev.dv_xname);
    985 		return;
    986 	}
    987 	if (sc->sc_type >= WM_T_82544) {
    988 		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
    989 			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
    990 			    sc->sc_dev.dv_xname);
    991 			return;
    992 		}
    993 	}
    994 
    995 	if (cfg1 & EEPROM_CFG1_ILOS)
    996 		sc->sc_ctrl |= CTRL_ILOS;
    997 	if (sc->sc_type >= WM_T_82544) {
    998 		sc->sc_ctrl |=
    999 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   1000 		    CTRL_SWDPIO_SHIFT;
   1001 		sc->sc_ctrl |=
   1002 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   1003 		    CTRL_SWDPINS_SHIFT;
   1004 	} else {
   1005 		sc->sc_ctrl |=
   1006 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   1007 		    CTRL_SWDPIO_SHIFT;
   1008 	}
   1009 
   1010 #if 0
   1011 	if (sc->sc_type >= WM_T_82544) {
   1012 		if (cfg1 & EEPROM_CFG1_IPS0)
   1013 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   1014 		if (cfg1 & EEPROM_CFG1_IPS1)
   1015 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   1016 		sc->sc_ctrl_ext |=
   1017 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   1018 		    CTRL_EXT_SWDPIO_SHIFT;
   1019 		sc->sc_ctrl_ext |=
   1020 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   1021 		    CTRL_EXT_SWDPINS_SHIFT;
   1022 	} else {
   1023 		sc->sc_ctrl_ext |=
   1024 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   1025 		    CTRL_EXT_SWDPIO_SHIFT;
   1026 	}
   1027 #endif
   1028 
   1029 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   1030 #if 0
   1031 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   1032 #endif
   1033 
   1034 	/*
   1035 	 * Set up some register offsets that are different between
   1036 	 * the i82542 and the i82543 and later chips.
   1037 	 */
   1038 	if (sc->sc_type < WM_T_82543) {
   1039 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
   1040 		sc->sc_tdt_reg = WMREG_OLD_TDT;
   1041 	} else {
   1042 		sc->sc_rdt_reg = WMREG_RDT;
   1043 		sc->sc_tdt_reg = WMREG_TDT;
   1044 	}
   1045 
   1046 	/*
   1047 	 * Determine if we should use flow control.  We should
   1048 	 * always use it, unless we're on a i82542 < 2.1.
   1049 	 */
   1050 	if (sc->sc_type >= WM_T_82542_2_1)
   1051 		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;
   1052 
   1053 	/*
   1054 	 * Determine if we're TBI or GMII mode, and initialize the
   1055 	 * media structures accordingly.
   1056 	 */
   1057 	if (sc->sc_type < WM_T_82543 ||
   1058 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   1059 		if (wmp->wmp_flags & WMP_F_1000T)
   1060 			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
   1061 			    "product!\n", sc->sc_dev.dv_xname);
   1062 		wm_tbi_mediainit(sc);
   1063 	} else {
   1064 		if (wmp->wmp_flags & WMP_F_1000X)
   1065 			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
   1066 			    "product!\n", sc->sc_dev.dv_xname);
   1067 		wm_gmii_mediainit(sc);
   1068 	}
   1069 
   1070 	ifp = &sc->sc_ethercom.ec_if;
   1071 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
   1072 	ifp->if_softc = sc;
   1073 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1074 	ifp->if_ioctl = wm_ioctl;
   1075 	ifp->if_start = wm_start;
   1076 	ifp->if_watchdog = wm_watchdog;
   1077 	ifp->if_init = wm_init;
   1078 	ifp->if_stop = wm_stop;
   1079 	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
   1080 	IFQ_SET_READY(&ifp->if_snd);
   1081 
   1082 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1083 
   1084 	/*
   1085 	 * If we're a i82543 or greater, we can support VLANs.
   1086 	 */
   1087 	if (sc->sc_type >= WM_T_82543)
   1088 		sc->sc_ethercom.ec_capabilities |=
   1089 		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
   1090 
   1091 	/*
   1092 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
   1093 	 * on i82543 and later.
   1094 	 */
   1095 	if (sc->sc_type >= WM_T_82543)
   1096 		ifp->if_capabilities |=
   1097 		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
   1098 
   1099 	/*
   1100 	 * Attach the interface.
   1101 	 */
   1102 	if_attach(ifp);
   1103 	ether_ifattach(ifp, enaddr);
   1104 #if NRND > 0
   1105 	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
   1106 	    RND_TYPE_NET, 0);
   1107 #endif
   1108 
   1109 #ifdef WM_EVENT_COUNTERS
   1110 	/* Attach event counters. */
   1111 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   1112 	    NULL, sc->sc_dev.dv_xname, "txsstall");
   1113 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   1114 	    NULL, sc->sc_dev.dv_xname, "txdstall");
   1115 	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
   1116 	    NULL, sc->sc_dev.dv_xname, "txforceintr");
   1117 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   1118 	    NULL, sc->sc_dev.dv_xname, "txdw");
   1119 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   1120 	    NULL, sc->sc_dev.dv_xname, "txqe");
   1121 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   1122 	    NULL, sc->sc_dev.dv_xname, "rxintr");
   1123 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   1124 	    NULL, sc->sc_dev.dv_xname, "linkintr");
   1125 
   1126 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   1127 	    NULL, sc->sc_dev.dv_xname, "rxipsum");
   1128 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   1129 	    NULL, sc->sc_dev.dv_xname, "rxtusum");
   1130 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   1131 	    NULL, sc->sc_dev.dv_xname, "txipsum");
   1132 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   1133 	    NULL, sc->sc_dev.dv_xname, "txtusum");
   1134 
   1135 	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
   1136 	    NULL, sc->sc_dev.dv_xname, "txctx init");
   1137 	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
   1138 	    NULL, sc->sc_dev.dv_xname, "txctx hit");
   1139 	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
   1140 	    NULL, sc->sc_dev.dv_xname, "txctx miss");
   1141 
   1142 	for (i = 0; i < WM_NTXSEGS; i++)
   1143 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   1144 		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
   1145 
   1146 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   1147 	    NULL, sc->sc_dev.dv_xname, "txdrop");
   1148 
   1149 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   1150 	    NULL, sc->sc_dev.dv_xname, "tu");
   1151 #endif /* WM_EVENT_COUNTERS */
   1152 
   1153 	/*
   1154 	 * Make sure the interface is shutdown during reboot.
   1155 	 */
   1156 	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
   1157 	if (sc->sc_sdhook == NULL)
   1158 		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
   1159 		    sc->sc_dev.dv_xname);
   1160 	return;
   1161 
   1162 	/*
   1163 	 * Free any resources we've allocated during the failed attach
   1164 	 * attempt.  Do this in reverse order and fall through.
   1165 	 */
   1166  fail_5:
   1167 	for (i = 0; i < WM_NRXDESC; i++) {
   1168 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   1169 			bus_dmamap_destroy(sc->sc_dmat,
   1170 			    sc->sc_rxsoft[i].rxs_dmamap);
   1171 	}
   1172  fail_4:
   1173 	for (i = 0; i < WM_TXQUEUELEN; i++) {
   1174 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   1175 			bus_dmamap_destroy(sc->sc_dmat,
   1176 			    sc->sc_txsoft[i].txs_dmamap);
   1177 	}
   1178 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   1179  fail_3:
   1180 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   1181  fail_2:
   1182 	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
   1183 	    sizeof(struct wm_control_data));
   1184  fail_1:
   1185 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
   1186  fail_0:
   1187 	return;
   1188 }
   1189 
   1190 /*
   1191  * wm_shutdown:
   1192  *
   1193  *	Make sure the interface is stopped at reboot time.
   1194  */
   1195 static void
   1196 wm_shutdown(void *arg)
   1197 {
   1198 	struct wm_softc *sc = arg;
   1199 
   1200 	wm_stop(&sc->sc_ethercom.ec_if, 1);
   1201 }
   1202 
   1203 /*
   1204  * wm_tx_cksum:
   1205  *
   1206  *	Set up TCP/IP checksumming parameters for the
   1207  *	specified packet.
   1208  */
   1209 static int
   1210 wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   1211     uint32_t *fieldsp)
   1212 {
   1213 	struct mbuf *m0 = txs->txs_mbuf;
   1214 	struct livengood_tcpip_ctxdesc *t;
   1215 	uint32_t fields = 0, ipcs, tucs;
   1216 	struct ip *ip;
   1217 	struct ether_header *eh;
   1218 	int offset, iphl;
   1219 
   1220 	/*
   1221 	 * XXX It would be nice if the mbuf pkthdr had offset
   1222 	 * fields for the protocol headers.
   1223 	 */
   1224 
   1225 	eh = mtod(m0, struct ether_header *);
   1226 	switch (htons(eh->ether_type)) {
   1227 	case ETHERTYPE_IP:
   1228 		iphl = sizeof(struct ip);
   1229 		offset = ETHER_HDR_LEN;
   1230 		break;
   1231 
   1232 	case ETHERTYPE_VLAN:
   1233 		iphl = sizeof(struct ip);
   1234 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   1235 		break;
   1236 
   1237 	default:
   1238 		/*
   1239 		 * Don't support this protocol or encapsulation.
   1240 		 */
   1241 		*fieldsp = 0;
   1242 		*cmdp = 0;
   1243 		return (0);
   1244 	}
   1245 
   1246 	if (m0->m_len < (offset + iphl)) {
   1247 		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
   1248 			printf("%s: wm_tx_cksum: mbuf allocation failed, "
   1249 			    "packet dropped\n", sc->sc_dev.dv_xname);
   1250 			return (ENOMEM);
   1251 		}
   1252 		m0 = txs->txs_mbuf;
   1253 	}
   1254 
   1255 	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
   1256 	iphl = ip->ip_hl << 2;
   1257 
   1258 	/*
   1259 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   1260 	 * offload feature, if we load the context descriptor, we
   1261 	 * MUST provide valid values for IPCSS and TUCSS fields.
   1262 	 */
   1263 
   1264 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   1265 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   1266 		fields |= htole32(WTX_IXSM);
   1267 		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
   1268 		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   1269 		    WTX_TCPIP_IPCSE(offset + iphl - 1));
   1270 	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
   1271 		/* Use the cached value. */
   1272 		ipcs = sc->sc_txctx_ipcs;
   1273 	} else {
   1274 		/* Just initialize it to the likely value anyway. */
   1275 		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
   1276 		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   1277 		    WTX_TCPIP_IPCSE(offset + iphl - 1));
   1278 	}
   1279 
   1280 	offset += iphl;
   1281 
   1282 	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
   1283 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   1284 		fields |= htole32(WTX_TXSM);
   1285 		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
   1286 		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
   1287 		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
   1288 	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
   1289 		/* Use the cached value. */
   1290 		tucs = sc->sc_txctx_tucs;
   1291 	} else {
   1292 		/* Just initialize it to a valid TCP context. */
   1293 		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
   1294 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   1295 		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
   1296 	}
   1297 
   1298 	if (sc->sc_txctx_ipcs == ipcs &&
   1299 	    sc->sc_txctx_tucs == tucs) {
   1300 		/* Cached context is fine. */
   1301 		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
   1302 	} else {
   1303 		/* Fill in the context descriptor. */
   1304 #ifdef WM_EVENT_COUNTERS
   1305 		if (sc->sc_txctx_ipcs == 0xffffffff &&
   1306 		    sc->sc_txctx_tucs == 0xffffffff)
   1307 			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
   1308 		else
   1309 			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
   1310 #endif
   1311 		t = (struct livengood_tcpip_ctxdesc *)
   1312 		    &sc->sc_txdescs[sc->sc_txnext];
   1313 		t->tcpip_ipcs = ipcs;
   1314 		t->tcpip_tucs = tucs;
   1315 		t->tcpip_cmdlen =
   1316 		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
   1317 		t->tcpip_seg = 0;
   1318 		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   1319 
   1320 		sc->sc_txctx_ipcs = ipcs;
   1321 		sc->sc_txctx_tucs = tucs;
   1322 
   1323 		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
   1324 		txs->txs_ndesc++;
   1325 	}
   1326 
   1327 	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
   1328 	*fieldsp = fields;
   1329 
   1330 	return (0);
   1331 }
   1332 
   1333 /*
   1334  * wm_start:		[ifnet interface function]
   1335  *
   1336  *	Start packet transmission on the interface.
   1337  */
   1338 static void
   1339 wm_start(struct ifnet *ifp)
   1340 {
   1341 	struct wm_softc *sc = ifp->if_softc;
   1342 	struct mbuf *m0;
   1343 #if 0 /* XXXJRT */
   1344 	struct m_tag *mtag;
   1345 #endif
   1346 	struct wm_txsoft *txs;
   1347 	bus_dmamap_t dmamap;
   1348 	int error, nexttx, lasttx, ofree, seg;
   1349 	uint32_t cksumcmd, cksumfields;
   1350 
   1351 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   1352 		return;
   1353 
   1354 	/*
   1355 	 * Remember the previous number of free descriptors.
   1356 	 */
   1357 	ofree = sc->sc_txfree;
   1358 
   1359 	/*
   1360 	 * Loop through the send queue, setting up transmit descriptors
   1361 	 * until we drain the queue, or use up all available transmit
   1362 	 * descriptors.
   1363 	 */
   1364 	for (;;) {
   1365 		/* Grab a packet off the queue. */
   1366 		IFQ_POLL(&ifp->if_snd, m0);
   1367 		if (m0 == NULL)
   1368 			break;
   1369 
   1370 		DPRINTF(WM_DEBUG_TX,
   1371 		    ("%s: TX: have packet to transmit: %p\n",
   1372 		    sc->sc_dev.dv_xname, m0));
   1373 
   1374 		/* Get a work queue entry. */
   1375 		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
   1376 			wm_txintr(sc);
   1377 			if (sc->sc_txsfree == 0) {
   1378 				DPRINTF(WM_DEBUG_TX,
   1379 				    ("%s: TX: no free job descriptors\n",
   1380 					sc->sc_dev.dv_xname));
   1381 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   1382 				break;
   1383 			}
   1384 		}
   1385 
   1386 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   1387 		dmamap = txs->txs_dmamap;
   1388 
   1389 		/*
   1390 		 * Load the DMA map.  If this fails, the packet either
   1391 		 * didn't fit in the allotted number of segments, or we
   1392 		 * were short on resources.  For the too-many-segments
   1393 		 * case, we simply report an error and drop the packet,
   1394 		 * since we can't sanely copy a jumbo packet to a single
   1395 		 * buffer.
   1396 		 */
   1397 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   1398 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   1399 		if (error) {
   1400 			if (error == EFBIG) {
   1401 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   1402 				printf("%s: Tx packet consumes too many "
   1403 				    "DMA segments, dropping...\n",
   1404 				    sc->sc_dev.dv_xname);
   1405 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   1406 				m_freem(m0);
   1407 				continue;
   1408 			}
   1409 			/*
   1410 			 * Short on resources, just stop for now.
   1411 			 */
   1412 			DPRINTF(WM_DEBUG_TX,
   1413 			    ("%s: TX: dmamap load failed: %d\n",
   1414 			    sc->sc_dev.dv_xname, error));
   1415 			break;
   1416 		}
   1417 
   1418 		/*
   1419 		 * Ensure we have enough descriptors free to describe
   1420 		 * the packet.  Note, we always reserve one descriptor
   1421 		 * at the end of the ring due to the semantics of the
   1422 		 * TDT register, plus one more in the event we need
   1423 		 * to re-load checksum offload context.
   1424 		 */
   1425 		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
   1426 			/*
   1427 			 * Not enough free descriptors to transmit this
   1428 			 * packet.  We haven't committed anything yet,
   1429 			 * so just unload the DMA map, put the packet
   1430 			 * pack on the queue, and punt.  Notify the upper
   1431 			 * layer that there are no more slots left.
   1432 			 */
   1433 			DPRINTF(WM_DEBUG_TX,
   1434 			    ("%s: TX: need %d descriptors, have %d\n",
   1435 			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
   1436 			    sc->sc_txfree - 1));
   1437 			ifp->if_flags |= IFF_OACTIVE;
   1438 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   1439 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   1440 			break;
   1441 		}
   1442 
   1443 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   1444 
   1445 		/*
   1446 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
   1447 		 */
   1448 
   1449 		/* Sync the DMA map. */
   1450 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   1451 		    BUS_DMASYNC_PREWRITE);
   1452 
   1453 		DPRINTF(WM_DEBUG_TX,
   1454 		    ("%s: TX: packet has %d DMA segments\n",
   1455 		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));
   1456 
   1457 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   1458 
   1459 		/*
   1460 		 * Store a pointer to the packet so that we can free it
   1461 		 * later.
   1462 		 *
   1463 		 * Initially, we consider the number of descriptors the
   1464 		 * packet uses the number of DMA segments.  This may be
   1465 		 * incremented by 1 if we do checksum offload (a descriptor
   1466 		 * is used to set the checksum context).
   1467 		 */
   1468 		txs->txs_mbuf = m0;
   1469 		txs->txs_firstdesc = sc->sc_txnext;
   1470 		txs->txs_ndesc = dmamap->dm_nsegs;
   1471 
   1472 		/*
   1473 		 * Set up checksum offload parameters for
   1474 		 * this packet.
   1475 		 */
   1476 		if (m0->m_pkthdr.csum_flags &
   1477 		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
   1478 			if (wm_tx_cksum(sc, txs, &cksumcmd,
   1479 					&cksumfields) != 0) {
   1480 				/* Error message already displayed. */
   1481 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   1482 				continue;
   1483 			}
   1484 		} else {
   1485 			cksumcmd = 0;
   1486 			cksumfields = 0;
   1487 		}
   1488 
   1489 		cksumcmd |= htole32(WTX_CMD_IDE);
   1490 
   1491 		/*
   1492 		 * Initialize the transmit descriptor.
   1493 		 */
   1494 		for (nexttx = sc->sc_txnext, seg = 0;
   1495 		     seg < dmamap->dm_nsegs;
   1496 		     seg++, nexttx = WM_NEXTTX(nexttx)) {
   1497 			/*
   1498 			 * Note: we currently only use 32-bit DMA
   1499 			 * addresses.
   1500 			 */
   1501 			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
   1502 			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
   1503 			    htole32(dmamap->dm_segs[seg].ds_addr);
   1504 			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
   1505 			    htole32(dmamap->dm_segs[seg].ds_len);
   1506 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
   1507 			    cksumfields;
   1508 			lasttx = nexttx;
   1509 
   1510 			DPRINTF(WM_DEBUG_TX,
   1511 			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
   1512 			    sc->sc_dev.dv_xname, nexttx,
   1513 			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
   1514 			    (uint32_t) dmamap->dm_segs[seg].ds_len));
   1515 		}
   1516 
   1517 		/*
   1518 		 * Set up the command byte on the last descriptor of
   1519 		 * the packet.  If we're in the interrupt delay window,
   1520 		 * delay the interrupt.
   1521 		 */
   1522 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   1523 		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
   1524 
   1525 #if 0 /* XXXJRT */
   1526 		/*
   1527 		 * If VLANs are enabled and the packet has a VLAN tag, set
   1528 		 * up the descriptor to encapsulate the packet for us.
   1529 		 *
   1530 		 * This is only valid on the last descriptor of the packet.
   1531 		 */
   1532 		if (sc->sc_ethercom.ec_nvlans != 0 &&
   1533 		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
   1534 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
   1535 			    htole32(WTX_CMD_VLE);
   1536 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
   1537 			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
   1538 		}
   1539 #endif /* XXXJRT */
   1540 
   1541 		txs->txs_lastdesc = lasttx;
   1542 
   1543 		DPRINTF(WM_DEBUG_TX,
   1544 		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
   1545 		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));
   1546 
   1547 		/* Sync the descriptors we're using. */
   1548 		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
   1549 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1550 
   1551 		/* Give the packet to the chip. */
   1552 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   1553 
   1554 		DPRINTF(WM_DEBUG_TX,
   1555 		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
   1556 
   1557 		DPRINTF(WM_DEBUG_TX,
   1558 		    ("%s: TX: finished transmitting packet, job %d\n",
   1559 		    sc->sc_dev.dv_xname, sc->sc_txsnext));
   1560 
   1561 		/* Advance the tx pointer. */
   1562 		sc->sc_txfree -= txs->txs_ndesc;
   1563 		sc->sc_txnext = nexttx;
   1564 
   1565 		sc->sc_txsfree--;
   1566 		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);
   1567 
   1568 #if NBPFILTER > 0
   1569 		/* Pass the packet to any BPF listeners. */
   1570 		if (ifp->if_bpf)
   1571 			bpf_mtap(ifp->if_bpf, m0);
   1572 #endif /* NBPFILTER > 0 */
   1573 	}
   1574 
   1575 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   1576 		/* No more slots; notify upper layer. */
   1577 		ifp->if_flags |= IFF_OACTIVE;
   1578 	}
   1579 
   1580 	if (sc->sc_txfree != ofree) {
   1581 		/* Set a watchdog timer in case the chip flakes out. */
   1582 		ifp->if_timer = 5;
   1583 	}
   1584 }
   1585 
   1586 /*
   1587  * wm_watchdog:		[ifnet interface function]
   1588  *
   1589  *	Watchdog timer handler.
   1590  */
   1591 static void
   1592 wm_watchdog(struct ifnet *ifp)
   1593 {
   1594 	struct wm_softc *sc = ifp->if_softc;
   1595 
   1596 	/*
   1597 	 * Since we're using delayed interrupts, sweep up
   1598 	 * before we report an error.
   1599 	 */
   1600 	wm_txintr(sc);
   1601 
   1602 	if (sc->sc_txfree != WM_NTXDESC) {
   1603 		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   1604 		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
   1605 		    sc->sc_txnext);
   1606 		ifp->if_oerrors++;
   1607 
   1608 		/* Reset the interface. */
   1609 		(void) wm_init(ifp);
   1610 	}
   1611 
   1612 	/* Try to get more packets going. */
   1613 	wm_start(ifp);
   1614 }
   1615 
   1616 /*
   1617  * wm_ioctl:		[ifnet interface function]
   1618  *
   1619  *	Handle control requests from the operator.
   1620  */
   1621 static int
   1622 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
   1623 {
   1624 	struct wm_softc *sc = ifp->if_softc;
   1625 	struct ifreq *ifr = (struct ifreq *) data;
   1626 	int s, error;
   1627 
   1628 	s = splnet();
   1629 
   1630 	switch (cmd) {
   1631 	case SIOCSIFMEDIA:
   1632 	case SIOCGIFMEDIA:
   1633 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   1634 		break;
   1635 	default:
   1636 		error = ether_ioctl(ifp, cmd, data);
   1637 		if (error == ENETRESET) {
   1638 			/*
   1639 			 * Multicast list has changed; set the hardware filter
   1640 			 * accordingly.
   1641 			 */
   1642 			wm_set_filter(sc);
   1643 			error = 0;
   1644 		}
   1645 		break;
   1646 	}
   1647 
   1648 	/* Try to get more packets going. */
   1649 	wm_start(ifp);
   1650 
   1651 	splx(s);
   1652 	return (error);
   1653 }
   1654 
   1655 /*
   1656  * wm_intr:
   1657  *
   1658  *	Interrupt service routine.
   1659  */
   1660 static int
   1661 wm_intr(void *arg)
   1662 {
   1663 	struct wm_softc *sc = arg;
   1664 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1665 	uint32_t icr;
   1666 	int wantinit, handled = 0;
   1667 
   1668 	for (wantinit = 0; wantinit == 0;) {
   1669 		icr = CSR_READ(sc, WMREG_ICR);
   1670 		if ((icr & sc->sc_icr) == 0)
   1671 			break;
   1672 
   1673 #if 0 /*NRND > 0*/
   1674 		if (RND_ENABLED(&sc->rnd_source))
   1675 			rnd_add_uint32(&sc->rnd_source, icr);
   1676 #endif
   1677 
   1678 		handled = 1;
   1679 
   1680 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   1681 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   1682 			DPRINTF(WM_DEBUG_RX,
   1683 			    ("%s: RX: got Rx intr 0x%08x\n",
   1684 			    sc->sc_dev.dv_xname,
   1685 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   1686 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   1687 		}
   1688 #endif
   1689 		wm_rxintr(sc);
   1690 
   1691 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   1692 		if (icr & ICR_TXDW) {
   1693 			DPRINTF(WM_DEBUG_TX,
   1694 			    ("%s: TX: got TDXW interrupt\n",
   1695 			    sc->sc_dev.dv_xname));
   1696 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   1697 		}
   1698 #endif
   1699 		wm_txintr(sc);
   1700 
   1701 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
   1702 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   1703 			wm_linkintr(sc, icr);
   1704 		}
   1705 
   1706 		if (icr & ICR_RXO) {
   1707 			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
   1708 			wantinit = 1;
   1709 		}
   1710 	}
   1711 
   1712 	if (handled) {
   1713 		if (wantinit)
   1714 			wm_init(ifp);
   1715 
   1716 		/* Try to get more packets going. */
   1717 		wm_start(ifp);
   1718 	}
   1719 
   1720 	return (handled);
   1721 }
   1722 
   1723 /*
   1724  * wm_txintr:
   1725  *
   1726  *	Helper; handle transmit interrupts.
   1727  */
   1728 static void
   1729 wm_txintr(struct wm_softc *sc)
   1730 {
   1731 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1732 	struct wm_txsoft *txs;
   1733 	uint8_t status;
   1734 	int i;
   1735 
   1736 	ifp->if_flags &= ~IFF_OACTIVE;
   1737 
   1738 	/*
   1739 	 * Go through the Tx list and free mbufs for those
   1740 	 * frames which have been transmitted.
   1741 	 */
   1742 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
   1743 	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
   1744 		txs = &sc->sc_txsoft[i];
   1745 
   1746 		DPRINTF(WM_DEBUG_TX,
   1747 		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
   1748 
   1749 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
   1750 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   1751 
   1752 		status = le32toh(sc->sc_txdescs[
   1753 		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
   1754 		if ((status & WTX_ST_DD) == 0) {
   1755 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
   1756 			    BUS_DMASYNC_PREREAD);
   1757 			break;
   1758 		}
   1759 
   1760 		DPRINTF(WM_DEBUG_TX,
   1761 		    ("%s: TX: job %d done: descs %d..%d\n",
   1762 		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
   1763 		    txs->txs_lastdesc));
   1764 
   1765 		/*
   1766 		 * XXX We should probably be using the statistics
   1767 		 * XXX registers, but I don't know if they exist
   1768 		 * XXX on chips before the i82544.
   1769 		 */
   1770 
   1771 #ifdef WM_EVENT_COUNTERS
   1772 		if (status & WTX_ST_TU)
   1773 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   1774 #endif /* WM_EVENT_COUNTERS */
   1775 
   1776 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   1777 			ifp->if_oerrors++;
   1778 			if (status & WTX_ST_LC)
   1779 				printf("%s: late collision\n",
   1780 				    sc->sc_dev.dv_xname);
   1781 			else if (status & WTX_ST_EC) {
   1782 				ifp->if_collisions += 16;
   1783 				printf("%s: excessive collisions\n",
   1784 				    sc->sc_dev.dv_xname);
   1785 			}
   1786 		} else
   1787 			ifp->if_opackets++;
   1788 
   1789 		sc->sc_txfree += txs->txs_ndesc;
   1790 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   1791 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1792 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   1793 		m_freem(txs->txs_mbuf);
   1794 		txs->txs_mbuf = NULL;
   1795 	}
   1796 
   1797 	/* Update the dirty transmit buffer pointer. */
   1798 	sc->sc_txsdirty = i;
   1799 	DPRINTF(WM_DEBUG_TX,
   1800 	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
   1801 
   1802 	/*
   1803 	 * If there are no more pending transmissions, cancel the watchdog
   1804 	 * timer.
   1805 	 */
   1806 	if (sc->sc_txsfree == WM_TXQUEUELEN)
   1807 		ifp->if_timer = 0;
   1808 }
   1809 
/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 *
 *	Walks the receive ring from sc_rxptr, assembling frames that may
 *	span multiple descriptors into an mbuf chain (sc_rxhead/sc_rxtailp),
 *	replenishing each consumed descriptor with a fresh buffer, and
 *	passing completed packets up the stack.
 */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		/* Make the descriptor contents visible to the CPU. */
		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		/*
		 * sc_rxdiscard is set when an earlier buffer allocation
		 * failed mid-packet; drop descriptors until EOP.
		 */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 * The old mbuf is still loaded in the map,
			 * so the descriptor can simply be re-armed.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		/* Append this buffer to the packet under assembly. */
		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct m_tag *vtag;

			vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
			    M_NOWAIT);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*(u_int *)(vtag + 1) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.  The chip reports
		 * whether it verified the IP and TCP/UDP checksums; record
		 * the result for the upper layers.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}
   2014 
/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 *
 *	For copper (MII) parts, a link status change simply triggers an
 *	immediate mii_tick().  For fiber (TBI) parts, link state is read
 *	from the STATUS register and the transmit collision distance is
 *	updated for the resolved duplex.
 */
static void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/* Link is up; program collision distance for duplex. */
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}
   2081 
   2082 /*
   2083  * wm_tick:
   2084  *
   2085  *	One second timer, used to check link status, sweep up
   2086  *	completed transmit jobs, etc.
   2087  */
   2088 static void
   2089 wm_tick(void *arg)
   2090 {
   2091 	struct wm_softc *sc = arg;
   2092 	int s;
   2093 
   2094 	s = splnet();
   2095 
   2096 	if (sc->sc_flags & WM_F_HAS_MII)
   2097 		mii_tick(&sc->sc_mii);
   2098 	else
   2099 		wm_tbi_check_link(sc);
   2100 
   2101 	splx(s);
   2102 
   2103 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2104 }
   2105 
/*
 * wm_reset:
 *
 *	Reset the i82542 chip.
 *
 *	Issues CTRL_RST using whichever access method is safe for the
 *	particular chip revision, then waits for the reset bit to clear.
 */
static void
wm_reset(struct wm_softc *sc)
{
	int i;

	switch (sc->sc_type) {
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82546:
	case WM_T_82541:
	case WM_T_82541_2:
		/*
		 * These chips have a problem with the memory-mapped
		 * write cycle when issuing the reset, so use I/O-mapped
		 * access, if possible.
		 */
		if (sc->sc_flags & WM_F_IOH_VALID)
			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
		else
			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
		break;

	case WM_T_82545_3:
	case WM_T_82546_3:
		/* Use the shadow control register on these chips. */
		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
		break;

	default:
		/* Everything else can safely use the documented method. */
		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
		break;
	}
	/* Give the chip time to start the internal reset. */
	delay(10000);

	/* Poll for the reset bit to self-clear (up to ~20ms). */
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			return;
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    sc->sc_dev.dv_xname);
}
   2157 
/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 *
 *	Stops and resets the chip, then programs the transmit and
 *	receive descriptor rings, flow control, checksum offload,
 *	interrupt masks, and receive filter, and finally marks the
 *	interface running.  Returns 0 on success or an errno value
 *	if receive buffer allocation fails.
 */
static int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC;
	sc->sc_txnext = 0;

	/* Invalidate the cached Tx checksum context. */
	sc->sc_txctx_ipcs = 0xffffffff;
	sc->sc_txctx_tucs = 0xffffffff;

	/*
	 * Program the Tx descriptor base/length/head/tail registers;
	 * pre-82543 chips use the old register layout.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, 0);
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 128);

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		/* The second receive queue is unused; clear it. */
		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, 0);
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
	}
	/* Populate the receive ring with buffers. */
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);

		if (sc->sc_type < WM_T_82543) {
			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
		} else {
			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
		}
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
	}

#if 0 /* XXXJRT */
	/* Deal with VLAN enables. */
	if (sc->sc_ethercom.ec_nvlans != 0)
		sc->sc_ctrl |= CTRL_VME;
	else
#endif /* XXXJRT */
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
		reg |= RXCSUM_IPOFL;
	else
		reg &= ~RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	else {
		reg &= ~RXCSUM_TUOFL;
		/* TCP/UDP offload requires IP offload; only clear IP
		   offload when it wasn't independently requested. */
		if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
			reg &= ~RXCSUM_IPOFL;
	}
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

#if 0 /* XXXJRT */
	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);

	if(MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
	/*
	 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many DMA"
	 * XXX segments, dropping" -- why?
	 */
#if 0
		if(sc->sc_type >= WM_T_82543) {
			switch(MCLBYTES) {
			case 4096:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
				break;
			case 8192:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
				break;
			case 16384:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
				break;
			default:
				panic("wm_init: MCLBYTES %d unsupported",
				    MCLBYTES);
				break;
			}
		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
#else
		panic("wm_init: MCLBYTES > 2048 not supported.");
#endif
	}

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
   2429 
   2430 /*
   2431  * wm_rxdrain:
   2432  *
   2433  *	Drain the receive queue.
   2434  */
   2435 static void
   2436 wm_rxdrain(struct wm_softc *sc)
   2437 {
   2438 	struct wm_rxsoft *rxs;
   2439 	int i;
   2440 
   2441 	for (i = 0; i < WM_NRXDESC; i++) {
   2442 		rxs = &sc->sc_rxsoft[i];
   2443 		if (rxs->rxs_mbuf != NULL) {
   2444 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   2445 			m_freem(rxs->rxs_mbuf);
   2446 			rxs->rxs_mbuf = NULL;
   2447 		}
   2448 	}
   2449 }
   2450 
   2451 /*
   2452  * wm_stop:		[ifnet interface function]
   2453  *
   2454  *	Stop transmission on the interface.
   2455  */
   2456 static void
   2457 wm_stop(struct ifnet *ifp, int disable)
   2458 {
   2459 	struct wm_softc *sc = ifp->if_softc;
   2460 	struct wm_txsoft *txs;
   2461 	int i;
   2462 
   2463 	/* Stop the one second clock. */
   2464 	callout_stop(&sc->sc_tick_ch);
   2465 
   2466 	if (sc->sc_flags & WM_F_HAS_MII) {
   2467 		/* Down the MII. */
   2468 		mii_down(&sc->sc_mii);
   2469 	}
   2470 
   2471 	/* Stop the transmit and receive processes. */
   2472 	CSR_WRITE(sc, WMREG_TCTL, 0);
   2473 	CSR_WRITE(sc, WMREG_RCTL, 0);
   2474 
   2475 	/* Release any queued transmit buffers. */
   2476 	for (i = 0; i < WM_TXQUEUELEN; i++) {
   2477 		txs = &sc->sc_txsoft[i];
   2478 		if (txs->txs_mbuf != NULL) {
   2479 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   2480 			m_freem(txs->txs_mbuf);
   2481 			txs->txs_mbuf = NULL;
   2482 		}
   2483 	}
   2484 
   2485 	if (disable)
   2486 		wm_rxdrain(sc);
   2487 
   2488 	/* Mark the interface as down and cancel the watchdog timer. */
   2489 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   2490 	ifp->if_timer = 0;
   2491 }
   2492 
   2493 /*
   2494  * wm_acquire_eeprom:
   2495  *
   2496  *	Perform the EEPROM handshake required on some chips.
   2497  */
   2498 static int
   2499 wm_acquire_eeprom(struct wm_softc *sc)
   2500 {
   2501 	uint32_t reg;
   2502 	int x;
   2503 
   2504 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE)  {
   2505 		reg = CSR_READ(sc, WMREG_EECD);
   2506 
   2507 		/* Request EEPROM access. */
   2508 		reg |= EECD_EE_REQ;
   2509 		CSR_WRITE(sc, WMREG_EECD, reg);
   2510 
   2511 		/* ..and wait for it to be granted. */
   2512 		for (x = 0; x < 100; x++) {
   2513 			reg = CSR_READ(sc, WMREG_EECD);
   2514 			if (reg & EECD_EE_GNT)
   2515 				break;
   2516 			delay(5);
   2517 		}
   2518 		if ((reg & EECD_EE_GNT) == 0) {
   2519 			aprint_error("%s: could not acquire EEPROM GNT\n",
   2520 			    sc->sc_dev.dv_xname);
   2521 			reg &= ~EECD_EE_REQ;
   2522 			CSR_WRITE(sc, WMREG_EECD, reg);
   2523 			return (1);
   2524 		}
   2525 	}
   2526 
   2527 	return (0);
   2528 }
   2529 
   2530 /*
   2531  * wm_release_eeprom:
   2532  *
   2533  *	Release the EEPROM mutex.
   2534  */
   2535 static void
   2536 wm_release_eeprom(struct wm_softc *sc)
   2537 {
   2538 	uint32_t reg;
   2539 
   2540 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
   2541 		reg = CSR_READ(sc, WMREG_EECD);
   2542 		reg &= ~EECD_EE_REQ;
   2543 		CSR_WRITE(sc, WMREG_EECD, reg);
   2544 	}
   2545 }
   2546 
/*
 * wm_eeprom_sendbits:
 *
 *	Send a series of bits to the EEPROM.
 *
 *	Bit-bangs "nbits" bits of "bits", most-significant first, by
 *	setting DI and pulsing SK high then low for each bit.
 */
static void
wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
{
	uint32_t reg;
	int x;

	reg = CSR_READ(sc, WMREG_EECD);

	for (x = nbits; x > 0; x--) {
		/* Place the next bit (MSB first) on the DI line. */
		if (bits & (1U << (x - 1)))
			reg |= EECD_DI;
		else
			reg &= ~EECD_DI;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
		/* Clock it in: raise SK, then lower it again. */
		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
		delay(2);
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}
}
   2573 
/*
 * wm_eeprom_recvbits:
 *
 *	Receive a series of bits from the EEPROM.
 *
 *	Clocks in "nbits" bits, most-significant first, sampling the DO
 *	line after each rising SK edge, and stores the result in *valp.
 */
static void
wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
{
	uint32_t reg, val;
	int x;

	/* Keep DI low while reading. */
	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;

	val = 0;
	for (x = nbits; x > 0; x--) {
		/* Raise SK, sample DO, then lower SK. */
		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
		delay(2);
		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
			val |= (1U << (x - 1));
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}
	*valp = val;
}
   2598 
/*
 * wm_read_eeprom_uwire:
 *
 *	Read a word from the EEPROM using the MicroWire protocol.
 *
 *	Reads "wordcnt" consecutive 16-bit words starting at "word"
 *	into "data".  Caller must already hold EEPROM access (see
 *	wm_acquire_eeprom()).  Always returns 0.
 */
static int
wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address. */
		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}

	return (0);
}
   2638 
   2639 /*
   2640  * wm_read_eeprom:
   2641  *
   2642  *	Read data from the serial EEPROM.
   2643  */
   2644 static int
   2645 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   2646 {
   2647 	int rv;
   2648 
   2649 	if (wm_acquire_eeprom(sc))
   2650 		return (1);
   2651 
   2652 	rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
   2653 
   2654 	wm_release_eeprom(sc);
   2655 	return (rv);
   2656 }
   2657 
/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *
 *	Allocates a cluster mbuf, loads it into the descriptor's DMA
 *	map, and re-arms the descriptor.  Returns ENOBUFS if mbuf or
 *	cluster allocation fails; in that case the descriptor's old
 *	buffer and mapping are left untouched, which the callers
 *	(wm_init(), wm_rxintr()) rely on for recovery.
 */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Replace the old buffer; the caller saved a pointer to it. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/*
		 * At this point the old mapping is gone, so returning an
		 * error would leave the descriptor with no valid buffer
		 * while the callers' recovery paths assume one exists;
		 * hence the panic.
		 */
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("wm_add_rxbuf");	/* XXX XXX XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	WM_INIT_RXDESC(sc, idx);

	return (0);
}
   2701 
   2702 /*
   2703  * wm_set_ral:
   2704  *
   2705  *	Set an entery in the receive address list.
   2706  */
   2707 static void
   2708 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2709 {
   2710 	uint32_t ral_lo, ral_hi;
   2711 
   2712 	if (enaddr != NULL) {
   2713 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2714 		    (enaddr[3] << 24);
   2715 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   2716 		ral_hi |= RAL_AV;
   2717 	} else {
   2718 		ral_lo = 0;
   2719 		ral_hi = 0;
   2720 	}
   2721 
   2722 	if (sc->sc_type >= WM_T_82544) {
   2723 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2724 		    ral_lo);
   2725 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2726 		    ral_hi);
   2727 	} else {
   2728 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2729 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2730 	}
   2731 }
   2732 
   2733 /*
   2734  * wm_mchash:
   2735  *
   2736  *	Compute the hash of the multicast address for the 4096-bit
   2737  *	multicast filter.
   2738  */
   2739 static uint32_t
   2740 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2741 {
   2742 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2743 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2744 	uint32_t hash;
   2745 
   2746 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   2747 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   2748 
   2749 	return (hash & 0xfff);
   2750 }
   2751 
/*
 * wm_set_filter:
 *
 *	Set up the receive filter.
 *
 *	Programs the receive address list and the 4096-bit multicast
 *	hash table from the interface's multicast list, falling back
 *	to all-multicast (or promiscuous) mode when required, and then
 *	writes the final RCTL value.
 */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i;

	/* The multicast table moved on the i82544 and later chips. */
	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
	for (i = 1; i < WM_RAL_TABSIZE; i++)
		wm_set_ral(sc, NULL, i);

	/* Clear out the multicast table. */
	for (i = 0; i < WM_MC_TABSIZE; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* Upper 7 bits select the 32-bit MTA word, lower 5 the bit. */
		reg = (hash >> 5) & 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/*
		 * XXX Hardware bug??  On the i82544, writing certain MTA
		 * words requires re-writing the previous word afterwards
		 * ("bit" is reused as scratch for that word's value).
		 */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
   2837 
/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X devices.
 */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	const char *sep = "";

	/* Transmit inter-packet gap; pre-82543 parts use a different value. */
	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/*
	 * Add the supported media, printing each one as we go.  The
	 * ifmedia "data" word carries the ANAR_X ability bits to
	 * advertise for that selection; wm_tbi_mediachange() copies
	 * it into TXCW.
	 */
#define	ADD(ss, mm, dd)							\
do {									\
	printf("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	printf("%s: ", sc->sc_dev.dv_xname);
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	printf("\n");

#undef ADD

	/* Default to autonegotiation. */
	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
   2884 
   2885 /*
   2886  * wm_tbi_mediastatus:	[ifmedia interface function]
   2887  *
   2888  *	Get the current interface media status on a 1000BASE-X device.
   2889  */
   2890 static void
   2891 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   2892 {
   2893 	struct wm_softc *sc = ifp->if_softc;
   2894 
   2895 	ifmr->ifm_status = IFM_AVALID;
   2896 	ifmr->ifm_active = IFM_ETHER;
   2897 
   2898 	if (sc->sc_tbi_linkup == 0) {
   2899 		ifmr->ifm_active |= IFM_NONE;
   2900 		return;
   2901 	}
   2902 
   2903 	ifmr->ifm_status |= IFM_ACTIVE;
   2904 	ifmr->ifm_active |= IFM_1000_SX;
   2905 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   2906 		ifmr->ifm_active |= IFM_FDX;
   2907 }
   2908 
/*
 * wm_tbi_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-X device.
 */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/*
	 * Build the TXCW autonegotiation word: the media entry's data
	 * word holds the ANAR_X ability bits (see wm_tbi_mediainit()),
	 * plus flow-control bits mirrored from the CTRL shadow, plus
	 * autonegotiation enable.
	 */
	sc->sc_txcw = ife->ifm_data;
	if (sc->sc_ctrl & CTRL_RFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
	if (sc->sc_ctrl & CTRL_TFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
	sc->sc_txcw |= TXCW_ANE;

	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	sc->sc_tbi_anstate = 0;

	/* SWD pin 1 is Loss Of Signal; low means we have signal. */
	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < 50; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/* Set the collision distance for the new duplex. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	/* Reflect the new link state on the link LED. */
	wm_tbi_set_linkled(sc);

	return (0);
}
   2975 
   2976 /*
   2977  * wm_tbi_set_linkled:
   2978  *
   2979  *	Update the link LED on 1000BASE-X devices.
   2980  */
   2981 static void
   2982 wm_tbi_set_linkled(struct wm_softc *sc)
   2983 {
   2984 
   2985 	if (sc->sc_tbi_linkup)
   2986 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   2987 	else
   2988 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   2989 
   2990 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2991 }
   2992 
/*
 * wm_tbi_check_link:
 *
 *	Check the link on 1000BASE-X devices.
 */
static void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

	/*
	 * sc_tbi_anstate appears to be a countdown (set elsewhere)
	 * allowing autonegotiation to settle before we trust the link
	 * state: 0 = no check pending, >1 = keep waiting, 1 = check
	 * now.  NOTE(review): countdown start value set outside this
	 * view — confirm against the interrupt path.
	 */
	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	/* rxcw and ctrl are read here but their values are not used. */
	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		/* Set the collision distance to match the link duplex. */
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		sc->sc_tbi_linkup = 1;
	}

	/* Reflect the new link state on the link LED. */
	wm_tbi_set_linkled(sc);
}
   3040 
/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.
 */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;

	if (sc->sc_type >= WM_T_82544) {
		/* 82544 and later: pulse the PHY_RESET bit in CTRL. */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/*
		 * Older parts: PHY reset is wired to software-definable
		 * pin 4 in CTRL_EXT.  Configure pin 4 as an output, then
		 * drive it high, low, and high again.
		 *
		 * The PHY reset pin is active-low.
		 */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		/* Assert reset (pin low). */
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10);

		/* Deassert reset (pin high). */
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
}
   3077 
/*
 * wm_gmii_mediainit:
 *
 *	Initialize media for use on 1000BASE-T devices.
 */
static void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 */
	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	/*
	 * 82544 and later access the PHY through the MDIC register;
	 * older parts bit-bang the management interface through
	 * software-definable pins.
	 */
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY was found; offer only "none". */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
   3125 
   3126 /*
   3127  * wm_gmii_mediastatus:	[ifmedia interface function]
   3128  *
   3129  *	Get the current interface media status on a 1000BASE-T device.
   3130  */
   3131 static void
   3132 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   3133 {
   3134 	struct wm_softc *sc = ifp->if_softc;
   3135 
   3136 	mii_pollstat(&sc->sc_mii);
   3137 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
   3138 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
   3139 }
   3140 
   3141 /*
   3142  * wm_gmii_mediachange:	[ifmedia interface function]
   3143  *
   3144  *	Set hardware to newly-selected media on a 1000BASE-T device.
   3145  */
   3146 static int
   3147 wm_gmii_mediachange(struct ifnet *ifp)
   3148 {
   3149 	struct wm_softc *sc = ifp->if_softc;
   3150 
   3151 	if (ifp->if_flags & IFF_UP)
   3152 		mii_mediachg(&sc->sc_mii);
   3153 	return (0);
   3154 }
   3155 
   3156 #define	MDI_IO		CTRL_SWDPIN(2)
   3157 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   3158 #define	MDI_CLK		CTRL_SWDPIN(3)
   3159 
/*
 * i82543_mii_sendbits:
 *
 *	Bit-bang the low-order `nbits' of `data', MSB first, out the
 *	MII management interface via the software-definable pins
 *	(MDI_IO = SWD pin 2, MDI_CLK = SWD pin 3).
 */
static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Configure both MDIO and MDC as outputs, clock low. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	/* For each bit: set the data pin, then pulse the clock. */
	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}
   3182 
/*
 * i82543_mii_recvbits:
 *
 *	Bit-bang 16 bits, MSB first, in from the MII management
 *	interface via the software-definable pins.
 */
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* Release the MDIO pin (input); keep the clock as an output. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* One clock pulse before the data bits. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	/* Shift in 16 bits, sampling MDIO while the clock is high. */
	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* One trailing clock pulse. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}
   3216 
   3217 #undef MDI_IO
   3218 #undef MDI_DIR
   3219 #undef MDI_CLK
   3220 
   3221 /*
   3222  * wm_gmii_i82543_readreg:	[mii interface function]
   3223  *
   3224  *	Read a PHY register on the GMII (i82543 version).
   3225  */
   3226 static int
   3227 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
   3228 {
   3229 	struct wm_softc *sc = (void *) self;
   3230 	int rv;
   3231 
   3232 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
   3233 	i82543_mii_sendbits(sc, reg | (phy << 5) |
   3234 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   3235 	rv = i82543_mii_recvbits(sc) & 0xffff;
   3236 
   3237 	DPRINTF(WM_DEBUG_GMII,
   3238 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   3239 	    sc->sc_dev.dv_xname, phy, reg, rv));
   3240 
   3241 	return (rv);
   3242 }
   3243 
/*
 * wm_gmii_i82543_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII (i82543 version).
 */
static void
wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;

	/*
	 * 32-bit preamble of all ones, then the full 32-bit write
	 * frame: start and write opcodes, PHY and register addresses,
	 * turnaround (ACK) bits, and the 16-bit value.
	 */
	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}
   3259 
/*
 * wm_gmii_i82544_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII.
 *
 *	Returns the 16-bit register value, or 0 on timeout, MDIC
 *	error, or an all-ones read.
 */
static int
wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic;
	int i, rv;

	/* Kick off the read through the MDI Control register. */
	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	/* Poll for completion: up to 100 iterations, 10us apart. */
	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		printf("%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		printf("%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		/* All-ones presumably means no device; return 0 instead. */
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}
   3300 
/*
 * wm_gmii_i82544_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII.
 *
 *	Timeouts and MDIC errors are reported on the console but
 *	otherwise ignored.
 */
static void
wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic;
	int i;

	/* Kick off the write through the MDI Control register. */
	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	/* Poll for completion: up to 100 iterations, 10us apart. */
	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		printf("%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		printf("%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}
   3330 
   3331 /*
   3332  * wm_gmii_statchg:	[mii interface function]
   3333  *
   3334  *	Callback from MII layer when media changes.
   3335  */
   3336 static void
   3337 wm_gmii_statchg(struct device *self)
   3338 {
   3339 	struct wm_softc *sc = (void *) self;
   3340 
   3341 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   3342 
   3343 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   3344 		DPRINTF(WM_DEBUG_LINK,
   3345 		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
   3346 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   3347 	} else  {
   3348 		DPRINTF(WM_DEBUG_LINK,
   3349 		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
   3350 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   3351 	}
   3352 
   3353 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   3354 }
   3355