/* if_wm.c revision 1.139 -- NetBSD sys/dev/pci */
      1 
      2 /*
      3  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      4  * All rights reserved.
      5  *
      6  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. All advertising materials mentioning features or use of this software
     17  *    must display the following acknowledgement:
     18  *	This product includes software developed for the NetBSD Project by
     19  *	Wasabi Systems, Inc.
     20  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     21  *    or promote products derived from this software without specific prior
     22  *    written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     26  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     27  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     34  * POSSIBILITY OF SUCH DAMAGE.
     35  */
     36 
     37 /*******************************************************************************
     38 
     39   Copyright (c) 2001-2005, Intel Corporation
     40   All rights reserved.
     41 
     42   Redistribution and use in source and binary forms, with or without
     43   modification, are permitted provided that the following conditions are met:
     44 
     45    1. Redistributions of source code must retain the above copyright notice,
     46       this list of conditions and the following disclaimer.
     47 
     48    2. Redistributions in binary form must reproduce the above copyright
     49       notice, this list of conditions and the following disclaimer in the
     50       documentation and/or other materials provided with the distribution.
     51 
     52    3. Neither the name of the Intel Corporation nor the names of its
     53       contributors may be used to endorse or promote products derived from
     54       this software without specific prior written permission.
     55 
     56   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     57   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     58   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     59   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     60   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     61   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     62   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     63   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     64   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     65   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     66   POSSIBILITY OF SUCH DAMAGE.
     67 
     68 *******************************************************************************/
     69 /*
     70  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     71  *
     72  * TODO (in order of importance):
     73  *
     74  *	- Rework how parameters are loaded from the EEPROM.
     75  *	- Figure out what to do with the i82545GM and i82546GB
     76  *	  SERDES controllers.
     77  *	- Fix hw VLAN assist.
     78  */
     79 
     80 #include <sys/cdefs.h>
     81 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.139 2007/04/29 20:35:21 bouyer Exp $");
     82 
     83 #include "bpfilter.h"
     84 #include "rnd.h"
     85 
     86 #include <sys/param.h>
     87 #include <sys/systm.h>
     88 #include <sys/callout.h>
     89 #include <sys/mbuf.h>
     90 #include <sys/malloc.h>
     91 #include <sys/kernel.h>
     92 #include <sys/socket.h>
     93 #include <sys/ioctl.h>
     94 #include <sys/errno.h>
     95 #include <sys/device.h>
     96 #include <sys/queue.h>
     97 #include <sys/syslog.h>
     98 
     99 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
    100 
    101 #if NRND > 0
    102 #include <sys/rnd.h>
    103 #endif
    104 
    105 #include <net/if.h>
    106 #include <net/if_dl.h>
    107 #include <net/if_media.h>
    108 #include <net/if_ether.h>
    109 
    110 #if NBPFILTER > 0
    111 #include <net/bpf.h>
    112 #endif
    113 
    114 #include <netinet/in.h>			/* XXX for struct ip */
    115 #include <netinet/in_systm.h>		/* XXX for struct ip */
    116 #include <netinet/ip.h>			/* XXX for struct ip */
    117 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    118 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    119 
    120 #include <machine/bus.h>
    121 #include <machine/intr.h>
    122 #include <machine/endian.h>
    123 
    124 #include <dev/mii/mii.h>
    125 #include <dev/mii/miivar.h>
    126 #include <dev/mii/mii_bitbang.h>
    127 #include <dev/mii/ikphyreg.h>
    128 
    129 #include <dev/pci/pcireg.h>
    130 #include <dev/pci/pcivar.h>
    131 #include <dev/pci/pcidevs.h>
    132 
    133 #include <dev/pci/if_wmreg.h>
    134 
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

/*
 * Conditional debug printf.  Wrapped in do { } while (0) so the macro
 * expands to exactly one statement; the previous bare-"if" form made
 * "if (cond) DPRINTF(...); else ..." bind the else to the macro's
 * hidden if (dangling-else bug).
 */
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
    146 
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256	/* max DMA segments per packet */
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64	/* Tx jobs managed at once */
#define	WM_TXQUEUELEN_MAX_82547	16	/* ...but fewer on the i82547 */
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
/* sc_txnum must be a power of two for this mask to work. */
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256	/* hardware ring size on < 82544 */
#define	WM_NTXDESC_82544	4096	/* hardware ring size on >= 82544 */
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
/* sc_ntxdesc must be a power of two for this mask to work. */
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))	/* next descriptor, with wrap */
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))	/* next Tx job, with wrap */

/* Largest contiguous DMA mapping we must support per packet (for TSO). */
#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
    174 
/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256	/* must stay a power of two */
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)	/* next descriptor, with wrap */
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)	/* previous descriptor, with wrap */
    185 
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

/* Smaller variant for chips limited to WM_NTXDESC_82542 Tx descriptors. */
struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

/*
 * Byte offsets of descriptors within the control-data clump.  Both
 * layouts put the Rx descriptors first, so the 82544 layout is valid
 * for computing offsets in either case.
 */
#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
    212 
/*
 * Software state for transmit jobs.  One job may span several
 * hardware descriptors (txs_firstdesc..txs_lastdesc).
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
    233 
/*
 * Chip generations supported by this driver; stored in sc_type and
 * used throughout to select per-chip behaviour and work-arounds.
 */
typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
	WM_T_82571,			/* i82571 */
	WM_T_82572,			/* i82572 */
	WM_T_82573,			/* i82573 */
	WM_T_80003,			/* i80003 */
	WM_T_ICH8,			/* ICH8 LAN */
} wm_chip_type;
    255 
/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power hook */
	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	pcitag_t sc_pcitag;		/* PCI device tag */
	struct pci_conf_state sc_pciconf; /* saved PCI configuration state */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	/* DMA address of the start of the control-data clump. */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* Rx payload alignment offset; see WM_INIT_RXDESC below. */
	int		sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int			sc_txnum;	/* must be a power of two */
	struct wm_txsoft	sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft	sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int			sc_ntxdesc;	/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	struct callout sc_txfifo_ch;	/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;		/* discarding current Rx packet */
	int	sc_rxlen;		/* bytes accumulated in sc_rxhead chain */
	struct mbuf *sc_rxhead;		/* head of in-progress Rx mbuf chain */
	struct mbuf *sc_rxtail;		/* tail of in-progress Rx mbuf chain */
	struct mbuf **sc_rxtailp;	/* points at chain's NULL terminator */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;		/* ICH8: flash base offset */
	int sc_ich8_flash_bank_size;	/* ICH8: flash bank size */
};
    389 
/*
 * Reset the in-progress Rx mbuf chain: point the tail pointer back at
 * the head, NULL-terminate, and zero the accumulated length.
 */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf (m) to the in-progress Rx chain and advance the tail
 * pointer to the new mbuf's m_next slot.
 */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
    402 
/* sc_flags -- bit flags describing chip capabilities and bus type. */
#define	WM_F_HAS_MII		0x0001	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
#define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
#define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
#define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
#define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
#define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
#define	WM_F_BUS64		0x0100	/* bus is 64-bit */
#define	WM_F_PCIX		0x0200	/* bus is PCI-X */
#define	WM_F_CSA		0x0400	/* bus is CSA */
#define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
#define WM_F_SWFW_SYNC		0x1000  /* Software-Firmware synchronisation */
#define WM_F_SWFWHW_SYNC	0x2000  /* Software-Firmware synchronisation */
    418 
/* Event-counter helpers; compile to nothing without WM_EVENT_COUNTERS. */
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif
    426 
/* Chip CSR (memory-mapped register) access. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
/* Force posted writes out by reading STATUS back. */
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

/* ICH8 flash register access (separate mapping: sc_flasht/sc_flashh). */
#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    443 
/* DMA address of Tx/Rx descriptor (x) within the control-data clump. */
#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

/*
 * Low/high 32-bit halves of the descriptor DMA addresses, for the
 * chip's 64-bit address registers.  The high half is 0 when
 * bus_addr_t is only 32 bits wide.
 */
#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
    456 
/*
 * bus_dmamap_sync (n) Tx descriptors starting at index (x).  The range
 * may wrap around the end of the ring, in which case it is synced in
 * two pieces: tail-of-ring first, then the remainder from index 0.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/* bus_dmamap_sync a single Rx descriptor at index (x). */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
    483 
/*
 * (Re)initialize Rx descriptor (x) to point at its mbuf's buffer,
 * sync it for the chip, and advance the RDT register to hand the
 * descriptor to hardware.  The sync must precede the RDT write.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
    517 
/* ifnet entry points. */
static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

/* Shutdown and power-management hooks. */
static void	wm_shutdown(void *);
static void	wm_powerhook(int, void *);

/* Chip reset, buffer management, EEPROM access, periodic tick. */
static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

/* Multicast/receive filter programming. */
static void	wm_set_filter(struct wm_softc *);

/* Interrupt handler and per-cause service routines. */
static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

/* TBI (ten-bit interface, 1000BASE-X) media handling. */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

/* GMII (copper PHY) media handling. */
static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static int	wm_gmii_i80003_readreg(struct device *, int, int);
static void	wm_gmii_i80003_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

/* Kumeran (i80003) register access. */
static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);

/* Autoconfiguration, semaphores, and ICH8 flash-resident NVM access. */
static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
                     uint32_t, uint16_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/* 82547 Tx FIFO stall work-around timer (see sc_txfifo_ch). */
static void	wm_82547_txfifo_stall(void *);
    591 
    592 /*
    593  * Devices supported by this driver.
    594  */
    595 static const struct wm_product {
    596 	pci_vendor_id_t		wmp_vendor;
    597 	pci_product_id_t	wmp_product;
    598 	const char		*wmp_name;
    599 	wm_chip_type		wmp_type;
    600 	int			wmp_flags;
    601 #define	WMP_F_1000X		0x01
    602 #define	WMP_F_1000T		0x02
    603 } wm_products[] = {
    604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    605 	  "Intel i82542 1000BASE-X Ethernet",
    606 	  WM_T_82542_2_1,	WMP_F_1000X },
    607 
    608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    609 	  "Intel i82543GC 1000BASE-X Ethernet",
    610 	  WM_T_82543,		WMP_F_1000X },
    611 
    612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    613 	  "Intel i82543GC 1000BASE-T Ethernet",
    614 	  WM_T_82543,		WMP_F_1000T },
    615 
    616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    617 	  "Intel i82544EI 1000BASE-T Ethernet",
    618 	  WM_T_82544,		WMP_F_1000T },
    619 
    620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    621 	  "Intel i82544EI 1000BASE-X Ethernet",
    622 	  WM_T_82544,		WMP_F_1000X },
    623 
    624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    625 	  "Intel i82544GC 1000BASE-T Ethernet",
    626 	  WM_T_82544,		WMP_F_1000T },
    627 
    628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    629 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    630 	  WM_T_82544,		WMP_F_1000T },
    631 
    632 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    633 	  "Intel i82540EM 1000BASE-T Ethernet",
    634 	  WM_T_82540,		WMP_F_1000T },
    635 
    636 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    637 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    638 	  WM_T_82540,		WMP_F_1000T },
    639 
    640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    641 	  "Intel i82540EP 1000BASE-T Ethernet",
    642 	  WM_T_82540,		WMP_F_1000T },
    643 
    644 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    645 	  "Intel i82540EP 1000BASE-T Ethernet",
    646 	  WM_T_82540,		WMP_F_1000T },
    647 
    648 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    649 	  "Intel i82540EP 1000BASE-T Ethernet",
    650 	  WM_T_82540,		WMP_F_1000T },
    651 
    652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    653 	  "Intel i82545EM 1000BASE-T Ethernet",
    654 	  WM_T_82545,		WMP_F_1000T },
    655 
    656 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    657 	  "Intel i82545GM 1000BASE-T Ethernet",
    658 	  WM_T_82545_3,		WMP_F_1000T },
    659 
    660 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    661 	  "Intel i82545GM 1000BASE-X Ethernet",
    662 	  WM_T_82545_3,		WMP_F_1000X },
    663 #if 0
    664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    665 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    666 	  WM_T_82545_3,		WMP_F_SERDES },
    667 #endif
    668 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    669 	  "Intel i82546EB 1000BASE-T Ethernet",
    670 	  WM_T_82546,		WMP_F_1000T },
    671 
    672 	{ PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546EB_QUAD,
    673 	  "Intel i82546EB 1000BASE-T Ethernet",
    674 	  WM_T_82546,		WMP_F_1000T },
    675 
    676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    677 	  "Intel i82545EM 1000BASE-X Ethernet",
    678 	  WM_T_82545,		WMP_F_1000X },
    679 
    680 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    681 	  "Intel i82546EB 1000BASE-X Ethernet",
    682 	  WM_T_82546,		WMP_F_1000X },
    683 
    684 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    685 	  "Intel i82546GB 1000BASE-T Ethernet",
    686 	  WM_T_82546_3,		WMP_F_1000T },
    687 
    688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    689 	  "Intel i82546GB 1000BASE-X Ethernet",
    690 	  WM_T_82546_3,		WMP_F_1000X },
    691 #if 0
    692 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    693 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    694 	  WM_T_82546_3,		WMP_F_SERDES },
    695 #endif
    696 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    697 	  "i82546GB quad-port Gigabit Ethernet",
    698 	  WM_T_82546_3,		WMP_F_1000T },
    699 
    700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    701 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    702 	  WM_T_82546_3,		WMP_F_1000T },
    703 
    704 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    705 	  "Intel PRO/1000MT (82546GB)",
    706 	  WM_T_82546_3,		WMP_F_1000T },
    707 
    708 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    709 	  "Intel i82541EI 1000BASE-T Ethernet",
    710 	  WM_T_82541,		WMP_F_1000T },
    711 
    712 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    713 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    714 	  WM_T_82541,		WMP_F_1000T },
    715 
    716 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    717 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    718 	  WM_T_82541,		WMP_F_1000T },
    719 
    720 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    721 	  "Intel i82541ER 1000BASE-T Ethernet",
    722 	  WM_T_82541_2,		WMP_F_1000T },
    723 
    724 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    725 	  "Intel i82541GI 1000BASE-T Ethernet",
    726 	  WM_T_82541_2,		WMP_F_1000T },
    727 
    728 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    729 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    730 	  WM_T_82541_2,		WMP_F_1000T },
    731 
    732 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    733 	  "Intel i82541PI 1000BASE-T Ethernet",
    734 	  WM_T_82541_2,		WMP_F_1000T },
    735 
    736 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    737 	  "Intel i82547EI 1000BASE-T Ethernet",
    738 	  WM_T_82547,		WMP_F_1000T },
    739 
    740 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    741 	  "Intel i82547EI Moblie 1000BASE-T Ethernet",
    742 	  WM_T_82547,		WMP_F_1000T },
    743 
    744 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    745 	  "Intel i82547GI 1000BASE-T Ethernet",
    746 	  WM_T_82547_2,		WMP_F_1000T },
    747 
    748 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
    749 	  "Intel PRO/1000 PT (82571EB)",
    750 	  WM_T_82571,		WMP_F_1000T },
    751 
    752 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
    753 	  "Intel PRO/1000 PF (82571EB)",
    754 	  WM_T_82571,		WMP_F_1000X },
    755 #if 0
    756 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
    757 	  "Intel PRO/1000 PB (82571EB)",
    758 	  WM_T_82571,		WMP_F_SERDES },
    759 #endif
    760 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
    761 	  "Intel PRO/1000 QT (82571EB)",
    762 	  WM_T_82571,		WMP_F_1000T },
    763 
    764 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
    765 	  "Intel i82572EI 1000baseT Ethernet",
    766 	  WM_T_82572,		WMP_F_1000T },
    767 
    768 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
    769 	  "Intel i82572EI 1000baseX Ethernet",
    770 	  WM_T_82572,		WMP_F_1000X },
    771 #if 0
    772 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
    773 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
    774 	  WM_T_82572,		WMP_F_SERDES },
    775 #endif
    776 
    777 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
    778 	  "Intel i82572EI 1000baseT Ethernet",
    779 	  WM_T_82572,		WMP_F_1000T },
    780 
    781 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
    782 	  "Intel i82573E",
    783 	  WM_T_82573,		WMP_F_1000T },
    784 
    785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
    786 	  "Intel i82573E IAMT",
    787 	  WM_T_82573,		WMP_F_1000T },
    788 
    789 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
    790 	  "Intel i82573L Gigabit Ethernet",
    791 	  WM_T_82573,		WMP_F_1000T },
    792 
    793 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
    794 	  "i80003 dual 1000baseT Ethernet",
    795 	  WM_T_80003,		WMP_F_1000T },
    796 
    797 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
    798 	  "i80003 dual 1000baseX Ethernet",
    799 	  WM_T_80003,		WMP_F_1000T },
    800 #if 0
    801 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
    802 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
    803 	  WM_T_80003,		WMP_F_SERDES },
    804 #endif
    805 
    806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
    807 	  "Intel i80003 1000baseT Ethernet",
    808 	  WM_T_80003,		WMP_F_1000T },
    809 #if 0
    810 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
    811 	  "Intel i80003 Gigabit Ethernet (SERDES)",
    812 	  WM_T_80003,		WMP_F_SERDES },
    813 #endif
    814 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
    815 	  "Intel i82801H (M_AMT) LAN Controller",
    816 	  WM_T_ICH8,		WMP_F_1000T },
    817 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
    818 	  "Intel i82801H (AMT) LAN Controller",
    819 	  WM_T_ICH8,		WMP_F_1000T },
    820 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
    821 	  "Intel i82801H LAN Controller",
    822 	  WM_T_ICH8,		WMP_F_1000T },
    823 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
    824 	  "Intel i82801H (IFE) LAN Controller",
    825 	  WM_T_ICH8,		WMP_F_1000T },
    826 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
    827 	  "Intel i82801H (M) LAN Controller",
    828 	  WM_T_ICH8,		WMP_F_1000T },
    829 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
    830 	  "Intel i82801H IFE (GT) LAN Controller",
    831 	  WM_T_ICH8,		WMP_F_1000T },
    832 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
    833 	  "Intel i82801H IFE (G) LAN Controller",
    834 	  WM_T_ICH8,		WMP_F_1000T },
    835 
    836 	{ 0,			0,
    837 	  NULL,
    838 	  0,			0 },
    839 };
    840 
#ifdef WM_EVENT_COUNTERS
/*
 * Storage for the per-Tx-segment event counter names.  The strings
 * ("txseg0", "txseg1", ...) are generated with sprintf() at attach
 * time; "txsegXXX" sizes each slot to hold up to a 3-digit index.
 */
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */
    844 
#if 0 /* Not currently used */
/*
 * wm_io_read:
 *
 *	Read a device register through the I/O-mapped indirect access
 *	window: the register offset is first written at I/O BAR offset 0
 *	(the address window), then the register's value is read back from
 *	offset 4 (the data window).  The write must precede the read.
 */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
    854 
/*
 * wm_io_write:
 *
 *	Write a device register through the I/O-mapped indirect access
 *	window: the register offset is written at I/O BAR offset 0 (the
 *	address window), then the value at offset 4 (the data window).
 *	The two writes must occur in exactly this order.
 */
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
    862 
    863 static inline void
    864 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
    865 {
    866 	wa->wa_low = htole32(v & 0xffffffffU);
    867 	if (sizeof(bus_addr_t) == 8)
    868 		wa->wa_high = htole32((uint64_t) v >> 32);
    869 	else
    870 		wa->wa_high = 0;
    871 }
    872 
    873 static const struct wm_product *
    874 wm_lookup(const struct pci_attach_args *pa)
    875 {
    876 	const struct wm_product *wmp;
    877 
    878 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
    879 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
    880 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
    881 			return (wmp);
    882 	}
    883 	return (NULL);
    884 }
    885 
    886 static int
    887 wm_match(struct device *parent, struct cfdata *cf, void *aux)
    888 {
    889 	struct pci_attach_args *pa = aux;
    890 
    891 	if (wm_lookup(pa) != NULL)
    892 		return (1);
    893 
    894 	return (0);
    895 }
    896 
    897 static void
    898 wm_attach(struct device *parent, struct device *self, void *aux)
    899 {
    900 	struct wm_softc *sc = (void *) self;
    901 	struct pci_attach_args *pa = aux;
    902 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    903 	pci_chipset_tag_t pc = pa->pa_pc;
    904 	pci_intr_handle_t ih;
    905 	size_t cdata_size;
    906 	const char *intrstr = NULL;
    907 	const char *eetype;
    908 	bus_space_tag_t memt;
    909 	bus_space_handle_t memh;
    910 	bus_dma_segment_t seg;
    911 	int memh_valid;
    912 	int i, rseg, error;
    913 	const struct wm_product *wmp;
    914 	prop_data_t ea;
    915 	prop_number_t pn;
    916 	uint8_t enaddr[ETHER_ADDR_LEN];
    917 	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
    918 	pcireg_t preg, memtype;
    919 	uint32_t reg;
    920 
    921 	callout_init(&sc->sc_tick_ch);
    922 
    923 	wmp = wm_lookup(pa);
    924 	if (wmp == NULL) {
    925 		printf("\n");
    926 		panic("wm_attach: impossible");
    927 	}
    928 
    929 	sc->sc_pc = pa->pa_pc;
    930 	sc->sc_pcitag = pa->pa_tag;
    931 
    932 	if (pci_dma64_available(pa))
    933 		sc->sc_dmat = pa->pa_dmat64;
    934 	else
    935 		sc->sc_dmat = pa->pa_dmat;
    936 
    937 	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
    938 	aprint_naive(": Ethernet controller\n");
    939 	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);
    940 
    941 	sc->sc_type = wmp->wmp_type;
    942 	if (sc->sc_type < WM_T_82543) {
    943 		if (preg < 2) {
    944 			aprint_error("%s: i82542 must be at least rev. 2\n",
    945 			    sc->sc_dev.dv_xname);
    946 			return;
    947 		}
    948 		if (preg < 3)
    949 			sc->sc_type = WM_T_82542_2_0;
    950 	}
    951 
    952 	/*
    953 	 * Map the device.  All devices support memory-mapped acccess,
    954 	 * and it is really required for normal operation.
    955 	 */
    956 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
    957 	switch (memtype) {
    958 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
    959 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
    960 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
    961 		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
    962 		break;
    963 	default:
    964 		memh_valid = 0;
    965 	}
    966 
    967 	if (memh_valid) {
    968 		sc->sc_st = memt;
    969 		sc->sc_sh = memh;
    970 	} else {
    971 		aprint_error("%s: unable to map device registers\n",
    972 		    sc->sc_dev.dv_xname);
    973 		return;
    974 	}
    975 
    976 	/*
    977 	 * In addition, i82544 and later support I/O mapped indirect
    978 	 * register access.  It is not desirable (nor supported in
    979 	 * this driver) to use it for normal operation, though it is
    980 	 * required to work around bugs in some chip versions.
    981 	 */
    982 	if (sc->sc_type >= WM_T_82544) {
    983 		/* First we have to find the I/O BAR. */
    984 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
    985 			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
    986 			    PCI_MAPREG_TYPE_IO)
    987 				break;
    988 		}
    989 		if (i == PCI_MAPREG_END)
    990 			aprint_error("%s: WARNING: unable to find I/O BAR\n",
    991 			    sc->sc_dev.dv_xname);
    992 		else {
    993 			/*
    994 			 * The i8254x doesn't apparently respond when the
    995 			 * I/O BAR is 0, which looks somewhat like it's not
    996 			 * been configured.
    997 			 */
    998 			preg = pci_conf_read(pc, pa->pa_tag, i);
    999 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1000 				aprint_error("%s: WARNING: I/O BAR at zero.\n",
   1001 				    sc->sc_dev.dv_xname);
   1002 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1003 					0, &sc->sc_iot, &sc->sc_ioh,
   1004 					NULL, NULL) == 0) {
   1005 				sc->sc_flags |= WM_F_IOH_VALID;
   1006 			} else {
   1007 				aprint_error("%s: WARNING: unable to map "
   1008 				    "I/O space\n", sc->sc_dev.dv_xname);
   1009 			}
   1010 		}
   1011 
   1012 	}
   1013 
   1014 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1015 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1016 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1017 	if (sc->sc_type < WM_T_82542_2_1)
   1018 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1019 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1020 
   1021 	/* power up chip */
   1022 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
   1023 	    NULL)) && error != EOPNOTSUPP) {
   1024 		aprint_error("%s: cannot activate %d\n", sc->sc_dev.dv_xname,
   1025 		    error);
   1026 		return;
   1027 	}
   1028 
   1029 	/*
   1030 	 * Map and establish our interrupt.
   1031 	 */
   1032 	if (pci_intr_map(pa, &ih)) {
   1033 		aprint_error("%s: unable to map interrupt\n",
   1034 		    sc->sc_dev.dv_xname);
   1035 		return;
   1036 	}
   1037 	intrstr = pci_intr_string(pc, ih);
   1038 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
   1039 	if (sc->sc_ih == NULL) {
   1040 		aprint_error("%s: unable to establish interrupt",
   1041 		    sc->sc_dev.dv_xname);
   1042 		if (intrstr != NULL)
   1043 			aprint_normal(" at %s", intrstr);
   1044 		aprint_normal("\n");
   1045 		return;
   1046 	}
   1047 	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
   1048 
   1049 	/*
   1050 	 * Determine a few things about the bus we're connected to.
   1051 	 */
   1052 	if (sc->sc_type < WM_T_82543) {
   1053 		/* We don't really know the bus characteristics here. */
   1054 		sc->sc_bus_speed = 33;
   1055 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1056 		/*
   1057 		 * CSA (Communication Streaming Architecture) is about as fast
   1058 		 * a 32-bit 66MHz PCI Bus.
   1059 		 */
   1060 		sc->sc_flags |= WM_F_CSA;
   1061 		sc->sc_bus_speed = 66;
   1062 		aprint_verbose("%s: Communication Streaming Architecture\n",
   1063 		    sc->sc_dev.dv_xname);
   1064 		if (sc->sc_type == WM_T_82547) {
   1065 			callout_init(&sc->sc_txfifo_ch);
   1066 			callout_setfunc(&sc->sc_txfifo_ch,
   1067 					wm_82547_txfifo_stall, sc);
   1068 			aprint_verbose("%s: using 82547 Tx FIFO stall "
   1069 				       "work-around\n", sc->sc_dev.dv_xname);
   1070 		}
   1071 	} else if (sc->sc_type >= WM_T_82571) {
   1072 		sc->sc_flags |= WM_F_PCIE;
   1073 		if (sc->sc_type != WM_T_ICH8)
   1074 			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
   1075 		aprint_verbose("%s: PCI-Express bus\n", sc->sc_dev.dv_xname);
   1076 	} else {
   1077 		reg = CSR_READ(sc, WMREG_STATUS);
   1078 		if (reg & STATUS_BUS64)
   1079 			sc->sc_flags |= WM_F_BUS64;
   1080 		if (sc->sc_type >= WM_T_82544 &&
   1081 		    (reg & STATUS_PCIX_MODE) != 0) {
   1082 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1083 
   1084 			sc->sc_flags |= WM_F_PCIX;
   1085 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1086 					       PCI_CAP_PCIX,
   1087 					       &sc->sc_pcix_offset, NULL) == 0)
   1088 				aprint_error("%s: unable to find PCIX "
   1089 				    "capability\n", sc->sc_dev.dv_xname);
   1090 			else if (sc->sc_type != WM_T_82545_3 &&
   1091 				 sc->sc_type != WM_T_82546_3) {
   1092 				/*
   1093 				 * Work around a problem caused by the BIOS
   1094 				 * setting the max memory read byte count
   1095 				 * incorrectly.
   1096 				 */
   1097 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1098 				    sc->sc_pcix_offset + PCI_PCIX_CMD);
   1099 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1100 				    sc->sc_pcix_offset + PCI_PCIX_STATUS);
   1101 
   1102 				bytecnt =
   1103 				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
   1104 				    PCI_PCIX_CMD_BYTECNT_SHIFT;
   1105 				maxb =
   1106 				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
   1107 				    PCI_PCIX_STATUS_MAXB_SHIFT;
   1108 				if (bytecnt > maxb) {
   1109 					aprint_verbose("%s: resetting PCI-X "
   1110 					    "MMRBC: %d -> %d\n",
   1111 					    sc->sc_dev.dv_xname,
   1112 					    512 << bytecnt, 512 << maxb);
   1113 					pcix_cmd = (pcix_cmd &
   1114 					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
   1115 					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
   1116 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1117 					    sc->sc_pcix_offset + PCI_PCIX_CMD,
   1118 					    pcix_cmd);
   1119 				}
   1120 			}
   1121 		}
   1122 		/*
   1123 		 * The quad port adapter is special; it has a PCIX-PCIX
   1124 		 * bridge on the board, and can run the secondary bus at
   1125 		 * a higher speed.
   1126 		 */
   1127 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1128 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1129 								      : 66;
   1130 		} else if (sc->sc_flags & WM_F_PCIX) {
   1131 			switch (reg & STATUS_PCIXSPD_MASK) {
   1132 			case STATUS_PCIXSPD_50_66:
   1133 				sc->sc_bus_speed = 66;
   1134 				break;
   1135 			case STATUS_PCIXSPD_66_100:
   1136 				sc->sc_bus_speed = 100;
   1137 				break;
   1138 			case STATUS_PCIXSPD_100_133:
   1139 				sc->sc_bus_speed = 133;
   1140 				break;
   1141 			default:
   1142 				aprint_error(
   1143 				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
   1144 				    sc->sc_dev.dv_xname,
   1145 				    reg & STATUS_PCIXSPD_MASK);
   1146 				sc->sc_bus_speed = 66;
   1147 			}
   1148 		} else
   1149 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1150 		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
   1151 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1152 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1153 	}
   1154 
   1155 	/*
   1156 	 * Allocate the control data structures, and create and load the
   1157 	 * DMA map for it.
   1158 	 *
   1159 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   1160 	 * memory.  So must Rx descriptors.  We simplify by allocating
   1161 	 * both sets within the same 4G segment.
   1162 	 */
   1163 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
   1164 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
   1165 	cdata_size = sc->sc_type < WM_T_82544 ?
   1166 	    sizeof(struct wm_control_data_82542) :
   1167 	    sizeof(struct wm_control_data_82544);
   1168 	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
   1169 				      (bus_size_t) 0x100000000ULL,
   1170 				      &seg, 1, &rseg, 0)) != 0) {
   1171 		aprint_error(
   1172 		    "%s: unable to allocate control data, error = %d\n",
   1173 		    sc->sc_dev.dv_xname, error);
   1174 		goto fail_0;
   1175 	}
   1176 
   1177 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
   1178 				    (void **)&sc->sc_control_data, 0)) != 0) {
   1179 		aprint_error("%s: unable to map control data, error = %d\n",
   1180 		    sc->sc_dev.dv_xname, error);
   1181 		goto fail_1;
   1182 	}
   1183 
   1184 	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
   1185 				       0, 0, &sc->sc_cddmamap)) != 0) {
   1186 		aprint_error("%s: unable to create control data DMA map, "
   1187 		    "error = %d\n", sc->sc_dev.dv_xname, error);
   1188 		goto fail_2;
   1189 	}
   1190 
   1191 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
   1192 				     sc->sc_control_data, cdata_size, NULL,
   1193 				     0)) != 0) {
   1194 		aprint_error(
   1195 		    "%s: unable to load control data DMA map, error = %d\n",
   1196 		    sc->sc_dev.dv_xname, error);
   1197 		goto fail_3;
   1198 	}
   1199 
   1200 
   1201 	/*
   1202 	 * Create the transmit buffer DMA maps.
   1203 	 */
   1204 	WM_TXQUEUELEN(sc) =
   1205 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   1206 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   1207 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   1208 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   1209 					       WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   1210 					  &sc->sc_txsoft[i].txs_dmamap)) != 0) {
   1211 			aprint_error("%s: unable to create Tx DMA map %d, "
   1212 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
   1213 			goto fail_4;
   1214 		}
   1215 	}
   1216 
   1217 	/*
   1218 	 * Create the receive buffer DMA maps.
   1219 	 */
   1220 	for (i = 0; i < WM_NRXDESC; i++) {
   1221 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   1222 					       MCLBYTES, 0, 0,
   1223 					  &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
   1224 			aprint_error("%s: unable to create Rx DMA map %d, "
   1225 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
   1226 			goto fail_5;
   1227 		}
   1228 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
   1229 	}
   1230 
   1231 	/* clear interesting stat counters */
   1232 	CSR_READ(sc, WMREG_COLC);
   1233 	CSR_READ(sc, WMREG_RXERRC);
   1234 
   1235 	/*
   1236 	 * Reset the chip to a known state.
   1237 	 */
   1238 	wm_reset(sc);
   1239 
   1240 	/*
   1241 	 * Get some information about the EEPROM.
   1242 	 */
   1243 	if (sc->sc_type == WM_T_ICH8) {
   1244 		uint32_t flash_size;
   1245 		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
   1246 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   1247 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1248 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
   1249 			printf("%s: can't map FLASH registers\n",
   1250 			    sc->sc_dev.dv_xname);
   1251 			return;
   1252 		}
   1253 		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1254 		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
   1255 						ICH_FLASH_SECTOR_SIZE;
   1256 		sc->sc_ich8_flash_bank_size =
   1257 			((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1258 		sc->sc_ich8_flash_bank_size -=
   1259 			(flash_size & ICH_GFPREG_BASE_MASK);
   1260 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1261 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1262 	} else if (sc->sc_type == WM_T_80003)
   1263 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR |  WM_F_SWFW_SYNC;
   1264 	else if (sc->sc_type == WM_T_82573)
   1265 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1266 	else if (sc->sc_type > WM_T_82544)
   1267 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
   1268 
   1269 	if (sc->sc_type <= WM_T_82544)
   1270 		sc->sc_ee_addrbits = 6;
   1271 	else if (sc->sc_type <= WM_T_82546_3) {
   1272 		reg = CSR_READ(sc, WMREG_EECD);
   1273 		if (reg & EECD_EE_SIZE)
   1274 			sc->sc_ee_addrbits = 8;
   1275 		else
   1276 			sc->sc_ee_addrbits = 6;
   1277 	} else if (sc->sc_type <= WM_T_82547_2) {
   1278 		reg = CSR_READ(sc, WMREG_EECD);
   1279 		if (reg & EECD_EE_TYPE) {
   1280 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1281 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   1282 		} else
   1283 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
   1284 	} else if ((sc->sc_type == WM_T_82573) &&
   1285 	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
   1286 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   1287 	} else {
   1288 		/* Assume everything else is SPI. */
   1289 		reg = CSR_READ(sc, WMREG_EECD);
   1290 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1291 		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   1292 	}
   1293 
   1294 	/*
   1295 	 * Defer printing the EEPROM type until after verifying the checksum
   1296 	 * This allows the EEPROM type to be printed correctly in the case
   1297 	 * that no EEPROM is attached.
   1298 	 */
   1299 
   1300 
   1301 	/*
   1302 	 * Validate the EEPROM checksum. If the checksum fails, flag this for
   1303 	 * later, so we can fail future reads from the EEPROM.
   1304 	 */
   1305 	if (wm_validate_eeprom_checksum(sc))
   1306 		sc->sc_flags |= WM_F_EEPROM_INVALID;
   1307 
   1308 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   1309 		aprint_verbose("%s: No EEPROM\n", sc->sc_dev.dv_xname);
   1310 	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   1311 		aprint_verbose("%s: FLASH\n", sc->sc_dev.dv_xname);
   1312 	} else {
   1313 		if (sc->sc_flags & WM_F_EEPROM_SPI)
   1314 			eetype = "SPI";
   1315 		else
   1316 			eetype = "MicroWire";
   1317 		aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
   1318 		    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
   1319 		    sc->sc_ee_addrbits, eetype);
   1320 	}
   1321 
   1322 	/*
   1323 	 * Read the Ethernet address from the EEPROM, if not first found
   1324 	 * in device properties.
   1325 	 */
   1326 	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
   1327 	if (ea != NULL) {
   1328 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   1329 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   1330 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   1331 	} else {
   1332 		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
   1333 		    sizeof(myea) / sizeof(myea[0]), myea)) {
   1334 			aprint_error("%s: unable to read Ethernet address\n",
   1335 			    sc->sc_dev.dv_xname);
   1336 			return;
   1337 		}
   1338 		enaddr[0] = myea[0] & 0xff;
   1339 		enaddr[1] = myea[0] >> 8;
   1340 		enaddr[2] = myea[1] & 0xff;
   1341 		enaddr[3] = myea[1] >> 8;
   1342 		enaddr[4] = myea[2] & 0xff;
   1343 		enaddr[5] = myea[2] >> 8;
   1344 	}
   1345 
   1346 	/*
   1347 	 * Toggle the LSB of the MAC address on the second port
   1348 	 * of the dual port controller.
   1349 	 */
   1350 	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
   1351 	    || sc->sc_type ==  WM_T_82571 || sc->sc_type == WM_T_80003) {
   1352 		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
   1353 			enaddr[5] ^= 1;
   1354 	}
   1355 
   1356 	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
   1357 	    ether_sprintf(enaddr));
   1358 
   1359 	/*
   1360 	 * Read the config info from the EEPROM, and set up various
   1361 	 * bits in the control registers based on their contents.
   1362 	 */
   1363 	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
   1364 				 "i82543-cfg1");
   1365 	if (pn != NULL) {
   1366 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1367 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   1368 	} else {
   1369 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
   1370 			aprint_error("%s: unable to read CFG1\n",
   1371 			    sc->sc_dev.dv_xname);
   1372 			return;
   1373 		}
   1374 	}
   1375 
   1376 	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
   1377 				 "i82543-cfg2");
   1378 	if (pn != NULL) {
   1379 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1380 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   1381 	} else {
   1382 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
   1383 			aprint_error("%s: unable to read CFG2\n",
   1384 			    sc->sc_dev.dv_xname);
   1385 			return;
   1386 		}
   1387 	}
   1388 
   1389 	if (sc->sc_type >= WM_T_82544) {
   1390 		pn = prop_dictionary_get(device_properties(&sc->sc_dev),
   1391 					 "i82543-swdpin");
   1392 		if (pn != NULL) {
   1393 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1394 			swdpin = (uint16_t) prop_number_integer_value(pn);
   1395 		} else {
   1396 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
   1397 				aprint_error("%s: unable to read SWDPIN\n",
   1398 				    sc->sc_dev.dv_xname);
   1399 				return;
   1400 			}
   1401 		}
   1402 	}
   1403 
   1404 	if (cfg1 & EEPROM_CFG1_ILOS)
   1405 		sc->sc_ctrl |= CTRL_ILOS;
   1406 	if (sc->sc_type >= WM_T_82544) {
   1407 		sc->sc_ctrl |=
   1408 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   1409 		    CTRL_SWDPIO_SHIFT;
   1410 		sc->sc_ctrl |=
   1411 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   1412 		    CTRL_SWDPINS_SHIFT;
   1413 	} else {
   1414 		sc->sc_ctrl |=
   1415 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   1416 		    CTRL_SWDPIO_SHIFT;
   1417 	}
   1418 
   1419 #if 0
   1420 	if (sc->sc_type >= WM_T_82544) {
   1421 		if (cfg1 & EEPROM_CFG1_IPS0)
   1422 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   1423 		if (cfg1 & EEPROM_CFG1_IPS1)
   1424 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   1425 		sc->sc_ctrl_ext |=
   1426 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   1427 		    CTRL_EXT_SWDPIO_SHIFT;
   1428 		sc->sc_ctrl_ext |=
   1429 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   1430 		    CTRL_EXT_SWDPINS_SHIFT;
   1431 	} else {
   1432 		sc->sc_ctrl_ext |=
   1433 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   1434 		    CTRL_EXT_SWDPIO_SHIFT;
   1435 	}
   1436 #endif
   1437 
   1438 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   1439 #if 0
   1440 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   1441 #endif
   1442 
   1443 	/*
   1444 	 * Set up some register offsets that are different between
   1445 	 * the i82542 and the i82543 and later chips.
   1446 	 */
   1447 	if (sc->sc_type < WM_T_82543) {
   1448 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
   1449 		sc->sc_tdt_reg = WMREG_OLD_TDT;
   1450 	} else {
   1451 		sc->sc_rdt_reg = WMREG_RDT;
   1452 		sc->sc_tdt_reg = WMREG_TDT;
   1453 	}
   1454 
   1455 	/*
   1456 	 * Determine if we're TBI or GMII mode, and initialize the
   1457 	 * media structures accordingly.
   1458 	 */
   1459 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_82573) {
   1460 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   1461 		wm_gmii_mediainit(sc);
   1462 	} else if (sc->sc_type < WM_T_82543 ||
   1463 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   1464 		if (wmp->wmp_flags & WMP_F_1000T)
   1465 			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
   1466 			    "product!\n", sc->sc_dev.dv_xname);
   1467 		wm_tbi_mediainit(sc);
   1468 	} else {
   1469 		if (wmp->wmp_flags & WMP_F_1000X)
   1470 			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
   1471 			    "product!\n", sc->sc_dev.dv_xname);
   1472 		wm_gmii_mediainit(sc);
   1473 	}
   1474 
   1475 	ifp = &sc->sc_ethercom.ec_if;
   1476 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
   1477 	ifp->if_softc = sc;
   1478 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1479 	ifp->if_ioctl = wm_ioctl;
   1480 	ifp->if_start = wm_start;
   1481 	ifp->if_watchdog = wm_watchdog;
   1482 	ifp->if_init = wm_init;
   1483 	ifp->if_stop = wm_stop;
   1484 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   1485 	IFQ_SET_READY(&ifp->if_snd);
   1486 
   1487 	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
   1488 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1489 
   1490 	/*
   1491 	 * If we're a i82543 or greater, we can support VLANs.
   1492 	 */
   1493 	if (sc->sc_type >= WM_T_82543)
   1494 		sc->sc_ethercom.ec_capabilities |=
   1495 		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
   1496 
   1497 	/*
   1498 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
   1499 	 * on i82543 and later.
   1500 	 */
   1501 	if (sc->sc_type >= WM_T_82543) {
   1502 		ifp->if_capabilities |=
   1503 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   1504 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   1505 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   1506 		    IFCAP_CSUM_TCPv6_Tx |
   1507 		    IFCAP_CSUM_UDPv6_Tx;
   1508 	}
   1509 
   1510 	/*
   1511 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   1512 	 *
   1513 	 *	82541GI (8086:1076) ... no
   1514 	 *	82572EI (8086:10b9) ... yes
   1515 	 */
   1516 	if (sc->sc_type >= WM_T_82571) {
   1517 		ifp->if_capabilities |=
   1518 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   1519 	}
   1520 
   1521 	/*
   1522 	 * If we're a i82544 or greater (except i82547), we can do
   1523 	 * TCP segmentation offload.
   1524 	 */
   1525 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   1526 		ifp->if_capabilities |= IFCAP_TSOv4;
   1527 	}
   1528 
   1529 	if (sc->sc_type >= WM_T_82571) {
   1530 		ifp->if_capabilities |= IFCAP_TSOv6;
   1531 	}
   1532 
   1533 	/*
   1534 	 * Attach the interface.
   1535 	 */
   1536 	if_attach(ifp);
   1537 	ether_ifattach(ifp, enaddr);
   1538 #if NRND > 0
   1539 	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
   1540 	    RND_TYPE_NET, 0);
   1541 #endif
   1542 
   1543 #ifdef WM_EVENT_COUNTERS
   1544 	/* Attach event counters. */
   1545 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   1546 	    NULL, sc->sc_dev.dv_xname, "txsstall");
   1547 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   1548 	    NULL, sc->sc_dev.dv_xname, "txdstall");
   1549 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   1550 	    NULL, sc->sc_dev.dv_xname, "txfifo_stall");
   1551 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   1552 	    NULL, sc->sc_dev.dv_xname, "txdw");
   1553 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   1554 	    NULL, sc->sc_dev.dv_xname, "txqe");
   1555 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   1556 	    NULL, sc->sc_dev.dv_xname, "rxintr");
   1557 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   1558 	    NULL, sc->sc_dev.dv_xname, "linkintr");
   1559 
   1560 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   1561 	    NULL, sc->sc_dev.dv_xname, "rxipsum");
   1562 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   1563 	    NULL, sc->sc_dev.dv_xname, "rxtusum");
   1564 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   1565 	    NULL, sc->sc_dev.dv_xname, "txipsum");
   1566 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   1567 	    NULL, sc->sc_dev.dv_xname, "txtusum");
   1568 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   1569 	    NULL, sc->sc_dev.dv_xname, "txtusum6");
   1570 
   1571 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   1572 	    NULL, sc->sc_dev.dv_xname, "txtso");
   1573 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   1574 	    NULL, sc->sc_dev.dv_xname, "txtso6");
   1575 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   1576 	    NULL, sc->sc_dev.dv_xname, "txtsopain");
   1577 
   1578 	for (i = 0; i < WM_NTXSEGS; i++) {
   1579 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
   1580 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   1581 		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
   1582 	}
   1583 
   1584 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   1585 	    NULL, sc->sc_dev.dv_xname, "txdrop");
   1586 
   1587 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   1588 	    NULL, sc->sc_dev.dv_xname, "tu");
   1589 
   1590 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   1591 	    NULL, sc->sc_dev.dv_xname, "tx_xoff");
   1592 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   1593 	    NULL, sc->sc_dev.dv_xname, "tx_xon");
   1594 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   1595 	    NULL, sc->sc_dev.dv_xname, "rx_xoff");
   1596 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   1597 	    NULL, sc->sc_dev.dv_xname, "rx_xon");
   1598 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   1599 	    NULL, sc->sc_dev.dv_xname, "rx_macctl");
   1600 #endif /* WM_EVENT_COUNTERS */
   1601 
   1602 	/*
   1603 	 * Make sure the interface is shutdown during reboot.
   1604 	 */
   1605 	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
   1606 	if (sc->sc_sdhook == NULL)
   1607 		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
   1608 		    sc->sc_dev.dv_xname);
   1609 
   1610 	sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
   1611 	    wm_powerhook, sc);
   1612 	if (sc->sc_powerhook == NULL)
   1613 		aprint_error("%s: can't establish powerhook\n",
   1614 		    sc->sc_dev.dv_xname);
   1615 	return;
   1616 
   1617 	/*
   1618 	 * Free any resources we've allocated during the failed attach
   1619 	 * attempt.  Do this in reverse order and fall through.
   1620 	 */
   1621  fail_5:
   1622 	for (i = 0; i < WM_NRXDESC; i++) {
   1623 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   1624 			bus_dmamap_destroy(sc->sc_dmat,
   1625 			    sc->sc_rxsoft[i].rxs_dmamap);
   1626 	}
   1627  fail_4:
   1628 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   1629 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   1630 			bus_dmamap_destroy(sc->sc_dmat,
   1631 			    sc->sc_txsoft[i].txs_dmamap);
   1632 	}
   1633 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   1634  fail_3:
   1635 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   1636  fail_2:
   1637 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   1638 	    cdata_size);
   1639  fail_1:
   1640 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
   1641  fail_0:
   1642 	return;
   1643 }
   1644 
   1645 /*
   1646  * wm_shutdown:
   1647  *
   1648  *	Make sure the interface is stopped at reboot time.
   1649  */
   1650 static void
   1651 wm_shutdown(void *arg)
   1652 {
   1653 	struct wm_softc *sc = arg;
   1654 
   1655 	wm_stop(&sc->sc_ethercom.ec_if, 1);
   1656 }
   1657 
   1658 static void
   1659 wm_powerhook(int why, void *arg)
   1660 {
   1661 	struct wm_softc *sc = arg;
   1662 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1663 	pci_chipset_tag_t pc = sc->sc_pc;
   1664 	pcitag_t tag = sc->sc_pcitag;
   1665 
   1666 	switch (why) {
   1667 	case PWR_SOFTSUSPEND:
   1668 		wm_shutdown(sc);
   1669 		break;
   1670 	case PWR_SOFTRESUME:
   1671 		ifp->if_flags &= ~IFF_RUNNING;
   1672 		wm_init(ifp);
   1673 		if (ifp->if_flags & IFF_RUNNING)
   1674 			wm_start(ifp);
   1675 		break;
   1676 	case PWR_SUSPEND:
   1677 		pci_conf_capture(pc, tag, &sc->sc_pciconf);
   1678 		break;
   1679 	case PWR_RESUME:
   1680 		pci_conf_restore(pc, tag, &sc->sc_pciconf);
   1681 		break;
   1682 	}
   1683 
   1684 	return;
   1685 }
   1686 
/*
 * wm_tx_offload:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 *
 *	Loads one Livengood TCP/IP context descriptor into the Tx ring
 *	(advancing sc_txnext and incrementing txs->txs_ndesc), and hands
 *	back via *cmdp / *fieldsp the command and option bits the caller
 *	must place into the packet's data descriptors.  Always returns 0.
 */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs, cmd, cmdlen, seg;
	uint32_t ipcse;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/*
	 * Locate the start of the IP header from the Ethernet
	 * encapsulation.  (htons() of the network-order ethertype is
	 * the same byte swap as ntohs(); behaviorally identical.)
	 */
	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	/* Extract the IP header length from the checksum metadata. */
	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}
	/* Offset of the last byte of the IP header (inclusive). */
	ipcse = offset + iphl - 1;

	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
	seg = 0;
	fields = 0;

	/* TCP segmentation offload setup. */
	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
		int hlen = offset + iphl;
		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;

		if (__predict_false(m0->m_len <
				    (hlen + sizeof(struct tcphdr)))) {
			/*
			 * TCP/IP headers are not in the first mbuf; we need
			 * to do this the slow and painful way.  Let's just
			 * hope this doesn't happen very often.
			 */
			struct tcphdr th;

			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);

			m_copydata(m0, hlen, sizeof(th), &th);
			if (v4) {
				struct ip ip;

				/*
				 * Zero ip_len in the packet and store the
				 * TCP pseudo-header checksum into th_sum.
				 */
				m_copydata(m0, offset, sizeof(ip), &ip);
				ip.ip_len = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip, ip_len),
				    sizeof(ip.ip_len), &ip.ip_len);
				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr ip6;

				/* Same for v6: clear the payload length and
				 * seed th_sum with the pseudo-header sum. */
				m_copydata(m0, offset, sizeof(ip6), &ip6);
				ip6.ip6_plen = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip6_hdr, ip6_plen),
				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			/* Account for TCP options in the header length. */
			hlen += th.th_off << 2;
		} else {
			/*
			 * TCP/IP headers are in the first mbuf; we can do
			 * this the easy way.
			 */
			struct tcphdr *th;

			if (v4) {
				struct ip *ip =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip->ip_len = 0;
				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr *ip6 =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip6->ip6_plen = 0;
				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			hlen += th->th_off << 2;
		}

		if (v4) {
			WM_EVCNT_INCR(&sc->sc_ev_txtso);
			cmdlen |= WTX_TCPIP_CMD_IP;
		} else {
			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
			/* IPv6 has no IP header checksum; clear IPCSE. */
			ipcse = 0;
		}
		cmd |= WTX_TCPIP_CMD_TSE;
		cmdlen |= WTX_TCPIP_CMD_TSE |
		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
	}

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	ipcs = WTX_TCPIP_IPCSS(offset) |
	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
	    WTX_TCPIP_IPCSE(ipcse);
	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
	}

	/* Advance past the IP header to the TCP/UDP header. */
	offset += iphl;

	if (m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	/* Fill in the context descriptor. */
	t = (struct livengood_tcpip_ctxdesc *)
	    &sc->sc_txdescs[sc->sc_txnext];
	t->tcpip_ipcs = htole32(ipcs);
	t->tcpip_tucs = htole32(tucs);
	t->tcpip_cmdlen = htole32(cmdlen);
	t->tcpip_seg = htole32(seg);
	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

	/* The context descriptor itself consumes one Tx descriptor. */
	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
	txs->txs_ndesc++;

	*cmdp = cmd;
	*fieldsp = fields;

	return (0);
}
   1881 
   1882 static void
   1883 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   1884 {
   1885 	struct mbuf *m;
   1886 	int i;
   1887 
   1888 	log(LOG_DEBUG, "%s: mbuf chain:\n", sc->sc_dev.dv_xname);
   1889 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   1890 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   1891 		    "m_flags = 0x%08x\n", sc->sc_dev.dv_xname,
   1892 		    m->m_data, m->m_len, m->m_flags);
   1893 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", sc->sc_dev.dv_xname,
   1894 	    i, i == 1 ? "" : "s");
   1895 }
   1896 
/*
 * wm_82547_txfifo_stall:
 *
 *	Callout used to wait for the 82547 Tx FIFO to drain,
 *	reset the FIFO pointers, and restart packet transmission.
 *
 *	Scheduled one tick at a time by wm_82547_txfifo_bugchk() when a
 *	packet would wrap the Tx FIFO ring buffer; see the comment there.
 */
static void
wm_82547_txfifo_stall(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_txfifo_stall) {
		/*
		 * Drained when the descriptor ring is empty (TDT == TDH)
		 * and the FIFO pointer registers agree with their
		 * TDFTS/TDFHS counterparts (presumably the saved copies;
		 * see the 8254x manual to confirm).
		 */
		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
			/*
			 * Packets have drained.  Stop transmitter, reset
			 * FIFO pointers, restart transmitter, and kick
			 * the packet queue.
			 */
			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TCTL, tctl);
			CSR_WRITE_FLUSH(sc);

			sc->sc_txfifo_head = 0;
			sc->sc_txfifo_stall = 0;
			wm_start(&sc->sc_ethercom.ec_if);
		} else {
			/*
			 * Still waiting for packets to drain; try again in
			 * another tick.
			 */
			callout_schedule(&sc->sc_txfifo_ch, 1);
		}
	}

	splx(s);
}
   1943 
   1944 /*
   1945  * wm_82547_txfifo_bugchk:
   1946  *
   1947  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   1948  *	prevent enqueueing a packet that would wrap around the end
   1949  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
   1950  *
   1951  *	We do this by checking the amount of space before the end
   1952  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   1953  *	the Tx FIFO, wait for all remaining packets to drain, reset
   1954  *	the internal FIFO pointers to the beginning, and restart
   1955  *	transmission on the interface.
   1956  */
   1957 #define	WM_FIFO_HDR		0x10
   1958 #define	WM_82547_PAD_LEN	0x3e0
   1959 static int
   1960 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   1961 {
   1962 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
   1963 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   1964 
   1965 	/* Just return if already stalled. */
   1966 	if (sc->sc_txfifo_stall)
   1967 		return (1);
   1968 
   1969 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   1970 		/* Stall only occurs in half-duplex mode. */
   1971 		goto send_packet;
   1972 	}
   1973 
   1974 	if (len >= WM_82547_PAD_LEN + space) {
   1975 		sc->sc_txfifo_stall = 1;
   1976 		callout_schedule(&sc->sc_txfifo_ch, 1);
   1977 		return (1);
   1978 	}
   1979 
   1980  send_packet:
   1981 	sc->sc_txfifo_head += len;
   1982 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
   1983 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
   1984 
   1985 	return (0);
   1986 }
   1987 
/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 *
 *	Drains the interface send queue into the Tx descriptor ring,
 *	setting up checksum/TSO offload contexts as needed.  Stops early
 *	(setting IFF_OACTIVE) when job slots or descriptors run out, or
 *	when the 82547 Tx FIFO workaround requires a stall.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
	bus_addr_t curaddr;
	bus_size_t seglen, curlen;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	/* Nothing to do unless running and not already active. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry; reclaim completed jobs first. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		use_tso = (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;

		/*
		 * So says the Linux driver:
		 * The controller does a simple calculation to make sure
		 * there is enough room in the FIFO before initiating the
		 * DMA for each buffer.  The calc is:
		 *	4 = ceil(buffer len / MSS)
		 * To make sure we don't overrun the FIFO, adjust the max
		 * buffer len if the MSS drops.
		 */
		dmamap->dm_maxsegsz =
		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
		    ? m0->m_pkthdr.segsz << 2
		    : WTX_MAX_LEN;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;
		if (use_tso) {
			/* For sentinel descriptor; see below. */
			segs_needed++;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * pack on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/*
		 * Check for 82547 Tx FIFO bug.  We need to do this
		 * once we know we can transmit the packet, since we
		 * do some internal FIFO space accounting here.
		 */
		if (sc->sc_type == WM_T_82547 &&
		    wm_82547_txfifo_bugchk(sc, m0)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: 82547 Tx FIFO bug detected\n",
			    sc->sc_dev.dv_xname));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet. */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			if (wm_tx_offload(sc, txs, &cksumcmd,
					  &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptor.  One descriptor is
		 * written per (possibly split) chunk of each DMA segment.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs; seg++) {
			for (seglen = dmamap->dm_segs[seg].ds_len,
			     curaddr = dmamap->dm_segs[seg].ds_addr;
			     seglen != 0;
			     curaddr += curlen, seglen -= curlen,
			     nexttx = WM_NEXTTX(sc, nexttx)) {
				curlen = seglen;

				/*
				 * So says the Linux driver:
				 * Work around for premature descriptor
				 * write-backs in TSO mode.  Append a
				 * 4-byte sentinel descriptor.
				 */
				if (use_tso &&
				    seg == dmamap->dm_nsegs - 1 &&
				    curlen > 8)
					curlen -= 4;

				wm_set_dma_addr(
				    &sc->sc_txdescs[nexttx].wtx_addr,
				    curaddr);
				sc->sc_txdescs[nexttx].wtx_cmdlen =
				    htole32(cksumcmd | curlen);
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
				    0;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
				    cksumfields;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
				lasttx = nexttx;

				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: desc %d: low 0x%08lx, "
				     "len 0x%04x\n",
				    sc->sc_dev.dv_xname, nexttx,
				    curaddr & 0xffffffffUL, (unsigned)curlen));
			}
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
   2300 
   2301 /*
   2302  * wm_watchdog:		[ifnet interface function]
   2303  *
   2304  *	Watchdog timer handler.
   2305  */
   2306 static void
   2307 wm_watchdog(struct ifnet *ifp)
   2308 {
   2309 	struct wm_softc *sc = ifp->if_softc;
   2310 
   2311 	/*
   2312 	 * Since we're using delayed interrupts, sweep up
   2313 	 * before we report an error.
   2314 	 */
   2315 	wm_txintr(sc);
   2316 
   2317 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
   2318 		log(LOG_ERR,
   2319 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2320 		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
   2321 		    sc->sc_txnext);
   2322 		ifp->if_oerrors++;
   2323 
   2324 		/* Reset the interface. */
   2325 		(void) wm_init(ifp);
   2326 	}
   2327 
   2328 	/* Try to get more packets going. */
   2329 	wm_start(ifp);
   2330 }
   2331 
   2332 /*
   2333  * wm_ioctl:		[ifnet interface function]
   2334  *
   2335  *	Handle control requests from the operator.
   2336  */
   2337 static int
   2338 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2339 {
   2340 	struct wm_softc *sc = ifp->if_softc;
   2341 	struct ifreq *ifr = (struct ifreq *) data;
   2342 	int s, error;
   2343 
   2344 	s = splnet();
   2345 
   2346 	switch (cmd) {
   2347 	case SIOCSIFMEDIA:
   2348 	case SIOCGIFMEDIA:
   2349 		/* Flow control requires full-duplex mode. */
   2350 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2351 		    (ifr->ifr_media & IFM_FDX) == 0)
   2352 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2353 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2354 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2355 				/* We can do both TXPAUSE and RXPAUSE. */
   2356 				ifr->ifr_media |=
   2357 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2358 			}
   2359 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2360 		}
   2361 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2362 		break;
   2363 	default:
   2364 		error = ether_ioctl(ifp, cmd, data);
   2365 		if (error == ENETRESET) {
   2366 			/*
   2367 			 * Multicast list has changed; set the hardware filter
   2368 			 * accordingly.
   2369 			 */
   2370 			if (ifp->if_flags & IFF_RUNNING)
   2371 				wm_set_filter(sc);
   2372 			error = 0;
   2373 		}
   2374 		break;
   2375 	}
   2376 
   2377 	/* Try to get more packets going. */
   2378 	wm_start(ifp);
   2379 
   2380 	splx(s);
   2381 	return (error);
   2382 }
   2383 
/*
 * wm_intr:
 *
 *	Interrupt service routine.  Returns non-zero if any of the
 *	interrupt causes we enabled were asserted.
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int handled = 0;

	/*
	 * Loop until ICR reports none of the causes we care about.
	 * NOTE(review): this relies on the ICR read acknowledging the
	 * reported causes -- confirm against the 8254x manual.
	 */
	while (1 /* CONSTCOND */) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;
#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Sweep Rx and Tx unconditionally, whatever the cause. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		/* Link-related causes are handled by wm_linkintr(). */
		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		/* Receiver overrun: count as an input error. */
		if (icr & ICR_RXO) {
			ifp->if_ierrors++;
#if defined(WM_DEBUG)
			log(LOG_WARNING, "%s: Receive overrun\n",
			    sc->sc_dev.dv_xname);
#endif /* defined(WM_DEBUG) */
		}
	}

	if (handled) {
		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}
   2450 
/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.  Reclaims completed Tx jobs:
 *	updates counters, unloads and frees each job's DMA map and mbuf,
 *	and returns descriptors to the free pool.
 */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	/* We may be able to enqueue packets again after reclaiming. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status =
		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
		if ((status & WTX_ST_DD) == 0) {
			/* Job not done yet; re-sync and stop sweeping. */
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		/* Excess-collision / late-collision errors. */
		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				log(LOG_WARNING, "%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				log(LOG_WARNING, "%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		/* Return descriptors and release the mbuf's DMA mapping. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
		ifp->if_timer = 0;
}
   2537 
   2538 /*
   2539  * wm_rxintr:
   2540  *
   2541  *	Helper; handle receive interrupts.
   2542  */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	/*
	 * Walk the receive ring starting at the last descriptor we
	 * serviced, until we hit a descriptor the chip has not yet
	 * marked done (WRX_ST_DD clear).  Multi-descriptor packets are
	 * accumulated on the sc_rxhead/sc_rxtailp chain until EOP.
	 */
	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		/* Sync the descriptor so we see the chip's writeback. */
		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		/*
		 * A previous buffer-allocation failure set sc_rxdiscard;
		 * keep recycling descriptors (without touching the chain)
		 * until we see the end of that packet.
		 */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		/* Make the received data visible to the CPU. */
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring, unless of
		 * course the length is zero. Treat the latter as a
		 * failed mapping.
		 */
		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		/* Append this fragment to the in-progress packet chain. */
		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS (not all chips can
		 * be configured to strip it), so we need to trim it.
		 */
		m->m_len -= ETHER_CRC_LEN;

		/* Terminate the chain and compute the total length. */
		*sc->sc_rxtailp = NULL;
		len = m->m_len + sc->sc_rxlen;
		m = sc->sc_rxhead;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				log(LOG_WARNING, "%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				log(LOG_WARNING, "%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				log(LOG_WARNING, "%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 *
		 * NOTE(review): this disabled code has an unbalanced
		 * parenthesis in the le16toh() call and will not compile
		 * as-is if re-enabled.
		 */
		if ((status & WRX_ST_VP) != 0) {
			VLAN_INPUT_TAG(ifp, m,
			    le16toh(sc->sc_rxdescs[i].wrx_special,
			    continue);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if ((status & WRX_ST_IXSM) == 0) {
			if (status & WRX_ST_IPCS) {
				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (errors & WRX_ER_IPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
			}
			if (status & WRX_ST_TCPCS) {
				/*
				 * Note: we don't know if this was TCP or UDP,
				 * so we just set both bits, and expect the
				 * upper layers to deal.
				 */
				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
				m->m_pkthdr.csum_flags |=
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
				if (errors & WRX_ER_TCPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}
   2738 
   2739 /*
   2740  * wm_linkintr:
   2741  *
   2742  *	Helper; handle link interrupts.
   2743  */
static void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * TBI (fiber/serdes) path from here on.
	 *
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 * (sc_tbi_anstate is presumably consumed by the TBI link
	 * check in the one-second tick path -- confirm against
	 * wm_tbi_check_link.)
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/*
			 * Link came up: reprogram the collision distance
			 * for the resolved duplex, and enable XON frames
			 * only if Tx flow control is active.
			 */
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			/* Pre-82543 parts use the old FCRTL location. */
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}
   2811 
   2812 /*
   2813  * wm_tick:
   2814  *
   2815  *	One second timer, used to check link status, sweep up
   2816  *	completed transmit jobs, etc.
   2817  */
static void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	s = splnet();

	/*
	 * Fold the flow-control statistics registers into our event
	 * counters.  NOTE(review): these appear to be clear-on-read
	 * hardware counters (we accumulate with ADD, never subtract) --
	 * confirm against the 8254x manual before reordering.
	 */
	if (sc->sc_type >= WM_T_82542_2_1) {
		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
	}

	/* Accumulate collision and receive-error counts from the chip. */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);


	/* Check link state: MII tick for PHY parts, TBI check otherwise. */
	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

	splx(s);

	/* Reschedule ourselves to run again in one second. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}
   2848 
   2849 /*
   2850  * wm_reset:
   2851  *
   2852  *	Reset the i82542 chip.
   2853  */
   2854 static void
   2855 wm_reset(struct wm_softc *sc)
   2856 {
   2857 	int i;
   2858 
   2859 	/*
   2860 	 * Allocate on-chip memory according to the MTU size.
   2861 	 * The Packet Buffer Allocation register must be written
   2862 	 * before the chip is reset.
   2863 	 */
   2864 	switch (sc->sc_type) {
   2865 	case WM_T_82547:
   2866 	case WM_T_82547_2:
   2867 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   2868 		    PBA_22K : PBA_30K;
   2869 		sc->sc_txfifo_head = 0;
   2870 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   2871 		sc->sc_txfifo_size =
   2872 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   2873 		sc->sc_txfifo_stall = 0;
   2874 		break;
   2875 	case WM_T_82571:
   2876 	case WM_T_82572:
   2877 	case WM_T_80003:
   2878 		sc->sc_pba = PBA_32K;
   2879 		break;
   2880 	case WM_T_82573:
   2881 		sc->sc_pba = PBA_12K;
   2882 		break;
   2883 	case WM_T_ICH8:
   2884 		sc->sc_pba = PBA_8K;
   2885 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   2886 		break;
   2887 	default:
   2888 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   2889 		    PBA_40K : PBA_48K;
   2890 		break;
   2891 	}
   2892 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   2893 
   2894 	/*
   2895 	 * 82541 Errata 29? & 82547 Errata 28?
   2896 	 * See also the description about PHY_RST bit in CTRL register
   2897 	 * in 8254x_GBe_SDM.pdf.
   2898 	 */
   2899 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   2900 		CSR_WRITE(sc, WMREG_CTRL,
   2901 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   2902 		delay(5000);
   2903 	}
   2904 
   2905 	switch (sc->sc_type) {
   2906 	case WM_T_82544:
   2907 	case WM_T_82540:
   2908 	case WM_T_82545:
   2909 	case WM_T_82546:
   2910 	case WM_T_82541:
   2911 	case WM_T_82541_2:
   2912 		/*
   2913 		 * On some chipsets, a reset through a memory-mapped write
   2914 		 * cycle can cause the chip to reset before completing the
   2915 		 * write cycle.  This causes major headache that can be
   2916 		 * avoided by issuing the reset via indirect register writes
   2917 		 * through I/O space.
   2918 		 *
   2919 		 * So, if we successfully mapped the I/O BAR at attach time,
   2920 		 * use that.  Otherwise, try our luck with a memory-mapped
   2921 		 * reset.
   2922 		 */
   2923 		if (sc->sc_flags & WM_F_IOH_VALID)
   2924 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   2925 		else
   2926 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   2927 		break;
   2928 
   2929 	case WM_T_82545_3:
   2930 	case WM_T_82546_3:
   2931 		/* Use the shadow control register on these chips. */
   2932 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   2933 		break;
   2934 
   2935 	case WM_T_ICH8:
   2936 		wm_get_swfwhw_semaphore(sc);
   2937 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
   2938 
   2939 	default:
   2940 		/* Everything else can safely use the documented method. */
   2941 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   2942 		break;
   2943 	}
   2944 	delay(10000);
   2945 
   2946 	for (i = 0; i < 1000; i++) {
   2947 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
   2948 			return;
   2949 		delay(20);
   2950 	}
   2951 
   2952 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
   2953 		log(LOG_ERR, "%s: reset failed to complete\n",
   2954 		    sc->sc_dev.dv_xname);
   2955 
   2956 	if (sc->sc_type >= WM_T_80003) {
   2957 		/* wait for eeprom to reload */
   2958 		for (i = 1000; i > 0; i--) {
   2959 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   2960 				break;
   2961 		}
   2962 		if (i == 0) {
   2963 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   2964 			    "complete\n", sc->sc_dev.dv_xname);
   2965 		}
   2966 	}
   2967 }
   2968 
   2969 /*
   2970  * wm_init:		[ifnet interface function]
   2971  *
   2972  *	Initialize the interface.  Must be called at splnet().
   2973  */
static int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* update statistics before reset */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC(sc);
	sc->sc_txnext = 0;

	/*
	 * Program the Tx descriptor ring base, length, head/tail and
	 * interrupt-delay registers.  Pre-82543 parts use the old
	 * register layout.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 64);
		CSR_WRITE(sc, WMREG_TADV, 128);

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN(sc);
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		/* The second receive ring is unused; zero it out. */
		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 0 | RDTR_FPD);
		CSR_WRITE(sc, WMREG_RADV, 128);
	}
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				log(LOG_ERR, "%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if (sc->sc_type != WM_T_ICH8) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
	}

	sc->sc_fcrtl = FCRTL_DFLT;
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
	} else {
		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
	}
	CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);

#if 0 /* XXXJRT */
	/* Deal with VLAN enables. */
	if (VLAN_ATTACHED(&sc->sc_ethercom))
		sc->sc_ctrl |= CTRL_VME;
	else
#endif /* XXXJRT */
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) {
		int val;
		val = CSR_READ(sc, WMREG_CTRL_EXT);
		val &= ~CTRL_EXT_LINK_MODE_MASK;
		CSR_WRITE(sc, WMREG_CTRL_EXT, val);

		/* Bypass RX and TX FIFO's */
		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
		    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
		    KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);

		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
		    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
		    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
		/*
		 * Set the mac to wait the maximum time between each
		 * iteration and increase the max iterations when
		 * polling the phy; this fixes erroneous timeouts at 10Mbps.
		 */
		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF);
		val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
		val |= 0x3F;
		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val);
	}
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
		reg |= RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	/* TBI (non-MII) parts also need the /C/ ordered-set interrupt. */
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

	if (sc->sc_type >= WM_T_82543) {
		/* Set up the interrupt throttling register (units of 256ns) */
		sc->sc_itr = 1000000000 / (7000 * 256);
		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
	}

#if 0 /* XXXJRT */
	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	if (sc->sc_type >= WM_T_82571)
		sc->sc_tctl |= TCTL_MULR;
	if (sc->sc_type >= WM_T_80003)
		sc->sc_tctl |= TCTL_RTLC;
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
	    | RCTL_MO(sc->sc_mchash_type);

	/* 82573 doesn't support jumbo frame */
	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
		sc->sc_rctl |= RCTL_LPE;

	/* Select the receive buffer size matching the cluster size. */
	if (MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
		if (sc->sc_type >= WM_T_82543) {
			switch(MCLBYTES) {
			case 4096:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
				break;
			case 8192:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
				break;
			case 16384:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
				break;
			default:
				panic("wm_init: MCLBYTES %d unsupported",
				    MCLBYTES);
				break;
			}
		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
	}

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		log(LOG_ERR, "%s: interface not running\n",
		    sc->sc_dev.dv_xname);
	return (error);
}
   3271 
   3272 /*
   3273  * wm_rxdrain:
   3274  *
   3275  *	Drain the receive queue.
   3276  */
   3277 static void
   3278 wm_rxdrain(struct wm_softc *sc)
   3279 {
   3280 	struct wm_rxsoft *rxs;
   3281 	int i;
   3282 
   3283 	for (i = 0; i < WM_NRXDESC; i++) {
   3284 		rxs = &sc->sc_rxsoft[i];
   3285 		if (rxs->rxs_mbuf != NULL) {
   3286 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3287 			m_freem(rxs->rxs_mbuf);
   3288 			rxs->rxs_mbuf = NULL;
   3289 		}
   3290 	}
   3291 }
   3292 
   3293 /*
   3294  * wm_stop:		[ifnet interface function]
   3295  *
   3296  *	Stop transmission on the interface.
   3297  */
static void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Stop the 82547 Tx FIFO stall check timer. */
	if (sc->sc_type == WM_T_82547)
		callout_stop(&sc->sc_txfifo_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);

	/*
	 * Clear the interrupt mask to ensure the device cannot assert its
	 * interrupt line.
	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
	 * any currently pending or shared interrupt.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = 0;

	/* Release any queued transmit buffers. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Optionally free the receive buffers as well (full shutdown). */
	if (disable)
		wm_rxdrain(sc);

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
   3347 
   3348 /*
   3349  * wm_acquire_eeprom:
   3350  *
   3351  *	Perform the EEPROM handshake required on some chips.
   3352  */
static int
wm_acquire_eeprom(struct wm_softc *sc)
{
	uint32_t reg;
	int x;
	int ret = 0;

	/* always success */
	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
		return 0;

	/*
	 * First take whichever software/firmware synchronization
	 * mechanism this chip uses; the flags are expected to be
	 * mutually exclusive.
	 */
	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
		ret = wm_get_swfwhw_semaphore(sc);
	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
		/* this will also do wm_get_swsm_semaphore() if needed */
		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
		ret = wm_get_swsm_semaphore(sc);
	}

	/* Nonzero means the semaphore could not be obtained. */
	if (ret)
		return 1;

	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE)  {
		reg = CSR_READ(sc, WMREG_EECD);

		/* Request EEPROM access. */
		reg |= EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* ..and wait for it to be granted. */
		for (x = 0; x < 1000; x++) {
			reg = CSR_READ(sc, WMREG_EECD);
			if (reg & EECD_EE_GNT)
				break;
			delay(5);
		}
		if ((reg & EECD_EE_GNT) == 0) {
			/*
			 * Grant never arrived: withdraw the request and
			 * drop whichever semaphore we took above.
			 *
			 * NOTE(review): unlike the acquisition chain,
			 * the SWFWHW and SWFW cases here are separate
			 * "if"s rather than "else if" -- harmless if the
			 * flags are exclusive, but verify.
			 */
			aprint_error("%s: could not acquire EEPROM GNT\n",
			    sc->sc_dev.dv_xname);
			reg &= ~EECD_EE_REQ;
			CSR_WRITE(sc, WMREG_EECD, reg);
			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
				wm_put_swfwhw_semaphore(sc);
			if (sc->sc_flags & WM_F_SWFW_SYNC)
				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
				wm_put_swsm_semaphore(sc);
			return (1);
		}
	}

	return (0);
}
   3407 
   3408 /*
   3409  * wm_release_eeprom:
   3410  *
   3411  *	Release the EEPROM mutex.
   3412  */
   3413 static void
   3414 wm_release_eeprom(struct wm_softc *sc)
   3415 {
   3416 	uint32_t reg;
   3417 
   3418 	/* always success */
   3419 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   3420 		return;
   3421 
   3422 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
   3423 		reg = CSR_READ(sc, WMREG_EECD);
   3424 		reg &= ~EECD_EE_REQ;
   3425 		CSR_WRITE(sc, WMREG_EECD, reg);
   3426 	}
   3427 
   3428 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
   3429 		wm_put_swfwhw_semaphore(sc);
   3430 	if (sc->sc_flags & WM_F_SWFW_SYNC)
   3431 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   3432 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
   3433 		wm_put_swsm_semaphore(sc);
   3434 }
   3435 
   3436 /*
   3437  * wm_eeprom_sendbits:
   3438  *
   3439  *	Send a series of bits to the EEPROM.
   3440  */
   3441 static void
   3442 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   3443 {
   3444 	uint32_t reg;
   3445 	int x;
   3446 
   3447 	reg = CSR_READ(sc, WMREG_EECD);
   3448 
   3449 	for (x = nbits; x > 0; x--) {
   3450 		if (bits & (1U << (x - 1)))
   3451 			reg |= EECD_DI;
   3452 		else
   3453 			reg &= ~EECD_DI;
   3454 		CSR_WRITE(sc, WMREG_EECD, reg);
   3455 		delay(2);
   3456 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   3457 		delay(2);
   3458 		CSR_WRITE(sc, WMREG_EECD, reg);
   3459 		delay(2);
   3460 	}
   3461 }
   3462 
   3463 /*
   3464  * wm_eeprom_recvbits:
   3465  *
   3466  *	Receive a series of bits from the EEPROM.
   3467  */
   3468 static void
   3469 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   3470 {
   3471 	uint32_t reg, val;
   3472 	int x;
   3473 
   3474 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   3475 
   3476 	val = 0;
   3477 	for (x = nbits; x > 0; x--) {
   3478 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   3479 		delay(2);
   3480 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   3481 			val |= (1U << (x - 1));
   3482 		CSR_WRITE(sc, WMREG_EECD, reg);
   3483 		delay(2);
   3484 	}
   3485 	*valp = val;
   3486 }
   3487 
   3488 /*
   3489  * wm_read_eeprom_uwire:
   3490  *
   3491  *	Read a word from the EEPROM using the MicroWire protocol.
   3492  */
static int
wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	/*
	 * For each requested word, run one full Microwire READ
	 * transaction: select the chip, shift out the opcode and
	 * address, shift in 16 data bits, then deselect.
	 */
	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address. */
		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}

	/* Always succeeds; the return value exists for interface symmetry. */
	return (0);
}
   3527 
   3528 /*
   3529  * wm_spi_eeprom_ready:
   3530  *
   3531  *	Wait for a SPI EEPROM to be ready for commands.
   3532  */
static int
wm_spi_eeprom_ready(struct wm_softc *sc)
{
	uint32_t val;
	int usec;

	/*
	 * Poll the SPI status register (RDSR) until the busy (RDY)
	 * bit clears, giving up after SPI_MAX_RETRIES microseconds
	 * (5us of delay per iteration).
	 */
	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
		wm_eeprom_recvbits(sc, &val, 8);
		if ((val & SPI_SR_RDY) == 0)
			break;
	}
	if (usec >= SPI_MAX_RETRIES) {
		/* Timed out waiting for the part to go idle. */
		aprint_error("%s: EEPROM failed to become ready\n",
		    sc->sc_dev.dv_xname);
		return (1);
	}
	/* Returns 0 when the EEPROM will accept a new command. */
	return (0);
}
   3552 
   3553 /*
   3554  * wm_read_eeprom_spi:
   3555  *
 *	Read a word from the EEPROM using the SPI protocol.
   3557  */
static int
wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;
	uint8_t opc;

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	/* Bail out (return 1) if the part never goes idle. */
	if (wm_spi_eeprom_ready(sc))
		return (1);

	/* Toggle CS to flush commands. */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	/*
	 * Small (8-bit address) parts encode the high address bit
	 * in the opcode itself for offsets >= 128.
	 */
	opc = SPI_OPC_READ;
	if (sc->sc_ee_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	wm_eeprom_sendbits(sc, opc, 8);
	/* Byte address: word index shifted left one bit. */
	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);

	/* Sequential read; byte-swap each 16-bit word as it arrives. */
	for (i = 0; i < wordcnt; i++) {
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	return (0);
}
   3598 
   3599 #define EEPROM_CHECKSUM		0xBABA
   3600 #define EEPROM_SIZE		0x0040
   3601 
   3602 /*
   3603  * wm_validate_eeprom_checksum
   3604  *
   3605  * The checksum is defined as the sum of the first 64 (16 bit) words.
   3606  */
   3607 static int
   3608 wm_validate_eeprom_checksum(struct wm_softc *sc)
   3609 {
   3610 	uint16_t checksum;
   3611 	uint16_t eeprom_data;
   3612 	int i;
   3613 
   3614 	checksum = 0;
   3615 
   3616 	for (i = 0; i < EEPROM_SIZE; i++) {
   3617 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
   3618 			return 1;
   3619 		checksum += eeprom_data;
   3620 	}
   3621 
   3622 	if (checksum != (uint16_t) EEPROM_CHECKSUM)
   3623 		return 1;
   3624 
   3625 	return 0;
   3626 }
   3627 
   3628 /*
   3629  * wm_read_eeprom:
   3630  *
   3631  *	Read data from the serial EEPROM.
   3632  */
static int
wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	int rv;

	/* Don't bother if attach-time probing found the contents unusable. */
	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		return 1;

	/* Arbitrate with firmware/hardware for EEPROM access. */
	if (wm_acquire_eeprom(sc))
		return 1;

	/* Dispatch on the access method this chip supports. */
	if (sc->sc_type == WM_T_ICH8)
		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_SPI)
		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
	else
		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);

	/* Always release the arbitration taken above. */
	wm_release_eeprom(sc);
	/* 0 on success, non-zero on failure. */
	return rv;
}
   3656 
/*
 * wm_read_eeprom_eerd:
 *
 *	Read words from the EEPROM through the EERD register interface
 *	(the MAC performs the access; no bit-banging).  Returns 0 on
 *	success, non-zero if the DONE bit never asserted.
 */
static int
wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
    uint16_t *data)
{
	int i, eerd = 0;
	int error = 0;

	for (i = 0; i < wordcnt; i++) {
		/* Kick off a read of word (offset + i). */
		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;

		CSR_WRITE(sc, WMREG_EERD, eerd);
		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
		if (error != 0)
			break;

		/* Data appears in the high half of EERD when DONE is set. */
		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
	}

	return error;
}
   3677 
   3678 static int
   3679 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   3680 {
   3681 	uint32_t attempts = 100000;
   3682 	uint32_t i, reg = 0;
   3683 	int32_t done = -1;
   3684 
   3685 	for (i = 0; i < attempts; i++) {
   3686 		reg = CSR_READ(sc, rw);
   3687 
   3688 		if (reg & EERD_DONE) {
   3689 			done = 0;
   3690 			break;
   3691 		}
   3692 		delay(5);
   3693 	}
   3694 
   3695 	return done;
   3696 }
   3697 
   3698 /*
   3699  * wm_add_rxbuf:
   3700  *
 *	Add a receive buffer to the indicated descriptor.
   3702  */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Allocate an mbuf header plus a cluster for the frame data. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		/* No cluster available; free the header and fail. */
		m_freem(m);
		return (ENOBUFS);
	}

	/* Unload any mbuf previously mapped at this slot. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Offer the full cluster to the hardware. */
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX XXX XXX */
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("wm_add_rxbuf");
	}

	/* Make the buffer visible to the device before handing it over. */
	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Re-initialize the receive descriptor to point at the new buffer. */
	WM_INIT_RXDESC(sc, idx);

	return (0);
}
   3742 
   3743 /*
   3744  * wm_set_ral:
   3745  *
 *	Set an entry in the receive address list.
   3747  */
static void
wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
{
	uint32_t ral_lo, ral_hi;

	if (enaddr != NULL) {
		/* Pack the 6-byte address into the LO/HI register pair. */
		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
		    (enaddr[3] << 24);
		ral_hi = enaddr[4] | (enaddr[5] << 8);
		ral_hi |= RAL_AV;	/* mark the entry valid */
	} else {
		/* A NULL address clears (invalidates) the slot. */
		ral_lo = 0;
		ral_hi = 0;
	}

	/* The RAL lives at a different base on 82544 ("Cordova") and later. */
	if (sc->sc_type >= WM_T_82544) {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_hi);
	} else {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
	}
}
   3773 
   3774 /*
   3775  * wm_mchash:
   3776  *
   3777  *	Compute the hash of the multicast address for the 4096-bit
   3778  *	multicast filter.
   3779  */
static uint32_t
wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
{
	/* Shift amounts indexed by sc_mchash_type (filter alignment mode). */
	static const int lo_shift[4] = { 4, 3, 2, 0 };
	static const int hi_shift[4] = { 4, 5, 6, 8 };
	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
	uint32_t hash;

	if (sc->sc_type == WM_T_ICH8) {
		/* ICH8's filter is smaller: 10-bit index (1024 bits). */
		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
		return (hash & 0x3ff);
	}
	/* Hash is built from the top two bytes of the Ethernet address. */
	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);

	/* 12-bit index into the 4096-bit filter. */
	return (hash & 0xfff);
}
   3799 
   3800 /*
   3801  * wm_set_filter:
   3802  *
   3803  *	Set up the receive filter.
   3804  */
   3805 static void
   3806 wm_set_filter(struct wm_softc *sc)
   3807 {
   3808 	struct ethercom *ec = &sc->sc_ethercom;
   3809 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3810 	struct ether_multi *enm;
   3811 	struct ether_multistep step;
   3812 	bus_addr_t mta_reg;
   3813 	uint32_t hash, reg, bit;
   3814 	int i, size;
   3815 
   3816 	if (sc->sc_type >= WM_T_82544)
   3817 		mta_reg = WMREG_CORDOVA_MTA;
   3818 	else
   3819 		mta_reg = WMREG_MTA;
   3820 
   3821 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3822 
   3823 	if (ifp->if_flags & IFF_BROADCAST)
   3824 		sc->sc_rctl |= RCTL_BAM;
   3825 	if (ifp->if_flags & IFF_PROMISC) {
   3826 		sc->sc_rctl |= RCTL_UPE;
   3827 		goto allmulti;
   3828 	}
   3829 
   3830 	/*
   3831 	 * Set the station address in the first RAL slot, and
   3832 	 * clear the remaining slots.
   3833 	 */
   3834 	if (sc->sc_type == WM_T_ICH8)
   3835 		size = WM_ICH8_RAL_TABSIZE;
   3836 	else
   3837 		size = WM_RAL_TABSIZE;
   3838 	wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
   3839 	for (i = 1; i < size; i++)
   3840 		wm_set_ral(sc, NULL, i);
   3841 
   3842 	if (sc->sc_type == WM_T_ICH8)
   3843 		size = WM_ICH8_MC_TABSIZE;
   3844 	else
   3845 		size = WM_MC_TABSIZE;
   3846 	/* Clear out the multicast table. */
   3847 	for (i = 0; i < size; i++)
   3848 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3849 
   3850 	ETHER_FIRST_MULTI(step, ec, enm);
   3851 	while (enm != NULL) {
   3852 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3853 			/*
   3854 			 * We must listen to a range of multicast addresses.
   3855 			 * For now, just accept all multicasts, rather than
   3856 			 * trying to set only those filter bits needed to match
   3857 			 * the range.  (At this time, the only use of address
   3858 			 * ranges is for IP multicast routing, for which the
   3859 			 * range is big enough to require all bits set.)
   3860 			 */
   3861 			goto allmulti;
   3862 		}
   3863 
   3864 		hash = wm_mchash(sc, enm->enm_addrlo);
   3865 
   3866 		reg = (hash >> 5);
   3867 		if (sc->sc_type == WM_T_ICH8)
   3868 			reg &= 0x1f;
   3869 		else
   3870 			reg &= 0x7f;
   3871 		bit = hash & 0x1f;
   3872 
   3873 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3874 		hash |= 1U << bit;
   3875 
   3876 		/* XXX Hardware bug?? */
   3877 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
   3878 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3879 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3880 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3881 		} else
   3882 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3883 
   3884 		ETHER_NEXT_MULTI(step, enm);
   3885 	}
   3886 
   3887 	ifp->if_flags &= ~IFF_ALLMULTI;
   3888 	goto setit;
   3889 
   3890  allmulti:
   3891 	ifp->if_flags |= IFF_ALLMULTI;
   3892 	sc->sc_rctl |= RCTL_MPE;
   3893 
   3894  setit:
   3895 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3896 }
   3897 
   3898 /*
   3899  * wm_tbi_mediainit:
   3900  *
   3901  *	Initialize media for use on 1000BASE-X devices.
   3902  */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	const char *sep = "";

	/* Older chips use a different default inter-packet gap. */
	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

/* Add one media entry and print its name on the attach line. */
#define	ADD(ss, mm, dd)							\
do {									\
	aprint_normal("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal("%s: ", sc->sc_dev.dv_xname);
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	aprint_normal("\n");

#undef ADD

	/* Default to autonegotiation. */
	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
   3944 
   3945 /*
   3946  * wm_tbi_mediastatus:	[ifmedia interface function]
   3947  *
   3948  *	Get the current interface media status on a 1000BASE-X device.
   3949  */
static void
wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;
	uint32_t ctrl;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and stop. */
	if (sc->sc_tbi_linkup == 0) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_1000_SX;
	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
		ifmr->ifm_active |= IFM_FDX;
	/* Report flow-control state from the CTRL register. */
	ctrl = CSR_READ(sc, WMREG_CTRL);
	if (ctrl & CTRL_RFCE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
	if (ctrl & CTRL_TFCE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
}
   3974 
   3975 /*
   3976  * wm_tbi_mediachange:	[ifmedia interface function]
   3977  *
   3978  *	Set hardware to newly-selected media on a 1000BASE-X device.
   3979  */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/* Start from the advertisement word stored with the media entry. */
	sc->sc_txcw = ife->ifm_data;
	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x on entry\n",
		    sc->sc_dev.dv_xname,sc->sc_txcw));
	/* Advertise pause capability when autonegotiating or when asked. */
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
		sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		sc->sc_txcw |= TXCW_ANE;
	} else {
		/*If autonegotiation is turned off, force link up and turn on full duplex*/
		sc->sc_txcw &= ~TXCW_ANE;
		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(1000);
	}

	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
		    sc->sc_dev.dv_xname,sc->sc_txcw));
	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	/* NOTE: CTRL will update TFCE and RFCE automatically. */

	sc->sc_tbi_anstate = 0;

	/* Sample the Loss Of Signal input (SWDPIN 1). */
	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", sc->sc_dev.dv_xname,i));

	/*
	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
	 * optics detect a signal, 0 if they don't.
	 */
	if (((i != 0) && (sc->sc_type >= WM_T_82544)) || (i == 0)) {
		/* Have signal; wait for the link to come up. */

		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/*
			 * Reset the link, and let autonegotiation do its thing
			 */
			sc->sc_ctrl |= CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
			sc->sc_ctrl &= ~CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
		}

		/* Poll for link-up, 10ms at a time, for up to 500ms. */
		for (i = 0; i < 50; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
			    sc->sc_dev.dv_xname,i));

		status = CSR_READ(sc, WMREG_STATUS);
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			sc->sc_dev.dv_xname,status, STATUS_LU));
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/* Set collision distance and XON for the new duplex. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		/* No optical signal detected; record link down. */
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}
   4086 
   4087 /*
   4088  * wm_tbi_set_linkled:
   4089  *
   4090  *	Update the link LED on 1000BASE-X devices.
   4091  */
static void
wm_tbi_set_linkled(struct wm_softc *sc)
{

	/* SWDPIN 0 drives the link LED; mirror the recorded link state. */
	if (sc->sc_tbi_linkup)
		sc->sc_ctrl |= CTRL_SWDPIN(0);
	else
		sc->sc_ctrl &= ~CTRL_SWDPIN(0);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
}
   4103 
   4104 /*
   4105  * wm_tbi_check_link:
   4106  *
   4107  *	Check the link on 1000BASE-X devices.
   4108  */
static void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

	/*
	 * sc_tbi_anstate counts down ticks before we re-examine the
	 * link; 0 means "nothing pending", >1 means "still waiting".
	 */
	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		/* Update collision distance and XON for the new duplex. */
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		sc->sc_fcrtl &= ~FCRTL_XONE;
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		if (ctrl & CTRL_TFCE)
			sc->sc_fcrtl |= FCRTL_XONE;
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
			      WMREG_OLD_FCRTL : WMREG_FCRTL,
			      sc->sc_fcrtl);
		sc->sc_tbi_linkup = 1;
	}

	wm_tbi_set_linkled(sc);
}
   4157 
   4158 /*
   4159  * wm_gmii_reset:
   4160  *
   4161  *	Reset the PHY.
   4162  */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;
	int func = 0; /* XXX gcc */

	/* ICH8 arbitrates PHY access through the SW/FW/HW semaphore. */
	if (sc->sc_type == WM_T_ICH8) {
		if (wm_get_swfwhw_semaphore(sc))
			return;
	}
	/* 80003 uses a per-function SW/FW semaphore instead. */
	if (sc->sc_type == WM_T_80003) {
		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
		if (wm_get_swfw_semaphore(sc,
		    func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
			return;
	}
	if (sc->sc_type >= WM_T_82544) {
		/* 82544 and later: pulse the PHY_RESET bit in CTRL. */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
                /*
                 * With 82543, we need to force speed and duplex on the MAC
                 * equal to what the PHY speed and duplex configuration is.
                 * In addition, we need to perform a hardware reset on the PHY
                 * to take it out of reset.
                 */
                sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
                CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		/* Deassert, assert, then deassert reset via SWDPIN 4. */
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10000);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
	/* Drop whichever semaphore was taken above. */
	if (sc->sc_type == WM_T_ICH8)
		wm_put_swfwhw_semaphore(sc);
	if (sc->sc_type == WM_T_80003)
		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}
   4218 
   4219 /*
   4220  * wm_gmii_mediainit:
   4221  *
   4222  *	Initialize media for use on 1000BASE-T devices.
   4223  */
static void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	/* Pick the default inter-packet gap for the chip generation. */
	if (sc->sc_type >= WM_T_80003)
		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
	else
		sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 * XXXbouyer - I'm not sure this is right for the 80003,
	 * the em driver only sets CTRL_SLU here - but it seems to work.
	 */
	sc->sc_ctrl |= CTRL_SLU;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	/* Select the MDIO access method for this chip generation. */
	if (sc->sc_type >= WM_T_80003) {
		sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
	} else if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found: offer only a "none" media entry. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
   4274 
   4275 /*
   4276  * wm_gmii_mediastatus:	[ifmedia interface function]
   4277  *
   4278  *	Get the current interface media status on a 1000BASE-T device.
   4279  */
static void
wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;

	/* Let the MII layer refresh status, then merge our flow flags. */
	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
			   sc->sc_flowflags;
}
   4290 
   4291 /*
   4292  * wm_gmii_mediachange:	[ifmedia interface function]
   4293  *
   4294  *	Set hardware to newly-selected media on a 1000BASE-T device.
   4295  */
static int
wm_gmii_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;

	if (ifp->if_flags & IFF_UP) {
		sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
		sc->sc_ctrl |= CTRL_SLU;
		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
		    || (sc->sc_type > WM_T_82543)) {
			/* Let the PHY drive speed/duplex selection. */
			sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
		} else {
			/* 82543 with fixed media: force speed/duplex on MAC. */
			sc->sc_ctrl &= ~CTRL_ASDE;
			sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
			if (ife->ifm_media & IFM_FDX)
				sc->sc_ctrl |= CTRL_FD;
			switch(IFM_SUBTYPE(ife->ifm_media)) {
			case IFM_10_T:
				sc->sc_ctrl |= CTRL_SPEED_10;
				break;
			case IFM_100_TX:
				sc->sc_ctrl |= CTRL_SPEED_100;
				break;
			case IFM_1000_T:
				sc->sc_ctrl |= CTRL_SPEED_1000;
				break;
			default:
				panic("wm_gmii_mediachange: bad media 0x%x",
				    ife->ifm_media);
			}
		}
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		/* 82543 and older need a PHY reset to pick up the change. */
		if (sc->sc_type <= WM_T_82543)
			wm_gmii_reset(sc);
		mii_mediachg(&sc->sc_mii);
	}
	return (0);
}
   4335 
/* SWD pins used to bit-bang MDIO on the 82543. */
#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

/*
 * i82543_mii_sendbits:
 *
 *	Clock `nbits` of `data` out to the PHY, MSB first, by
 *	bit-banging the MDIO pins through the CTRL register.
 */
static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Configure MDI_IO as an output, MDI_CLK pin direction too. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		/* Present the bit, then pulse the clock. */
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}
   4362 
/*
 * i82543_mii_recvbits:
 *
 *	Clock 16 bits in from the PHY (MSB first) by bit-banging
 *	the MDIO pins; returns the 16-bit register value.
 */
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* Configure MDI_IO as an input (no MDI_DIR). */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* One clock for the turnaround bit. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	/* Shift in 16 data bits, sampling on the clock-high phase. */
	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* One final clock to complete the frame. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}
   4396 
#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK

/*
 * wm_gmii_i82543_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII (i82543 version).
 */
static int
wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int rv;

	/* 32-bit preamble, then the 14-bit read command frame. */
	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	rv = i82543_mii_recvbits(sc) & 0xffff;

	DPRINTF(WM_DEBUG_GMII,
	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
	    sc->sc_dev.dv_xname, phy, reg, rv));

	return (rv);
}
   4423 
   4424 /*
   4425  * wm_gmii_i82543_writereg:	[mii interface function]
   4426  *
   4427  *	Write a PHY register on the GMII (i82543 version).
   4428  */
static void
wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;

	/* 32-bit preamble, then the full 32-bit write frame. */
	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}
   4439 
   4440 /*
   4441  * wm_gmii_i82544_readreg:	[mii interface function]
   4442  *
   4443  *	Read a PHY register on the GMII.
   4444  */
static int
wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i, rv;

	/* Kick off the read through the MDIC register. */
	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	/* Poll for completion, 10us per attempt, up to 320 attempts. */
	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		/* All-ones reads mean "no PHY"; report 0 instead. */
		if (rv == 0xffff)
			rv = 0;
	}

	/* Returns 0 on any failure; callers cannot distinguish errors. */
	return (rv);
}
   4480 
   4481 /*
   4482  * wm_gmii_i82544_writereg:	[mii interface function]
   4483  *
   4484  *	Write a PHY register on the GMII.
   4485  */
static void
wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i;

	/* Kick off the write through the MDIC register. */
	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	/* Poll for completion, 10us per attempt, up to 320 attempts. */
	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	/* Failures are logged but not reported to the caller. */
	if ((mdic & MDIC_READY) == 0)
		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}
   4510 
   4511 /*
   4512  * wm_gmii_i80003_readreg:	[mii interface function]
   4513  *
   4514  *	Read a PHY register on the kumeran
   4515  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   4517  */
   4518 static int
   4519 wm_gmii_i80003_readreg(struct device *self, int phy, int reg)
   4520 {
   4521 	struct wm_softc *sc = (void *) self;
   4522 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
   4523 	int rv;
   4524 
   4525 	if (phy != 1) /* only one PHY on kumeran bus */
   4526 		return 0;
   4527 
   4528 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
   4529 		return 0;
   4530 
   4531 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   4532 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   4533 		    reg >> GG82563_PAGE_SHIFT);
   4534 	} else {
   4535 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   4536 		    reg >> GG82563_PAGE_SHIFT);
   4537 	}
   4538 
   4539 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   4540 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
   4541 	return (rv);
   4542 }
   4543 
   4544 /*
   4545  * wm_gmii_i80003_writereg:	[mii interface function]
   4546  *
   4547  *	Write a PHY register on the kumeran.
   4548  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   4550  */
   4551 static void
   4552 wm_gmii_i80003_writereg(struct device *self, int phy, int reg, int val)
   4553 {
   4554 	struct wm_softc *sc = (void *) self;
   4555 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
   4556 
   4557 	if (phy != 1) /* only one PHY on kumeran bus */
   4558 		return;
   4559 
   4560 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
   4561 		return;
   4562 
   4563 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   4564 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   4565 		    reg >> GG82563_PAGE_SHIFT);
   4566 	} else {
   4567 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   4568 		    reg >> GG82563_PAGE_SHIFT);
   4569 	}
   4570 
   4571 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   4572 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
   4573 }
   4574 
   4575 /*
   4576  * wm_gmii_statchg:	[mii interface function]
   4577  *
   4578  *	Callback from MII layer when media changes.
   4579  */
static void
wm_gmii_statchg(struct device *self)
{
	struct wm_softc *sc = (void *) self;
	struct mii_data *mii = &sc->sc_mii;

	/* Clear flow-control enables, collision distance, and XON so
	 * they can be rebuilt from the newly negotiated link state. */
	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
	sc->sc_fcrtl &= ~FCRTL_XONE;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
			/* Transmit pause frames; XON frame generation on. */
			sc->sc_ctrl |= CTRL_TFCE;
			sc->sc_fcrtl |= FCRTL_XONE;
		}
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			sc->sc_ctrl |= CTRL_RFCE;
	}

	/* Collision distance depends on the negotiated duplex. */
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else  {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
	/* Pre-82543 chips keep FCRTL at a different register offset. */
	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
						 : WMREG_FCRTL, sc->sc_fcrtl);
	if (sc->sc_type >= WM_T_80003) {
		/* 80003: set kumeran half-duplex control and the
		 * inter-packet gap to match the negotiated speed. */
		switch(IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_T:
			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
			break;
		default:
			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
			break;
		}
		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
	}
}
   4638 
   4639 /*
   4640  * wm_kmrn_i80003_readreg:
   4641  *
   4642  *	Read a kumeran register
   4643  */
   4644 static int
   4645 wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
   4646 {
   4647 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
   4648 	int rv;
   4649 
   4650 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
   4651 		return 0;
   4652 
   4653 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   4654 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   4655 	    KUMCTRLSTA_REN);
   4656 	delay(2);
   4657 
   4658 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   4659 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
   4660 	return (rv);
   4661 }
   4662 
   4663 /*
   4664  * wm_kmrn_i80003_writereg:
   4665  *
   4666  *	Write a kumeran register
   4667  */
   4668 static void
   4669 wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
   4670 {
   4671 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
   4672 
   4673 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
   4674 		return;
   4675 
   4676 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   4677 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   4678 	    (val & KUMCTRLSTA_MASK));
   4679 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
   4680 }
   4681 
   4682 static int
   4683 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
   4684 {
   4685 	uint32_t eecd = 0;
   4686 
   4687 	if (sc->sc_type == WM_T_82573) {
   4688 		eecd = CSR_READ(sc, WMREG_EECD);
   4689 
   4690 		/* Isolate bits 15 & 16 */
   4691 		eecd = ((eecd >> 15) & 0x03);
   4692 
   4693 		/* If both bits are set, device is Flash type */
   4694 		if (eecd == 0x03) {
   4695 			return 0;
   4696 		}
   4697 	}
   4698 	return 1;
   4699 }
   4700 
/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the SWSM software/firmware semaphore by setting the
 *	software busy bit and reading it back.  Returns 0 on success,
 *	1 if the bit could not be obtained within ~50ms.
 */
static int
wm_get_swsm_semaphore(struct wm_softc *sc)
{
	int32_t timeout;
	uint32_t swsm;

	/* Get the FW semaphore. */
	timeout = 1000 + 1; /* XXX */
	while (timeout) {
		/* Try to set the software busy bit. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		swsm |= SWSM_SWESMBI;
		CSR_WRITE(sc, WMREG_SWSM, swsm);
		/* if we managed to set the bit we got the semaphore. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		if (swsm & SWSM_SWESMBI)
			break;

		delay(50);
		timeout--;
	}

	if (timeout == 0) {
		aprint_error("%s: could not acquire EEPROM GNT\n",
		    sc->sc_dev.dv_xname);
		/* Release semaphores */
		wm_put_swsm_semaphore(sc);
		return 1;
	}
	return 0;
}
   4731 
   4732 static void
   4733 wm_put_swsm_semaphore(struct wm_softc *sc)
   4734 {
   4735 	uint32_t swsm;
   4736 
   4737 	swsm = CSR_READ(sc, WMREG_SWSM);
   4738 	swsm &= ~(SWSM_SWESMBI);
   4739 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   4740 }
   4741 
   4742 static int
   4743 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   4744 {
   4745 	uint32_t swfw_sync;
   4746 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   4747 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   4748 	int timeout = 200;
   4749 
   4750 	for(timeout = 0; timeout < 200; timeout++) {
   4751 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
   4752 			if (wm_get_swsm_semaphore(sc))
   4753 				return 1;
   4754 		}
   4755 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   4756 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   4757 			swfw_sync |= swmask;
   4758 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   4759 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
   4760 				wm_put_swsm_semaphore(sc);
   4761 			return 0;
   4762 		}
   4763 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
   4764 			wm_put_swsm_semaphore(sc);
   4765 		delay(5000);
   4766 	}
   4767 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   4768 	    sc->sc_dev.dv_xname, mask, swfw_sync);
   4769 	return 1;
   4770 }
   4771 
/*
 * wm_put_swfw_semaphore:
 *
 *	Release the software half of the SW_FW_SYNC bits in "mask".
 */
static void
wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;

	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
		/* NOTE(review): this spins forever if the SWSM semaphore
		 * can never be obtained (wm_get_swsm_semaphore times out
		 * internally but we retry unconditionally). */
		while (wm_get_swsm_semaphore(sc) != 0)
			continue;
	}
	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
		wm_put_swsm_semaphore(sc);
}
   4787 
   4788 static int
   4789 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   4790 {
   4791 	uint32_t ext_ctrl;
   4792 	int timeout = 200;
   4793 
   4794 	for(timeout = 0; timeout < 200; timeout++) {
   4795 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   4796 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
   4797 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   4798 
   4799 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   4800 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
   4801 			return 0;
   4802 		delay(5000);
   4803 	}
   4804 	printf("%s: failed to get swfwgw semaphore ext_ctrl 0x%x\n",
   4805 	    sc->sc_dev.dv_xname, ext_ctrl);
   4806 	return 1;
   4807 }
   4808 
   4809 static void
   4810 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   4811 {
   4812 	uint32_t ext_ctrl;
   4813 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   4814 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
   4815 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   4816 }
   4817 
   4818 /******************************************************************************
   4819  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   4820  * register.
   4821  *
   4822  * sc - Struct containing variables accessed by shared code
   4823  * offset - offset of word in the EEPROM to read
   4824  * data - word read from the EEPROM
   4825  * words - number of words to read
   4826  *****************************************************************************/
static int
wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
{
    int32_t  error = 0;
    uint32_t flash_bank = 0;
    uint32_t act_offset = 0;
    uint32_t bank_offset = 0;
    uint16_t word = 0;
    uint16_t i = 0;

    /* We need to know which is the valid flash bank.  In the event
     * that we didn't allocate eeprom_shadow_ram, we may not be
     * managing flash_bank.  So it cannot be trusted and needs
     * to be updated with each read.
     */
    /* Value of bit 22 corresponds to the flash bank we're on. */
    flash_bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;

    /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
    bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);

    /* Serialize flash access against firmware. */
    error = wm_get_swfwhw_semaphore(sc);
    if (error)
        return error;

    for (i = 0; i < words; i++) {
            /* The NVM part needs a byte offset, hence * 2 */
            act_offset = bank_offset + ((offset + i) * 2);
            error = wm_read_ich8_word(sc, act_offset, &word);
            if (error)
                break;  /* stop on first failure; data[] is partial */
            data[i] = word;
    }

    wm_put_swfwhw_semaphore(sc);
    return error;
}
   4864 
   4865 /******************************************************************************
   4866  * This function does initial flash setup so that a new read/write/erase cycle
   4867  * can be started.
   4868  *
   4869  * sc - The pointer to the hw structure
   4870  ****************************************************************************/
static int32_t
wm_ich8_cycle_init(struct wm_softc *sc)
{
    uint16_t hsfsts;
    int32_t error = 1;      /* assume failure until proven otherwise */
    int32_t i     = 0;

    hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);

    /* Maybe check the Flash Des Valid bit in Hw status */
    if ((hsfsts & HSFSTS_FLDVAL) == 0) {
        return error;
    }

    /* Clear FCERR in Hw status by writing 1 */
    /* Clear DAEL in Hw status by writing a 1 */
    hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;

    ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);

    /* Either we should have a hardware SPI cycle in progress bit to check
     * against, in order to start a new cycle or FDONE bit should be changed
     * in the hardware so that it is 1 after hardware reset, which can then be
     * used as an indication whether a cycle is in progress or has been
     * completed .. we should also have some software semaphore mechanism to
     * guard FDONE or the cycle in progress bit so that two threads' access to
     * those bits can be serialized or a way so that 2 threads don't
     * start the cycle at the same time */

    if ((hsfsts & HSFSTS_FLINPRO) == 0) {
        /* There is no cycle running at present, so we can start a cycle */
        /* Begin by setting Flash Cycle Done. */
        hsfsts |= HSFSTS_DONE;
        ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
        error = 0;
    } else {
        /* otherwise poll for sometime so the current cycle has a chance
         * to end before giving up. */
        for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
            hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
            if ((hsfsts & HSFSTS_FLINPRO) == 0) {
                error = 0;
                break;
            }
            delay(1);
        }
        if (error == 0) {
            /* Successful in waiting for previous cycle to timeout,
             * now set the Flash Cycle Done. */
            hsfsts |= HSFSTS_DONE;
            ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
        }
    }
    return error;
}
   4926 
   4927 /******************************************************************************
   4928  * This function starts a flash cycle and waits for its completion
   4929  *
   4930  * sc - The pointer to the hw structure
   4931  ****************************************************************************/
   4932 static int32_t
   4933 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   4934 {
   4935     uint16_t hsflctl;
   4936     uint16_t hsfsts;
   4937     int32_t error = 1;
   4938     uint32_t i = 0;
   4939 
   4940     /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   4941     hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   4942     hsflctl |= HSFCTL_GO;
   4943     ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   4944 
   4945     /* wait till FDONE bit is set to 1 */
   4946     do {
   4947         hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   4948         if (hsfsts & HSFSTS_DONE)
   4949             break;
   4950         delay(1);
   4951         i++;
   4952     } while (i < timeout);
   4953     if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0) {
   4954         error = 0;
   4955     }
   4956     return error;
   4957 }
   4958 
   4959 /******************************************************************************
   4960  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   4961  *
   4962  * sc - The pointer to the hw structure
   4963  * index - The index of the byte or word to read.
   4964  * size - Size of data to read, 1=byte 2=word
   4965  * data - Pointer to the word to store the value read.
   4966  *****************************************************************************/
static int32_t
wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
                     uint32_t size, uint16_t* data)
{
    uint16_t hsfsts;
    uint16_t hsflctl;
    uint32_t flash_linear_address;
    uint32_t flash_data = 0;
    int32_t error = 1;      /* assume failure until a cycle succeeds */
    int32_t count = 0;

    /* Reject bad sizes, NULL output pointer, or an out-of-range index. */
    if (size < 1  || size > 2 || data == 0x0 ||
        index > ICH_FLASH_LINEAR_ADDR_MASK)
        return error;

    flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
                           sc->sc_ich8_flash_base;

    do {
        delay(1);
        /* Steps */
        error = wm_ich8_cycle_init(sc);
        if (error)
            break;

        hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
        /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
        hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
        hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
        ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

        /* Write the last 24 bits of index into Flash Linear address field in
         * Flash Address */
        /* TODO: TBD maybe check the index against the size of flash */

        ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);

        error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);

        /* Check if FCERR is set to 1, if set to 1, clear it and try the whole
         * sequence a few more times, else read in (shift in) the Flash Data0,
         * the order is least significant byte first msb to lsb */
        if (error == 0) {
            flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
            if (size == 1) {
                *data = (uint8_t)(flash_data & 0x000000FF);
            } else if (size == 2) {
                *data = (uint16_t)(flash_data & 0x0000FFFF);
            }
            break;
        } else {
            /* If we've gotten here, then things are probably completely hosed,
             * but if the error condition is detected, it won't hurt to give
             * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
             */
            hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
            if (hsfsts & HSFSTS_ERR) {
                /* Repeat for some time before giving up. */
                continue;
            } else if ((hsfsts & HSFSTS_DONE) == 0) {
                /* Cycle neither errored nor completed: give up now. */
                break;
            }
        }
    } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

    return error;
}
   5034 
   5035 #if 0
   5036 /******************************************************************************
   5037  * Reads a single byte from the NVM using the ICH8 flash access registers.
   5038  *
   5039  * sc - pointer to wm_hw structure
   5040  * index - The index of the byte to read.
   5041  * data - Pointer to a byte to store the value read.
   5042  *****************************************************************************/
static int32_t
wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
{
    int32_t status = 0;
    uint16_t word = 0;

    /* A byte read is a 1-byte ICH8 flash data access. */
    status = wm_read_ich8_data(sc, index, 1, &word);
    if (status == 0) {
        *data = (uint8_t)word;
    }

    return status;
}
   5056 #endif
   5057 
   5058 /******************************************************************************
   5059  * Reads a word from the NVM using the ICH8 flash access registers.
   5060  *
   5061  * sc - pointer to wm_hw structure
   5062  * index - The starting byte index of the word to read.
   5063  * data - Pointer to a word to store the value read.
   5064  *****************************************************************************/
   5065 static int32_t
   5066 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   5067 {
   5068     int32_t status = 0;
   5069     status = wm_read_ich8_data(sc, index, 2, data);
   5070     return status;
   5071 }
   5072