if_wm.c revision 1.231 1 /* $NetBSD: if_wm.c,v 1.231 2012/08/09 07:48:39 msaitoh Exp $ */
2
3 /*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*******************************************************************************
39
40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved.
42
43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met:
45
46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer.
48
49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution.
52
53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission.
56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE.
68
69 *******************************************************************************/
70 /*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 *
73 * TODO (in order of importance):
74 *
75 * - Rework how parameters are loaded from the EEPROM.
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.231 2012/08/09 07:48:39 msaitoh Exp $");
80
81 #include <sys/param.h>
82 #include <sys/systm.h>
83 #include <sys/callout.h>
84 #include <sys/mbuf.h>
85 #include <sys/malloc.h>
86 #include <sys/kernel.h>
87 #include <sys/socket.h>
88 #include <sys/ioctl.h>
89 #include <sys/errno.h>
90 #include <sys/device.h>
91 #include <sys/queue.h>
92 #include <sys/syslog.h>
93
94 #include <sys/rnd.h>
95
96 #include <net/if.h>
97 #include <net/if_dl.h>
98 #include <net/if_media.h>
99 #include <net/if_ether.h>
100
101 #include <net/bpf.h>
102
103 #include <netinet/in.h> /* XXX for struct ip */
104 #include <netinet/in_systm.h> /* XXX for struct ip */
105 #include <netinet/ip.h> /* XXX for struct ip */
106 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */
107 #include <netinet/tcp.h> /* XXX for struct tcphdr */
108
109 #include <sys/bus.h>
110 #include <sys/intr.h>
111 #include <machine/endian.h>
112
113 #include <dev/mii/mii.h>
114 #include <dev/mii/miivar.h>
115 #include <dev/mii/miidevs.h>
116 #include <dev/mii/mii_bitbang.h>
117 #include <dev/mii/ikphyreg.h>
118 #include <dev/mii/igphyreg.h>
119 #include <dev/mii/igphyvar.h>
120 #include <dev/mii/inbmphyreg.h>
121
122 #include <dev/pci/pcireg.h>
123 #include <dev/pci/pcivar.h>
124 #include <dev/pci/pcidevs.h>
125
126 #include <dev/pci/if_wmreg.h>
127 #include <dev/pci/if_wmvar.h>
128
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE;

/*
 * Conditional debug printf.  "x" is a WM_DEBUG_* category mask and "y"
 * is a parenthesized printf() argument list.  Wrapped in do/while(0) so
 * the macro expands to a single statement and composes safely with
 * if/else in callers (the previous bare-"if" form had a dangling-else
 * hazard).
 */
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	do { } while (0)	/* nothing */
#endif /* WM_DEBUG */
142
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256	/* max DMA segments per packet */
#define	WM_IFQUEUELEN		256	/* if_snd queue length advertised up */
#define	WM_TXQUEUELEN_MAX	64	/* software Tx jobs managed at once */
#define	WM_TXQUEUELEN_MAX_82547	16	/* reduced job count for i82547 */
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
/* Masking works because sc_txnum must be a power of two (see wm_softc). */
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)	/* Tx job GC threshold */
#define	WM_NTXDESC_82542	256	/* hardware ring size on < 82544 */
#define	WM_NTXDESC_82544	4096	/* hardware ring size on >= 82544 */
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
/* Masking works because sc_ntxdesc must be a power of two (see wm_softc). */
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

/* Largest contiguous DMA mapping we need for one Tx job (TSO payload). */
#define	WM_MAXTXDMA		round_page(IP_MAXPACKET)	/* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
181
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 *
 * NOTE: the hardware reads these structures directly, so their layout
 * must match the descriptor formats in if_wmreg.h exactly; do not add,
 * reorder, or pad members.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

/* Same layout for pre-82544 chips, with the smaller 256-entry Tx ring. */
struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

/* Byte offsets of individual descriptors within the control-data clump. */
#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
208
/*
 * Software state for transmit jobs.  One job may cover several hardware
 * descriptors (txs_firstdesc .. txs_lastdesc).
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
229
/* Link-up polling timeout, in ticks of wm_tick(). */
#define	WM_LINKUP_TIMEOUT	50

/*
 * Per-function SW/FW semaphore bits for PHY access, indexed by the
 * chip function number (presumably sc_funcid, 0-3 — the users of this
 * table are outside this chunk; verify against callers).
 * NOTE(review): never written here as far as this chunk shows — could
 * likely be const; confirm before changing.
 */
static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
238
/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	pcitag_t sc_pcitag;		/* PCI device tag */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;		/* Rx buffer offset; see WM_INIT_RXDESC */

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;		/* ICH8 NVM flash region base */
	int sc_ich8_flash_bank_size;	/* ICH8 NVM flash bank size */
	int sc_nvm_k1_enabled;		/* NVM says K1 power state enabled */

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
/* Convenience accessors for the control-data clump. */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;		/* discarding current Rx packet */
	int sc_rxlen;			/* bytes in Rx chain; see WM_RXCHAIN_* */
	struct mbuf *sc_rxhead;		/* first mbuf of current Rx chain */
	struct mbuf *sc_rxtail;		/* last mbuf of current Rx chain */
	struct mbuf **sc_rxtailp;	/* points at chain's NULL terminator */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};
379
/*
 * Reset the software Rx mbuf chain: empty chain, tail pointer aimed at
 * sc_rxhead so the next WM_RXCHAIN_LINK() stores there, length zero.
 */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/* Append mbuf "m" to the Rx chain and advance the tail pointer. */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/*
 * Event counter helpers; compile to nothing when WM_EVENT_COUNTERS
 * is not configured, so counting costs nothing in production kernels.
 */
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif
400
/* 32-bit access to the memory-mapped CSR register space. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
/* Read STATUS to force posted writes out to the device. */
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

/* Access to the ICH8 NVM flash register window. */
#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

/* Bus (DMA) address of Tx/Rx descriptor "x" in the control-data clump. */
#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

/*
 * Low/high 32-bit halves of descriptor addresses, for programming the
 * 64-bit base-address register pairs; high half is 0 on 32-bit busses.
 */
#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
430
/*
 * bus_dmamap_sync() "n" Tx descriptors starting at index "x" with the
 * given ops.  Handles the ring wrapping past WM_NTXDESC() by splitting
 * the sync into a tail piece and a piece starting at descriptor 0.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/* Sync a single Rx descriptor (no wrap handling needed for one entry). */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
457
/*
 * (Re)initialize Rx descriptor "x" to point at its software buffer,
 * sync it for the device, and advance the hardware RDT register so the
 * chip may use it.  Caller must have a valid mbuf in rxs_mbuf.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	/* Clear all status fields so we can detect hardware updates. */ \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	/* Hand the descriptor to the chip. */				\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
491
/* Forward declarations: ifnet entry points and power management. */
static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

/* Reset, Rx buffer management, EEPROM/NVM access, periodic tick. */
static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

/* Receive filter and VLAN setup. */
static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

/* Interrupt handler and its Tx/Rx/link sub-handlers. */
static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

/* TBI (1000BASE-X fiber) media support. */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

/* GMII (copper PHY) support; per-chip MII register access variants. */
static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

/* Kumeran (MAC-to-PHY interconnect) register access. */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

/* Autoconfiguration, NVM detection, and hardware semaphores. */
static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

/* ICH8 flash NVM access and chip-specific work-arounds. */
static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		     uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

/* Autoconfiguration glue: match/attach/detach for the "wm" driver. */
CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
610
611 /*
612 * Devices supported by this driver.
613 */
614 static const struct wm_product {
615 pci_vendor_id_t wmp_vendor;
616 pci_product_id_t wmp_product;
617 const char *wmp_name;
618 wm_chip_type wmp_type;
619 int wmp_flags;
620 #define WMP_F_1000X 0x01
621 #define WMP_F_1000T 0x02
622 #define WMP_F_SERDES 0x04
623 } wm_products[] = {
624 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
625 "Intel i82542 1000BASE-X Ethernet",
626 WM_T_82542_2_1, WMP_F_1000X },
627
628 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
629 "Intel i82543GC 1000BASE-X Ethernet",
630 WM_T_82543, WMP_F_1000X },
631
632 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
633 "Intel i82543GC 1000BASE-T Ethernet",
634 WM_T_82543, WMP_F_1000T },
635
636 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
637 "Intel i82544EI 1000BASE-T Ethernet",
638 WM_T_82544, WMP_F_1000T },
639
640 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
641 "Intel i82544EI 1000BASE-X Ethernet",
642 WM_T_82544, WMP_F_1000X },
643
644 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
645 "Intel i82544GC 1000BASE-T Ethernet",
646 WM_T_82544, WMP_F_1000T },
647
648 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
649 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
650 WM_T_82544, WMP_F_1000T },
651
652 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
653 "Intel i82540EM 1000BASE-T Ethernet",
654 WM_T_82540, WMP_F_1000T },
655
656 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
657 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
658 WM_T_82540, WMP_F_1000T },
659
660 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
661 "Intel i82540EP 1000BASE-T Ethernet",
662 WM_T_82540, WMP_F_1000T },
663
664 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
665 "Intel i82540EP 1000BASE-T Ethernet",
666 WM_T_82540, WMP_F_1000T },
667
668 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
669 "Intel i82540EP 1000BASE-T Ethernet",
670 WM_T_82540, WMP_F_1000T },
671
672 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
673 "Intel i82545EM 1000BASE-T Ethernet",
674 WM_T_82545, WMP_F_1000T },
675
676 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
677 "Intel i82545GM 1000BASE-T Ethernet",
678 WM_T_82545_3, WMP_F_1000T },
679
680 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
681 "Intel i82545GM 1000BASE-X Ethernet",
682 WM_T_82545_3, WMP_F_1000X },
683 #if 0
684 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
685 "Intel i82545GM Gigabit Ethernet (SERDES)",
686 WM_T_82545_3, WMP_F_SERDES },
687 #endif
688 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
689 "Intel i82546EB 1000BASE-T Ethernet",
690 WM_T_82546, WMP_F_1000T },
691
692 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
693 "Intel i82546EB 1000BASE-T Ethernet",
694 WM_T_82546, WMP_F_1000T },
695
696 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
697 "Intel i82545EM 1000BASE-X Ethernet",
698 WM_T_82545, WMP_F_1000X },
699
700 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
701 "Intel i82546EB 1000BASE-X Ethernet",
702 WM_T_82546, WMP_F_1000X },
703
704 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
705 "Intel i82546GB 1000BASE-T Ethernet",
706 WM_T_82546_3, WMP_F_1000T },
707
708 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
709 "Intel i82546GB 1000BASE-X Ethernet",
710 WM_T_82546_3, WMP_F_1000X },
711 #if 0
712 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
713 "Intel i82546GB Gigabit Ethernet (SERDES)",
714 WM_T_82546_3, WMP_F_SERDES },
715 #endif
716 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
717 "i82546GB quad-port Gigabit Ethernet",
718 WM_T_82546_3, WMP_F_1000T },
719
720 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
721 "i82546GB quad-port Gigabit Ethernet (KSP3)",
722 WM_T_82546_3, WMP_F_1000T },
723
724 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
725 "Intel PRO/1000MT (82546GB)",
726 WM_T_82546_3, WMP_F_1000T },
727
728 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
729 "Intel i82541EI 1000BASE-T Ethernet",
730 WM_T_82541, WMP_F_1000T },
731
732 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
733 "Intel i82541ER (LOM) 1000BASE-T Ethernet",
734 WM_T_82541, WMP_F_1000T },
735
736 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
737 "Intel i82541EI Mobile 1000BASE-T Ethernet",
738 WM_T_82541, WMP_F_1000T },
739
740 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
741 "Intel i82541ER 1000BASE-T Ethernet",
742 WM_T_82541_2, WMP_F_1000T },
743
744 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
745 "Intel i82541GI 1000BASE-T Ethernet",
746 WM_T_82541_2, WMP_F_1000T },
747
748 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
749 "Intel i82541GI Mobile 1000BASE-T Ethernet",
750 WM_T_82541_2, WMP_F_1000T },
751
752 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
753 "Intel i82541PI 1000BASE-T Ethernet",
754 WM_T_82541_2, WMP_F_1000T },
755
756 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
757 "Intel i82547EI 1000BASE-T Ethernet",
758 WM_T_82547, WMP_F_1000T },
759
760 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
761 "Intel i82547EI Mobile 1000BASE-T Ethernet",
762 WM_T_82547, WMP_F_1000T },
763
764 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
765 "Intel i82547GI 1000BASE-T Ethernet",
766 WM_T_82547_2, WMP_F_1000T },
767
768 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
769 "Intel PRO/1000 PT (82571EB)",
770 WM_T_82571, WMP_F_1000T },
771
772 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
773 "Intel PRO/1000 PF (82571EB)",
774 WM_T_82571, WMP_F_1000X },
775 #if 0
776 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
777 "Intel PRO/1000 PB (82571EB)",
778 WM_T_82571, WMP_F_SERDES },
779 #endif
780 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
781 "Intel PRO/1000 QT (82571EB)",
782 WM_T_82571, WMP_F_1000T },
783
784 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
785 "Intel i82572EI 1000baseT Ethernet",
786 WM_T_82572, WMP_F_1000T },
787
788 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
789 "Intel PRO/1000 PT Quad Port Server Adapter",
790 WM_T_82571, WMP_F_1000T, },
791
792 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
793 "Intel i82572EI 1000baseX Ethernet",
794 WM_T_82572, WMP_F_1000X },
795 #if 0
796 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
797 "Intel i82572EI Gigabit Ethernet (SERDES)",
798 WM_T_82572, WMP_F_SERDES },
799 #endif
800
801 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
802 "Intel i82572EI 1000baseT Ethernet",
803 WM_T_82572, WMP_F_1000T },
804
805 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
806 "Intel i82573E",
807 WM_T_82573, WMP_F_1000T },
808
809 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
810 "Intel i82573E IAMT",
811 WM_T_82573, WMP_F_1000T },
812
813 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
814 "Intel i82573L Gigabit Ethernet",
815 WM_T_82573, WMP_F_1000T },
816
817 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
818 "Intel i82574L",
819 WM_T_82574, WMP_F_1000T },
820
821 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
822 "Intel i82583V",
823 WM_T_82583, WMP_F_1000T },
824
825 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
826 "i80003 dual 1000baseT Ethernet",
827 WM_T_80003, WMP_F_1000T },
828
829 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
830 "i80003 dual 1000baseX Ethernet",
831 WM_T_80003, WMP_F_1000T },
832 #if 0
833 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
834 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
835 WM_T_80003, WMP_F_SERDES },
836 #endif
837
838 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
839 "Intel i80003 1000baseT Ethernet",
840 WM_T_80003, WMP_F_1000T },
841 #if 0
842 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
843 "Intel i80003 Gigabit Ethernet (SERDES)",
844 WM_T_80003, WMP_F_SERDES },
845 #endif
846 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
847 "Intel i82801H (M_AMT) LAN Controller",
848 WM_T_ICH8, WMP_F_1000T },
849 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
850 "Intel i82801H (AMT) LAN Controller",
851 WM_T_ICH8, WMP_F_1000T },
852 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
853 "Intel i82801H LAN Controller",
854 WM_T_ICH8, WMP_F_1000T },
855 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
856 "Intel i82801H (IFE) LAN Controller",
857 WM_T_ICH8, WMP_F_1000T },
858 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
859 "Intel i82801H (M) LAN Controller",
860 WM_T_ICH8, WMP_F_1000T },
861 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
862 "Intel i82801H IFE (GT) LAN Controller",
863 WM_T_ICH8, WMP_F_1000T },
864 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
865 "Intel i82801H IFE (G) LAN Controller",
866 WM_T_ICH8, WMP_F_1000T },
867 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
868 "82801I (AMT) LAN Controller",
869 WM_T_ICH9, WMP_F_1000T },
870 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
871 "82801I LAN Controller",
872 WM_T_ICH9, WMP_F_1000T },
873 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
874 "82801I (G) LAN Controller",
875 WM_T_ICH9, WMP_F_1000T },
876 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
877 "82801I (GT) LAN Controller",
878 WM_T_ICH9, WMP_F_1000T },
879 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
880 "82801I (C) LAN Controller",
881 WM_T_ICH9, WMP_F_1000T },
882 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
883 "82801I mobile LAN Controller",
884 WM_T_ICH9, WMP_F_1000T },
885 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
886 "82801I mobile (V) LAN Controller",
887 WM_T_ICH9, WMP_F_1000T },
888 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
889 "82801I mobile (AMT) LAN Controller",
890 WM_T_ICH9, WMP_F_1000T },
891 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
892 "82567LM-4 LAN Controller",
893 WM_T_ICH9, WMP_F_1000T },
894 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
895 "82567V-3 LAN Controller",
896 WM_T_ICH9, WMP_F_1000T },
897 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
898 "82567LM-2 LAN Controller",
899 WM_T_ICH10, WMP_F_1000T },
900 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
901 "82567LF-2 LAN Controller",
902 WM_T_ICH10, WMP_F_1000T },
903 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
904 "82567LM-3 LAN Controller",
905 WM_T_ICH10, WMP_F_1000T },
906 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
907 "82567LF-3 LAN Controller",
908 WM_T_ICH10, WMP_F_1000T },
909 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
910 "82567V-2 LAN Controller",
911 WM_T_ICH10, WMP_F_1000T },
912 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
913 "82567V-3? LAN Controller",
914 WM_T_ICH10, WMP_F_1000T },
915 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
916 "HANKSVILLE LAN Controller",
917 WM_T_ICH10, WMP_F_1000T },
918 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
919 "PCH LAN (82577LM) Controller",
920 WM_T_PCH, WMP_F_1000T },
921 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
922 "PCH LAN (82577LC) Controller",
923 WM_T_PCH, WMP_F_1000T },
924 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
925 "PCH LAN (82578DM) Controller",
926 WM_T_PCH, WMP_F_1000T },
927 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
928 "PCH LAN (82578DC) Controller",
929 WM_T_PCH2, WMP_F_1000T },
930 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
931 "PCH2 LAN (82579LM) Controller",
932 WM_T_PCH2, WMP_F_1000T },
933 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
934 "PCH2 LAN (82579V) Controller",
935 WM_T_PCH, WMP_F_1000T },
936 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
937 "82575EB dual-1000baseT Ethernet",
938 WM_T_82575, WMP_F_1000T },
939 #if 0
940 /*
941 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
942 * disabled for now ...
943 */
944 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
945 "82575EB dual-1000baseX Ethernet (SERDES)",
946 WM_T_82575, WMP_F_SERDES },
947 #endif
948 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
949 "82575GB quad-1000baseT Ethernet",
950 WM_T_82575, WMP_F_1000T },
951 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
952 "82575GB quad-1000baseT Ethernet (PM)",
953 WM_T_82575, WMP_F_1000T },
954 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
955 "82576 1000BaseT Ethernet",
956 WM_T_82576, WMP_F_1000T },
957 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
958 "82576 1000BaseX Ethernet",
959 WM_T_82576, WMP_F_1000X },
960 #if 0
961 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
962 "82576 gigabit Ethernet (SERDES)",
963 WM_T_82576, WMP_F_SERDES },
964 #endif
965 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
966 "82576 quad-1000BaseT Ethernet",
967 WM_T_82576, WMP_F_1000T },
968 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
969 "82576 gigabit Ethernet",
970 WM_T_82576, WMP_F_1000T },
971 #if 0
972 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
973 "82576 gigabit Ethernet (SERDES)",
974 WM_T_82576, WMP_F_SERDES },
975 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
976 "82576 quad-gigabit Ethernet (SERDES)",
977 WM_T_82576, WMP_F_SERDES },
978 #endif
979 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
980 "82580 1000BaseT Ethernet",
981 WM_T_82580, WMP_F_1000T },
982 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
983 "82580 1000BaseX Ethernet",
984 WM_T_82580, WMP_F_1000X },
985 #if 0
986 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
987 "82580 1000BaseT Ethernet (SERDES)",
988 WM_T_82580, WMP_F_SERDES },
989 #endif
990 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
991 "82580 gigabit Ethernet (SGMII)",
992 WM_T_82580, WMP_F_1000T },
993 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
994 "82580 dual-1000BaseT Ethernet",
995 WM_T_82580, WMP_F_1000T },
996 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER,
997 "82580 1000BaseT Ethernet",
998 WM_T_82580ER, WMP_F_1000T },
999 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER_DUAL,
1000 "82580 dual-1000BaseT Ethernet",
1001 WM_T_82580ER, WMP_F_1000T },
1002 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1003 "82580 quad-1000BaseX Ethernet",
1004 WM_T_82580, WMP_F_1000X },
1005 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1006 "I350 Gigabit Network Connection",
1007 WM_T_I350, WMP_F_1000T },
1008 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1009 "I350 Gigabit Fiber Network Connection",
1010 WM_T_I350, WMP_F_1000X },
1011 #if 0
1012 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1013 "I350 Gigabit Backplane Connection",
1014 WM_T_I350, WMP_F_SERDES },
1015 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1016 "I350 Gigabit Connection",
1017 WM_T_I350, WMP_F_1000T },
1018 #endif
1019 { 0, 0,
1020 NULL,
1021 0, 0 },
1022 };
1023
#ifdef WM_EVENT_COUNTERS
/*
 * Storage for per-Tx-segment event counter names, one slot per
 * possible Tx DMA segment.  The element size fits "txseg" plus up
 * to three digits and a NUL; presumably the strings are generated
 * at attach time — confirm against the evcnt attach code.
 */
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */
1027
#if 0 /* Not currently used */
/*
 * wm_io_read:
 *
 *	Read a chip register through the I/O-mapped indirect access
 *	window: writing the register offset at I/O offset 0 selects
 *	the register, and the value is then read back from I/O
 *	offset 4.  The two accesses must occur in this order.
 */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
1037
/*
 * wm_io_write:
 *
 *	Write a chip register through the I/O-mapped indirect access
 *	window: the register offset is written at I/O offset 0 to
 *	select the target, then the data at I/O offset 4.  The two
 *	writes must occur in this order.
 */
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
1045
1046 static inline void
1047 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1048 uint32_t data)
1049 {
1050 uint32_t regval;
1051 int i;
1052
1053 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1054
1055 CSR_WRITE(sc, reg, regval);
1056
1057 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1058 delay(5);
1059 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1060 break;
1061 }
1062 if (i == SCTL_CTL_POLL_TIMEOUT) {
1063 aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
1064 device_xname(sc->sc_dev), reg);
1065 }
1066 }
1067
1068 static inline void
1069 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1070 {
1071 wa->wa_low = htole32(v & 0xffffffffU);
1072 if (sizeof(bus_addr_t) == 8)
1073 wa->wa_high = htole32((uint64_t) v >> 32);
1074 else
1075 wa->wa_high = 0;
1076 }
1077
1078 static void
1079 wm_set_spiaddrbits(struct wm_softc *sc)
1080 {
1081 uint32_t reg;
1082
1083 sc->sc_flags |= WM_F_EEPROM_SPI;
1084 reg = CSR_READ(sc, WMREG_EECD);
1085 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1086 }
1087
1088 static const struct wm_product *
1089 wm_lookup(const struct pci_attach_args *pa)
1090 {
1091 const struct wm_product *wmp;
1092
1093 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1094 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1095 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1096 return wmp;
1097 }
1098 return NULL;
1099 }
1100
1101 static int
1102 wm_match(device_t parent, cfdata_t cf, void *aux)
1103 {
1104 struct pci_attach_args *pa = aux;
1105
1106 if (wm_lookup(pa) != NULL)
1107 return 1;
1108
1109 return 0;
1110 }
1111
1112 static void
1113 wm_attach(device_t parent, device_t self, void *aux)
1114 {
1115 struct wm_softc *sc = device_private(self);
1116 struct pci_attach_args *pa = aux;
1117 prop_dictionary_t dict;
1118 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1119 pci_chipset_tag_t pc = pa->pa_pc;
1120 pci_intr_handle_t ih;
1121 const char *intrstr = NULL;
1122 const char *eetype, *xname;
1123 bus_space_tag_t memt;
1124 bus_space_handle_t memh;
1125 bus_size_t memsize;
1126 int memh_valid;
1127 int i, error;
1128 const struct wm_product *wmp;
1129 prop_data_t ea;
1130 prop_number_t pn;
1131 uint8_t enaddr[ETHER_ADDR_LEN];
1132 uint16_t cfg1, cfg2, swdpin, io3;
1133 pcireg_t preg, memtype;
1134 uint16_t eeprom_data, apme_mask;
1135 uint32_t reg;
1136
1137 sc->sc_dev = self;
1138 callout_init(&sc->sc_tick_ch, 0);
1139
1140 sc->sc_wmp = wmp = wm_lookup(pa);
1141 if (wmp == NULL) {
1142 printf("\n");
1143 panic("wm_attach: impossible");
1144 }
1145
1146 sc->sc_pc = pa->pa_pc;
1147 sc->sc_pcitag = pa->pa_tag;
1148
1149 if (pci_dma64_available(pa))
1150 sc->sc_dmat = pa->pa_dmat64;
1151 else
1152 sc->sc_dmat = pa->pa_dmat;
1153
1154 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1155 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1156
1157 sc->sc_type = wmp->wmp_type;
1158 if (sc->sc_type < WM_T_82543) {
1159 if (sc->sc_rev < 2) {
1160 aprint_error_dev(sc->sc_dev,
1161 "i82542 must be at least rev. 2\n");
1162 return;
1163 }
1164 if (sc->sc_rev < 3)
1165 sc->sc_type = WM_T_82542_2_0;
1166 }
1167
1168 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1169 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
1170 || (sc->sc_type == WM_T_I350))
1171 sc->sc_flags |= WM_F_NEWQUEUE;
1172
1173 /* Set device properties (mactype) */
1174 dict = device_properties(sc->sc_dev);
1175 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1176
1177 /*
1178 * Map the device. All devices support memory-mapped acccess,
1179 * and it is really required for normal operation.
1180 */
1181 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1182 switch (memtype) {
1183 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1184 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1185 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1186 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1187 break;
1188 default:
1189 memh_valid = 0;
1190 break;
1191 }
1192
1193 if (memh_valid) {
1194 sc->sc_st = memt;
1195 sc->sc_sh = memh;
1196 sc->sc_ss = memsize;
1197 } else {
1198 aprint_error_dev(sc->sc_dev,
1199 "unable to map device registers\n");
1200 return;
1201 }
1202
1203 wm_get_wakeup(sc);
1204
1205 /*
1206 * In addition, i82544 and later support I/O mapped indirect
1207 * register access. It is not desirable (nor supported in
1208 * this driver) to use it for normal operation, though it is
1209 * required to work around bugs in some chip versions.
1210 */
1211 if (sc->sc_type >= WM_T_82544) {
1212 /* First we have to find the I/O BAR. */
1213 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1214 if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
1215 PCI_MAPREG_TYPE_IO)
1216 break;
1217 }
1218 if (i != PCI_MAPREG_END) {
1219 /*
1220 * We found PCI_MAPREG_TYPE_IO. Note that 82580
1221 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
1222 * It's no problem because newer chips has no this
1223 * bug.
1224 *
1225 * The i8254x doesn't apparently respond when the
1226 * I/O BAR is 0, which looks somewhat like it's not
1227 * been configured.
1228 */
1229 preg = pci_conf_read(pc, pa->pa_tag, i);
1230 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1231 aprint_error_dev(sc->sc_dev,
1232 "WARNING: I/O BAR at zero.\n");
1233 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1234 0, &sc->sc_iot, &sc->sc_ioh,
1235 NULL, &sc->sc_ios) == 0) {
1236 sc->sc_flags |= WM_F_IOH_VALID;
1237 } else {
1238 aprint_error_dev(sc->sc_dev,
1239 "WARNING: unable to map I/O space\n");
1240 }
1241 }
1242
1243 }
1244
1245 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1246 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1247 preg |= PCI_COMMAND_MASTER_ENABLE;
1248 if (sc->sc_type < WM_T_82542_2_1)
1249 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1250 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1251
1252 /* power up chip */
1253 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1254 NULL)) && error != EOPNOTSUPP) {
1255 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1256 return;
1257 }
1258
1259 /*
1260 * Map and establish our interrupt.
1261 */
1262 if (pci_intr_map(pa, &ih)) {
1263 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1264 return;
1265 }
1266 intrstr = pci_intr_string(pc, ih);
1267 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1268 if (sc->sc_ih == NULL) {
1269 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1270 if (intrstr != NULL)
1271 aprint_error(" at %s", intrstr);
1272 aprint_error("\n");
1273 return;
1274 }
1275 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1276
1277 /*
1278 * Check the function ID (unit number of the chip).
1279 */
1280 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1281 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1282 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1283 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
1284 || (sc->sc_type == WM_T_I350))
1285 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1286 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1287 else
1288 sc->sc_funcid = 0;
1289
1290 /*
1291 * Determine a few things about the bus we're connected to.
1292 */
1293 if (sc->sc_type < WM_T_82543) {
1294 /* We don't really know the bus characteristics here. */
1295 sc->sc_bus_speed = 33;
1296 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1297 /*
1298 * CSA (Communication Streaming Architecture) is about as fast
1299 * a 32-bit 66MHz PCI Bus.
1300 */
1301 sc->sc_flags |= WM_F_CSA;
1302 sc->sc_bus_speed = 66;
1303 aprint_verbose_dev(sc->sc_dev,
1304 "Communication Streaming Architecture\n");
1305 if (sc->sc_type == WM_T_82547) {
1306 callout_init(&sc->sc_txfifo_ch, 0);
1307 callout_setfunc(&sc->sc_txfifo_ch,
1308 wm_82547_txfifo_stall, sc);
1309 aprint_verbose_dev(sc->sc_dev,
1310 "using 82547 Tx FIFO stall work-around\n");
1311 }
1312 } else if (sc->sc_type >= WM_T_82571) {
1313 sc->sc_flags |= WM_F_PCIE;
1314 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1315 && (sc->sc_type != WM_T_ICH10)
1316 && (sc->sc_type != WM_T_PCH)
1317 && (sc->sc_type != WM_T_PCH2)) {
1318 sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1319 /* ICH* and PCH* have no PCIe capability registers */
1320 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1321 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1322 NULL) == 0)
1323 aprint_error_dev(sc->sc_dev,
1324 "unable to find PCIe capability\n");
1325 }
1326 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1327 } else {
1328 reg = CSR_READ(sc, WMREG_STATUS);
1329 if (reg & STATUS_BUS64)
1330 sc->sc_flags |= WM_F_BUS64;
1331 if ((reg & STATUS_PCIX_MODE) != 0) {
1332 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1333
1334 sc->sc_flags |= WM_F_PCIX;
1335 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1336 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1337 aprint_error_dev(sc->sc_dev,
1338 "unable to find PCIX capability\n");
1339 else if (sc->sc_type != WM_T_82545_3 &&
1340 sc->sc_type != WM_T_82546_3) {
1341 /*
1342 * Work around a problem caused by the BIOS
1343 * setting the max memory read byte count
1344 * incorrectly.
1345 */
1346 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1347 sc->sc_pcixe_capoff + PCI_PCIX_CMD);
1348 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1349 sc->sc_pcixe_capoff + PCI_PCIX_STATUS);
1350
1351 bytecnt =
1352 (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
1353 PCI_PCIX_CMD_BYTECNT_SHIFT;
1354 maxb =
1355 (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
1356 PCI_PCIX_STATUS_MAXB_SHIFT;
1357 if (bytecnt > maxb) {
1358 aprint_verbose_dev(sc->sc_dev,
1359 "resetting PCI-X MMRBC: %d -> %d\n",
1360 512 << bytecnt, 512 << maxb);
1361 pcix_cmd = (pcix_cmd &
1362 ~PCI_PCIX_CMD_BYTECNT_MASK) |
1363 (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
1364 pci_conf_write(pa->pa_pc, pa->pa_tag,
1365 sc->sc_pcixe_capoff + PCI_PCIX_CMD,
1366 pcix_cmd);
1367 }
1368 }
1369 }
1370 /*
1371 * The quad port adapter is special; it has a PCIX-PCIX
1372 * bridge on the board, and can run the secondary bus at
1373 * a higher speed.
1374 */
1375 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1376 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1377 : 66;
1378 } else if (sc->sc_flags & WM_F_PCIX) {
1379 switch (reg & STATUS_PCIXSPD_MASK) {
1380 case STATUS_PCIXSPD_50_66:
1381 sc->sc_bus_speed = 66;
1382 break;
1383 case STATUS_PCIXSPD_66_100:
1384 sc->sc_bus_speed = 100;
1385 break;
1386 case STATUS_PCIXSPD_100_133:
1387 sc->sc_bus_speed = 133;
1388 break;
1389 default:
1390 aprint_error_dev(sc->sc_dev,
1391 "unknown PCIXSPD %d; assuming 66MHz\n",
1392 reg & STATUS_PCIXSPD_MASK);
1393 sc->sc_bus_speed = 66;
1394 break;
1395 }
1396 } else
1397 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1398 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1399 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1400 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1401 }
1402
1403 /*
1404 * Allocate the control data structures, and create and load the
1405 * DMA map for it.
1406 *
1407 * NOTE: All Tx descriptors must be in the same 4G segment of
1408 * memory. So must Rx descriptors. We simplify by allocating
1409 * both sets within the same 4G segment.
1410 */
1411 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1412 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1413 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1414 sizeof(struct wm_control_data_82542) :
1415 sizeof(struct wm_control_data_82544);
1416 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1417 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1418 &sc->sc_cd_rseg, 0)) != 0) {
1419 aprint_error_dev(sc->sc_dev,
1420 "unable to allocate control data, error = %d\n",
1421 error);
1422 goto fail_0;
1423 }
1424
1425 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1426 sc->sc_cd_rseg, sc->sc_cd_size,
1427 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1428 aprint_error_dev(sc->sc_dev,
1429 "unable to map control data, error = %d\n", error);
1430 goto fail_1;
1431 }
1432
1433 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1434 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1435 aprint_error_dev(sc->sc_dev,
1436 "unable to create control data DMA map, error = %d\n",
1437 error);
1438 goto fail_2;
1439 }
1440
1441 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1442 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1443 aprint_error_dev(sc->sc_dev,
1444 "unable to load control data DMA map, error = %d\n",
1445 error);
1446 goto fail_3;
1447 }
1448
1449 /*
1450 * Create the transmit buffer DMA maps.
1451 */
1452 WM_TXQUEUELEN(sc) =
1453 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1454 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1455 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1456 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1457 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1458 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1459 aprint_error_dev(sc->sc_dev,
1460 "unable to create Tx DMA map %d, error = %d\n",
1461 i, error);
1462 goto fail_4;
1463 }
1464 }
1465
1466 /*
1467 * Create the receive buffer DMA maps.
1468 */
1469 for (i = 0; i < WM_NRXDESC; i++) {
1470 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1471 MCLBYTES, 0, 0,
1472 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1473 aprint_error_dev(sc->sc_dev,
1474 "unable to create Rx DMA map %d error = %d\n",
1475 i, error);
1476 goto fail_5;
1477 }
1478 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1479 }
1480
1481 /* clear interesting stat counters */
1482 CSR_READ(sc, WMREG_COLC);
1483 CSR_READ(sc, WMREG_RXERRC);
1484
1485 /* get PHY control from SMBus to PCIe */
1486 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
1487 wm_smbustopci(sc);
1488
1489 /*
1490 * Reset the chip to a known state.
1491 */
1492 wm_reset(sc);
1493
1494 switch (sc->sc_type) {
1495 case WM_T_82571:
1496 case WM_T_82572:
1497 case WM_T_82573:
1498 case WM_T_82574:
1499 case WM_T_82583:
1500 case WM_T_80003:
1501 case WM_T_ICH8:
1502 case WM_T_ICH9:
1503 case WM_T_ICH10:
1504 case WM_T_PCH:
1505 case WM_T_PCH2:
1506 if (wm_check_mng_mode(sc) != 0)
1507 wm_get_hw_control(sc);
1508 break;
1509 default:
1510 break;
1511 }
1512
1513 /*
1514 * Get some information about the EEPROM.
1515 */
1516 switch (sc->sc_type) {
1517 case WM_T_82542_2_0:
1518 case WM_T_82542_2_1:
1519 case WM_T_82543:
1520 case WM_T_82544:
1521 /* Microwire */
1522 sc->sc_ee_addrbits = 6;
1523 break;
1524 case WM_T_82540:
1525 case WM_T_82545:
1526 case WM_T_82545_3:
1527 case WM_T_82546:
1528 case WM_T_82546_3:
1529 /* Microwire */
1530 reg = CSR_READ(sc, WMREG_EECD);
1531 if (reg & EECD_EE_SIZE)
1532 sc->sc_ee_addrbits = 8;
1533 else
1534 sc->sc_ee_addrbits = 6;
1535 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1536 break;
1537 case WM_T_82541:
1538 case WM_T_82541_2:
1539 case WM_T_82547:
1540 case WM_T_82547_2:
1541 reg = CSR_READ(sc, WMREG_EECD);
1542 if (reg & EECD_EE_TYPE) {
1543 /* SPI */
1544 wm_set_spiaddrbits(sc);
1545 } else
1546 /* Microwire */
1547 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1548 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1549 break;
1550 case WM_T_82571:
1551 case WM_T_82572:
1552 /* SPI */
1553 wm_set_spiaddrbits(sc);
1554 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1555 break;
1556 case WM_T_82573:
1557 case WM_T_82574:
1558 case WM_T_82583:
1559 if (wm_is_onboard_nvm_eeprom(sc) == 0)
1560 sc->sc_flags |= WM_F_EEPROM_FLASH;
1561 else {
1562 /* SPI */
1563 wm_set_spiaddrbits(sc);
1564 }
1565 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1566 break;
1567 case WM_T_82575:
1568 case WM_T_82576:
1569 case WM_T_82580:
1570 case WM_T_82580ER:
1571 case WM_T_I350:
1572 case WM_T_80003:
1573 /* SPI */
1574 wm_set_spiaddrbits(sc);
1575 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1576 break;
1577 case WM_T_ICH8:
1578 case WM_T_ICH9:
1579 case WM_T_ICH10:
1580 case WM_T_PCH:
1581 case WM_T_PCH2:
1582 /* FLASH */
1583 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1584 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1585 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1586 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1587 aprint_error_dev(sc->sc_dev,
1588 "can't map FLASH registers\n");
1589 return;
1590 }
1591 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1592 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1593 ICH_FLASH_SECTOR_SIZE;
1594 sc->sc_ich8_flash_bank_size =
1595 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1596 sc->sc_ich8_flash_bank_size -=
1597 (reg & ICH_GFPREG_BASE_MASK);
1598 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1599 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1600 break;
1601 default:
1602 break;
1603 }
1604
1605 /*
1606 * Defer printing the EEPROM type until after verifying the checksum
1607 * This allows the EEPROM type to be printed correctly in the case
1608 * that no EEPROM is attached.
1609 */
1610 /*
1611 * Validate the EEPROM checksum. If the checksum fails, flag
1612 * this for later, so we can fail future reads from the EEPROM.
1613 */
1614 if (wm_validate_eeprom_checksum(sc)) {
1615 /*
1616 * Read twice again because some PCI-e parts fail the
1617 * first check due to the link being in sleep state.
1618 */
1619 if (wm_validate_eeprom_checksum(sc))
1620 sc->sc_flags |= WM_F_EEPROM_INVALID;
1621 }
1622
1623 /* Set device properties (macflags) */
1624 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1625
1626 if (sc->sc_flags & WM_F_EEPROM_INVALID)
1627 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1628 else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1629 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1630 } else {
1631 if (sc->sc_flags & WM_F_EEPROM_SPI)
1632 eetype = "SPI";
1633 else
1634 eetype = "MicroWire";
1635 aprint_verbose_dev(sc->sc_dev,
1636 "%u word (%d address bits) %s EEPROM\n",
1637 1U << sc->sc_ee_addrbits,
1638 sc->sc_ee_addrbits, eetype);
1639 }
1640
1641 /*
1642 * Read the Ethernet address from the EEPROM, if not first found
1643 * in device properties.
1644 */
1645 ea = prop_dictionary_get(dict, "mac-address");
1646 if (ea != NULL) {
1647 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1648 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1649 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1650 } else {
1651 if (wm_read_mac_addr(sc, enaddr) != 0) {
1652 aprint_error_dev(sc->sc_dev,
1653 "unable to read Ethernet address\n");
1654 return;
1655 }
1656 }
1657
1658 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1659 ether_sprintf(enaddr));
1660
1661 /*
1662 * Read the config info from the EEPROM, and set up various
1663 * bits in the control registers based on their contents.
1664 */
1665 pn = prop_dictionary_get(dict, "i82543-cfg1");
1666 if (pn != NULL) {
1667 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1668 cfg1 = (uint16_t) prop_number_integer_value(pn);
1669 } else {
1670 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1671 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1672 return;
1673 }
1674 }
1675
1676 pn = prop_dictionary_get(dict, "i82543-cfg2");
1677 if (pn != NULL) {
1678 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1679 cfg2 = (uint16_t) prop_number_integer_value(pn);
1680 } else {
1681 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1682 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1683 return;
1684 }
1685 }
1686
1687 /* check for WM_F_WOL */
1688 switch (sc->sc_type) {
1689 case WM_T_82542_2_0:
1690 case WM_T_82542_2_1:
1691 case WM_T_82543:
1692 /* dummy? */
1693 eeprom_data = 0;
1694 apme_mask = EEPROM_CFG3_APME;
1695 break;
1696 case WM_T_82544:
1697 apme_mask = EEPROM_CFG2_82544_APM_EN;
1698 eeprom_data = cfg2;
1699 break;
1700 case WM_T_82546:
1701 case WM_T_82546_3:
1702 case WM_T_82571:
1703 case WM_T_82572:
1704 case WM_T_82573:
1705 case WM_T_82574:
1706 case WM_T_82583:
1707 case WM_T_80003:
1708 default:
1709 apme_mask = EEPROM_CFG3_APME;
1710 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1711 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1712 break;
1713 case WM_T_82575:
1714 case WM_T_82576:
1715 case WM_T_82580:
1716 case WM_T_82580ER:
1717 case WM_T_I350:
1718 case WM_T_ICH8:
1719 case WM_T_ICH9:
1720 case WM_T_ICH10:
1721 case WM_T_PCH:
1722 case WM_T_PCH2:
1723 /* XXX The funcid should be checked on some devices */
1724 apme_mask = WUC_APME;
1725 eeprom_data = CSR_READ(sc, WMREG_WUC);
1726 break;
1727 }
1728
1729 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1730 if ((eeprom_data & apme_mask) != 0)
1731 sc->sc_flags |= WM_F_WOL;
1732 #ifdef WM_DEBUG
1733 if ((sc->sc_flags & WM_F_WOL) != 0)
1734 printf("WOL\n");
1735 #endif
1736
1737 /*
1738 * XXX need special handling for some multiple port cards
1739 * to disable a paticular port.
1740 */
1741
1742 if (sc->sc_type >= WM_T_82544) {
1743 pn = prop_dictionary_get(dict, "i82543-swdpin");
1744 if (pn != NULL) {
1745 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1746 swdpin = (uint16_t) prop_number_integer_value(pn);
1747 } else {
1748 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1749 aprint_error_dev(sc->sc_dev,
1750 "unable to read SWDPIN\n");
1751 return;
1752 }
1753 }
1754 }
1755
1756 if (cfg1 & EEPROM_CFG1_ILOS)
1757 sc->sc_ctrl |= CTRL_ILOS;
1758 if (sc->sc_type >= WM_T_82544) {
1759 sc->sc_ctrl |=
1760 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1761 CTRL_SWDPIO_SHIFT;
1762 sc->sc_ctrl |=
1763 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1764 CTRL_SWDPINS_SHIFT;
1765 } else {
1766 sc->sc_ctrl |=
1767 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1768 CTRL_SWDPIO_SHIFT;
1769 }
1770
1771 #if 0
1772 if (sc->sc_type >= WM_T_82544) {
1773 if (cfg1 & EEPROM_CFG1_IPS0)
1774 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1775 if (cfg1 & EEPROM_CFG1_IPS1)
1776 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1777 sc->sc_ctrl_ext |=
1778 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1779 CTRL_EXT_SWDPIO_SHIFT;
1780 sc->sc_ctrl_ext |=
1781 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1782 CTRL_EXT_SWDPINS_SHIFT;
1783 } else {
1784 sc->sc_ctrl_ext |=
1785 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1786 CTRL_EXT_SWDPIO_SHIFT;
1787 }
1788 #endif
1789
1790 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1791 #if 0
1792 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1793 #endif
1794
1795 /*
1796 * Set up some register offsets that are different between
1797 * the i82542 and the i82543 and later chips.
1798 */
1799 if (sc->sc_type < WM_T_82543) {
1800 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1801 sc->sc_tdt_reg = WMREG_OLD_TDT;
1802 } else {
1803 sc->sc_rdt_reg = WMREG_RDT;
1804 sc->sc_tdt_reg = WMREG_TDT;
1805 }
1806
1807 if (sc->sc_type == WM_T_PCH) {
1808 uint16_t val;
1809
1810 /* Save the NVM K1 bit setting */
1811 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1812
1813 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1814 sc->sc_nvm_k1_enabled = 1;
1815 else
1816 sc->sc_nvm_k1_enabled = 0;
1817 }
1818
1819 /*
1820 * Determine if we're TBI,GMII or SGMII mode, and initialize the
1821 * media structures accordingly.
1822 */
1823 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1824 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1825 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_82573
1826 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1827 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1828 wm_gmii_mediainit(sc, wmp->wmp_product);
1829 } else if (sc->sc_type < WM_T_82543 ||
1830 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1831 if (wmp->wmp_flags & WMP_F_1000T)
1832 aprint_error_dev(sc->sc_dev,
1833 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1834 wm_tbi_mediainit(sc);
1835 } else {
1836 switch (sc->sc_type) {
1837 case WM_T_82575:
1838 case WM_T_82576:
1839 case WM_T_82580:
1840 case WM_T_82580ER:
1841 case WM_T_I350:
1842 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1843 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1844 case CTRL_EXT_LINK_MODE_SGMII:
1845 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1846 sc->sc_flags |= WM_F_SGMII;
1847 CSR_WRITE(sc, WMREG_CTRL_EXT,
1848 reg | CTRL_EXT_I2C_ENA);
1849 wm_gmii_mediainit(sc, wmp->wmp_product);
1850 break;
1851 case CTRL_EXT_LINK_MODE_1000KX:
1852 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1853 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1854 CSR_WRITE(sc, WMREG_CTRL_EXT,
1855 reg | CTRL_EXT_I2C_ENA);
1856 panic("not supported yet\n");
1857 break;
1858 case CTRL_EXT_LINK_MODE_GMII:
1859 default:
1860 CSR_WRITE(sc, WMREG_CTRL_EXT,
1861 reg & ~CTRL_EXT_I2C_ENA);
1862 wm_gmii_mediainit(sc, wmp->wmp_product);
1863 break;
1864 }
1865 break;
1866 default:
1867 if (wmp->wmp_flags & WMP_F_1000X)
1868 aprint_error_dev(sc->sc_dev,
1869 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1870 wm_gmii_mediainit(sc, wmp->wmp_product);
1871 }
1872 }
1873
1874 ifp = &sc->sc_ethercom.ec_if;
1875 xname = device_xname(sc->sc_dev);
1876 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1877 ifp->if_softc = sc;
1878 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1879 ifp->if_ioctl = wm_ioctl;
1880 ifp->if_start = wm_start;
1881 ifp->if_watchdog = wm_watchdog;
1882 ifp->if_init = wm_init;
1883 ifp->if_stop = wm_stop;
1884 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1885 IFQ_SET_READY(&ifp->if_snd);
1886
1887 /* Check for jumbo frame */
1888 switch (sc->sc_type) {
1889 case WM_T_82573:
1890 /* XXX limited to 9234 if ASPM is disabled */
1891 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1892 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1893 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1894 break;
1895 case WM_T_82571:
1896 case WM_T_82572:
1897 case WM_T_82574:
1898 case WM_T_82575:
1899 case WM_T_82576:
1900 case WM_T_82580:
1901 case WM_T_82580ER:
1902 case WM_T_I350:
1903 case WM_T_80003:
1904 case WM_T_ICH9:
1905 case WM_T_ICH10:
1906 case WM_T_PCH2: /* PCH2 supports 9K frame size */
1907 /* XXX limited to 9234 */
1908 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1909 break;
1910 case WM_T_PCH:
1911 /* XXX limited to 4096 */
1912 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1913 break;
1914 case WM_T_82542_2_0:
1915 case WM_T_82542_2_1:
1916 case WM_T_82583:
1917 case WM_T_ICH8:
1918 /* No support for jumbo frame */
1919 break;
1920 default:
1921 /* ETHER_MAX_LEN_JUMBO */
1922 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1923 break;
1924 }
1925
1926 /*
1927 * If we're a i82543 or greater, we can support VLANs.
1928 */
1929 if (sc->sc_type == WM_T_82575 || sc->sc_type == WM_T_82576)
1930 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
1931 else if (sc->sc_type >= WM_T_82543)
1932 sc->sc_ethercom.ec_capabilities |=
1933 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1934
1935 /*
1936 * We can perform TCPv4 and UDPv4 checkums in-bound. Only
1937 * on i82543 and later.
1938 */
1939 if (sc->sc_type >= WM_T_82543) {
1940 ifp->if_capabilities |=
1941 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1942 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1943 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1944 IFCAP_CSUM_TCPv6_Tx |
1945 IFCAP_CSUM_UDPv6_Tx;
1946 }
1947
1948 /*
1949 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1950 *
1951 * 82541GI (8086:1076) ... no
1952 * 82572EI (8086:10b9) ... yes
1953 */
1954 if (sc->sc_type >= WM_T_82571) {
1955 ifp->if_capabilities |=
1956 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1957 }
1958
1959 /*
1960 * If we're a i82544 or greater (except i82547), we can do
1961 * TCP segmentation offload.
1962 */
1963 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1964 ifp->if_capabilities |= IFCAP_TSOv4;
1965 }
1966
1967 if (sc->sc_type >= WM_T_82571) {
1968 ifp->if_capabilities |= IFCAP_TSOv6;
1969 }
1970
1971 /*
1972 * Attach the interface.
1973 */
1974 if_attach(ifp);
1975 ether_ifattach(ifp, enaddr);
1976 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
1977 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1978
1979 #ifdef WM_EVENT_COUNTERS
1980 /* Attach event counters. */
1981 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1982 NULL, xname, "txsstall");
1983 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1984 NULL, xname, "txdstall");
1985 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1986 NULL, xname, "txfifo_stall");
1987 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1988 NULL, xname, "txdw");
1989 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1990 NULL, xname, "txqe");
1991 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1992 NULL, xname, "rxintr");
1993 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1994 NULL, xname, "linkintr");
1995
1996 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1997 NULL, xname, "rxipsum");
1998 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1999 NULL, xname, "rxtusum");
2000 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2001 NULL, xname, "txipsum");
2002 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2003 NULL, xname, "txtusum");
2004 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2005 NULL, xname, "txtusum6");
2006
2007 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2008 NULL, xname, "txtso");
2009 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2010 NULL, xname, "txtso6");
2011 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2012 NULL, xname, "txtsopain");
2013
2014 for (i = 0; i < WM_NTXSEGS; i++) {
2015 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2016 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2017 NULL, xname, wm_txseg_evcnt_names[i]);
2018 }
2019
2020 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2021 NULL, xname, "txdrop");
2022
2023 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2024 NULL, xname, "tu");
2025
2026 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2027 NULL, xname, "tx_xoff");
2028 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2029 NULL, xname, "tx_xon");
2030 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2031 NULL, xname, "rx_xoff");
2032 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2033 NULL, xname, "rx_xon");
2034 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2035 NULL, xname, "rx_macctl");
2036 #endif /* WM_EVENT_COUNTERS */
2037
2038 if (pmf_device_register(self, wm_suspend, wm_resume))
2039 pmf_class_network_register(self, ifp);
2040 else
2041 aprint_error_dev(self, "couldn't establish power handler\n");
2042
2043 return;
2044
2045 /*
2046 * Free any resources we've allocated during the failed attach
2047 * attempt. Do this in reverse order and fall through.
2048 */
2049 fail_5:
2050 for (i = 0; i < WM_NRXDESC; i++) {
2051 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2052 bus_dmamap_destroy(sc->sc_dmat,
2053 sc->sc_rxsoft[i].rxs_dmamap);
2054 }
2055 fail_4:
2056 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2057 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2058 bus_dmamap_destroy(sc->sc_dmat,
2059 sc->sc_txsoft[i].txs_dmamap);
2060 }
2061 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2062 fail_3:
2063 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2064 fail_2:
2065 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2066 sc->sc_cd_size);
2067 fail_1:
2068 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2069 fail_0:
2070 return;
2071 }
2072
2073 static int
2074 wm_detach(device_t self, int flags __unused)
2075 {
2076 struct wm_softc *sc = device_private(self);
2077 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2078 int i, s;
2079
2080 s = splnet();
2081 /* Stop the interface. Callouts are stopped in it. */
2082 wm_stop(ifp, 1);
2083 splx(s);
2084
2085 pmf_device_deregister(self);
2086
2087 /* Tell the firmware about the release */
2088 wm_release_manageability(sc);
2089 wm_release_hw_control(sc);
2090
2091 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2092
2093 /* Delete all remaining media. */
2094 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2095
2096 ether_ifdetach(ifp);
2097 if_detach(ifp);
2098
2099
2100 /* Unload RX dmamaps and free mbufs */
2101 wm_rxdrain(sc);
2102
2103 /* Free dmamap. It's the same as the end of the wm_attach() function */
2104 for (i = 0; i < WM_NRXDESC; i++) {
2105 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2106 bus_dmamap_destroy(sc->sc_dmat,
2107 sc->sc_rxsoft[i].rxs_dmamap);
2108 }
2109 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2110 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2111 bus_dmamap_destroy(sc->sc_dmat,
2112 sc->sc_txsoft[i].txs_dmamap);
2113 }
2114 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2115 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2116 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2117 sc->sc_cd_size);
2118 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2119
2120 /* Disestablish the interrupt handler */
2121 if (sc->sc_ih != NULL) {
2122 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2123 sc->sc_ih = NULL;
2124 }
2125
2126 /* Unmap the registers */
2127 if (sc->sc_ss) {
2128 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2129 sc->sc_ss = 0;
2130 }
2131
2132 if (sc->sc_ios) {
2133 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2134 sc->sc_ios = 0;
2135 }
2136
2137 return 0;
2138 }
2139
/*
 * wm_tx_offload:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet: fill in one Livengood TCP/IP context
 *	descriptor at sc_txnext (advancing the ring) and return, via
 *	*cmdp / *fieldsp, the command and option bits the caller must
 *	put into the packet's data descriptors.  Returns 0 always;
 *	unsupported encapsulations simply produce zeroed cmd/fields.
 */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs, cmd, cmdlen, seg;
	uint32_t ipcse;
	struct ether_header *eh;
	int offset, iphl;		/* offset: start of L3 header; iphl: L3 header len */
	uint8_t fields;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/*
	 * Determine where the IP header starts from the Ethernet type.
	 * NOTE(review): htons() is used where ntohs() would be the
	 * conventional direction; the two are the same byte swap on
	 * 16-bit values, so behavior is unaffected.
	 */
	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return 0;
	}

	/* L3 header length comes encoded in the csum_data from the stack. */
	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}
	ipcse = offset + iphl - 1;	/* last byte covered by IP checksum */

	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
	seg = 0;
	fields = 0;

	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
		/*
		 * TSO: the hardware requires ip_len/ip6_plen zeroed and
		 * th_sum preloaded with the pseudo-header checksum, so
		 * patch the headers in the mbuf before handing it off.
		 */
		int hlen = offset + iphl;
		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;

		if (__predict_false(m0->m_len <
		    (hlen + sizeof(struct tcphdr)))) {
			/*
			 * TCP/IP headers are not in the first mbuf; we need
			 * to do this the slow and painful way.  Let's just
			 * hope this doesn't happen very often.
			 */
			struct tcphdr th;

			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);

			m_copydata(m0, hlen, sizeof(th), &th);
			if (v4) {
				struct ip ip;

				m_copydata(m0, offset, sizeof(ip), &ip);
				ip.ip_len = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip, ip_len),
				    sizeof(ip.ip_len), &ip.ip_len);
				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr ip6;

				m_copydata(m0, offset, sizeof(ip6), &ip6);
				ip6.ip6_plen = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip6_hdr, ip6_plen),
				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			hlen += th.th_off << 2;
		} else {
			/*
			 * TCP/IP headers are in the first mbuf; we can do
			 * this the easy way.
			 */
			struct tcphdr *th;

			if (v4) {
				struct ip *ip =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip->ip_len = 0;
				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr *ip6 =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip6->ip6_plen = 0;
				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			/* hlen now covers L2+L3+L4 headers. */
			hlen += th->th_off << 2;
		}

		if (v4) {
			WM_EVCNT_INCR(&sc->sc_ev_txtso);
			cmdlen |= WTX_TCPIP_CMD_IP;
		} else {
			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
			ipcse = 0;	/* no IP checksum range for v6 */
		}
		cmd |= WTX_TCPIP_CMD_TSE;
		cmdlen |= WTX_TCPIP_CMD_TSE |
		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
	}

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	ipcs = WTX_TCPIP_IPCSS(offset) |
	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
	    WTX_TCPIP_IPCSE(ipcse);
	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
	}

	offset += iphl;		/* now the offset of the L4 header */

	if (m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	/* Fill in the context descriptor. */
	t = (struct livengood_tcpip_ctxdesc *)
	    &sc->sc_txdescs[sc->sc_txnext];
	t->tcpip_ipcs = htole32(ipcs);
	t->tcpip_tucs = htole32(tucs);
	t->tcpip_cmdlen = htole32(cmdlen);
	t->tcpip_seg = htole32(seg);
	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

	/* The context descriptor consumes one ring slot of this job. */
	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
	txs->txs_ndesc++;

	*cmdp = cmd;
	*fieldsp = fields;

	return 0;
}
2334
2335 static void
2336 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2337 {
2338 struct mbuf *m;
2339 int i;
2340
2341 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2342 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2343 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2344 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2345 m->m_data, m->m_len, m->m_flags);
2346 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2347 i, i == 1 ? "" : "s");
2348 }
2349
/*
 * wm_82547_txfifo_stall:
 *
 *	Callout used to wait for the 82547 Tx FIFO to drain,
 *	reset the FIFO pointers, and restart packet transmission.
 *	Scheduled (one tick) by wm_82547_txfifo_bugchk() when a packet
 *	would wrap the FIFO ring; reschedules itself until drained.
 */
static void
wm_82547_txfifo_stall(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_txfifo_stall) {
		/*
		 * Drained when the descriptor ring head/tail and both
		 * FIFO head/tail register pairs agree.
		 */
		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
			/*
			 * Packets have drained.  Stop transmitter, reset
			 * FIFO pointers, restart transmitter, and kick
			 * the packet queue.
			 */
			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TCTL, tctl);
			CSR_WRITE_FLUSH(sc);

			/* Clear the stall state and resume transmission. */
			sc->sc_txfifo_head = 0;
			sc->sc_txfifo_stall = 0;
			wm_start(&sc->sc_ethercom.ec_if);
		} else {
			/*
			 * Still waiting for packets to drain; try again in
			 * another tick.
			 */
			callout_schedule(&sc->sc_txfifo_ch, 1);
		}
	}

	splx(s);
}
2396
2397 static void
2398 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2399 {
2400 uint32_t reg;
2401
2402 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2403
2404 if (on != 0)
2405 reg |= EXTCNFCTR_GATE_PHY_CFG;
2406 else
2407 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2408
2409 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2410 }
2411
2412 /*
2413 * wm_82547_txfifo_bugchk:
2414 *
2415 * Check for bug condition in the 82547 Tx FIFO. We need to
2416 * prevent enqueueing a packet that would wrap around the end
2417 * if the Tx FIFO ring buffer, otherwise the chip will croak.
2418 *
2419 * We do this by checking the amount of space before the end
2420 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2421 * the Tx FIFO, wait for all remaining packets to drain, reset
2422 * the internal FIFO pointers to the beginning, and restart
2423 * transmission on the interface.
2424 */
2425 #define WM_FIFO_HDR 0x10
2426 #define WM_82547_PAD_LEN 0x3e0
2427 static int
2428 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2429 {
2430 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2431 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2432
2433 /* Just return if already stalled. */
2434 if (sc->sc_txfifo_stall)
2435 return 1;
2436
2437 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2438 /* Stall only occurs in half-duplex mode. */
2439 goto send_packet;
2440 }
2441
2442 if (len >= WM_82547_PAD_LEN + space) {
2443 sc->sc_txfifo_stall = 1;
2444 callout_schedule(&sc->sc_txfifo_ch, 1);
2445 return 1;
2446 }
2447
2448 send_packet:
2449 sc->sc_txfifo_head += len;
2450 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2451 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2452
2453 return 0;
2454 }
2455
/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Drains the send
 *	queue into the Tx descriptor ring until the queue is empty or
 *	ring/job resources run out, in which case IFF_OACTIVE is set
 *	to tell the stack to back off.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct m_tag *mtag;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
	bus_addr_t curaddr;
	bus_size_t seglen, curlen;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue.  (POLL, not DEQUEUE: we
		 * only commit to the packet once resources are assured.) */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		/* Get a work queue entry; reclaim completed jobs first
		 * if we're running low. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					device_xname(sc->sc_dev)));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		use_tso = (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;

		/*
		 * So says the Linux driver:
		 * The controller does a simple calculation to make sure
		 * there is enough room in the FIFO before initiating the
		 * DMA for each buffer.  The calc is:
		 *	4 = ceil(buffer len / MSS)
		 * To make sure we don't overrun the FIFO, adjust the max
		 * buffer len if the MSS drops.
		 */
		dmamap->dm_maxsegsz =
		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
		    ? m0->m_pkthdr.segsz << 2
		    : WTX_MAX_LEN;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;
		if (use_tso) {
			/* For sentinel descriptor; see below. */
			segs_needed++;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * pack on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    segs_needed, sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/*
		 * Check for 82547 Tx FIFO bug.  We need to do this
		 * once we know we can transmit the packet, since we
		 * do some internal FIFO space accounting here.
		 */
		if (sc->sc_type == WM_T_82547 &&
		    wm_82547_txfifo_bugchk(sc, m0)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: 82547 Tx FIFO bug detected\n",
			    device_xname(sc->sc_dev)));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet.  This may
		 * consume one extra ring slot for a context descriptor. */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			if (wm_tx_offload(sc, txs, &cksumcmd,
					  &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs; seg++) {
			for (seglen = dmamap->dm_segs[seg].ds_len,
			     curaddr = dmamap->dm_segs[seg].ds_addr;
			     seglen != 0;
			     curaddr += curlen, seglen -= curlen,
			     nexttx = WM_NEXTTX(sc, nexttx)) {
				curlen = seglen;

				/*
				 * So says the Linux driver:
				 * Work around for premature descriptor
				 * write-backs in TSO mode.  Append a
				 * 4-byte sentinel descriptor.
				 */
				if (use_tso &&
				    seg == dmamap->dm_nsegs - 1 &&
				    curlen > 8)
					curlen -= 4;

				wm_set_dma_addr(
				    &sc->sc_txdescs[nexttx].wtx_addr,
				    curaddr);
				sc->sc_txdescs[nexttx].wtx_cmdlen =
				    htole32(cksumcmd | curlen);
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
				    0;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
				    cksumfields;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
				lasttx = nexttx;

				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: desc %d: low %#" PRIxPADDR ", "
				     "len %#04zx\n",
				    device_xname(sc->sc_dev), nexttx,
				    curaddr & 0xffffffffUL, curlen));
			}
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_RS);

		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		}

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
		    device_xname(sc->sc_dev),
		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
2762
2763 /*
2764 * wm_watchdog: [ifnet interface function]
2765 *
2766 * Watchdog timer handler.
2767 */
2768 static void
2769 wm_watchdog(struct ifnet *ifp)
2770 {
2771 struct wm_softc *sc = ifp->if_softc;
2772
2773 /*
2774 * Since we're using delayed interrupts, sweep up
2775 * before we report an error.
2776 */
2777 wm_txintr(sc);
2778
2779 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2780 log(LOG_ERR,
2781 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2782 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2783 sc->sc_txnext);
2784 ifp->if_oerrors++;
2785
2786 /* Reset the interface. */
2787 (void) wm_init(ifp);
2788 }
2789
2790 /* Try to get more packets going. */
2791 wm_start(ifp);
2792 }
2793
2794 static int
2795 wm_ifflags_cb(struct ethercom *ec)
2796 {
2797 struct ifnet *ifp = &ec->ec_if;
2798 struct wm_softc *sc = ifp->if_softc;
2799 int change = ifp->if_flags ^ sc->sc_if_flags;
2800
2801 if (change != 0)
2802 sc->sc_if_flags = ifp->if_flags;
2803
2804 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
2805 return ENETRESET;
2806
2807 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2808 wm_set_filter(sc);
2809
2810 wm_set_vlan(sc);
2811
2812 return 0;
2813 }
2814
/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.  Media ioctls are
 *	handled here (with flow-control fixups); link-level address
 *	changes reprogram the receive filter; everything else falls
 *	through to ether_ioctl().
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct sockaddr_dl *sdl;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			/* Remember the requested flow-control bits. */
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCINITIFADDR:
		/* Link-level address change: copy it into if_dl and
		 * reprogram the hardware receive filter. */
		if (ifa->ifa_addr->sa_family == AF_LINK) {
			sdl = satosdl(ifp->if_dl->ifa_addr);
			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
			/* unicast address is first multicast entry */
			wm_set_filter(sc);
			error = 0;
			break;
		}
		/*FALLTHROUGH*/
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		/* ENETRESET from ether_ioctl(): apply the change here. */
		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;	/* nothing further to do */
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return error;
}
2885
/*
 * wm_intr:
 *
 *	Interrupt service routine.  Loops reading ICR (which clears the
 *	asserted causes on read) until none of the causes we enabled
 *	remain set; services receive and transmit unconditionally on
 *	each pass, and link events when flagged.  Returns nonzero if
 *	the interrupt was ours.
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int handled = 0;

	while (1 /* CONSTCOND */) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;
		/* Feed the interrupt cause bits to the entropy pool. */
		rnd_add_uint32(&sc->rnd_source, icr);

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(sc->sc_dev),
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Always service receive, regardless of cause bits. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(sc->sc_dev)));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		/* Always service transmit completions too. */
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
#if defined(WM_DEBUG)
			log(LOG_WARNING, "%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
		}
	}

	if (handled) {
		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return handled;
}
2948
2949 /*
2950 * wm_txintr:
2951 *
2952 * Helper; handle transmit interrupts.
2953 */
2954 static void
2955 wm_txintr(struct wm_softc *sc)
2956 {
2957 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2958 struct wm_txsoft *txs;
2959 uint8_t status;
2960 int i;
2961
2962 ifp->if_flags &= ~IFF_OACTIVE;
2963
2964 /*
2965 * Go through the Tx list and free mbufs for those
2966 * frames which have been transmitted.
2967 */
2968 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2969 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2970 txs = &sc->sc_txsoft[i];
2971
2972 DPRINTF(WM_DEBUG_TX,
2973 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2974
2975 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2976 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2977
2978 status =
2979 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2980 if ((status & WTX_ST_DD) == 0) {
2981 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2982 BUS_DMASYNC_PREREAD);
2983 break;
2984 }
2985
2986 DPRINTF(WM_DEBUG_TX,
2987 ("%s: TX: job %d done: descs %d..%d\n",
2988 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2989 txs->txs_lastdesc));
2990
2991 /*
2992 * XXX We should probably be using the statistics
2993 * XXX registers, but I don't know if they exist
2994 * XXX on chips before the i82544.
2995 */
2996
2997 #ifdef WM_EVENT_COUNTERS
2998 if (status & WTX_ST_TU)
2999 WM_EVCNT_INCR(&sc->sc_ev_tu);
3000 #endif /* WM_EVENT_COUNTERS */
3001
3002 if (status & (WTX_ST_EC|WTX_ST_LC)) {
3003 ifp->if_oerrors++;
3004 if (status & WTX_ST_LC)
3005 log(LOG_WARNING, "%s: late collision\n",
3006 device_xname(sc->sc_dev));
3007 else if (status & WTX_ST_EC) {
3008 ifp->if_collisions += 16;
3009 log(LOG_WARNING, "%s: excessive collisions\n",
3010 device_xname(sc->sc_dev));
3011 }
3012 } else
3013 ifp->if_opackets++;
3014
3015 sc->sc_txfree += txs->txs_ndesc;
3016 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3017 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3018 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3019 m_freem(txs->txs_mbuf);
3020 txs->txs_mbuf = NULL;
3021 }
3022
3023 /* Update the dirty transmit buffer pointer. */
3024 sc->sc_txsdirty = i;
3025 DPRINTF(WM_DEBUG_TX,
3026 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3027
3028 /*
3029 * If there are no more pending transmissions, cancel the watchdog
3030 * timer.
3031 */
3032 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3033 ifp->if_timer = 0;
3034 }
3035
3036 /*
3037 * wm_rxintr:
3038 *
3039 * Helper; handle receive interrupts.
3040 */
3041 static void
3042 wm_rxintr(struct wm_softc *sc)
3043 {
3044 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3045 struct wm_rxsoft *rxs;
3046 struct mbuf *m;
3047 int i, len;
3048 uint8_t status, errors;
3049 uint16_t vlantag;
3050
3051 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3052 rxs = &sc->sc_rxsoft[i];
3053
3054 DPRINTF(WM_DEBUG_RX,
3055 ("%s: RX: checking descriptor %d\n",
3056 device_xname(sc->sc_dev), i));
3057
3058 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3059
3060 status = sc->sc_rxdescs[i].wrx_status;
3061 errors = sc->sc_rxdescs[i].wrx_errors;
3062 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3063 vlantag = sc->sc_rxdescs[i].wrx_special;
3064
3065 if ((status & WRX_ST_DD) == 0) {
3066 /*
3067 * We have processed all of the receive descriptors.
3068 */
3069 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3070 break;
3071 }
3072
3073 if (__predict_false(sc->sc_rxdiscard)) {
3074 DPRINTF(WM_DEBUG_RX,
3075 ("%s: RX: discarding contents of descriptor %d\n",
3076 device_xname(sc->sc_dev), i));
3077 WM_INIT_RXDESC(sc, i);
3078 if (status & WRX_ST_EOP) {
3079 /* Reset our state. */
3080 DPRINTF(WM_DEBUG_RX,
3081 ("%s: RX: resetting rxdiscard -> 0\n",
3082 device_xname(sc->sc_dev)));
3083 sc->sc_rxdiscard = 0;
3084 }
3085 continue;
3086 }
3087
3088 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3089 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3090
3091 m = rxs->rxs_mbuf;
3092
3093 /*
3094 * Add a new receive buffer to the ring, unless of
3095 * course the length is zero. Treat the latter as a
3096 * failed mapping.
3097 */
3098 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3099 /*
3100 * Failed, throw away what we've done so
3101 * far, and discard the rest of the packet.
3102 */
3103 ifp->if_ierrors++;
3104 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3105 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3106 WM_INIT_RXDESC(sc, i);
3107 if ((status & WRX_ST_EOP) == 0)
3108 sc->sc_rxdiscard = 1;
3109 if (sc->sc_rxhead != NULL)
3110 m_freem(sc->sc_rxhead);
3111 WM_RXCHAIN_RESET(sc);
3112 DPRINTF(WM_DEBUG_RX,
3113 ("%s: RX: Rx buffer allocation failed, "
3114 "dropping packet%s\n", device_xname(sc->sc_dev),
3115 sc->sc_rxdiscard ? " (discard)" : ""));
3116 continue;
3117 }
3118
3119 m->m_len = len;
3120 sc->sc_rxlen += len;
3121 DPRINTF(WM_DEBUG_RX,
3122 ("%s: RX: buffer at %p len %d\n",
3123 device_xname(sc->sc_dev), m->m_data, len));
3124
3125 /*
3126 * If this is not the end of the packet, keep
3127 * looking.
3128 */
3129 if ((status & WRX_ST_EOP) == 0) {
3130 WM_RXCHAIN_LINK(sc, m);
3131 DPRINTF(WM_DEBUG_RX,
3132 ("%s: RX: not yet EOP, rxlen -> %d\n",
3133 device_xname(sc->sc_dev), sc->sc_rxlen));
3134 continue;
3135 }
3136
3137 /*
3138 * Okay, we have the entire packet now. The chip is
3139 * configured to include the FCS except I350
3140 * (not all chips can be configured to strip it),
3141 * so we need to trim it.
3142 * May need to adjust length of previous mbuf in the
3143 * chain if the current mbuf is too short.
3144 * For an eratta, the RCTL_SECRC bit in RCTL register
3145 * is always set in I350, so we don't trim it.
3146 */
3147 if (sc->sc_type != WM_T_I350) {
3148 if (m->m_len < ETHER_CRC_LEN) {
3149 sc->sc_rxtail->m_len
3150 -= (ETHER_CRC_LEN - m->m_len);
3151 m->m_len = 0;
3152 } else
3153 m->m_len -= ETHER_CRC_LEN;
3154 len = sc->sc_rxlen - ETHER_CRC_LEN;
3155 } else
3156 len = sc->sc_rxlen;
3157
3158 WM_RXCHAIN_LINK(sc, m);
3159
3160 *sc->sc_rxtailp = NULL;
3161 m = sc->sc_rxhead;
3162
3163 WM_RXCHAIN_RESET(sc);
3164
3165 DPRINTF(WM_DEBUG_RX,
3166 ("%s: RX: have entire packet, len -> %d\n",
3167 device_xname(sc->sc_dev), len));
3168
3169 /*
3170 * If an error occurred, update stats and drop the packet.
3171 */
3172 if (errors &
3173 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3174 if (errors & WRX_ER_SE)
3175 log(LOG_WARNING, "%s: symbol error\n",
3176 device_xname(sc->sc_dev));
3177 else if (errors & WRX_ER_SEQ)
3178 log(LOG_WARNING, "%s: receive sequence error\n",
3179 device_xname(sc->sc_dev));
3180 else if (errors & WRX_ER_CE)
3181 log(LOG_WARNING, "%s: CRC error\n",
3182 device_xname(sc->sc_dev));
3183 m_freem(m);
3184 continue;
3185 }
3186
3187 /*
3188 * No errors. Receive the packet.
3189 */
3190 m->m_pkthdr.rcvif = ifp;
3191 m->m_pkthdr.len = len;
3192
3193 /*
3194 * If VLANs are enabled, VLAN packets have been unwrapped
3195 * for us. Associate the tag with the packet.
3196 */
3197 if ((status & WRX_ST_VP) != 0) {
3198 VLAN_INPUT_TAG(ifp, m,
3199 le16toh(vlantag),
3200 continue);
3201 }
3202
3203 /*
3204 * Set up checksum info for this packet.
3205 */
3206 if ((status & WRX_ST_IXSM) == 0) {
3207 if (status & WRX_ST_IPCS) {
3208 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3209 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3210 if (errors & WRX_ER_IPE)
3211 m->m_pkthdr.csum_flags |=
3212 M_CSUM_IPv4_BAD;
3213 }
3214 if (status & WRX_ST_TCPCS) {
3215 /*
3216 * Note: we don't know if this was TCP or UDP,
3217 * so we just set both bits, and expect the
3218 * upper layers to deal.
3219 */
3220 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3221 m->m_pkthdr.csum_flags |=
3222 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3223 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3224 if (errors & WRX_ER_TCPE)
3225 m->m_pkthdr.csum_flags |=
3226 M_CSUM_TCP_UDP_BAD;
3227 }
3228 }
3229
3230 ifp->if_ipackets++;
3231
3232 /* Pass this up to any BPF listeners. */
3233 bpf_mtap(ifp, m);
3234
3235 /* Pass it on. */
3236 (*ifp->if_input)(ifp, m);
3237 }
3238
3239 /* Update the receive pointer. */
3240 sc->sc_rxptr = i;
3241
3242 DPRINTF(WM_DEBUG_RX,
3243 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3244 }
3245
3246 /*
3247 * wm_linkintr_gmii:
3248 *
3249 * Helper; handle link interrupts for GMII.
3250 */
3251 static void
3252 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3253 {
3254
3255 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3256 __func__));
3257
3258 if (icr & ICR_LSC) {
3259 DPRINTF(WM_DEBUG_LINK,
3260 ("%s: LINK: LSC -> mii_tick\n",
3261 device_xname(sc->sc_dev)));
3262 mii_tick(&sc->sc_mii);
3263 if (sc->sc_type == WM_T_82543) {
3264 int miistatus, active;
3265
3266 /*
3267 * With 82543, we need to force speed and
3268 * duplex on the MAC equal to what the PHY
3269 * speed and duplex configuration is.
3270 */
3271 miistatus = sc->sc_mii.mii_media_status;
3272
3273 if (miistatus & IFM_ACTIVE) {
3274 active = sc->sc_mii.mii_media_active;
3275 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3276 switch (IFM_SUBTYPE(active)) {
3277 case IFM_10_T:
3278 sc->sc_ctrl |= CTRL_SPEED_10;
3279 break;
3280 case IFM_100_TX:
3281 sc->sc_ctrl |= CTRL_SPEED_100;
3282 break;
3283 case IFM_1000_T:
3284 sc->sc_ctrl |= CTRL_SPEED_1000;
3285 break;
3286 default:
3287 /*
3288 * fiber?
3289 * Shoud not enter here.
3290 */
3291 printf("unknown media (%x)\n",
3292 active);
3293 break;
3294 }
3295 if (active & IFM_FDX)
3296 sc->sc_ctrl |= CTRL_FD;
3297 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3298 }
3299 } else if ((sc->sc_type == WM_T_ICH8)
3300 && (sc->sc_phytype == WMPHY_IGP_3)) {
3301 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3302 } else if (sc->sc_type == WM_T_PCH) {
3303 wm_k1_gig_workaround_hv(sc,
3304 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3305 }
3306
3307 if ((sc->sc_phytype == WMPHY_82578)
3308 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3309 == IFM_1000_T)) {
3310
3311 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3312 delay(200*1000); /* XXX too big */
3313
3314 /* Link stall fix for link up */
3315 wm_gmii_hv_writereg(sc->sc_dev, 1,
3316 HV_MUX_DATA_CTRL,
3317 HV_MUX_DATA_CTRL_GEN_TO_MAC
3318 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3319 wm_gmii_hv_writereg(sc->sc_dev, 1,
3320 HV_MUX_DATA_CTRL,
3321 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3322 }
3323 }
3324 } else if (icr & ICR_RXSEQ) {
3325 DPRINTF(WM_DEBUG_LINK,
3326 ("%s: LINK Receive sequence error\n",
3327 device_xname(sc->sc_dev)));
3328 }
3329 }
3330
3331 /*
3332 * wm_linkintr_tbi:
3333 *
3334 * Helper; handle link interrupts for TBI mode.
3335 */
3336 static void
3337 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3338 {
3339 uint32_t status;
3340
3341 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3342 __func__));
3343
3344 status = CSR_READ(sc, WMREG_STATUS);
3345 if (icr & ICR_LSC) {
3346 if (status & STATUS_LU) {
3347 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3348 device_xname(sc->sc_dev),
3349 (status & STATUS_FD) ? "FDX" : "HDX"));
3350 /*
3351 * NOTE: CTRL will update TFCE and RFCE automatically,
3352 * so we should update sc->sc_ctrl
3353 */
3354
3355 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3356 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3357 sc->sc_fcrtl &= ~FCRTL_XONE;
3358 if (status & STATUS_FD)
3359 sc->sc_tctl |=
3360 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3361 else
3362 sc->sc_tctl |=
3363 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3364 if (sc->sc_ctrl & CTRL_TFCE)
3365 sc->sc_fcrtl |= FCRTL_XONE;
3366 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3367 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3368 WMREG_OLD_FCRTL : WMREG_FCRTL,
3369 sc->sc_fcrtl);
3370 sc->sc_tbi_linkup = 1;
3371 } else {
3372 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3373 device_xname(sc->sc_dev)));
3374 sc->sc_tbi_linkup = 0;
3375 }
3376 wm_tbi_set_linkled(sc);
3377 } else if (icr & ICR_RXCFG) {
3378 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3379 device_xname(sc->sc_dev)));
3380 sc->sc_tbi_nrxcfg++;
3381 wm_check_for_link(sc);
3382 } else if (icr & ICR_RXSEQ) {
3383 DPRINTF(WM_DEBUG_LINK,
3384 ("%s: LINK: Receive sequence error\n",
3385 device_xname(sc->sc_dev)));
3386 }
3387 }
3388
3389 /*
3390 * wm_linkintr:
3391 *
3392 * Helper; handle link interrupts.
3393 */
3394 static void
3395 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3396 {
3397
3398 if (sc->sc_flags & WM_F_HAS_MII)
3399 wm_linkintr_gmii(sc, icr);
3400 else
3401 wm_linkintr_tbi(sc, icr);
3402 }
3403
3404 /*
3405 * wm_tick:
3406 *
3407 * One second timer, used to check link status, sweep up
3408 * completed transmit jobs, etc.
3409 */
3410 static void
3411 wm_tick(void *arg)
3412 {
3413 struct wm_softc *sc = arg;
3414 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3415 int s;
3416
3417 s = splnet();
3418
3419 if (sc->sc_type >= WM_T_82542_2_1) {
3420 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3421 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3422 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3423 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3424 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3425 }
3426
3427 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3428 ifp->if_ierrors += 0ULL + /* ensure quad_t */
3429 + CSR_READ(sc, WMREG_CRCERRS)
3430 + CSR_READ(sc, WMREG_ALGNERRC)
3431 + CSR_READ(sc, WMREG_SYMERRC)
3432 + CSR_READ(sc, WMREG_RXERRC)
3433 + CSR_READ(sc, WMREG_SEC)
3434 + CSR_READ(sc, WMREG_CEXTERR)
3435 + CSR_READ(sc, WMREG_RLEC);
3436 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3437
3438 if (sc->sc_flags & WM_F_HAS_MII)
3439 mii_tick(&sc->sc_mii);
3440 else
3441 wm_tbi_check_link(sc);
3442
3443 splx(s);
3444
3445 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3446 }
3447
3448 /*
3449 * wm_reset:
3450 *
3451 * Reset the i82542 chip.
3452 */
3453 static void
3454 wm_reset(struct wm_softc *sc)
3455 {
3456 int phy_reset = 0;
3457 uint32_t reg, mask;
3458 int i;
3459
3460 /*
3461 * Allocate on-chip memory according to the MTU size.
3462 * The Packet Buffer Allocation register must be written
3463 * before the chip is reset.
3464 */
3465 switch (sc->sc_type) {
3466 case WM_T_82547:
3467 case WM_T_82547_2:
3468 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3469 PBA_22K : PBA_30K;
3470 sc->sc_txfifo_head = 0;
3471 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3472 sc->sc_txfifo_size =
3473 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3474 sc->sc_txfifo_stall = 0;
3475 break;
3476 case WM_T_82571:
3477 case WM_T_82572:
3478 case WM_T_82575: /* XXX need special handing for jumbo frames */
3479 case WM_T_I350:
3480 case WM_T_80003:
3481 sc->sc_pba = PBA_32K;
3482 break;
3483 case WM_T_82580:
3484 case WM_T_82580ER:
3485 sc->sc_pba = PBA_35K;
3486 break;
3487 case WM_T_82576:
3488 sc->sc_pba = PBA_64K;
3489 break;
3490 case WM_T_82573:
3491 sc->sc_pba = PBA_12K;
3492 break;
3493 case WM_T_82574:
3494 case WM_T_82583:
3495 sc->sc_pba = PBA_20K;
3496 break;
3497 case WM_T_ICH8:
3498 sc->sc_pba = PBA_8K;
3499 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3500 break;
3501 case WM_T_ICH9:
3502 case WM_T_ICH10:
3503 sc->sc_pba = PBA_10K;
3504 break;
3505 case WM_T_PCH:
3506 case WM_T_PCH2:
3507 sc->sc_pba = PBA_26K;
3508 break;
3509 default:
3510 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3511 PBA_40K : PBA_48K;
3512 break;
3513 }
3514 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3515
3516 /* Prevent the PCI-E bus from sticking */
3517 if (sc->sc_flags & WM_F_PCIE) {
3518 int timeout = 800;
3519
3520 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3521 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3522
3523 while (timeout--) {
3524 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3525 break;
3526 delay(100);
3527 }
3528 }
3529
3530 /* Set the completion timeout for interface */
3531 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3532 || (sc->sc_type == WM_T_I350))
3533 wm_set_pcie_completion_timeout(sc);
3534
3535 /* Clear interrupt */
3536 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3537
3538 /* Stop the transmit and receive processes. */
3539 CSR_WRITE(sc, WMREG_RCTL, 0);
3540 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3541 sc->sc_rctl &= ~RCTL_EN;
3542
3543 /* XXX set_tbi_sbp_82543() */
3544
3545 delay(10*1000);
3546
3547 /* Must acquire the MDIO ownership before MAC reset */
3548 switch (sc->sc_type) {
3549 case WM_T_82573:
3550 case WM_T_82574:
3551 case WM_T_82583:
3552 i = 0;
3553 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3554 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
3555 do {
3556 CSR_WRITE(sc, WMREG_EXTCNFCTR,
3557 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3558 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3559 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3560 break;
3561 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3562 delay(2*1000);
3563 i++;
3564 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3565 break;
3566 default:
3567 break;
3568 }
3569
3570 /*
3571 * 82541 Errata 29? & 82547 Errata 28?
3572 * See also the description about PHY_RST bit in CTRL register
3573 * in 8254x_GBe_SDM.pdf.
3574 */
3575 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3576 CSR_WRITE(sc, WMREG_CTRL,
3577 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3578 delay(5000);
3579 }
3580
3581 switch (sc->sc_type) {
3582 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3583 case WM_T_82541:
3584 case WM_T_82541_2:
3585 case WM_T_82547:
3586 case WM_T_82547_2:
3587 /*
3588 * On some chipsets, a reset through a memory-mapped write
3589 * cycle can cause the chip to reset before completing the
3590 * write cycle. This causes major headache that can be
3591 * avoided by issuing the reset via indirect register writes
3592 * through I/O space.
3593 *
3594 * So, if we successfully mapped the I/O BAR at attach time,
3595 * use that. Otherwise, try our luck with a memory-mapped
3596 * reset.
3597 */
3598 if (sc->sc_flags & WM_F_IOH_VALID)
3599 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3600 else
3601 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3602 break;
3603 case WM_T_82545_3:
3604 case WM_T_82546_3:
3605 /* Use the shadow control register on these chips. */
3606 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3607 break;
3608 case WM_T_80003:
3609 mask = swfwphysem[sc->sc_funcid];
3610 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3611 wm_get_swfw_semaphore(sc, mask);
3612 CSR_WRITE(sc, WMREG_CTRL, reg);
3613 wm_put_swfw_semaphore(sc, mask);
3614 break;
3615 case WM_T_ICH8:
3616 case WM_T_ICH9:
3617 case WM_T_ICH10:
3618 case WM_T_PCH:
3619 case WM_T_PCH2:
3620 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3621 if (wm_check_reset_block(sc) == 0) {
3622 /*
3623 * Gate automatic PHY configuration by hardware on
3624 * manaed 82579
3625 */
3626 if ((sc->sc_type == WM_T_PCH2)
3627 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3628 != 0))
3629 wm_gate_hw_phy_config_ich8lan(sc, 1);
3630
3631
3632 reg |= CTRL_PHY_RESET;
3633 phy_reset = 1;
3634 }
3635 wm_get_swfwhw_semaphore(sc);
3636 CSR_WRITE(sc, WMREG_CTRL, reg);
3637 delay(20*1000);
3638 wm_put_swfwhw_semaphore(sc);
3639 break;
3640 case WM_T_82542_2_0:
3641 case WM_T_82542_2_1:
3642 case WM_T_82543:
3643 case WM_T_82540:
3644 case WM_T_82545:
3645 case WM_T_82546:
3646 case WM_T_82571:
3647 case WM_T_82572:
3648 case WM_T_82573:
3649 case WM_T_82574:
3650 case WM_T_82575:
3651 case WM_T_82576:
3652 case WM_T_82580:
3653 case WM_T_82580ER:
3654 case WM_T_82583:
3655 case WM_T_I350:
3656 default:
3657 /* Everything else can safely use the documented method. */
3658 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3659 break;
3660 }
3661
3662 if (phy_reset != 0)
3663 wm_get_cfg_done(sc);
3664
3665 /* reload EEPROM */
3666 switch (sc->sc_type) {
3667 case WM_T_82542_2_0:
3668 case WM_T_82542_2_1:
3669 case WM_T_82543:
3670 case WM_T_82544:
3671 delay(10);
3672 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3673 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3674 delay(2000);
3675 break;
3676 case WM_T_82540:
3677 case WM_T_82545:
3678 case WM_T_82545_3:
3679 case WM_T_82546:
3680 case WM_T_82546_3:
3681 delay(5*1000);
3682 /* XXX Disable HW ARPs on ASF enabled adapters */
3683 break;
3684 case WM_T_82541:
3685 case WM_T_82541_2:
3686 case WM_T_82547:
3687 case WM_T_82547_2:
3688 delay(20000);
3689 /* XXX Disable HW ARPs on ASF enabled adapters */
3690 break;
3691 case WM_T_82571:
3692 case WM_T_82572:
3693 case WM_T_82573:
3694 case WM_T_82574:
3695 case WM_T_82583:
3696 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3697 delay(10);
3698 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3699 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3700 }
3701 /* check EECD_EE_AUTORD */
3702 wm_get_auto_rd_done(sc);
3703 /*
3704 * Phy configuration from NVM just starts after EECD_AUTO_RD
3705 * is set.
3706 */
3707 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3708 || (sc->sc_type == WM_T_82583))
3709 delay(25*1000);
3710 break;
3711 case WM_T_82575:
3712 case WM_T_82576:
3713 case WM_T_82580:
3714 case WM_T_82580ER:
3715 case WM_T_I350:
3716 case WM_T_80003:
3717 case WM_T_ICH8:
3718 case WM_T_ICH9:
3719 /* check EECD_EE_AUTORD */
3720 wm_get_auto_rd_done(sc);
3721 break;
3722 case WM_T_ICH10:
3723 case WM_T_PCH:
3724 case WM_T_PCH2:
3725 wm_lan_init_done(sc);
3726 break;
3727 default:
3728 panic("%s: unknown type\n", __func__);
3729 }
3730
3731 /* Check whether EEPROM is present or not */
3732 switch (sc->sc_type) {
3733 case WM_T_82575:
3734 case WM_T_82576:
3735 #if 0 /* XXX */
3736 case WM_T_82580:
3737 case WM_T_82580ER:
3738 #endif
3739 case WM_T_I350:
3740 case WM_T_ICH8:
3741 case WM_T_ICH9:
3742 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3743 /* Not found */
3744 sc->sc_flags |= WM_F_EEPROM_INVALID;
3745 if ((sc->sc_type == WM_T_82575)
3746 || (sc->sc_type == WM_T_82576)
3747 || (sc->sc_type == WM_T_82580)
3748 || (sc->sc_type == WM_T_82580ER)
3749 || (sc->sc_type == WM_T_I350))
3750 wm_reset_init_script_82575(sc);
3751 }
3752 break;
3753 default:
3754 break;
3755 }
3756
3757 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
3758 || (sc->sc_type == WM_T_I350)) {
3759 /* clear global device reset status bit */
3760 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3761 }
3762
3763 /* Clear any pending interrupt events. */
3764 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3765 reg = CSR_READ(sc, WMREG_ICR);
3766
3767 /* reload sc_ctrl */
3768 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3769
3770 if (sc->sc_type == WM_T_I350)
3771 wm_set_eee_i350(sc);
3772
3773 /* dummy read from WUC */
3774 if (sc->sc_type == WM_T_PCH)
3775 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3776 /*
3777 * For PCH, this write will make sure that any noise will be detected
3778 * as a CRC error and be dropped rather than show up as a bad packet
3779 * to the DMA engine
3780 */
3781 if (sc->sc_type == WM_T_PCH)
3782 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3783
3784 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3785 CSR_WRITE(sc, WMREG_WUC, 0);
3786
3787 /* XXX need special handling for 82580 */
3788 }
3789
3790 static void
3791 wm_set_vlan(struct wm_softc *sc)
3792 {
3793 /* Deal with VLAN enables. */
3794 if (VLAN_ATTACHED(&sc->sc_ethercom))
3795 sc->sc_ctrl |= CTRL_VME;
3796 else
3797 sc->sc_ctrl &= ~CTRL_VME;
3798
3799 /* Write the control registers. */
3800 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3801 }
3802
3803 /*
3804 * wm_init: [ifnet interface function]
3805 *
3806 * Initialize the interface. Must be called at splnet().
3807 */
3808 static int
3809 wm_init(struct ifnet *ifp)
3810 {
3811 struct wm_softc *sc = ifp->if_softc;
3812 struct wm_rxsoft *rxs;
3813 int i, j, trynum, error = 0;
3814 uint32_t reg;
3815
3816 /*
3817 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
3818 * There is a small but measurable benefit to avoiding the adjusment
3819 * of the descriptor so that the headers are aligned, for normal mtu,
3820 * on such platforms. One possibility is that the DMA itself is
3821 * slightly more efficient if the front of the entire packet (instead
3822 * of the front of the headers) is aligned.
3823 *
3824 * Note we must always set align_tweak to 0 if we are using
3825 * jumbo frames.
3826 */
3827 #ifdef __NO_STRICT_ALIGNMENT
3828 sc->sc_align_tweak = 0;
3829 #else
3830 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3831 sc->sc_align_tweak = 0;
3832 else
3833 sc->sc_align_tweak = 2;
3834 #endif /* __NO_STRICT_ALIGNMENT */
3835
3836 /* Cancel any pending I/O. */
3837 wm_stop(ifp, 0);
3838
3839 /* update statistics before reset */
3840 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3841 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3842
3843 /* Reset the chip to a known state. */
3844 wm_reset(sc);
3845
3846 switch (sc->sc_type) {
3847 case WM_T_82571:
3848 case WM_T_82572:
3849 case WM_T_82573:
3850 case WM_T_82574:
3851 case WM_T_82583:
3852 case WM_T_80003:
3853 case WM_T_ICH8:
3854 case WM_T_ICH9:
3855 case WM_T_ICH10:
3856 case WM_T_PCH:
3857 case WM_T_PCH2:
3858 if (wm_check_mng_mode(sc) != 0)
3859 wm_get_hw_control(sc);
3860 break;
3861 default:
3862 break;
3863 }
3864
3865 /* Reset the PHY. */
3866 if (sc->sc_flags & WM_F_HAS_MII)
3867 wm_gmii_reset(sc);
3868
3869 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3870 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3871 if ((sc->sc_type == WM_T_PCH) && (sc->sc_type == WM_T_PCH2))
3872 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3873
3874 /* Initialize the transmit descriptor ring. */
3875 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3876 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3877 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3878 sc->sc_txfree = WM_NTXDESC(sc);
3879 sc->sc_txnext = 0;
3880
3881 if (sc->sc_type < WM_T_82543) {
3882 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3883 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3884 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3885 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3886 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3887 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3888 } else {
3889 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3890 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3891 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3892 CSR_WRITE(sc, WMREG_TDH, 0);
3893 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3894 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3895
3896 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3897 /*
3898 * Don't write TDT before TCTL.EN is set.
3899 * See the document.
3900 */
3901 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3902 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3903 | TXDCTL_WTHRESH(0));
3904 else {
3905 CSR_WRITE(sc, WMREG_TDT, 0);
3906 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3907 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3908 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3909 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3910 }
3911 }
3912 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3913 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3914
3915 /* Initialize the transmit job descriptors. */
3916 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3917 sc->sc_txsoft[i].txs_mbuf = NULL;
3918 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3919 sc->sc_txsnext = 0;
3920 sc->sc_txsdirty = 0;
3921
3922 /*
3923 * Initialize the receive descriptor and receive job
3924 * descriptor rings.
3925 */
3926 if (sc->sc_type < WM_T_82543) {
3927 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3928 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3929 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3930 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3931 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3932 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3933
3934 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3935 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3936 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3937 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3938 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3939 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3940 } else {
3941 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3942 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3943 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3944 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3945 CSR_WRITE(sc, WMREG_EITR(0), 450);
3946 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3947 panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
3948 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3949 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3950 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3951 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3952 | RXDCTL_WTHRESH(1));
3953 } else {
3954 CSR_WRITE(sc, WMREG_RDH, 0);
3955 CSR_WRITE(sc, WMREG_RDT, 0);
3956 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3957 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3958 }
3959 }
3960 for (i = 0; i < WM_NRXDESC; i++) {
3961 rxs = &sc->sc_rxsoft[i];
3962 if (rxs->rxs_mbuf == NULL) {
3963 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3964 log(LOG_ERR, "%s: unable to allocate or map rx "
3965 "buffer %d, error = %d\n",
3966 device_xname(sc->sc_dev), i, error);
3967 /*
3968 * XXX Should attempt to run with fewer receive
3969 * XXX buffers instead of just failing.
3970 */
3971 wm_rxdrain(sc);
3972 goto out;
3973 }
3974 } else {
3975 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3976 WM_INIT_RXDESC(sc, i);
3977 /*
3978 * For 82575 and newer device, the RX descriptors
3979 * must be initialized after the setting of RCTL.EN in
3980 * wm_set_filter()
3981 */
3982 }
3983 }
3984 sc->sc_rxptr = 0;
3985 sc->sc_rxdiscard = 0;
3986 WM_RXCHAIN_RESET(sc);
3987
3988 /*
3989 * Clear out the VLAN table -- we don't use it (yet).
3990 */
3991 CSR_WRITE(sc, WMREG_VET, 0);
3992 if (sc->sc_type == WM_T_I350)
3993 trynum = 10; /* Due to hw errata */
3994 else
3995 trynum = 1;
3996 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3997 for (j = 0; j < trynum; j++)
3998 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3999
4000 /*
4001 * Set up flow-control parameters.
4002 *
4003 * XXX Values could probably stand some tuning.
4004 */
4005 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4006 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4007 && (sc->sc_type != WM_T_PCH2)) {
4008 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4009 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4010 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4011 }
4012
4013 sc->sc_fcrtl = FCRTL_DFLT;
4014 if (sc->sc_type < WM_T_82543) {
4015 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4016 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4017 } else {
4018 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4019 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4020 }
4021
4022 if (sc->sc_type == WM_T_80003)
4023 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4024 else
4025 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4026
4027 /* Writes the control register. */
4028 wm_set_vlan(sc);
4029
4030 if (sc->sc_flags & WM_F_HAS_MII) {
4031 int val;
4032
4033 switch (sc->sc_type) {
4034 case WM_T_80003:
4035 case WM_T_ICH8:
4036 case WM_T_ICH9:
4037 case WM_T_ICH10:
4038 case WM_T_PCH:
4039 case WM_T_PCH2:
4040 /*
4041 * Set the mac to wait the maximum time between each
4042 * iteration and increase the max iterations when
4043 * polling the phy; this fixes erroneous timeouts at
4044 * 10Mbps.
4045 */
4046 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4047 0xFFFF);
4048 val = wm_kmrn_readreg(sc,
4049 KUMCTRLSTA_OFFSET_INB_PARAM);
4050 val |= 0x3F;
4051 wm_kmrn_writereg(sc,
4052 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4053 break;
4054 default:
4055 break;
4056 }
4057
4058 if (sc->sc_type == WM_T_80003) {
4059 val = CSR_READ(sc, WMREG_CTRL_EXT);
4060 val &= ~CTRL_EXT_LINK_MODE_MASK;
4061 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4062
4063 /* Bypass RX and TX FIFO's */
4064 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4065 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4066 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4067 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4068 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4069 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4070 }
4071 }
4072 #if 0
4073 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4074 #endif
4075
4076 /*
4077 * Set up checksum offload parameters.
4078 */
4079 reg = CSR_READ(sc, WMREG_RXCSUM);
4080 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4081 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4082 reg |= RXCSUM_IPOFL;
4083 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4084 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4085 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4086 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4087 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4088
4089 /* Reset TBI's RXCFG count */
4090 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4091
4092 /*
4093 * Set up the interrupt registers.
4094 */
4095 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4096 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4097 ICR_RXO | ICR_RXT0;
4098 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4099 sc->sc_icr |= ICR_RXCFG;
4100 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4101
4102 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4103 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4104 || (sc->sc_type == WM_T_PCH2)) {
4105 reg = CSR_READ(sc, WMREG_KABGTXD);
4106 reg |= KABGTXD_BGSQLBIAS;
4107 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4108 }
4109
4110 /* Set up the inter-packet gap. */
4111 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4112
4113 if (sc->sc_type >= WM_T_82543) {
4114 /*
4115 * Set up the interrupt throttling register (units of 256ns)
4116 * Note that a footnote in Intel's documentation says this
4117 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4118 * or 10Mbit mode. Empirically, it appears to be the case
4119 * that that is also true for the 1024ns units of the other
4120 * interrupt-related timer registers -- so, really, we ought
4121 * to divide this value by 4 when the link speed is low.
4122 *
4123 * XXX implement this division at link speed change!
4124 */
4125
4126 /*
4127 * For N interrupts/sec, set this value to:
4128 * 1000000000 / (N * 256). Note that we set the
4129 * absolute and packet timer values to this value
4130 * divided by 4 to get "simple timer" behavior.
4131 */
4132
4133 sc->sc_itr = 1500; /* 2604 ints/sec */
4134 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4135 }
4136
4137 /* Set the VLAN ethernetype. */
4138 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4139
4140 /*
4141 * Set up the transmit control register; we start out with
4142 * a collision distance suitable for FDX, but update it whe
4143 * we resolve the media type.
4144 */
4145 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4146 | TCTL_CT(TX_COLLISION_THRESHOLD)
4147 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4148 if (sc->sc_type >= WM_T_82571)
4149 sc->sc_tctl |= TCTL_MULR;
4150 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4151
4152 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4153 /*
4154 * Write TDT after TCTL.EN is set.
4155 * See the document.
4156 */
4157 CSR_WRITE(sc, WMREG_TDT, 0);
4158 }
4159
4160 if (sc->sc_type == WM_T_80003) {
4161 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4162 reg &= ~TCTL_EXT_GCEX_MASK;
4163 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4164 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4165 }
4166
4167 /* Set the media. */
4168 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4169 goto out;
4170
4171 /* Configure for OS presence */
4172 wm_init_manageability(sc);
4173
4174 /*
4175 * Set up the receive control register; we actually program
4176 * the register when we set the receive filter. Use multicast
4177 * address offset type 0.
4178 *
4179 * Only the i82544 has the ability to strip the incoming
4180 * CRC, so we don't enable that feature.
4181 */
4182 sc->sc_mchash_type = 0;
4183 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4184 | RCTL_MO(sc->sc_mchash_type);
4185
4186 /*
4187 * The I350 has a bug where it always strips the CRC whether
4188 * asked to or not. So ask for stripped CRC here and cope in rxeof
4189 */
4190 if (sc->sc_type == WM_T_I350)
4191 sc->sc_rctl |= RCTL_SECRC;
4192
4193 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4194 && (ifp->if_mtu > ETHERMTU)) {
4195 sc->sc_rctl |= RCTL_LPE;
4196 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4197 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4198 }
4199
4200 if (MCLBYTES == 2048) {
4201 sc->sc_rctl |= RCTL_2k;
4202 } else {
4203 if (sc->sc_type >= WM_T_82543) {
4204 switch (MCLBYTES) {
4205 case 4096:
4206 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4207 break;
4208 case 8192:
4209 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4210 break;
4211 case 16384:
4212 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4213 break;
4214 default:
4215 panic("wm_init: MCLBYTES %d unsupported",
4216 MCLBYTES);
4217 break;
4218 }
4219 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4220 }
4221
4222 /* Set the receive filter. */
4223 wm_set_filter(sc);
4224
4225 /* On 575 and later set RDT only if RX enabled */
4226 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4227 for (i = 0; i < WM_NRXDESC; i++)
4228 WM_INIT_RXDESC(sc, i);
4229
4230 /* Start the one second link check clock. */
4231 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4232
4233 /* ...all done! */
4234 ifp->if_flags |= IFF_RUNNING;
4235 ifp->if_flags &= ~IFF_OACTIVE;
4236
4237 out:
4238 sc->sc_if_flags = ifp->if_flags;
4239 if (error)
4240 log(LOG_ERR, "%s: interface not running\n",
4241 device_xname(sc->sc_dev));
4242 return error;
4243 }
4244
4245 /*
4246 * wm_rxdrain:
4247 *
4248 * Drain the receive queue.
4249 */
4250 static void
4251 wm_rxdrain(struct wm_softc *sc)
4252 {
4253 struct wm_rxsoft *rxs;
4254 int i;
4255
4256 for (i = 0; i < WM_NRXDESC; i++) {
4257 rxs = &sc->sc_rxsoft[i];
4258 if (rxs->rxs_mbuf != NULL) {
4259 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4260 m_freem(rxs->rxs_mbuf);
4261 rxs->rxs_mbuf = NULL;
4262 }
4263 }
4264 }
4265
/*
 * wm_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 *
 *	If "disable" is non-zero the interface is being brought fully
 *	down, so the receive buffers are freed as well.
 */
static void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Stop the 82547 Tx FIFO stall check timer. */
	if (sc->sc_type == WM_T_82547)
		callout_stop(&sc->sc_txfifo_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	} else {
#if 0
		/* Should we clear PHY's status properly? */
		wm_reset(sc);
#endif
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);
	/* Keep the cached RCTL in sync with the now-disabled receiver. */
	sc->sc_rctl &= ~RCTL_EN;

	/*
	 * Clear the interrupt mask to ensure the device cannot assert its
	 * interrupt line.
	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
	 * any currently pending or shared interrupt.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = 0;

	/* Release any queued transmit buffers. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Free receive buffers only on a full disable. */
	if (disable)
		wm_rxdrain(sc);

#if 0 /* notyet */
	if (sc->sc_type >= WM_T_82544)
		CSR_WRITE(sc, WMREG_WUC, 0);
#endif
}
4331
4332 void
4333 wm_get_auto_rd_done(struct wm_softc *sc)
4334 {
4335 int i;
4336
4337 /* wait for eeprom to reload */
4338 switch (sc->sc_type) {
4339 case WM_T_82571:
4340 case WM_T_82572:
4341 case WM_T_82573:
4342 case WM_T_82574:
4343 case WM_T_82583:
4344 case WM_T_82575:
4345 case WM_T_82576:
4346 case WM_T_82580:
4347 case WM_T_82580ER:
4348 case WM_T_I350:
4349 case WM_T_80003:
4350 case WM_T_ICH8:
4351 case WM_T_ICH9:
4352 for (i = 0; i < 10; i++) {
4353 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4354 break;
4355 delay(1000);
4356 }
4357 if (i == 10) {
4358 log(LOG_ERR, "%s: auto read from eeprom failed to "
4359 "complete\n", device_xname(sc->sc_dev));
4360 }
4361 break;
4362 default:
4363 break;
4364 }
4365 }
4366
4367 void
4368 wm_lan_init_done(struct wm_softc *sc)
4369 {
4370 uint32_t reg = 0;
4371 int i;
4372
4373 /* wait for eeprom to reload */
4374 switch (sc->sc_type) {
4375 case WM_T_ICH10:
4376 case WM_T_PCH:
4377 case WM_T_PCH2:
4378 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4379 reg = CSR_READ(sc, WMREG_STATUS);
4380 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4381 break;
4382 delay(100);
4383 }
4384 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4385 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4386 "complete\n", device_xname(sc->sc_dev), __func__);
4387 }
4388 break;
4389 default:
4390 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4391 __func__);
4392 break;
4393 }
4394
4395 reg &= ~STATUS_LAN_INIT_DONE;
4396 CSR_WRITE(sc, WMREG_STATUS, reg);
4397 }
4398
/*
 * wm_get_cfg_done:
 *
 *	Wait for the hardware/firmware to finish loading its
 *	configuration after a reset.  The wait strategy depends on the
 *	chip generation: nothing, a fixed delay, or polling EEMNGCTL.
 *	Panics on unknown chip types.
 */
void
wm_get_cfg_done(struct wm_softc *sc)
{
	int mask;
	uint32_t reg;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* generic */
		delay(10*1000);
		break;
	case WM_T_80003:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
		/* Poll the CFGDONE bit for this LAN function. */
		if (sc->sc_type == WM_T_82571) {
			/* Only 82571 shares port 0 */
			mask = EEMNGCTL_CFGDONE_0;
		} else
			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
				break;
			delay(1000);
		}
		if (i >= WM_PHY_CFG_TIMEOUT) {
			/* Timed out; debug-log only, caller proceeds anyway. */
			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
				device_xname(sc->sc_dev), __func__));
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		/* On PCH parts, acknowledge a pending PHY reset assertion. */
		if (sc->sc_type >= WM_T_PCH) {
			reg = CSR_READ(sc, WMREG_STATUS);
			if ((reg & STATUS_PHYRA) != 0)
				CSR_WRITE(sc, WMREG_STATUS,
				    reg & ~STATUS_PHYRA);
		}
		delay(10*1000);
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}
}
4471
4472 /*
4473 * wm_acquire_eeprom:
4474 *
4475 * Perform the EEPROM handshake required on some chips.
4476 */
4477 static int
4478 wm_acquire_eeprom(struct wm_softc *sc)
4479 {
4480 uint32_t reg;
4481 int x;
4482 int ret = 0;
4483
4484 /* always success */
4485 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4486 return 0;
4487
4488 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4489 ret = wm_get_swfwhw_semaphore(sc);
4490 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4491 /* this will also do wm_get_swsm_semaphore() if needed */
4492 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4493 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4494 ret = wm_get_swsm_semaphore(sc);
4495 }
4496
4497 if (ret) {
4498 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4499 __func__);
4500 return 1;
4501 }
4502
4503 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4504 reg = CSR_READ(sc, WMREG_EECD);
4505
4506 /* Request EEPROM access. */
4507 reg |= EECD_EE_REQ;
4508 CSR_WRITE(sc, WMREG_EECD, reg);
4509
4510 /* ..and wait for it to be granted. */
4511 for (x = 0; x < 1000; x++) {
4512 reg = CSR_READ(sc, WMREG_EECD);
4513 if (reg & EECD_EE_GNT)
4514 break;
4515 delay(5);
4516 }
4517 if ((reg & EECD_EE_GNT) == 0) {
4518 aprint_error_dev(sc->sc_dev,
4519 "could not acquire EEPROM GNT\n");
4520 reg &= ~EECD_EE_REQ;
4521 CSR_WRITE(sc, WMREG_EECD, reg);
4522 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4523 wm_put_swfwhw_semaphore(sc);
4524 if (sc->sc_flags & WM_F_SWFW_SYNC)
4525 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4526 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4527 wm_put_swsm_semaphore(sc);
4528 return 1;
4529 }
4530 }
4531
4532 return 0;
4533 }
4534
4535 /*
4536 * wm_release_eeprom:
4537 *
4538 * Release the EEPROM mutex.
4539 */
4540 static void
4541 wm_release_eeprom(struct wm_softc *sc)
4542 {
4543 uint32_t reg;
4544
4545 /* always success */
4546 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4547 return;
4548
4549 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4550 reg = CSR_READ(sc, WMREG_EECD);
4551 reg &= ~EECD_EE_REQ;
4552 CSR_WRITE(sc, WMREG_EECD, reg);
4553 }
4554
4555 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4556 wm_put_swfwhw_semaphore(sc);
4557 if (sc->sc_flags & WM_F_SWFW_SYNC)
4558 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4559 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4560 wm_put_swsm_semaphore(sc);
4561 }
4562
4563 /*
4564 * wm_eeprom_sendbits:
4565 *
4566 * Send a series of bits to the EEPROM.
4567 */
4568 static void
4569 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4570 {
4571 uint32_t reg;
4572 int x;
4573
4574 reg = CSR_READ(sc, WMREG_EECD);
4575
4576 for (x = nbits; x > 0; x--) {
4577 if (bits & (1U << (x - 1)))
4578 reg |= EECD_DI;
4579 else
4580 reg &= ~EECD_DI;
4581 CSR_WRITE(sc, WMREG_EECD, reg);
4582 delay(2);
4583 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4584 delay(2);
4585 CSR_WRITE(sc, WMREG_EECD, reg);
4586 delay(2);
4587 }
4588 }
4589
4590 /*
4591 * wm_eeprom_recvbits:
4592 *
4593 * Receive a series of bits from the EEPROM.
4594 */
4595 static void
4596 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4597 {
4598 uint32_t reg, val;
4599 int x;
4600
4601 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4602
4603 val = 0;
4604 for (x = nbits; x > 0; x--) {
4605 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4606 delay(2);
4607 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4608 val |= (1U << (x - 1));
4609 CSR_WRITE(sc, WMREG_EECD, reg);
4610 delay(2);
4611 }
4612 *valp = val;
4613 }
4614
/*
 * wm_read_eeprom_uwire:
 *
 *	Read a word from the EEPROM using the MicroWire protocol.
 *
 *	Returns 0 (this path has no failure mode; the data is whatever
 *	the device clocked out).
 */
static int
wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/*
		 * XXX: workaround for a bug in qemu-0.12.x and prior
		 * and Xen.
		 *
		 * We use this workaround only for 82540 because qemu's
		 * e1000 act as 82540.
		 */
		if (sc->sc_type == WM_T_82540) {
			/* Issue one extra SK pulse before chip select. */
			reg |= EECD_SK;
			CSR_WRITE(sc, WMREG_EECD, reg);
			reg &= ~EECD_SK;
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}
		/* XXX: end of workaround */

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address. */
		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}

	return 0;
}
4670
4671 /*
4672 * wm_spi_eeprom_ready:
4673 *
4674 * Wait for a SPI EEPROM to be ready for commands.
4675 */
4676 static int
4677 wm_spi_eeprom_ready(struct wm_softc *sc)
4678 {
4679 uint32_t val;
4680 int usec;
4681
4682 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4683 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4684 wm_eeprom_recvbits(sc, &val, 8);
4685 if ((val & SPI_SR_RDY) == 0)
4686 break;
4687 }
4688 if (usec >= SPI_MAX_RETRIES) {
4689 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4690 return 1;
4691 }
4692 return 0;
4693 }
4694
/*
 * wm_read_eeprom_spi:
 *
 *	Read a word from the EEPROM using the SPI protocol.
 *
 *	Returns 0 on success, 1 if the device never became ready.
 */
static int
wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;
	uint8_t opc;

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	if (wm_spi_eeprom_ready(sc))
		return 1;

	/* Toggle CS to flush commands. */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	/*
	 * On 8-bit-address parts the high address bit is carried in the
	 * opcode (A8).
	 */
	opc = SPI_OPC_READ;
	if (sc->sc_ee_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	wm_eeprom_sendbits(sc, opc, 8);
	/* word << 1: presumably byte addressing on the wire — confirm. */
	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);

	for (i = 0; i < wordcnt; i++) {
		/* Swap the two bytes of each received 16-bit word. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	return 0;
}
4740
#define EEPROM_CHECKSUM		0xBABA	/* expected sum of the first 64 words */
#define EEPROM_SIZE		0x0040	/* number of words covered by checksum */
4743
4744 /*
4745 * wm_validate_eeprom_checksum
4746 *
4747 * The checksum is defined as the sum of the first 64 (16 bit) words.
4748 */
4749 static int
4750 wm_validate_eeprom_checksum(struct wm_softc *sc)
4751 {
4752 uint16_t checksum;
4753 uint16_t eeprom_data;
4754 int i;
4755
4756 checksum = 0;
4757
4758 for (i = 0; i < EEPROM_SIZE; i++) {
4759 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4760 return 1;
4761 checksum += eeprom_data;
4762 }
4763
4764 if (checksum != (uint16_t) EEPROM_CHECKSUM)
4765 return 1;
4766
4767 return 0;
4768 }
4769
4770 /*
4771 * wm_read_eeprom:
4772 *
4773 * Read data from the serial EEPROM.
4774 */
4775 static int
4776 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4777 {
4778 int rv;
4779
4780 if (sc->sc_flags & WM_F_EEPROM_INVALID)
4781 return 1;
4782
4783 if (wm_acquire_eeprom(sc))
4784 return 1;
4785
4786 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4787 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4788 || (sc->sc_type == WM_T_PCH2))
4789 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4790 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4791 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4792 else if (sc->sc_flags & WM_F_EEPROM_SPI)
4793 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4794 else
4795 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4796
4797 wm_release_eeprom(sc);
4798 return rv;
4799 }
4800
4801 static int
4802 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4803 uint16_t *data)
4804 {
4805 int i, eerd = 0;
4806 int error = 0;
4807
4808 for (i = 0; i < wordcnt; i++) {
4809 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4810
4811 CSR_WRITE(sc, WMREG_EERD, eerd);
4812 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4813 if (error != 0)
4814 break;
4815
4816 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4817 }
4818
4819 return error;
4820 }
4821
4822 static int
4823 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4824 {
4825 uint32_t attempts = 100000;
4826 uint32_t i, reg = 0;
4827 int32_t done = -1;
4828
4829 for (i = 0; i < attempts; i++) {
4830 reg = CSR_READ(sc, rw);
4831
4832 if (reg & EERD_DONE) {
4833 done = 0;
4834 break;
4835 }
4836 delay(5);
4837 }
4838
4839 return done;
4840 }
4841
4842 static int
4843 wm_check_alt_mac_addr(struct wm_softc *sc)
4844 {
4845 uint16_t myea[ETHER_ADDR_LEN / 2];
4846 uint16_t offset = EEPROM_OFF_MACADDR;
4847
4848 /* Try to read alternative MAC address pointer */
4849 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
4850 return -1;
4851
4852 /* Check pointer */
4853 if (offset == 0xffff)
4854 return -1;
4855
4856 /*
4857 * Check whether alternative MAC address is valid or not.
4858 * Some cards have non 0xffff pointer but those don't use
4859 * alternative MAC address in reality.
4860 *
4861 * Check whether the broadcast bit is set or not.
4862 */
4863 if (wm_read_eeprom(sc, offset, 1, myea) == 0)
4864 if (((myea[0] & 0xff) & 0x01) == 0)
4865 return 0; /* found! */
4866
4867 /* not found */
4868 return -1;
4869 }
4870
/*
 * wm_read_mac_addr:
 *
 *	Read the station's Ethernet address out of the EEPROM into
 *	"enaddr" (6 bytes), accounting for the per-LAN-function layout
 *	used by multi-port chips.
 *
 *	Returns 0 on success, -1 on failure (with an error message).
 */
static int
wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
{
	uint16_t myea[ETHER_ADDR_LEN / 2];
	uint16_t offset = EEPROM_OFF_MACADDR;
	int do_invert = 0;

	switch (sc->sc_type) {
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
		/* These parts store a full address per LAN function. */
		switch (sc->sc_funcid) {
		case 0:
			/* default value (== EEPROM_OFF_MACADDR) */
			break;
		case 1:
			offset = EEPROM_OFF_LAN1;
			break;
		case 2:
			offset = EEPROM_OFF_LAN2;
			break;
		case 3:
			offset = EEPROM_OFF_LAN3;
			break;
		default:
			goto bad;
			/* NOTREACHED */
			break;
		}
		break;
	case WM_T_82571:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_80003:
		if (wm_check_alt_mac_addr(sc) != 0) {
			/* reset the offset to LAN0 */
			offset = EEPROM_OFF_MACADDR;
			/* Odd functions toggle the address LSB below. */
			if ((sc->sc_funcid & 0x01) == 1)
				do_invert = 1;
			goto do_read;
		}
		/*
		 * NOTE(review): "offset" still holds EEPROM_OFF_MACADDR
		 * here; it is never loaded with the pointer value that
		 * wm_check_alt_mac_addr() read from
		 * EEPROM_ALT_MAC_ADDR_PTR.  Confirm against the EEPROM
		 * layout whether the per-function deltas below are meant
		 * to be relative to that pointer instead.
		 */
		switch (sc->sc_funcid) {
		case 0:
			/*
			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
			 * itself.
			 */
			break;
		case 1:
			offset += EEPROM_OFF_MACADDR_LAN1;
			break;
		case 2:
			offset += EEPROM_OFF_MACADDR_LAN2;
			break;
		case 3:
			offset += EEPROM_OFF_MACADDR_LAN3;
			break;
		default:
			goto bad;
			/* NOTREACHED */
			break;
		}
		break;
	default:
		/* Single-address parts: odd functions toggle the LSB. */
		if ((sc->sc_funcid & 0x01) == 1)
			do_invert = 1;
		break;
	}

 do_read:
	if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
		myea) != 0) {
		goto bad;
	}

	/* EEPROM words hold the address low byte first. */
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of some dual port cards.
	 */
	if (do_invert != 0)
		enaddr[5] ^= 1;

	return 0;

 bad:
	aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");

	return -1;
}
4967
/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *
 *	Returns 0 on success, ENOBUFS if an mbuf or cluster could not
 *	be allocated.  Panics if the DMA map cannot be loaded.
 */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Allocate a packet-header mbuf plus a cluster for the data. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/* Unload any buffer previously mapped to this slot. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* The whole cluster is available for received data. */
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX XXX XXX */
		aprint_error_dev(sc->sc_dev,
		    "unable to load rx DMA map %d, error = %d\n",
		    idx, error);
		panic("wm_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * On NEWQUEUE (82575 and later) parts, only initialize the
	 * descriptor while the receiver is enabled (see the matching
	 * "set RDT only if RX enabled" handling in wm_init).
	 */
	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		if ((sc->sc_rctl & RCTL_EN) != 0)
			WM_INIT_RXDESC(sc, idx);
	} else
		WM_INIT_RXDESC(sc, idx);

	return 0;
}
5017
5018 /*
5019 * wm_set_ral:
5020 *
5021 * Set an entery in the receive address list.
5022 */
5023 static void
5024 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5025 {
5026 uint32_t ral_lo, ral_hi;
5027
5028 if (enaddr != NULL) {
5029 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5030 (enaddr[3] << 24);
5031 ral_hi = enaddr[4] | (enaddr[5] << 8);
5032 ral_hi |= RAL_AV;
5033 } else {
5034 ral_lo = 0;
5035 ral_hi = 0;
5036 }
5037
5038 if (sc->sc_type >= WM_T_82544) {
5039 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5040 ral_lo);
5041 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5042 ral_hi);
5043 } else {
5044 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5045 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5046 }
5047 }
5048
5049 /*
5050 * wm_mchash:
5051 *
5052 * Compute the hash of the multicast address for the 4096-bit
5053 * multicast filter.
5054 */
5055 static uint32_t
5056 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5057 {
5058 static const int lo_shift[4] = { 4, 3, 2, 0 };
5059 static const int hi_shift[4] = { 4, 5, 6, 8 };
5060 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5061 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5062 uint32_t hash;
5063
5064 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5065 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5066 || (sc->sc_type == WM_T_PCH2)) {
5067 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5068 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5069 return (hash & 0x3ff);
5070 }
5071 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5072 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5073
5074 return (hash & 0xfff);
5075 }
5076
/*
 * wm_set_filter:
 *
 *	Set up the receive filter: broadcast/promiscuous flags, the
 *	receive address list (station address in slot 0), and the
 *	multicast hash table.  Falls back to ALLMULTI when a multicast
 *	range is requested or the interface is promiscuous.
 */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i, size;

	/* The multicast table lives at a different offset on 82544+. */
	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2))
		size = WM_ICH8_RAL_TABSIZE;
	else
		size = WM_RAL_TABSIZE;
	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
	for (i = 1; i < size; i++)
		wm_set_ral(sc, NULL, i);

	/* ICH/PCH parts have a smaller multicast table as well. */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2))
		size = WM_ICH8_MC_TABSIZE;
	else
		size = WM_MC_TABSIZE;
	/* Clear out the multicast table. */
	for (i = 0; i < size; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* High bits pick the 32-bit table word, low 5 the bit. */
		reg = (hash >> 5);
		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
		    || (sc->sc_type == WM_T_PCH2))
			reg &= 0x1f;
		else
			reg &= 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/*
		 * XXX Hardware bug??
		 * On 82544, writing certain MTA words appears to require
		 * rewriting the preceding word to take effect.
		 */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
5180
/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X devices: set the
 *	inter-packet gap, register the ifmedia callbacks, configure the
 *	software-definable pins, and announce the supported media.
 */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const char *sep = "";

	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	/* Autonegotiation re-check period, in seconds. */
	sc->sc_tbi_anegticks = 5;

	/* Initialize our media structures */
	sc->sc_mii.mii_ifp = ifp;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

/* Register one media type and print its name to the attach message. */
#define	ADD(ss, mm, dd)							\
do {									\
	aprint_normal("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal_dev(sc->sc_dev, "");
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	aprint_normal("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
}
5234
5235 /*
5236 * wm_tbi_mediastatus: [ifmedia interface function]
5237 *
5238 * Get the current interface media status on a 1000BASE-X device.
5239 */
5240 static void
5241 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5242 {
5243 struct wm_softc *sc = ifp->if_softc;
5244 uint32_t ctrl, status;
5245
5246 ifmr->ifm_status = IFM_AVALID;
5247 ifmr->ifm_active = IFM_ETHER;
5248
5249 status = CSR_READ(sc, WMREG_STATUS);
5250 if ((status & STATUS_LU) == 0) {
5251 ifmr->ifm_active |= IFM_NONE;
5252 return;
5253 }
5254
5255 ifmr->ifm_status |= IFM_ACTIVE;
5256 ifmr->ifm_active |= IFM_1000_SX;
5257 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5258 ifmr->ifm_active |= IFM_FDX;
5259 ctrl = CSR_READ(sc, WMREG_CTRL);
5260 if (ctrl & CTRL_RFCE)
5261 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5262 if (ctrl & CTRL_TFCE)
5263 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5264 }
5265
5266 /*
5267 * wm_tbi_mediachange: [ifmedia interface function]
5268 *
5269 * Set hardware to newly-selected media on a 1000BASE-X device.
5270 */
5271 static int
5272 wm_tbi_mediachange(struct ifnet *ifp)
5273 {
5274 struct wm_softc *sc = ifp->if_softc;
5275 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5276 uint32_t status;
5277 int i;
5278
5279 sc->sc_txcw = 0;
5280 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5281 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5282 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5283 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5284 sc->sc_txcw |= TXCW_ANE;
5285 } else {
5286 /*
5287 * If autonegotiation is turned off, force link up and turn on
5288 * full duplex
5289 */
5290 sc->sc_txcw &= ~TXCW_ANE;
5291 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5292 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5293 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5294 delay(1000);
5295 }
5296
5297 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5298 device_xname(sc->sc_dev),sc->sc_txcw));
5299 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5300 delay(10000);
5301
5302 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5303 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5304
5305 /*
5306 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
5307 * optics detect a signal, 0 if they don't.
5308 */
5309 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5310 /* Have signal; wait for the link to come up. */
5311
5312 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5313 /*
5314 * Reset the link, and let autonegotiation do its thing
5315 */
5316 sc->sc_ctrl |= CTRL_LRST;
5317 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5318 delay(1000);
5319 sc->sc_ctrl &= ~CTRL_LRST;
5320 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5321 delay(1000);
5322 }
5323
5324 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5325 delay(10000);
5326 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5327 break;
5328 }
5329
5330 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5331 device_xname(sc->sc_dev),i));
5332
5333 status = CSR_READ(sc, WMREG_STATUS);
5334 DPRINTF(WM_DEBUG_LINK,
5335 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5336 device_xname(sc->sc_dev),status, STATUS_LU));
5337 if (status & STATUS_LU) {
5338 /* Link is up. */
5339 DPRINTF(WM_DEBUG_LINK,
5340 ("%s: LINK: set media -> link up %s\n",
5341 device_xname(sc->sc_dev),
5342 (status & STATUS_FD) ? "FDX" : "HDX"));
5343
5344 /*
5345 * NOTE: CTRL will update TFCE and RFCE automatically,
5346 * so we should update sc->sc_ctrl
5347 */
5348 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5349 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5350 sc->sc_fcrtl &= ~FCRTL_XONE;
5351 if (status & STATUS_FD)
5352 sc->sc_tctl |=
5353 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5354 else
5355 sc->sc_tctl |=
5356 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5357 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5358 sc->sc_fcrtl |= FCRTL_XONE;
5359 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5360 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5361 WMREG_OLD_FCRTL : WMREG_FCRTL,
5362 sc->sc_fcrtl);
5363 sc->sc_tbi_linkup = 1;
5364 } else {
5365 if (i == WM_LINKUP_TIMEOUT)
5366 wm_check_for_link(sc);
5367 /* Link is down. */
5368 DPRINTF(WM_DEBUG_LINK,
5369 ("%s: LINK: set media -> link down\n",
5370 device_xname(sc->sc_dev)));
5371 sc->sc_tbi_linkup = 0;
5372 }
5373 } else {
5374 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5375 device_xname(sc->sc_dev)));
5376 sc->sc_tbi_linkup = 0;
5377 }
5378
5379 wm_tbi_set_linkled(sc);
5380
5381 return 0;
5382 }
5383
5384 /*
5385 * wm_tbi_set_linkled:
5386 *
5387 * Update the link LED on 1000BASE-X devices.
5388 */
5389 static void
5390 wm_tbi_set_linkled(struct wm_softc *sc)
5391 {
5392
5393 if (sc->sc_tbi_linkup)
5394 sc->sc_ctrl |= CTRL_SWDPIN(0);
5395 else
5396 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5397
5398 /* 82540 or newer devices are active low */
5399 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5400
5401 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5402 }
5403
5404 /*
5405 * wm_tbi_check_link:
5406 *
5407 * Check the link on 1000BASE-X devices.
5408 */
5409 static void
5410 wm_tbi_check_link(struct wm_softc *sc)
5411 {
5412 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5413 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5414 uint32_t rxcw, ctrl, status;
5415
5416 status = CSR_READ(sc, WMREG_STATUS);
5417
5418 rxcw = CSR_READ(sc, WMREG_RXCW);
5419 ctrl = CSR_READ(sc, WMREG_CTRL);
5420
5421 /* set link status */
5422 if ((status & STATUS_LU) == 0) {
5423 DPRINTF(WM_DEBUG_LINK,
5424 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5425 sc->sc_tbi_linkup = 0;
5426 } else if (sc->sc_tbi_linkup == 0) {
5427 DPRINTF(WM_DEBUG_LINK,
5428 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5429 (status & STATUS_FD) ? "FDX" : "HDX"));
5430 sc->sc_tbi_linkup = 1;
5431 }
5432
5433 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5434 && ((status & STATUS_LU) == 0)) {
5435 sc->sc_tbi_linkup = 0;
5436 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5437 /* RXCFG storm! */
5438 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5439 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5440 wm_init(ifp);
5441 wm_start(ifp);
5442 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5443 /* If the timer expired, retry autonegotiation */
5444 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5445 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5446 sc->sc_tbi_ticks = 0;
5447 /*
5448 * Reset the link, and let autonegotiation do
5449 * its thing
5450 */
5451 sc->sc_ctrl |= CTRL_LRST;
5452 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5453 delay(1000);
5454 sc->sc_ctrl &= ~CTRL_LRST;
5455 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5456 delay(1000);
5457 CSR_WRITE(sc, WMREG_TXCW,
5458 sc->sc_txcw & ~TXCW_ANE);
5459 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5460 }
5461 }
5462 }
5463
5464 wm_tbi_set_linkled(sc);
5465 }
5466
5467 /*
5468 * wm_gmii_reset:
5469 *
5470 * Reset the PHY.
5471 */
5472 static void
5473 wm_gmii_reset(struct wm_softc *sc)
5474 {
5475 uint32_t reg;
5476 int rv;
5477
5478 /* get phy semaphore */
5479 switch (sc->sc_type) {
5480 case WM_T_82571:
5481 case WM_T_82572:
5482 case WM_T_82573:
5483 case WM_T_82574:
5484 case WM_T_82583:
5485 /* XXX should get sw semaphore, too */
5486 rv = wm_get_swsm_semaphore(sc);
5487 break;
5488 case WM_T_82575:
5489 case WM_T_82576:
5490 case WM_T_82580:
5491 case WM_T_82580ER:
5492 case WM_T_I350:
5493 case WM_T_80003:
5494 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5495 break;
5496 case WM_T_ICH8:
5497 case WM_T_ICH9:
5498 case WM_T_ICH10:
5499 case WM_T_PCH:
5500 case WM_T_PCH2:
5501 rv = wm_get_swfwhw_semaphore(sc);
5502 break;
5503 default:
5504 /* nothing to do*/
5505 rv = 0;
5506 break;
5507 }
5508 if (rv != 0) {
5509 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5510 __func__);
5511 return;
5512 }
5513
5514 switch (sc->sc_type) {
5515 case WM_T_82542_2_0:
5516 case WM_T_82542_2_1:
5517 /* null */
5518 break;
5519 case WM_T_82543:
5520 /*
5521 * With 82543, we need to force speed and duplex on the MAC
5522 * equal to what the PHY speed and duplex configuration is.
5523 * In addition, we need to perform a hardware reset on the PHY
5524 * to take it out of reset.
5525 */
5526 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5527 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5528
5529 /* The PHY reset pin is active-low. */
5530 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5531 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5532 CTRL_EXT_SWDPIN(4));
5533 reg |= CTRL_EXT_SWDPIO(4);
5534
5535 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5536 delay(10*1000);
5537
5538 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5539 delay(150);
5540 #if 0
5541 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5542 #endif
5543 delay(20*1000); /* XXX extra delay to get PHY ID? */
5544 break;
5545 case WM_T_82544: /* reset 10000us */
5546 case WM_T_82540:
5547 case WM_T_82545:
5548 case WM_T_82545_3:
5549 case WM_T_82546:
5550 case WM_T_82546_3:
5551 case WM_T_82541:
5552 case WM_T_82541_2:
5553 case WM_T_82547:
5554 case WM_T_82547_2:
5555 case WM_T_82571: /* reset 100us */
5556 case WM_T_82572:
5557 case WM_T_82573:
5558 case WM_T_82574:
5559 case WM_T_82575:
5560 case WM_T_82576:
5561 case WM_T_82580:
5562 case WM_T_82580ER:
5563 case WM_T_I350:
5564 case WM_T_82583:
5565 case WM_T_80003:
5566 /* generic reset */
5567 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5568 delay(20000);
5569 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5570 delay(20000);
5571
5572 if ((sc->sc_type == WM_T_82541)
5573 || (sc->sc_type == WM_T_82541_2)
5574 || (sc->sc_type == WM_T_82547)
5575 || (sc->sc_type == WM_T_82547_2)) {
5576 /* workaround for igp are done in igp_reset() */
5577 /* XXX add code to set LED after phy reset */
5578 }
5579 break;
5580 case WM_T_ICH8:
5581 case WM_T_ICH9:
5582 case WM_T_ICH10:
5583 case WM_T_PCH:
5584 case WM_T_PCH2:
5585 /* generic reset */
5586 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5587 delay(100);
5588 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5589 delay(150);
5590 break;
5591 default:
5592 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5593 __func__);
5594 break;
5595 }
5596
5597 /* release PHY semaphore */
5598 switch (sc->sc_type) {
5599 case WM_T_82571:
5600 case WM_T_82572:
5601 case WM_T_82573:
5602 case WM_T_82574:
5603 case WM_T_82583:
5604 /* XXX should put sw semaphore, too */
5605 wm_put_swsm_semaphore(sc);
5606 break;
5607 case WM_T_82575:
5608 case WM_T_82576:
5609 case WM_T_82580:
5610 case WM_T_82580ER:
5611 case WM_T_I350:
5612 case WM_T_80003:
5613 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5614 break;
5615 case WM_T_ICH8:
5616 case WM_T_ICH9:
5617 case WM_T_ICH10:
5618 case WM_T_PCH:
5619 case WM_T_PCH2:
5620 wm_put_swfwhw_semaphore(sc);
5621 break;
5622 default:
5623 /* nothing to do*/
5624 rv = 0;
5625 break;
5626 }
5627
5628 /* get_cfg_done */
5629 wm_get_cfg_done(sc);
5630
5631 /* extra setup */
5632 switch (sc->sc_type) {
5633 case WM_T_82542_2_0:
5634 case WM_T_82542_2_1:
5635 case WM_T_82543:
5636 case WM_T_82544:
5637 case WM_T_82540:
5638 case WM_T_82545:
5639 case WM_T_82545_3:
5640 case WM_T_82546:
5641 case WM_T_82546_3:
5642 case WM_T_82541_2:
5643 case WM_T_82547_2:
5644 case WM_T_82571:
5645 case WM_T_82572:
5646 case WM_T_82573:
5647 case WM_T_82574:
5648 case WM_T_82575:
5649 case WM_T_82576:
5650 case WM_T_82580:
5651 case WM_T_82580ER:
5652 case WM_T_I350:
5653 case WM_T_82583:
5654 case WM_T_80003:
5655 /* null */
5656 break;
5657 case WM_T_82541:
5658 case WM_T_82547:
5659 /* XXX Configure actively LED after PHY reset */
5660 break;
5661 case WM_T_ICH8:
5662 case WM_T_ICH9:
5663 case WM_T_ICH10:
5664 case WM_T_PCH:
5665 case WM_T_PCH2:
5666 /* Allow time for h/w to get to a quiescent state afer reset */
5667 delay(10*1000);
5668
5669 if (sc->sc_type == WM_T_PCH)
5670 wm_hv_phy_workaround_ich8lan(sc);
5671
5672 if (sc->sc_type == WM_T_PCH2)
5673 wm_lv_phy_workaround_ich8lan(sc);
5674
5675 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
5676 /*
5677 * dummy read to clear the phy wakeup bit after lcd
5678 * reset
5679 */
5680 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
5681 }
5682
5683 /*
5684 * XXX Configure the LCD with th extended configuration region
5685 * in NVM
5686 */
5687
5688 /* Configure the LCD with the OEM bits in NVM */
5689 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
5690 /*
5691 * Disable LPLU.
5692 * XXX It seems that 82567 has LPLU, too.
5693 */
5694 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
5695 reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
5696 reg |= HV_OEM_BITS_ANEGNOW;
5697 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
5698 }
5699 break;
5700 default:
5701 panic("%s: unknown type\n", __func__);
5702 break;
5703 }
5704 }
5705
5706 /*
5707 * wm_gmii_mediainit:
5708 *
5709 * Initialize media for use on 1000BASE-T devices.
5710 */
5711 static void
5712 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
5713 {
5714 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5715
5716 /* We have MII. */
5717 sc->sc_flags |= WM_F_HAS_MII;
5718
5719 if (sc->sc_type == WM_T_80003)
5720 sc->sc_tipg = TIPG_1000T_80003_DFLT;
5721 else
5722 sc->sc_tipg = TIPG_1000T_DFLT;
5723
5724 /*
5725 * Let the chip set speed/duplex on its own based on
5726 * signals from the PHY.
5727 * XXXbouyer - I'm not sure this is right for the 80003,
5728 * the em driver only sets CTRL_SLU here - but it seems to work.
5729 */
5730 sc->sc_ctrl |= CTRL_SLU;
5731 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5732
5733 /* Initialize our media structures and probe the GMII. */
5734 sc->sc_mii.mii_ifp = ifp;
5735
5736 switch (prodid) {
5737 case PCI_PRODUCT_INTEL_PCH_M_LM:
5738 case PCI_PRODUCT_INTEL_PCH_M_LC:
5739 /* 82577 */
5740 sc->sc_phytype = WMPHY_82577;
5741 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5742 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5743 break;
5744 case PCI_PRODUCT_INTEL_PCH_D_DM:
5745 case PCI_PRODUCT_INTEL_PCH_D_DC:
5746 /* 82578 */
5747 sc->sc_phytype = WMPHY_82578;
5748 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5749 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5750 break;
5751 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
5752 case PCI_PRODUCT_INTEL_PCH2_LV_V:
5753 /* 82578 */
5754 sc->sc_phytype = WMPHY_82579;
5755 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5756 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5757 break;
5758 case PCI_PRODUCT_INTEL_82801I_BM:
5759 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
5760 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
5761 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
5762 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
5763 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
5764 /* 82567 */
5765 sc->sc_phytype = WMPHY_BM;
5766 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5767 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5768 break;
5769 default:
5770 if ((sc->sc_flags & WM_F_SGMII) != 0) {
5771 sc->sc_mii.mii_readreg = wm_sgmii_readreg;
5772 sc->sc_mii.mii_writereg = wm_sgmii_writereg;
5773 } else if (sc->sc_type >= WM_T_80003) {
5774 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
5775 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
5776 } else if (sc->sc_type >= WM_T_82544) {
5777 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
5778 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
5779 } else {
5780 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
5781 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
5782 }
5783 break;
5784 }
5785 sc->sc_mii.mii_statchg = wm_gmii_statchg;
5786
5787 wm_gmii_reset(sc);
5788
5789 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5790 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
5791 wm_gmii_mediastatus);
5792
5793 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5794 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
5795 || (sc->sc_type == WM_T_I350)) {
5796 if ((sc->sc_flags & WM_F_SGMII) == 0) {
5797 /* Attach only one port */
5798 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
5799 MII_OFFSET_ANY, MIIF_DOPAUSE);
5800 } else {
5801 int i;
5802 uint32_t ctrl_ext;
5803
5804 /* Power on sgmii phy if it is disabled */
5805 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
5806 CSR_WRITE(sc, WMREG_CTRL_EXT,
5807 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
5808 CSR_WRITE_FLUSH(sc);
5809 delay(300*1000); /* XXX too long */
5810
5811 /* from 1 to 8 */
5812 for (i = 1; i < 8; i++)
5813 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
5814 i, MII_OFFSET_ANY, MIIF_DOPAUSE);
5815
5816 /* restore previous sfp cage power state */
5817 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
5818 }
5819 } else {
5820 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5821 MII_OFFSET_ANY, MIIF_DOPAUSE);
5822 }
5823
5824 if ((sc->sc_type == WM_T_PCH2) &&
5825 (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL)) {
5826 wm_set_mdio_slow_mode_hv(sc);
5827 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5828 MII_OFFSET_ANY, MIIF_DOPAUSE);
5829 }
5830
5831 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5832 /* if failed, retry with *_bm_* */
5833 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5834 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5835
5836 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5837 MII_OFFSET_ANY, MIIF_DOPAUSE);
5838 }
5839 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5840 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
5841 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
5842 sc->sc_phytype = WMPHY_NONE;
5843 } else {
5844 /* Check PHY type */
5845 uint32_t model;
5846 struct mii_softc *child;
5847
5848 child = LIST_FIRST(&sc->sc_mii.mii_phys);
5849 if (device_is_a(child->mii_dev, "igphy")) {
5850 struct igphy_softc *isc = (struct igphy_softc *)child;
5851
5852 model = isc->sc_mii.mii_mpd_model;
5853 if (model == MII_MODEL_yyINTEL_I82566)
5854 sc->sc_phytype = WMPHY_IGP_3;
5855 }
5856
5857 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5858 }
5859 }
5860
5861 /*
5862 * wm_gmii_mediastatus: [ifmedia interface function]
5863 *
5864 * Get the current interface media status on a 1000BASE-T device.
5865 */
5866 static void
5867 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5868 {
5869 struct wm_softc *sc = ifp->if_softc;
5870
5871 ether_mediastatus(ifp, ifmr);
5872 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
5873 | sc->sc_flowflags;
5874 }
5875
5876 /*
5877 * wm_gmii_mediachange: [ifmedia interface function]
5878 *
5879 * Set hardware to newly-selected media on a 1000BASE-T device.
5880 */
5881 static int
5882 wm_gmii_mediachange(struct ifnet *ifp)
5883 {
5884 struct wm_softc *sc = ifp->if_softc;
5885 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5886 int rc;
5887
5888 if ((ifp->if_flags & IFF_UP) == 0)
5889 return 0;
5890
5891 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5892 sc->sc_ctrl |= CTRL_SLU;
5893 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5894 || (sc->sc_type > WM_T_82543)) {
5895 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5896 } else {
5897 sc->sc_ctrl &= ~CTRL_ASDE;
5898 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5899 if (ife->ifm_media & IFM_FDX)
5900 sc->sc_ctrl |= CTRL_FD;
5901 switch (IFM_SUBTYPE(ife->ifm_media)) {
5902 case IFM_10_T:
5903 sc->sc_ctrl |= CTRL_SPEED_10;
5904 break;
5905 case IFM_100_TX:
5906 sc->sc_ctrl |= CTRL_SPEED_100;
5907 break;
5908 case IFM_1000_T:
5909 sc->sc_ctrl |= CTRL_SPEED_1000;
5910 break;
5911 default:
5912 panic("wm_gmii_mediachange: bad media 0x%x",
5913 ife->ifm_media);
5914 }
5915 }
5916 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5917 if (sc->sc_type <= WM_T_82543)
5918 wm_gmii_reset(sc);
5919
5920 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5921 return 0;
5922 return rc;
5923 }
5924
5925 #define MDI_IO CTRL_SWDPIN(2)
5926 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
5927 #define MDI_CLK CTRL_SWDPIN(3)
5928
/*
 * i82543_mii_sendbits:
 *
 *	Bit-bang `nbits' bits of `data' (MSB first) out the 82543's
 *	software-defined MDIO pins.  Each bit is presented on MDI_IO
 *	and clocked with a low-high-low pulse on MDI_CLK, 10us per phase.
 */
static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Drive MDIO and MDC as outputs, both initially low. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}
5951
/*
 * i82543_mii_recvbits:
 *
 *	Bit-bang 16 bits in from the 82543's software-defined MDIO pins
 *	and return them.  MDI_IO is left as an input; a turnaround clock
 *	cycle is issued before the data bits and one more after them.
 */
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* MDC is an output; MDI_IO is released so the PHY can drive it. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* Turnaround clock cycle. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	/* Sample 16 data bits, MSB first, one per clock. */
	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* Trailing idle clock cycle. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return data;
}
5985
5986 #undef MDI_IO
5987 #undef MDI_DIR
5988 #undef MDI_CLK
5989
5990 /*
5991 * wm_gmii_i82543_readreg: [mii interface function]
5992 *
5993 * Read a PHY register on the GMII (i82543 version).
5994 */
5995 static int
5996 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5997 {
5998 struct wm_softc *sc = device_private(self);
5999 int rv;
6000
6001 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6002 i82543_mii_sendbits(sc, reg | (phy << 5) |
6003 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6004 rv = i82543_mii_recvbits(sc) & 0xffff;
6005
6006 DPRINTF(WM_DEBUG_GMII,
6007 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6008 device_xname(sc->sc_dev), phy, reg, rv));
6009
6010 return rv;
6011 }
6012
6013 /*
6014 * wm_gmii_i82543_writereg: [mii interface function]
6015 *
6016 * Write a PHY register on the GMII (i82543 version).
6017 */
6018 static void
6019 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6020 {
6021 struct wm_softc *sc = device_private(self);
6022
6023 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6024 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6025 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6026 (MII_COMMAND_START << 30), 32);
6027 }
6028
6029 /*
6030 * wm_gmii_i82544_readreg: [mii interface function]
6031 *
6032 * Read a PHY register on the GMII.
6033 */
6034 static int
6035 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6036 {
6037 struct wm_softc *sc = device_private(self);
6038 uint32_t mdic = 0;
6039 int i, rv;
6040
6041 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6042 MDIC_REGADD(reg));
6043
6044 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6045 mdic = CSR_READ(sc, WMREG_MDIC);
6046 if (mdic & MDIC_READY)
6047 break;
6048 delay(50);
6049 }
6050
6051 if ((mdic & MDIC_READY) == 0) {
6052 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6053 device_xname(sc->sc_dev), phy, reg);
6054 rv = 0;
6055 } else if (mdic & MDIC_E) {
6056 #if 0 /* This is normal if no PHY is present. */
6057 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6058 device_xname(sc->sc_dev), phy, reg);
6059 #endif
6060 rv = 0;
6061 } else {
6062 rv = MDIC_DATA(mdic);
6063 if (rv == 0xffff)
6064 rv = 0;
6065 }
6066
6067 return rv;
6068 }
6069
6070 /*
6071 * wm_gmii_i82544_writereg: [mii interface function]
6072 *
6073 * Write a PHY register on the GMII.
6074 */
6075 static void
6076 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6077 {
6078 struct wm_softc *sc = device_private(self);
6079 uint32_t mdic = 0;
6080 int i;
6081
6082 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6083 MDIC_REGADD(reg) | MDIC_DATA(val));
6084
6085 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6086 mdic = CSR_READ(sc, WMREG_MDIC);
6087 if (mdic & MDIC_READY)
6088 break;
6089 delay(50);
6090 }
6091
6092 if ((mdic & MDIC_READY) == 0)
6093 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6094 device_xname(sc->sc_dev), phy, reg);
6095 else if (mdic & MDIC_E)
6096 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6097 device_xname(sc->sc_dev), phy, reg);
6098 }
6099
6100 /*
6101 * wm_gmii_i80003_readreg: [mii interface function]
6102 *
6103 * Read a PHY register on the kumeran
6104 * This could be handled by the PHY layer if we didn't have to lock the
6105 * ressource ...
6106 */
6107 static int
6108 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6109 {
6110 struct wm_softc *sc = device_private(self);
6111 int sem;
6112 int rv;
6113
6114 if (phy != 1) /* only one PHY on kumeran bus */
6115 return 0;
6116
6117 sem = swfwphysem[sc->sc_funcid];
6118 if (wm_get_swfw_semaphore(sc, sem)) {
6119 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6120 __func__);
6121 return 0;
6122 }
6123
6124 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6125 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6126 reg >> GG82563_PAGE_SHIFT);
6127 } else {
6128 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6129 reg >> GG82563_PAGE_SHIFT);
6130 }
6131 /* Wait more 200us for a bug of the ready bit in the MDIC register */
6132 delay(200);
6133 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6134 delay(200);
6135
6136 wm_put_swfw_semaphore(sc, sem);
6137 return rv;
6138 }
6139
6140 /*
6141 * wm_gmii_i80003_writereg: [mii interface function]
6142 *
6143 * Write a PHY register on the kumeran.
6144 * This could be handled by the PHY layer if we didn't have to lock the
6145 * ressource ...
6146 */
6147 static void
6148 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6149 {
6150 struct wm_softc *sc = device_private(self);
6151 int sem;
6152
6153 if (phy != 1) /* only one PHY on kumeran bus */
6154 return;
6155
6156 sem = swfwphysem[sc->sc_funcid];
6157 if (wm_get_swfw_semaphore(sc, sem)) {
6158 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6159 __func__);
6160 return;
6161 }
6162
6163 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6164 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6165 reg >> GG82563_PAGE_SHIFT);
6166 } else {
6167 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6168 reg >> GG82563_PAGE_SHIFT);
6169 }
6170 /* Wait more 200us for a bug of the ready bit in the MDIC register */
6171 delay(200);
6172 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6173 delay(200);
6174
6175 wm_put_swfw_semaphore(sc, sem);
6176 }
6177
6178 /*
6179 * wm_gmii_bm_readreg: [mii interface function]
6180 *
6181 * Read a PHY register on the kumeran
6182 * This could be handled by the PHY layer if we didn't have to lock the
6183 * ressource ...
6184 */
6185 static int
6186 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6187 {
6188 struct wm_softc *sc = device_private(self);
6189 int sem;
6190 int rv;
6191
6192 sem = swfwphysem[sc->sc_funcid];
6193 if (wm_get_swfw_semaphore(sc, sem)) {
6194 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6195 __func__);
6196 return 0;
6197 }
6198
6199 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6200 if (phy == 1)
6201 wm_gmii_i82544_writereg(self, phy, 0x1f,
6202 reg);
6203 else
6204 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6205 reg >> GG82563_PAGE_SHIFT);
6206
6207 }
6208
6209 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6210 wm_put_swfw_semaphore(sc, sem);
6211 return rv;
6212 }
6213
6214 /*
6215 * wm_gmii_bm_writereg: [mii interface function]
6216 *
6217 * Write a PHY register on the kumeran.
6218 * This could be handled by the PHY layer if we didn't have to lock the
6219 * ressource ...
6220 */
6221 static void
6222 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6223 {
6224 struct wm_softc *sc = device_private(self);
6225 int sem;
6226
6227 sem = swfwphysem[sc->sc_funcid];
6228 if (wm_get_swfw_semaphore(sc, sem)) {
6229 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6230 __func__);
6231 return;
6232 }
6233
6234 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6235 if (phy == 1)
6236 wm_gmii_i82544_writereg(self, phy, 0x1f,
6237 reg);
6238 else
6239 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6240 reg >> GG82563_PAGE_SHIFT);
6241
6242 }
6243
6244 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6245 wm_put_swfw_semaphore(sc, sem);
6246 }
6247
/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Access a register on the BM/HV PHY's wakeup page (page 800),
 *	which requires a special enable dance through page 769.
 *	If `rd' is non-zero the register is read into *val, otherwise
 *	*val is written to it.  The original wakeup-enable state is
 *	restored afterwards.  Caller must hold the PHY semaphore.
 */
static void
wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
{
	struct wm_softc *sc = device_private(self);
	uint16_t regnum = BM_PHY_REG_NUM(offset);
	uint16_t wuce;

	/* XXX Gig must be disabled for MDIO accesses to page 800 */
	if (sc->sc_type == WM_T_PCH) {
		/* XXX e1000 driver do nothing... why? */
	}

	/* Set page 769 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	/* Save the wakeup-control state so it can be restored below. */
	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);

	/* Enable page-800 access, with host wakeup disabled. */
	wuce &= ~BM_WUC_HOST_WU_BIT;
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
	    wuce | BM_WUC_ENABLE_BIT);

	/* Select page 800 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);

	/* Write page 800 */
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);

	if (rd)
		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
	else
		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);

	/* Set page 769 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	/* Restore the original wakeup-control state. */
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
}
6288
6289 /*
6290 * wm_gmii_hv_readreg: [mii interface function]
6291 *
6292 * Read a PHY register on the kumeran
6293 * This could be handled by the PHY layer if we didn't have to lock the
6294 * ressource ...
6295 */
6296 static int
6297 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6298 {
6299 struct wm_softc *sc = device_private(self);
6300 uint16_t page = BM_PHY_REG_PAGE(reg);
6301 uint16_t regnum = BM_PHY_REG_NUM(reg);
6302 uint16_t val;
6303 int rv;
6304
6305 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6306 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6307 __func__);
6308 return 0;
6309 }
6310
6311 /* XXX Workaround failure in MDIO access while cable is disconnected */
6312 if (sc->sc_phytype == WMPHY_82577) {
6313 /* XXX must write */
6314 }
6315
6316 /* Page 800 works differently than the rest so it has its own func */
6317 if (page == BM_WUC_PAGE) {
6318 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
6319 return val;
6320 }
6321
6322 /*
6323 * Lower than page 768 works differently than the rest so it has its
6324 * own func
6325 */
6326 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6327 printf("gmii_hv_readreg!!!\n");
6328 return 0;
6329 }
6330
6331 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6332 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6333 page << BME1000_PAGE_SHIFT);
6334 }
6335
6336 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6337 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6338 return rv;
6339 }
6340
6341 /*
6342 * wm_gmii_hv_writereg: [mii interface function]
6343 *
6344 * Write a PHY register on the kumeran.
6345 * This could be handled by the PHY layer if we didn't have to lock the
6346 * ressource ...
6347 */
6348 static void
6349 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6350 {
6351 struct wm_softc *sc = device_private(self);
6352 uint16_t page = BM_PHY_REG_PAGE(reg);
6353 uint16_t regnum = BM_PHY_REG_NUM(reg);
6354
6355 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6356 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6357 __func__);
6358 return;
6359 }
6360
6361 /* XXX Workaround failure in MDIO access while cable is disconnected */
6362
6363 /* Page 800 works differently than the rest so it has its own func */
6364 if (page == BM_WUC_PAGE) {
6365 uint16_t tmp;
6366
6367 tmp = val;
6368 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
6369 return;
6370 }
6371
6372 /*
6373 * Lower than page 768 works differently than the rest so it has its
6374 * own func
6375 */
6376 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6377 printf("gmii_hv_writereg!!!\n");
6378 return;
6379 }
6380
6381 /*
6382 * XXX Workaround MDIO accesses being disabled after entering IEEE
6383 * Power Down (whenever bit 11 of the PHY control register is set)
6384 */
6385
6386 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6387 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6388 page << BME1000_PAGE_SHIFT);
6389 }
6390
6391 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6392 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6393 }
6394
6395 /*
6396 * wm_gmii_hv_readreg: [mii interface function]
6397 *
6398 * Read a PHY register on the kumeran
6399 * This could be handled by the PHY layer if we didn't have to lock the
6400 * ressource ...
6401 */
6402 static int
6403 wm_sgmii_readreg(device_t self, int phy, int reg)
6404 {
6405 struct wm_softc *sc = device_private(self);
6406 uint32_t i2ccmd;
6407 int i, rv;
6408
6409 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6410 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6411 __func__);
6412 return 0;
6413 }
6414
6415 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6416 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6417 | I2CCMD_OPCODE_READ;
6418 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6419
6420 /* Poll the ready bit */
6421 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6422 delay(50);
6423 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6424 if (i2ccmd & I2CCMD_READY)
6425 break;
6426 }
6427 if ((i2ccmd & I2CCMD_READY) == 0)
6428 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6429 if ((i2ccmd & I2CCMD_ERROR) != 0)
6430 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6431
6432 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6433
6434 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6435 return rv;
6436 }
6437
/*
 * wm_sgmii_writereg:	[mii interface function]
 *
 *	Write a PHY register on the SGMII (via the I2C command interface).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
 */
6445 static void
6446 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6447 {
6448 struct wm_softc *sc = device_private(self);
6449 uint32_t i2ccmd;
6450 int i;
6451
6452 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6453 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6454 __func__);
6455 return;
6456 }
6457
6458 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6459 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6460 | I2CCMD_OPCODE_WRITE;
6461 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6462
6463 /* Poll the ready bit */
6464 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6465 delay(50);
6466 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6467 if (i2ccmd & I2CCMD_READY)
6468 break;
6469 }
6470 if ((i2ccmd & I2CCMD_READY) == 0)
6471 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6472 if ((i2ccmd & I2CCMD_ERROR) != 0)
6473 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6474
6475 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6476 }
6477
6478 /*
6479 * wm_gmii_statchg: [mii interface function]
6480 *
6481 * Callback from MII layer when media changes.
6482 */
static void
wm_gmii_statchg(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	/* Clear flow-control and collision-distance state before rebuild. */
	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
	sc->sc_fcrtl &= ~FCRTL_XONE;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	/* Mirror the negotiated pause settings into the shadow registers. */
	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
			sc->sc_ctrl |= CTRL_TFCE;
			sc->sc_fcrtl |= FCRTL_XONE;
		}
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			sc->sc_ctrl |= CTRL_RFCE;
	}

	/* Collision distance depends on the duplex of the active media. */
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	/* Push the rebuilt shadow values into the hardware. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
	    : WMREG_FCRTL, sc->sc_fcrtl);
	/* 80003 needs speed-dependent Kumeran HD control and TIPG values. */
	if (sc->sc_type == WM_T_80003) {
		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_T:
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
			break;
		default:
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
			break;
		}
		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
	}
}
6541
6542 /*
6543 * wm_kmrn_readreg:
6544 *
6545 * Read a kumeran register
6546 */
6547 static int
6548 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6549 {
6550 int rv;
6551
6552 if (sc->sc_flags == WM_F_SWFW_SYNC) {
6553 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6554 aprint_error_dev(sc->sc_dev,
6555 "%s: failed to get semaphore\n", __func__);
6556 return 0;
6557 }
6558 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
6559 if (wm_get_swfwhw_semaphore(sc)) {
6560 aprint_error_dev(sc->sc_dev,
6561 "%s: failed to get semaphore\n", __func__);
6562 return 0;
6563 }
6564 }
6565
6566 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6567 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6568 KUMCTRLSTA_REN);
6569 delay(2);
6570
6571 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6572
6573 if (sc->sc_flags == WM_F_SWFW_SYNC)
6574 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6575 else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
6576 wm_put_swfwhw_semaphore(sc);
6577
6578 return rv;
6579 }
6580
6581 /*
6582 * wm_kmrn_writereg:
6583 *
6584 * Write a kumeran register
6585 */
6586 static void
6587 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6588 {
6589
6590 if (sc->sc_flags == WM_F_SWFW_SYNC) {
6591 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6592 aprint_error_dev(sc->sc_dev,
6593 "%s: failed to get semaphore\n", __func__);
6594 return;
6595 }
6596 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
6597 if (wm_get_swfwhw_semaphore(sc)) {
6598 aprint_error_dev(sc->sc_dev,
6599 "%s: failed to get semaphore\n", __func__);
6600 return;
6601 }
6602 }
6603
6604 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6605 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6606 (val & KUMCTRLSTA_MASK));
6607
6608 if (sc->sc_flags == WM_F_SWFW_SYNC)
6609 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6610 else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
6611 wm_put_swfwhw_semaphore(sc);
6612 }
6613
6614 static int
6615 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
6616 {
6617 uint32_t eecd = 0;
6618
6619 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
6620 || sc->sc_type == WM_T_82583) {
6621 eecd = CSR_READ(sc, WMREG_EECD);
6622
6623 /* Isolate bits 15 & 16 */
6624 eecd = ((eecd >> 15) & 0x03);
6625
6626 /* If both bits are set, device is Flash type */
6627 if (eecd == 0x03)
6628 return 0;
6629 }
6630 return 1;
6631 }
6632
/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the software/firmware semaphore (SWSM.SWESMBI).
 *	Returns 0 on success, 1 on timeout.
 */
static int
wm_get_swsm_semaphore(struct wm_softc *sc)
{
	int32_t timeout;
	uint32_t swsm;

	/* Get the FW semaphore. */
	timeout = 1000 + 1; /* XXX */
	while (timeout) {
		swsm = CSR_READ(sc, WMREG_SWSM);
		swsm |= SWSM_SWESMBI;
		CSR_WRITE(sc, WMREG_SWSM, swsm);
		/* if we managed to set the bit we got the semaphore. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		if (swsm & SWSM_SWESMBI)
			break;

		delay(50);
		timeout--;
	}

	if (timeout == 0) {
		aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
		/* Release semaphores */
		wm_put_swsm_semaphore(sc);
		return 1;
	}
	return 0;
}
6662
6663 static void
6664 wm_put_swsm_semaphore(struct wm_softc *sc)
6665 {
6666 uint32_t swsm;
6667
6668 swsm = CSR_READ(sc, WMREG_SWSM);
6669 swsm &= ~(SWSM_SWESMBI);
6670 CSR_WRITE(sc, WMREG_SWSM, swsm);
6671 }
6672
6673 static int
6674 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6675 {
6676 uint32_t swfw_sync;
6677 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
6678 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
6679 int timeout = 200;
6680
6681 for (timeout = 0; timeout < 200; timeout++) {
6682 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6683 if (wm_get_swsm_semaphore(sc)) {
6684 aprint_error_dev(sc->sc_dev,
6685 "%s: failed to get semaphore\n",
6686 __func__);
6687 return 1;
6688 }
6689 }
6690 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6691 if ((swfw_sync & (swmask | fwmask)) == 0) {
6692 swfw_sync |= swmask;
6693 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6694 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6695 wm_put_swsm_semaphore(sc);
6696 return 0;
6697 }
6698 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6699 wm_put_swsm_semaphore(sc);
6700 delay(5000);
6701 }
6702 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
6703 device_xname(sc->sc_dev), mask, swfw_sync);
6704 return 1;
6705 }
6706
/*
 * wm_put_swfw_semaphore:
 *
 *	Release the software half of the SW/FW sync bits given by "mask".
 */
static void
wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;

	/* SW_FW_SYNC itself may be guarded by the SWSM semaphore. */
	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
		while (wm_get_swsm_semaphore(sc) != 0)
			continue;
	}
	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
		wm_put_swsm_semaphore(sc);
}
6722
6723 static int
6724 wm_get_swfwhw_semaphore(struct wm_softc *sc)
6725 {
6726 uint32_t ext_ctrl;
6727 int timeout = 200;
6728
6729 for (timeout = 0; timeout < 200; timeout++) {
6730 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6731 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
6732 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6733
6734 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6735 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
6736 return 0;
6737 delay(5000);
6738 }
6739 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
6740 device_xname(sc->sc_dev), ext_ctrl);
6741 return 1;
6742 }
6743
6744 static void
6745 wm_put_swfwhw_semaphore(struct wm_softc *sc)
6746 {
6747 uint32_t ext_ctrl;
6748 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6749 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
6750 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6751 }
6752
/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 *	Determine which of the two NVM banks holds valid data.
 *	Returns 0 and sets *bank on success, -1 if no valid bank is found.
 */
static int
wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
{
	/* Byte address of the high byte of the bank signature word. */
	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);

	if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
		/* Value of bit 22 corresponds to the flash bank we're on. */
		*bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
	} else {
		/*
		 * ICH10/PCH: probe each bank's signature byte; a valid
		 * bank has 0b10 in the two top bits of that byte.
		 */
		uint8_t bank_high_byte;
		wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
		if ((bank_high_byte & 0xc0) == 0x80)
			*bank = 0;
		else {
			wm_read_ich8_byte(sc, act_offset + bank1_offset,
			    &bank_high_byte);
			if ((bank_high_byte & 0xc0) == 0x80)
				*bank = 1;
			else {
				aprint_error_dev(sc->sc_dev,
				    "EEPROM not present\n");
				return -1;
			}
		}
	}

	return 0;
}
6782
6783 /******************************************************************************
6784 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
6785 * register.
6786 *
6787 * sc - Struct containing variables accessed by shared code
6788 * offset - offset of word in the EEPROM to read
6789 * data - word read from the EEPROM
6790 * words - number of words to read
6791 *****************************************************************************/
static int
wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
{
	int32_t error = 0;
	uint32_t flash_bank = 0;
	uint32_t act_offset = 0;
	uint32_t bank_offset = 0;
	uint16_t word = 0;
	uint16_t i = 0;

	/* We need to know which is the valid flash bank. In the event
	 * that we didn't allocate eeprom_shadow_ram, we may not be
	 * managing flash_bank. So it cannot be trusted and needs
	 * to be updated with each read.
	 */
	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
	if (error) {
		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
		    __func__);
		return error;
	}

	/* Adjust offset appropriately if we're on bank 1 - adjust for word size */
	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);

	/* The flash is shared with firmware; take the HW semaphore. */
	error = wm_get_swfwhw_semaphore(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return error;
	}

	for (i = 0; i < words; i++) {
		/* The NVM part needs a byte offset, hence * 2 */
		act_offset = bank_offset + ((offset + i) * 2);
		error = wm_read_ich8_word(sc, act_offset, &word);
		if (error) {
			aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
			    __func__);
			break;
		}
		data[i] = word;
	}

	wm_put_swfwhw_semaphore(sc);
	return error;
}
6839
6840 /******************************************************************************
6841 * This function does initial flash setup so that a new read/write/erase cycle
6842 * can be started.
6843 *
6844 * sc - The pointer to the hw structure
6845 ****************************************************************************/
static int32_t
wm_ich8_cycle_init(struct wm_softc *sc)
{
	uint16_t hsfsts;
	int32_t error = 1;	/* assume failure until proven otherwise */
	int32_t i = 0;

	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);

	/* May be check the Flash Des Valid bit in Hw status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
		return error;
	}

	/* Clear FCERR in Hw status by writing 1 */
	/* Clear DAEL in Hw status by writing a 1 */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;

	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);

	/*
	 * Either we should have a hardware SPI cycle in progress bit to check
	 * against, in order to start a new cycle or FDONE bit should be
	 * changed in the hardware so that it is 1 after harware reset, which
	 * can then be used as an indication whether a cycle is in progress or
	 * has been completed .. we should also have some software semaphore
	 * mechanism to guard FDONE or the cycle in progress bit so that two
	 * threads access to those bits can be sequentiallized or a way so that
	 * 2 threads dont start the cycle at the same time
	 */

	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/*
		 * There is no cycle running at present, so we can start a
		 * cycle
		 */

		/* Begin by setting Flash Cycle Done. */
		hsfsts |= HSFSTS_DONE;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		error = 0;
	} else {
		/*
		 * otherwise poll for sometime so the current cycle has a
		 * chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
				error = 0;
				break;
			}
			delay(1);
		}
		if (error == 0) {
			/*
			 * Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts |= HSFSTS_DONE;
			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		}
	}
	return error;
}
6911
6912 /******************************************************************************
6913 * This function starts a flash cycle and waits for its completion
6914 *
6915 * sc - The pointer to the hw structure
6916 ****************************************************************************/
6917 static int32_t
6918 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6919 {
6920 uint16_t hsflctl;
6921 uint16_t hsfsts;
6922 int32_t error = 1;
6923 uint32_t i = 0;
6924
6925 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6926 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6927 hsflctl |= HSFCTL_GO;
6928 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6929
6930 /* wait till FDONE bit is set to 1 */
6931 do {
6932 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6933 if (hsfsts & HSFSTS_DONE)
6934 break;
6935 delay(1);
6936 i++;
6937 } while (i < timeout);
6938 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
6939 error = 0;
6940
6941 return error;
6942 }
6943
6944 /******************************************************************************
6945 * Reads a byte or word from the NVM using the ICH8 flash access registers.
6946 *
6947 * sc - The pointer to the hw structure
6948 * index - The index of the byte or word to read.
6949 * size - Size of data to read, 1=byte 2=word
6950 * data - Pointer to the word to store the value read.
6951 *****************************************************************************/
static int32_t
wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
    uint32_t size, uint16_t* data)
{
	uint16_t hsfsts;
	uint16_t hsflctl;
	uint32_t flash_linear_address;
	uint32_t flash_data = 0;
	int32_t error = 1;	/* assume failure until a read succeeds */
	int32_t count = 0;

	/* Validate arguments: 1- or 2-byte reads within the address mask. */
	if (size < 1 || size > 2 || data == 0x0 ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;

	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
	    sc->sc_ich8_flash_base;

	do {
		delay(1);
		/* Steps */
		error = wm_ich8_cycle_init(sc);
		if (error)
			break;

		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
		    & HSFCTL_BCOUNT_MASK;
		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

		/*
		 * Write the last 24 bits of index into Flash Linear address
		 * field in Flash Address
		 */
		/* TODO: TBD maybe check the index against the size of flash */

		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);

		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);

		/*
		 * Check if FCERR is set to 1, if set to 1, clear it and try
		 * the whole sequence a few more times, else read in (shift in)
		 * the Flash Data0, the order is least significant byte first
		 * msb to lsb
		 */
		if (error == 0) {
			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (uint8_t)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (uint16_t)(flash_data & 0x0000FFFF);
			break;
		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if (hsfsts & HSFSTS_ERR) {
				/* Repeat for some time before giving up. */
				continue;
			} else if ((hsfsts & HSFSTS_DONE) == 0)
				break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return error;
}
7025
7026 /******************************************************************************
7027 * Reads a single byte from the NVM using the ICH8 flash access registers.
7028 *
7029 * sc - pointer to wm_hw structure
7030 * index - The index of the byte to read.
7031 * data - Pointer to a byte to store the value read.
7032 *****************************************************************************/
7033 static int32_t
7034 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7035 {
7036 int32_t status;
7037 uint16_t word = 0;
7038
7039 status = wm_read_ich8_data(sc, index, 1, &word);
7040 if (status == 0)
7041 *data = (uint8_t)word;
7042 else
7043 *data = 0;
7044
7045 return status;
7046 }
7047
7048 /******************************************************************************
7049 * Reads a word from the NVM using the ICH8 flash access registers.
7050 *
7051 * sc - pointer to wm_hw structure
7052 * index - The starting byte index of the word to read.
7053 * data - Pointer to a word to store the value read.
7054 *****************************************************************************/
7055 static int32_t
7056 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
7057 {
7058 int32_t status;
7059
7060 status = wm_read_ich8_data(sc, index, 2, data);
7061 return status;
7062 }
7063
7064 static int
7065 wm_check_mng_mode(struct wm_softc *sc)
7066 {
7067 int rv;
7068
7069 switch (sc->sc_type) {
7070 case WM_T_ICH8:
7071 case WM_T_ICH9:
7072 case WM_T_ICH10:
7073 case WM_T_PCH:
7074 case WM_T_PCH2:
7075 rv = wm_check_mng_mode_ich8lan(sc);
7076 break;
7077 case WM_T_82574:
7078 case WM_T_82583:
7079 rv = wm_check_mng_mode_82574(sc);
7080 break;
7081 case WM_T_82571:
7082 case WM_T_82572:
7083 case WM_T_82573:
7084 case WM_T_80003:
7085 rv = wm_check_mng_mode_generic(sc);
7086 break;
7087 default:
7088 /* noting to do */
7089 rv = 0;
7090 break;
7091 }
7092
7093 return rv;
7094 }
7095
7096 static int
7097 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
7098 {
7099 uint32_t fwsm;
7100
7101 fwsm = CSR_READ(sc, WMREG_FWSM);
7102
7103 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
7104 return 1;
7105
7106 return 0;
7107 }
7108
7109 static int
7110 wm_check_mng_mode_82574(struct wm_softc *sc)
7111 {
7112 uint16_t data;
7113
7114 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
7115
7116 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
7117 return 1;
7118
7119 return 0;
7120 }
7121
7122 static int
7123 wm_check_mng_mode_generic(struct wm_softc *sc)
7124 {
7125 uint32_t fwsm;
7126
7127 fwsm = CSR_READ(sc, WMREG_FWSM);
7128
7129 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
7130 return 1;
7131
7132 return 0;
7133 }
7134
/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 when management pass-through should be enabled (firmware
 *	is present and filtering management traffic to the host), else 0.
 */
static int
wm_enable_mng_pass_thru(struct wm_softc *sc)
{
	uint32_t manc, fwsm, factps;

	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
		return 0;

	manc = CSR_READ(sc, WMREG_MANC);

	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
	    device_xname(sc->sc_dev), manc));
	/* Pass-through needs both TCO reception and MAC address filtering. */
	if (((manc & MANC_RECV_TCO_EN) == 0)
	    || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
		return 0;

	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
		/* ARC subsystem: check clock gating and the firmware mode. */
		fwsm = CSR_READ(sc, WMREG_FWSM);
		factps = CSR_READ(sc, WMREG_FACTPS);
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((fwsm & FWSM_MODE_MASK)
			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
			return 1;
	} else if (((manc & MANC_SMBUS_EN) != 0)
	    && ((manc & MANC_ASF_EN) == 0))
		return 1;

	return 0;
}
7164
7165 static int
7166 wm_check_reset_block(struct wm_softc *sc)
7167 {
7168 uint32_t reg;
7169
7170 switch (sc->sc_type) {
7171 case WM_T_ICH8:
7172 case WM_T_ICH9:
7173 case WM_T_ICH10:
7174 case WM_T_PCH:
7175 case WM_T_PCH2:
7176 reg = CSR_READ(sc, WMREG_FWSM);
7177 if ((reg & FWSM_RSPCIPHY) != 0)
7178 return 0;
7179 else
7180 return -1;
7181 break;
7182 case WM_T_82571:
7183 case WM_T_82572:
7184 case WM_T_82573:
7185 case WM_T_82574:
7186 case WM_T_82583:
7187 case WM_T_80003:
7188 reg = CSR_READ(sc, WMREG_MANC);
7189 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7190 return -1;
7191 else
7192 return 0;
7193 break;
7194 default:
7195 /* no problem */
7196 break;
7197 }
7198
7199 return 0;
7200 }
7201
7202 static void
7203 wm_get_hw_control(struct wm_softc *sc)
7204 {
7205 uint32_t reg;
7206
7207 switch (sc->sc_type) {
7208 case WM_T_82573:
7209 reg = CSR_READ(sc, WMREG_SWSM);
7210 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7211 break;
7212 case WM_T_82571:
7213 case WM_T_82572:
7214 case WM_T_82574:
7215 case WM_T_82583:
7216 case WM_T_80003:
7217 case WM_T_ICH8:
7218 case WM_T_ICH9:
7219 case WM_T_ICH10:
7220 case WM_T_PCH:
7221 case WM_T_PCH2:
7222 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7223 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7224 break;
7225 default:
7226 break;
7227 }
7228 }
7229
7230 static void
7231 wm_release_hw_control(struct wm_softc *sc)
7232 {
7233 uint32_t reg;
7234
7235 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7236 return;
7237
7238 if (sc->sc_type == WM_T_82573) {
7239 reg = CSR_READ(sc, WMREG_SWSM);
7240 reg &= ~SWSM_DRV_LOAD;
7241 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
7242 } else {
7243 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7244 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7245 }
7246 }
7247
/* XXX Currently TBI only */
static int
wm_check_for_link(struct wm_softc *sc)
{
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw;
	uint32_t ctrl;
	uint32_t status;
	uint32_t sig;

	/* Snapshot the three registers that describe the TBI link state. */
	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	/* Signal-detect polarity differs on pre-82545 chips. */
	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
		device_xname(sc->sc_dev), __func__,
		((ctrl & CTRL_SWDPIN(1)) == sig),
		((status & STATUS_LU) != 0),
		((rxcw & RXCW_C) != 0)
	    ));

	/*
	 * SWDPIN   LU  RXCW
	 *      0    0     0
	 *      0    0     1	(should not happen)
	 *      0    1     0	(should not happen)
	 *      0    1     1	(should not happen)
	 *      1    0     0	Disable autonego and force linkup
	 *      1    0     1	got /C/ but not linkup yet
	 *      1    1     0	(linkup)
	 *      1    1     1	If IFM_AUTO, back to autonego
	 *
	 */
	if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((status & STATUS_LU) == 0)
	    && ((rxcw & RXCW_C) == 0)) {
		/* Signal present, no link, no /C/: partner can't autoneg. */
		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
			__func__));
		sc->sc_tbi_linkup = 0;
		/* Disable auto-negotiation in the TXCW register */
		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));

		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: CTRL was updated TFCE and RFCE automatically,
		 * so we should update sc->sc_ctrl
		 */
		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
	    && ((rxcw & RXCW_C) != 0)
	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
		/* Link up and receiving /C/: resume auto-negotiation. */
		sc->sc_tbi_linkup = 1;
		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
			__func__));
		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((rxcw & RXCW_C) != 0)) {
		/* Receiving /C/ ordered sets but no link yet. */
		DPRINTF(WM_DEBUG_LINK, ("/C/"));
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
			status));
	}

	return 0;
}
7318
7319 /* Work-around for 82566 Kumeran PCS lock loss */
7320 static void
7321 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
7322 {
7323 int miistatus, active, i;
7324 int reg;
7325
7326 miistatus = sc->sc_mii.mii_media_status;
7327
7328 /* If the link is not up, do nothing */
7329 if ((miistatus & IFM_ACTIVE) != 0)
7330 return;
7331
7332 active = sc->sc_mii.mii_media_active;
7333
7334 /* Nothing to do if the link is other than 1Gbps */
7335 if (IFM_SUBTYPE(active) != IFM_1000_T)
7336 return;
7337
7338 for (i = 0; i < 10; i++) {
7339 /* read twice */
7340 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7341 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7342 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) != 0)
7343 goto out; /* GOOD! */
7344
7345 /* Reset the PHY */
7346 wm_gmii_reset(sc);
7347 delay(5*1000);
7348 }
7349
7350 /* Disable GigE link negotiation */
7351 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7352 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7353 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7354
7355 /*
7356 * Call gig speed drop workaround on Gig disable before accessing
7357 * any PHY registers.
7358 */
7359 wm_gig_downshift_workaround_ich8lan(sc);
7360
7361 out:
7362 return;
7363 }
7364
7365 /* WOL from S5 stops working */
7366 static void
7367 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7368 {
7369 uint16_t kmrn_reg;
7370
7371 /* Only for igp3 */
7372 if (sc->sc_phytype == WMPHY_IGP_3) {
7373 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
7374 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7375 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7376 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7377 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7378 }
7379 }
7380
7381 #ifdef WM_WOL
7382 /* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Try the sequence twice; a PHY reset is issued between attempts. */
	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		/* Done when shutdown mode stuck, or after the retry pass. */
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}
7418 #endif /* WM_WOL */
7419
7420 /*
7421 * Workaround for pch's PHYs
7422 * XXX should be moved to new PHY driver?
7423 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	/* 82577 needs the slow-MDIO workaround. */
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}
7460
static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	/* 82579 (PCH2): force the PHY into slow MDIO mode. */
	wm_set_mdio_slow_mode_hv(sc);
}
7467
7468 static void
7469 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
7470 {
7471 int k1_enable = sc->sc_nvm_k1_enabled;
7472
7473 /* XXX acquire semaphore */
7474
7475 if (link) {
7476 k1_enable = 0;
7477
7478 /* Link stall fix for link up */
7479 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
7480 } else {
7481 /* Link stall fix for link down */
7482 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
7483 }
7484
7485 wm_configure_k1_ich8lan(sc, k1_enable);
7486
7487 /* XXX release semaphore */
7488 }
7489
7490 static void
7491 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
7492 {
7493 uint32_t reg;
7494
7495 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
7496 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
7497 reg | HV_KMRN_MDIO_SLOW);
7498 }
7499
/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable K1 (low-power idle) in the Kumeran K1 config
 *	register, briefly forcing the MAC speed so the change takes.
 */
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	/* Save CTRL/CTRL_EXT so they can be restored afterwards. */
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	/* Temporarily force 10Mbps with speed-bypass set. */
	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	delay(20);

	/* Restore the original register values. */
	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	delay(20);
}
7531
/*
 * wm_smbustopci:
 *
 *	Toggle LANPHYPC to switch the PHY from SMBus to PCIe mode when
 *	no firmware is managing it.
 */
static void
wm_smbustopci(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);
	/* Only when firmware is absent and PHY resets are not blocked. */
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && ((wm_check_reset_block(sc) == 0))) {
		/* Pulse LANPHYPC with the value forced low. */
		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(10);
		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(50*1000);

		/*
		 * Gate automatic PHY configuration by hardware on non-managed
		 * 82579
		 */
		if (sc->sc_type == WM_T_PCH2)
			wm_gate_hw_phy_config_ich8lan(sc, 1);
	}
}
7556
/*
 * wm_set_pcie_completion_timeout:
 *
 *	Set a non-zero PCIe completion timeout when it defaults to 0,
 *	and always disable completion timeout resend.
 */
static void
wm_set_pcie_completion_timeout(struct wm_softc *sc)
{
	uint32_t gcr;
	pcireg_t ctrl2;

	gcr = CSR_READ(sc, WMREG_GCR);

	/* Only take action if timeout value is defaulted to 0 */
	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
		goto out;

	if ((gcr & GCR_CAP_VER2) == 0) {
		/* PCIe cap v1: the timeout lives in GCR itself. */
		gcr |= GCR_CMPL_TMOUT_10MS;
		goto out;
	}

	/* PCIe cap v2: set the timeout in Device Control 2 config space. */
	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
	ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);

out:
	/* Disable completion timeout resend */
	gcr &= ~GCR_CMPL_TMOUT_RESEND;

	CSR_WRITE(sc, WMREG_GCR, gcr);
}
7586
7587 /* special case - for 82575 - need to do manual init ... */
7588 static void
7589 wm_reset_init_script_82575(struct wm_softc *sc)
7590 {
7591 /*
7592 * remark: this is untested code - we have no board without EEPROM
7593 * same setup as mentioned int the freeBSD driver for the i82575
7594 */
7595
7596 /* SerDes configuration via SERDESCTRL */
7597 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
7598 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
7599 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
7600 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
7601
7602 /* CCM configuration via CCMCTL register */
7603 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
7604 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
7605
7606 /* PCIe lanes configuration */
7607 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
7608 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
7609 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
7610 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
7611
7612 /* PCIe PLL Configuration */
7613 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
7614 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
7615 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
7616 }
7617
7618 static void
7619 wm_init_manageability(struct wm_softc *sc)
7620 {
7621
7622 if (sc->sc_flags & WM_F_HAS_MANAGE) {
7623 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
7624 uint32_t manc = CSR_READ(sc, WMREG_MANC);
7625
7626 /* disabl hardware interception of ARP */
7627 manc &= ~MANC_ARP_EN;
7628
7629 /* enable receiving management packets to the host */
7630 if (sc->sc_type >= WM_T_82571) {
7631 manc |= MANC_EN_MNG2HOST;
7632 manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
7633 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
7634
7635 }
7636
7637 CSR_WRITE(sc, WMREG_MANC, manc);
7638 }
7639 }
7640
7641 static void
7642 wm_release_manageability(struct wm_softc *sc)
7643 {
7644
7645 if (sc->sc_flags & WM_F_HAS_MANAGE) {
7646 uint32_t manc = CSR_READ(sc, WMREG_MANC);
7647
7648 if (sc->sc_type >= WM_T_82571)
7649 manc &= ~MANC_EN_MNG2HOST;
7650
7651 CSR_WRITE(sc, WMREG_MANC, manc);
7652 }
7653 }
7654
/*
 * wm_get_wakeup:
 *
 *	Set the wakeup/manageability capability flags in sc_flags
 *	(WM_F_HAS_AMT, WM_F_ARC_SUBSYS_VALID, WM_F_ASF_FIRMWARE_PRES,
 *	WM_F_HAS_MANAGE) based on the chip type and FWSM contents.
 *	The WOL flag itself is set later (see note at the bottom).
 */
static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
#if 0 /* XXX */
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
#endif
		/* Firmware mode bits set => ARC subsystem is valid. */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
		/* ICH/PCH parts always have AMT and ASF firmware. */
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	/* Dump which capability flags were detected. */
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flags is set after the resetting of the eeprom
	 * stuff
	 */
}
7715
7716 #ifdef WM_WOL
7717 /* WOL in the newer chipset interfaces (pchlan) */
/* WOL in the newer chipset interfaces (pchlan) */
/*
 * wm_enable_phy_wakeup:
 *
 *	Unimplemented stub: on PCH-class parts wakeup is supposed to be
 *	armed in the PHY rather than the MAC.  The entire intended
 *	sequence is sketched below but disabled (#if 0), so calling this
 *	function currently does nothing.
 */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}
7739
7740 static void
7741 wm_enable_wakeup(struct wm_softc *sc)
7742 {
7743 uint32_t reg, pmreg;
7744 pcireg_t pmode;
7745
7746 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
7747 &pmreg, NULL) == 0)
7748 return;
7749
7750 /* Advertise the wakeup capability */
7751 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
7752 | CTRL_SWDPIN(3));
7753 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
7754
7755 /* ICH workaround */
7756 switch (sc->sc_type) {
7757 case WM_T_ICH8:
7758 case WM_T_ICH9:
7759 case WM_T_ICH10:
7760 case WM_T_PCH:
7761 case WM_T_PCH2:
7762 /* Disable gig during WOL */
7763 reg = CSR_READ(sc, WMREG_PHY_CTRL);
7764 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
7765 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7766 if (sc->sc_type == WM_T_PCH)
7767 wm_gmii_reset(sc);
7768
7769 /* Power down workaround */
7770 if (sc->sc_phytype == WMPHY_82577) {
7771 struct mii_softc *child;
7772
7773 /* Assume that the PHY is copper */
7774 child = LIST_FIRST(&sc->sc_mii.mii_phys);
7775 if (child->mii_mpd_rev <= 2)
7776 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
7777 (768 << 5) | 25, 0x0444); /* magic num */
7778 }
7779 break;
7780 default:
7781 break;
7782 }
7783
7784 /* Keep the laser running on fiber adapters */
7785 if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
7786 || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
7787 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7788 reg |= CTRL_EXT_SWDPIN(3);
7789 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7790 }
7791
7792 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
7793 #if 0 /* for the multicast packet */
7794 reg |= WUFC_MC;
7795 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
7796 #endif
7797
7798 if (sc->sc_type == WM_T_PCH) {
7799 wm_enable_phy_wakeup(sc);
7800 } else {
7801 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
7802 CSR_WRITE(sc, WMREG_WUFC, reg);
7803 }
7804
7805 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
7806 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
7807 || (sc->sc_type == WM_T_PCH2))
7808 && (sc->sc_phytype == WMPHY_IGP_3))
7809 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
7810
7811 /* Request PME */
7812 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
7813 #if 0
7814 /* Disable WOL */
7815 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
7816 #else
7817 /* For WOL */
7818 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
7819 #endif
7820 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
7821 }
7822 #endif /* WM_WOL */
7823
7824 static bool
7825 wm_suspend(device_t self, const pmf_qual_t *qual)
7826 {
7827 struct wm_softc *sc = device_private(self);
7828
7829 wm_release_manageability(sc);
7830 wm_release_hw_control(sc);
7831 #ifdef WM_WOL
7832 wm_enable_wakeup(sc);
7833 #endif
7834
7835 return true;
7836 }
7837
7838 static bool
7839 wm_resume(device_t self, const pmf_qual_t *qual)
7840 {
7841 struct wm_softc *sc = device_private(self);
7842
7843 wm_init_manageability(sc);
7844
7845 return true;
7846 }
7847
7848 static void
7849 wm_set_eee_i350(struct wm_softc * sc)
7850 {
7851 uint32_t ipcnfg, eeer;
7852
7853 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
7854 eeer = CSR_READ(sc, WMREG_EEER);
7855
7856 if ((sc->sc_flags & WM_F_EEE) != 0) {
7857 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
7858 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
7859 | EEER_LPI_FC);
7860 } else {
7861 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
7862 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
7863 | EEER_LPI_FC);
7864 }
7865
7866 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
7867 CSR_WRITE(sc, WMREG_EEER, eeer);
7868 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
7869 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
7870 }
7871