/*	$NetBSD: if_wm.c,v 1.346 2015/08/17 06:16:03 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.346 2015/08/17 06:16:03 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

#ifdef __HAVE_PCI_MSI_MSIX
#define	WM_MSI_MSIX	1 /* Enable by default */
#endif

/*
 * This device driver divides interrupt handling into TX, RX and link
 * state.  The MSI-X vector index of each is given below.
 */
#define	WM_MSIX_NINTR		3
#define	WM_MSIX_TXINTR_IDX	0
#define	WM_MSIX_RXINTR_IDX	1
#define	WM_MSIX_LINKINTR_IDX	2
#define	WM_MAX_NINTR		WM_MSIX_NINTR

/*
 * This device driver sets the affinity of each interrupt as below
 * (round-robin).  If the number of CPUs is less than the number of
 * interrupts, the driver uses the same CPU for multiple interrupts.
 */
#define	WM_MSIX_TXINTR_CPUID	0
#define	WM_MSIX_RXINTR_CPUID	1
#define	WM_MSIX_LINKINTR_CPUID	2
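
/*
 * Example (disabled): a minimal sketch of the round-robin fallback,
 * using a hypothetical "cpu" variable and the kernel's "ncpu" count;
 * with fewer CPUs than vectors, the modulo maps several vectors onto
 * the same CPU:
 */
#if 0
	cpu = msix_matrix[i].cpuid % ncpu;
#endif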

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
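
/*
 * Example (disabled): because the descriptor counts above are powers
 * of two, the WM_NEXT*() macros wrap with a mask instead of a modulo.
 * A minimal sketch, assuming a 4096-entry (82544-style) ring:
 */
#if 0
	int idx = WM_NTXDESC(sc) - 1;	/* last slot, i.e. 4095 */

	idx = WM_NEXTTX(sc, idx);	/* wraps back to descriptor 0 */
#endif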

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

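/*
 * Rx packet buffer size table for the 82580 family, indexed by the
 * RXPBS register field; values are assumed to be in KB (see
 * wm_rxpbs_adjust_82580()).
 */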
static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define	WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define	WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define	WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define	WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define	WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define	WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define	WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define	WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define	WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
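
/*
 * Note: each macro above degrades to a no-op (and each *_LOCKED()
 * check evaluates to true) when the corresponding lock pointer is
 * NULL, so unlocked operation is tolerated.
 */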

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

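/*
 * Example (disabled): the LO/HI macros split a descriptor ring base
 * address into the two 32-bit halves the chip expects; a sketch,
 * assuming the WMREG_TDBA{L,H} register names from if_wmreg.h:
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
#endif
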
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
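
/*
 * Example (disabled): syncing a range that crosses the end of a
 * 4096-entry ring issues two bus_dmamap_sync() calls, one for the
 * tail of the ring and one for the wrapped head:
 */
#if 0
	/* syncs descriptors 4092-4095, then 0-3 */
	WM_CDTXSYNC(sc, 4092, 8, BUS_DMASYNC_POSTREAD);
#endif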

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
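
/*
 * Example (disabled): re-arming receive descriptor 0 after its mbuf
 * has been replaced; the final CSR_WRITE to the RDT register hands
 * the slot back to the chip:
 */
#if 0
	WM_INIT_RXDESC(sc, 0);
#endif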

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *);
static void	wm_rxeof(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
#ifdef WM_MSI_MSIX
static int	wm_txintr_msix(void *);
static int	wm_rxintr_msix(void *);
static int	wm_linkintr_msix(void *);
#endif

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

#ifdef WM_MSI_MSIX
struct _msix_matrix {
	const char *intrname;
	int (*func)(void *);
	int intridx;
	int cpuid;
} msix_matrix[WM_MSIX_NINTR] = {
	{ "TX", wm_txintr_msix, WM_MSIX_TXINTR_IDX, WM_MSIX_TXINTR_CPUID },
	{ "RX", wm_rxintr_msix, WM_MSIX_RXINTR_IDX, WM_MSIX_RXINTR_CPUID },
	{ "LINK", wm_linkintr_msix, WM_MSIX_LINKINTR_IDX,
	  WM_MSIX_LINKINTR_CPUID },
};
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	wm_chip_type wmp_type;
	uint32_t wmp_flags;
#define	WMP_F_UNKNOWN	WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER	WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER	WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES	WM_MEDIATYPE_SERDES
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

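/*
 * Indirect I/O-space access: the first longword of the I/O BAR
 * selects the register offset, the second carries the data.
 */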
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

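/*
 * Write an 8-bit datum to one of the 82575's indexed 8-bit controller
 * registers and poll until the hardware reports SCTL_CTL_READY.
 */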
static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

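/*
 * Store a bus address into the two little-endian 32-bit halves of a
 * descriptor address field; the high half is zero on 32-bit platforms.
 */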
static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
#ifndef WM_MSI_MSIX
	pci_intr_handle_t ih;
#else
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
#endif
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, nvmword;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Disable MSI for Errata:
	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
	 *
	 *  82544: Errata 25
	 *  82540: Errata  6 (easy to reproduce device timeout)
	 *  82545: Errata  4 (easy to reproduce device timeout)
	 *  82546: Errata 26 (easy to reproduce device timeout)
	 *  82541: Errata  7 (easy to reproduce device timeout)
	 *
	 * "Byte Enables 2 and 3 are not set on MSI writes"
	 *
	 *  82571 & 82572: Errata 63
	 */
	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
	    || (sc->sc_type == WM_T_82572))
		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
1552 if (i < PCI_MAPREG_END) {
1553 /*
1554 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1555 			 * (and possibly newer) chips have no PCI_MAPREG_TYPE_IO
1556 			 * BAR. That's not a problem, because those newer
1557 			 * chips don't have this bug.
1558 *
1559 			 * The i8254x apparently doesn't respond when the
1560 			 * I/O BAR is 0, which looks as if it hasn't
1561 			 * been configured.
1562 */
1563 preg = pci_conf_read(pc, pa->pa_tag, i);
1564 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1565 aprint_error_dev(sc->sc_dev,
1566 "WARNING: I/O BAR at zero.\n");
1567 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1568 0, &sc->sc_iot, &sc->sc_ioh,
1569 NULL, &sc->sc_ios) == 0) {
1570 sc->sc_flags |= WM_F_IOH_VALID;
1571 } else {
1572 aprint_error_dev(sc->sc_dev,
1573 "WARNING: unable to map I/O space\n");
1574 }
1575 }
1576
1577 }
1578
1579 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1580 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1581 preg |= PCI_COMMAND_MASTER_ENABLE;
1582 if (sc->sc_type < WM_T_82542_2_1)
1583 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1584 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1585
1586 /* power up chip */
1587 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1588 NULL)) && error != EOPNOTSUPP) {
1589 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1590 return;
1591 }
1592
1593 #ifndef WM_MSI_MSIX
1594 /*
1595 * Map and establish our interrupt.
1596 */
1597 if (pci_intr_map(pa, &ih)) {
1598 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1599 return;
1600 }
1601 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
1602 #ifdef WM_MPSAFE
1603 pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
1604 #endif
1605 sc->sc_ihs[0] = pci_intr_establish_xname(pc, ih, IPL_NET,
1606 wm_intr_legacy, sc, device_xname(sc->sc_dev));
1607 if (sc->sc_ihs[0] == NULL) {
1608 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1609 if (intrstr != NULL)
1610 aprint_error(" at %s", intrstr);
1611 aprint_error("\n");
1612 return;
1613 }
1614 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1615 sc->sc_nintrs = 1;
1616 #else /* WM_MSI_MSIX */
1617 /* Allocation settings */
1618 max_type = PCI_INTR_TYPE_MSIX;
1619 counts[PCI_INTR_TYPE_MSIX] = WM_MAX_NINTR;
1620 counts[PCI_INTR_TYPE_MSI] = 1;
1621 counts[PCI_INTR_TYPE_INTX] = 1;
1622
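	/*
	 * Try MSI-X first; if establishing any handler fails, the code
	 * below releases the vectors and retries here with MSI, then INTx.
	 */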
1623 alloc_retry:
1624 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1625 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1626 return;
1627 }
1628
1629 if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1630 void *vih;
1631 kcpuset_t *affinity;
1632 char intr_xname[INTRDEVNAMEBUF];
1633
1634 kcpuset_create(&affinity, false);
1635
1636 for (i = 0; i < WM_MSIX_NINTR; i++) {
1637 intrstr = pci_intr_string(pc,
1638 sc->sc_intrs[msix_matrix[i].intridx], intrbuf,
1639 sizeof(intrbuf));
1640 #ifdef WM_MPSAFE
1641 pci_intr_setattr(pc,
1642 &sc->sc_intrs[msix_matrix[i].intridx],
1643 PCI_INTR_MPSAFE, true);
1644 #endif
1645 memset(intr_xname, 0, sizeof(intr_xname));
1646 strlcat(intr_xname, device_xname(sc->sc_dev),
1647 sizeof(intr_xname));
1648 strlcat(intr_xname, msix_matrix[i].intrname,
1649 sizeof(intr_xname));
1650 vih = pci_intr_establish_xname(pc,
1651 sc->sc_intrs[msix_matrix[i].intridx], IPL_NET,
1652 msix_matrix[i].func, sc, intr_xname);
1653 if (vih == NULL) {
1654 aprint_error_dev(sc->sc_dev,
1655 "unable to establish MSI-X(for %s)%s%s\n",
1656 msix_matrix[i].intrname,
1657 intrstr ? " at " : "",
1658 intrstr ? intrstr : "");
1659 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1660 WM_MSIX_NINTR);
1661 kcpuset_destroy(affinity);
1662
1663 /* Setup for MSI: Disable MSI-X */
1664 max_type = PCI_INTR_TYPE_MSI;
1665 counts[PCI_INTR_TYPE_MSI] = 1;
1666 counts[PCI_INTR_TYPE_INTX] = 1;
1667 goto alloc_retry;
1668 }
1669 kcpuset_zero(affinity);
1670 /* Round-robin affinity */
1671 kcpuset_set(affinity, msix_matrix[i].cpuid % ncpu);
1672 error = interrupt_distribute(vih, affinity, NULL);
1673 if (error == 0) {
1674 aprint_normal_dev(sc->sc_dev,
1675 "for %s interrupting at %s affinity to %u\n",
1676 msix_matrix[i].intrname, intrstr,
1677 msix_matrix[i].cpuid % ncpu);
1678 } else {
1679 aprint_normal_dev(sc->sc_dev,
1680 "for %s interrupting at %s\n",
1681 msix_matrix[i].intrname, intrstr);
1682 }
1683 sc->sc_ihs[msix_matrix[i].intridx] = vih;
1684 }
1685
1686 sc->sc_nintrs = WM_MSIX_NINTR;
1687 kcpuset_destroy(affinity);
1688 } else {
1689 /* MSI or INTx */
1690 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
1691 sizeof(intrbuf));
1692 #ifdef WM_MPSAFE
1693 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
1694 #endif
1695 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
1696 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
1697 if (sc->sc_ihs[0] == NULL) {
1698 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
1699 (pci_intr_type(sc->sc_intrs[0])
1700 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
1701 pci_intr_release(sc->sc_pc, sc->sc_intrs, 1);
1702 switch (pci_intr_type(sc->sc_intrs[0])) {
1703 case PCI_INTR_TYPE_MSI:
1704 /* The next try is for INTx: Disable MSI */
1705 max_type = PCI_INTR_TYPE_INTX;
1706 counts[PCI_INTR_TYPE_INTX] = 1;
1707 goto alloc_retry;
1708 case PCI_INTR_TYPE_INTX:
1709 default:
1710 return;
1711 }
1712 }
1713 aprint_normal_dev(sc->sc_dev, "%s at %s\n",
1714 (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI)
1715 ? "MSI" : "interrupting", intrstr);
1716
1717 sc->sc_nintrs = 1;
1718 }
1719 #endif /* WM_MSI_MSIX */
1720
1721 /*
1722 * Check the function ID (unit number of the chip).
1723 */
1724 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1725 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1726 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1727 || (sc->sc_type == WM_T_82580)
1728 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1729 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1730 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1731 else
1732 sc->sc_funcid = 0;
1733
1734 /*
1735 * Determine a few things about the bus we're connected to.
1736 */
1737 if (sc->sc_type < WM_T_82543) {
1738 /* We don't really know the bus characteristics here. */
1739 sc->sc_bus_speed = 33;
1740 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1741 /*
1742 		 * CSA (Communication Streaming Architecture) is about as fast
1743 		 * as a 32-bit 66MHz PCI bus.
1744 */
1745 sc->sc_flags |= WM_F_CSA;
1746 sc->sc_bus_speed = 66;
1747 aprint_verbose_dev(sc->sc_dev,
1748 "Communication Streaming Architecture\n");
1749 if (sc->sc_type == WM_T_82547) {
1750 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1751 callout_setfunc(&sc->sc_txfifo_ch,
1752 wm_82547_txfifo_stall, sc);
1753 aprint_verbose_dev(sc->sc_dev,
1754 "using 82547 Tx FIFO stall work-around\n");
1755 }
1756 } else if (sc->sc_type >= WM_T_82571) {
1757 sc->sc_flags |= WM_F_PCIE;
1758 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1759 && (sc->sc_type != WM_T_ICH10)
1760 && (sc->sc_type != WM_T_PCH)
1761 && (sc->sc_type != WM_T_PCH2)
1762 && (sc->sc_type != WM_T_PCH_LPT)) {
1763 /* ICH* and PCH* have no PCIe capability registers */
1764 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1765 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1766 NULL) == 0)
1767 aprint_error_dev(sc->sc_dev,
1768 "unable to find PCIe capability\n");
1769 }
1770 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1771 } else {
1772 reg = CSR_READ(sc, WMREG_STATUS);
1773 if (reg & STATUS_BUS64)
1774 sc->sc_flags |= WM_F_BUS64;
1775 if ((reg & STATUS_PCIX_MODE) != 0) {
1776 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1777
1778 sc->sc_flags |= WM_F_PCIX;
1779 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1780 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1781 aprint_error_dev(sc->sc_dev,
1782 "unable to find PCIX capability\n");
1783 else if (sc->sc_type != WM_T_82545_3 &&
1784 sc->sc_type != WM_T_82546_3) {
1785 /*
1786 * Work around a problem caused by the BIOS
1787 * setting the max memory read byte count
1788 * incorrectly.
1789 */
1790 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1791 sc->sc_pcixe_capoff + PCIX_CMD);
1792 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1793 sc->sc_pcixe_capoff + PCIX_STATUS);
1794
1795 bytecnt =
1796 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1797 PCIX_CMD_BYTECNT_SHIFT;
1798 maxb =
1799 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1800 PCIX_STATUS_MAXB_SHIFT;
1801 if (bytecnt > maxb) {
1802 aprint_verbose_dev(sc->sc_dev,
1803 "resetting PCI-X MMRBC: %d -> %d\n",
1804 512 << bytecnt, 512 << maxb);
1805 pcix_cmd = (pcix_cmd &
1806 ~PCIX_CMD_BYTECNT_MASK) |
1807 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1808 pci_conf_write(pa->pa_pc, pa->pa_tag,
1809 sc->sc_pcixe_capoff + PCIX_CMD,
1810 pcix_cmd);
1811 }
1812 }
1813 }
1814 /*
1815 * The quad port adapter is special; it has a PCIX-PCIX
1816 * bridge on the board, and can run the secondary bus at
1817 * a higher speed.
1818 */
1819 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1820 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1821 : 66;
1822 } else if (sc->sc_flags & WM_F_PCIX) {
1823 switch (reg & STATUS_PCIXSPD_MASK) {
1824 case STATUS_PCIXSPD_50_66:
1825 sc->sc_bus_speed = 66;
1826 break;
1827 case STATUS_PCIXSPD_66_100:
1828 sc->sc_bus_speed = 100;
1829 break;
1830 case STATUS_PCIXSPD_100_133:
1831 sc->sc_bus_speed = 133;
1832 break;
1833 default:
1834 aprint_error_dev(sc->sc_dev,
1835 "unknown PCIXSPD %d; assuming 66MHz\n",
1836 reg & STATUS_PCIXSPD_MASK);
1837 sc->sc_bus_speed = 66;
1838 break;
1839 }
1840 } else
1841 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1842 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1843 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1844 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1845 }
1846
1847 /*
1848 * Allocate the control data structures, and create and load the
1849 * DMA map for it.
1850 *
1851 * NOTE: All Tx descriptors must be in the same 4G segment of
1852 * memory. So must Rx descriptors. We simplify by allocating
1853 * both sets within the same 4G segment.
1854 */
1855 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1856 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1857 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1858 sizeof(struct wm_control_data_82542) :
1859 sizeof(struct wm_control_data_82544);
1860 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1861 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1862 &sc->sc_cd_rseg, 0)) != 0) {
1863 aprint_error_dev(sc->sc_dev,
1864 "unable to allocate control data, error = %d\n",
1865 error);
1866 goto fail_0;
1867 }
1868
1869 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1870 sc->sc_cd_rseg, sc->sc_cd_size,
1871 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1872 aprint_error_dev(sc->sc_dev,
1873 "unable to map control data, error = %d\n", error);
1874 goto fail_1;
1875 }
1876
1877 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1878 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1879 aprint_error_dev(sc->sc_dev,
1880 "unable to create control data DMA map, error = %d\n",
1881 error);
1882 goto fail_2;
1883 }
1884
1885 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1886 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1887 aprint_error_dev(sc->sc_dev,
1888 "unable to load control data DMA map, error = %d\n",
1889 error);
1890 goto fail_3;
1891 }
1892
1893 /* Create the transmit buffer DMA maps. */
1894 WM_TXQUEUELEN(sc) =
1895 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1896 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1897 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1898 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1899 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1900 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1901 aprint_error_dev(sc->sc_dev,
1902 "unable to create Tx DMA map %d, error = %d\n",
1903 i, error);
1904 goto fail_4;
1905 }
1906 }
1907
1908 /* Create the receive buffer DMA maps. */
1909 for (i = 0; i < WM_NRXDESC; i++) {
1910 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1911 MCLBYTES, 0, 0,
1912 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1913 aprint_error_dev(sc->sc_dev,
1914 "unable to create Rx DMA map %d error = %d\n",
1915 i, error);
1916 goto fail_5;
1917 }
1918 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1919 }
1920
1921 /* clear interesting stat counters */
1922 CSR_READ(sc, WMREG_COLC);
1923 CSR_READ(sc, WMREG_RXERRC);
1924
1925 /* get PHY control from SMBus to PCIe */
1926 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1927 || (sc->sc_type == WM_T_PCH_LPT))
1928 wm_smbustopci(sc);
1929
1930 /* Reset the chip to a known state. */
1931 wm_reset(sc);
1932
1933 /* Get some information about the EEPROM. */
1934 switch (sc->sc_type) {
1935 case WM_T_82542_2_0:
1936 case WM_T_82542_2_1:
1937 case WM_T_82543:
1938 case WM_T_82544:
1939 /* Microwire */
1940 sc->sc_nvm_wordsize = 64;
1941 sc->sc_nvm_addrbits = 6;
1942 break;
1943 case WM_T_82540:
1944 case WM_T_82545:
1945 case WM_T_82545_3:
1946 case WM_T_82546:
1947 case WM_T_82546_3:
1948 /* Microwire */
1949 reg = CSR_READ(sc, WMREG_EECD);
1950 if (reg & EECD_EE_SIZE) {
1951 sc->sc_nvm_wordsize = 256;
1952 sc->sc_nvm_addrbits = 8;
1953 } else {
1954 sc->sc_nvm_wordsize = 64;
1955 sc->sc_nvm_addrbits = 6;
1956 }
1957 sc->sc_flags |= WM_F_LOCK_EECD;
1958 break;
1959 case WM_T_82541:
1960 case WM_T_82541_2:
1961 case WM_T_82547:
1962 case WM_T_82547_2:
1963 sc->sc_flags |= WM_F_LOCK_EECD;
1964 reg = CSR_READ(sc, WMREG_EECD);
1965 if (reg & EECD_EE_TYPE) {
1966 /* SPI */
1967 sc->sc_flags |= WM_F_EEPROM_SPI;
1968 wm_nvm_set_addrbits_size_eecd(sc);
1969 } else {
1970 /* Microwire */
1971 if ((reg & EECD_EE_ABITS) != 0) {
1972 sc->sc_nvm_wordsize = 256;
1973 sc->sc_nvm_addrbits = 8;
1974 } else {
1975 sc->sc_nvm_wordsize = 64;
1976 sc->sc_nvm_addrbits = 6;
1977 }
1978 }
1979 break;
1980 case WM_T_82571:
1981 case WM_T_82572:
1982 /* SPI */
1983 sc->sc_flags |= WM_F_EEPROM_SPI;
1984 wm_nvm_set_addrbits_size_eecd(sc);
1985 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1986 break;
1987 case WM_T_82573:
1988 sc->sc_flags |= WM_F_LOCK_SWSM;
1989 /* FALLTHROUGH */
1990 case WM_T_82574:
1991 case WM_T_82583:
1992 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1993 sc->sc_flags |= WM_F_EEPROM_FLASH;
1994 sc->sc_nvm_wordsize = 2048;
1995 } else {
1996 /* SPI */
1997 sc->sc_flags |= WM_F_EEPROM_SPI;
1998 wm_nvm_set_addrbits_size_eecd(sc);
1999 }
2000 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2001 break;
2002 case WM_T_82575:
2003 case WM_T_82576:
2004 case WM_T_82580:
2005 case WM_T_I350:
2006 case WM_T_I354:
2007 case WM_T_80003:
2008 /* SPI */
2009 sc->sc_flags |= WM_F_EEPROM_SPI;
2010 wm_nvm_set_addrbits_size_eecd(sc);
2011 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
2012 | WM_F_LOCK_SWSM;
2013 break;
2014 case WM_T_ICH8:
2015 case WM_T_ICH9:
2016 case WM_T_ICH10:
2017 case WM_T_PCH:
2018 case WM_T_PCH2:
2019 case WM_T_PCH_LPT:
2020 /* FLASH */
2021 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2022 sc->sc_nvm_wordsize = 2048;
2023 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
2024 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2025 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2026 aprint_error_dev(sc->sc_dev,
2027 "can't map FLASH registers\n");
2028 goto fail_5;
2029 }
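		/*
		 * Derive the flash geometry from GFPREG: the low bits
		 * appear to hold the first sector of the NVM region and
		 * bits 16.. the last one, in ICH_FLASH_SECTOR_SIZE units;
		 * the result is then converted to 16-bit words per bank
		 * (the region holds two banks).
		 */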
2030 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2031 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2032 ICH_FLASH_SECTOR_SIZE;
2033 sc->sc_ich8_flash_bank_size =
2034 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2035 sc->sc_ich8_flash_bank_size -=
2036 (reg & ICH_GFPREG_BASE_MASK);
2037 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2038 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2039 break;
2040 case WM_T_I210:
2041 case WM_T_I211:
2042 if (wm_nvm_get_flash_presence_i210(sc)) {
2043 wm_nvm_set_addrbits_size_eecd(sc);
2044 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2045 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
2046 } else {
2047 sc->sc_nvm_wordsize = INVM_SIZE;
2048 sc->sc_flags |= WM_F_EEPROM_INVM;
2049 sc->sc_flags |= WM_F_LOCK_SWFW;
2050 }
2051 break;
2052 default:
2053 break;
2054 }
2055
2056 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2057 switch (sc->sc_type) {
2058 case WM_T_82571:
2059 case WM_T_82572:
2060 reg = CSR_READ(sc, WMREG_SWSM2);
2061 if ((reg & SWSM2_LOCK) == 0) {
2062 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2063 force_clear_smbi = true;
2064 } else
2065 force_clear_smbi = false;
2066 break;
2067 case WM_T_82573:
2068 case WM_T_82574:
2069 case WM_T_82583:
2070 force_clear_smbi = true;
2071 break;
2072 default:
2073 force_clear_smbi = false;
2074 break;
2075 }
2076 if (force_clear_smbi) {
2077 reg = CSR_READ(sc, WMREG_SWSM);
2078 if ((reg & SWSM_SMBI) != 0)
2079 aprint_error_dev(sc->sc_dev,
2080 "Please update the Bootagent\n");
2081 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2082 }
2083
2084 /*
2085 	 * Defer printing the EEPROM type until after verifying the checksum.
2086 * This allows the EEPROM type to be printed correctly in the case
2087 * that no EEPROM is attached.
2088 */
2089 /*
2090 * Validate the EEPROM checksum. If the checksum fails, flag
2091 * this for later, so we can fail future reads from the EEPROM.
2092 */
2093 if (wm_nvm_validate_checksum(sc)) {
2094 /*
2095 		 * Validate the checksum again, because some PCI-e parts
2096 		 * fail the first check due to the link being in a sleep state.
2097 */
2098 if (wm_nvm_validate_checksum(sc))
2099 sc->sc_flags |= WM_F_EEPROM_INVALID;
2100 }
2101
2102 /* Set device properties (macflags) */
2103 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2104
2105 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2106 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2107 else {
2108 aprint_verbose_dev(sc->sc_dev, "%u words ",
2109 sc->sc_nvm_wordsize);
2110 if (sc->sc_flags & WM_F_EEPROM_INVM)
2111 aprint_verbose("iNVM");
2112 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2113 aprint_verbose("FLASH(HW)");
2114 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2115 aprint_verbose("FLASH");
2116 else {
2117 if (sc->sc_flags & WM_F_EEPROM_SPI)
2118 eetype = "SPI";
2119 else
2120 eetype = "MicroWire";
2121 aprint_verbose("(%d address bits) %s EEPROM",
2122 sc->sc_nvm_addrbits, eetype);
2123 }
2124 }
2125 wm_nvm_version(sc);
2126 aprint_verbose("\n");
2127
2128 /* Check for I21[01] PLL workaround */
2129 if (sc->sc_type == WM_T_I210)
2130 sc->sc_flags |= WM_F_PLL_WA_I210;
2131 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2132 /* NVM image release 3.25 has a workaround */
2133 if ((sc->sc_nvm_ver_major < 3)
2134 || ((sc->sc_nvm_ver_major == 3)
2135 && (sc->sc_nvm_ver_minor < 25))) {
2136 aprint_verbose_dev(sc->sc_dev,
2137 "ROM image version %d.%d is older than 3.25\n",
2138 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2139 sc->sc_flags |= WM_F_PLL_WA_I210;
2140 }
2141 }
2142 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2143 wm_pll_workaround_i210(sc);
2144
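	/*
	 * If the firmware reports an active management mode, take
	 * control of the hardware from it.
	 */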
2145 switch (sc->sc_type) {
2146 case WM_T_82571:
2147 case WM_T_82572:
2148 case WM_T_82573:
2149 case WM_T_82574:
2150 case WM_T_82583:
2151 case WM_T_80003:
2152 case WM_T_ICH8:
2153 case WM_T_ICH9:
2154 case WM_T_ICH10:
2155 case WM_T_PCH:
2156 case WM_T_PCH2:
2157 case WM_T_PCH_LPT:
2158 if (wm_check_mng_mode(sc) != 0)
2159 wm_get_hw_control(sc);
2160 break;
2161 default:
2162 break;
2163 }
2164 wm_get_wakeup(sc);
2165 /*
2166 	 * Read the Ethernet address from the EEPROM, unless it was
2167 	 * already found in the device properties.
2168 */
2169 ea = prop_dictionary_get(dict, "mac-address");
2170 if (ea != NULL) {
2171 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2172 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2173 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2174 } else {
2175 if (wm_read_mac_addr(sc, enaddr) != 0) {
2176 aprint_error_dev(sc->sc_dev,
2177 "unable to read Ethernet address\n");
2178 goto fail_5;
2179 }
2180 }
2181
2182 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2183 ether_sprintf(enaddr));
2184
2185 /*
2186 * Read the config info from the EEPROM, and set up various
2187 * bits in the control registers based on their contents.
2188 */
2189 pn = prop_dictionary_get(dict, "i82543-cfg1");
2190 if (pn != NULL) {
2191 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2192 cfg1 = (uint16_t) prop_number_integer_value(pn);
2193 } else {
2194 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2195 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2196 goto fail_5;
2197 }
2198 }
2199
2200 pn = prop_dictionary_get(dict, "i82543-cfg2");
2201 if (pn != NULL) {
2202 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2203 cfg2 = (uint16_t) prop_number_integer_value(pn);
2204 } else {
2205 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2206 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2207 goto fail_5;
2208 }
2209 }
2210
2211 /* check for WM_F_WOL */
2212 switch (sc->sc_type) {
2213 case WM_T_82542_2_0:
2214 case WM_T_82542_2_1:
2215 case WM_T_82543:
2216 /* dummy? */
2217 eeprom_data = 0;
2218 apme_mask = NVM_CFG3_APME;
2219 break;
2220 case WM_T_82544:
2221 apme_mask = NVM_CFG2_82544_APM_EN;
2222 eeprom_data = cfg2;
2223 break;
2224 case WM_T_82546:
2225 case WM_T_82546_3:
2226 case WM_T_82571:
2227 case WM_T_82572:
2228 case WM_T_82573:
2229 case WM_T_82574:
2230 case WM_T_82583:
2231 case WM_T_80003:
2232 default:
2233 apme_mask = NVM_CFG3_APME;
2234 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2235 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2236 break;
2237 case WM_T_82575:
2238 case WM_T_82576:
2239 case WM_T_82580:
2240 case WM_T_I350:
2241 case WM_T_I354: /* XXX ok? */
2242 case WM_T_ICH8:
2243 case WM_T_ICH9:
2244 case WM_T_ICH10:
2245 case WM_T_PCH:
2246 case WM_T_PCH2:
2247 case WM_T_PCH_LPT:
2248 /* XXX The funcid should be checked on some devices */
2249 apme_mask = WUC_APME;
2250 eeprom_data = CSR_READ(sc, WMREG_WUC);
2251 break;
2252 }
2253
2254 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2255 if ((eeprom_data & apme_mask) != 0)
2256 sc->sc_flags |= WM_F_WOL;
2257 #ifdef WM_DEBUG
2258 if ((sc->sc_flags & WM_F_WOL) != 0)
2259 printf("WOL\n");
2260 #endif
2261
2262 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2263 /* Check NVM for autonegotiation */
2264 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2265 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2266 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2267 }
2268 }
2269
2270 /*
2271 	 * XXX need special handling for some multiple-port cards
2272 	 * to disable a particular port.
2273 */
2274
2275 if (sc->sc_type >= WM_T_82544) {
2276 pn = prop_dictionary_get(dict, "i82543-swdpin");
2277 if (pn != NULL) {
2278 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2279 swdpin = (uint16_t) prop_number_integer_value(pn);
2280 } else {
2281 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2282 aprint_error_dev(sc->sc_dev,
2283 "unable to read SWDPIN\n");
2284 goto fail_5;
2285 }
2286 }
2287 }
2288
2289 if (cfg1 & NVM_CFG1_ILOS)
2290 sc->sc_ctrl |= CTRL_ILOS;
2291
2292 /*
2293 * XXX
2294 	 * This code isn't correct because pins 2 and 3 are located
2295 	 * at different positions on newer chips. Check all the datasheets.
2296 	 *
2297 	 * Until this problem is resolved, apply it only to chips <= 82580.
2298 */
2299 if (sc->sc_type <= WM_T_82580) {
2300 if (sc->sc_type >= WM_T_82544) {
2301 sc->sc_ctrl |=
2302 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2303 CTRL_SWDPIO_SHIFT;
2304 sc->sc_ctrl |=
2305 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2306 CTRL_SWDPINS_SHIFT;
2307 } else {
2308 sc->sc_ctrl |=
2309 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2310 CTRL_SWDPIO_SHIFT;
2311 }
2312 }
2313
2314 /* XXX For other than 82580? */
2315 if (sc->sc_type == WM_T_82580) {
2316 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2317 printf("CFG3 = %08x\n", (uint32_t)nvmword);
2318 if (nvmword & __BIT(13)) {
2319 printf("SET ILOS\n");
2320 sc->sc_ctrl |= CTRL_ILOS;
2321 }
2322 }
2323
2324 #if 0
2325 if (sc->sc_type >= WM_T_82544) {
2326 if (cfg1 & NVM_CFG1_IPS0)
2327 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2328 if (cfg1 & NVM_CFG1_IPS1)
2329 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2330 sc->sc_ctrl_ext |=
2331 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2332 CTRL_EXT_SWDPIO_SHIFT;
2333 sc->sc_ctrl_ext |=
2334 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2335 CTRL_EXT_SWDPINS_SHIFT;
2336 } else {
2337 sc->sc_ctrl_ext |=
2338 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2339 CTRL_EXT_SWDPIO_SHIFT;
2340 }
2341 #endif
2342
2343 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2344 #if 0
2345 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2346 #endif
2347
2348 /*
2349 * Set up some register offsets that are different between
2350 * the i82542 and the i82543 and later chips.
2351 */
2352 if (sc->sc_type < WM_T_82543) {
2353 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2354 sc->sc_tdt_reg = WMREG_OLD_TDT;
2355 } else {
2356 sc->sc_rdt_reg = WMREG_RDT;
2357 sc->sc_tdt_reg = WMREG_TDT;
2358 }
2359
2360 if (sc->sc_type == WM_T_PCH) {
2361 uint16_t val;
2362
2363 /* Save the NVM K1 bit setting */
2364 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2365
2366 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2367 sc->sc_nvm_k1_enabled = 1;
2368 else
2369 sc->sc_nvm_k1_enabled = 0;
2370 }
2371
2372 /*
2373 * Determine if we're TBI,GMII or SGMII mode, and initialize the
2374 * media structures accordingly.
2375 */
2376 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2377 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2378 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2379 || sc->sc_type == WM_T_82573
2380 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2381 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2382 wm_gmii_mediainit(sc, wmp->wmp_product);
2383 } else if (sc->sc_type < WM_T_82543 ||
2384 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2385 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2386 aprint_error_dev(sc->sc_dev,
2387 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2388 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2389 }
2390 wm_tbi_mediainit(sc);
2391 } else {
2392 switch (sc->sc_type) {
2393 case WM_T_82575:
2394 case WM_T_82576:
2395 case WM_T_82580:
2396 case WM_T_I350:
2397 case WM_T_I354:
2398 case WM_T_I210:
2399 case WM_T_I211:
2400 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2401 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2402 switch (link_mode) {
2403 case CTRL_EXT_LINK_MODE_1000KX:
2404 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2405 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2406 break;
2407 case CTRL_EXT_LINK_MODE_SGMII:
2408 if (wm_sgmii_uses_mdio(sc)) {
2409 aprint_verbose_dev(sc->sc_dev,
2410 "SGMII(MDIO)\n");
2411 sc->sc_flags |= WM_F_SGMII;
2412 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2413 break;
2414 }
2415 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2416 /*FALLTHROUGH*/
2417 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2418 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2419 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2420 if (link_mode
2421 == CTRL_EXT_LINK_MODE_SGMII) {
2422 sc->sc_mediatype
2423 = WM_MEDIATYPE_COPPER;
2424 sc->sc_flags |= WM_F_SGMII;
2425 } else {
2426 sc->sc_mediatype
2427 = WM_MEDIATYPE_SERDES;
2428 aprint_verbose_dev(sc->sc_dev,
2429 "SERDES\n");
2430 }
2431 break;
2432 }
2433 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2434 aprint_verbose_dev(sc->sc_dev,
2435 "SERDES\n");
2436
2437 /* Change current link mode setting */
2438 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2439 switch (sc->sc_mediatype) {
2440 case WM_MEDIATYPE_COPPER:
2441 reg |= CTRL_EXT_LINK_MODE_SGMII;
2442 break;
2443 case WM_MEDIATYPE_SERDES:
2444 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2445 break;
2446 default:
2447 break;
2448 }
2449 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2450 break;
2451 case CTRL_EXT_LINK_MODE_GMII:
2452 default:
2453 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2454 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2455 break;
2456 }
2457
2458 reg &= ~CTRL_EXT_I2C_ENA;
2459 if ((sc->sc_flags & WM_F_SGMII) != 0)
2460 reg |= CTRL_EXT_I2C_ENA;
2461 else
2462 reg &= ~CTRL_EXT_I2C_ENA;
2463 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2464
2465 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2466 wm_gmii_mediainit(sc, wmp->wmp_product);
2467 else
2468 wm_tbi_mediainit(sc);
2469 break;
2470 default:
2471 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2472 aprint_error_dev(sc->sc_dev,
2473 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2474 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2475 wm_gmii_mediainit(sc, wmp->wmp_product);
2476 }
2477 }
2478
2479 ifp = &sc->sc_ethercom.ec_if;
2480 xname = device_xname(sc->sc_dev);
2481 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2482 ifp->if_softc = sc;
2483 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2484 ifp->if_ioctl = wm_ioctl;
2485 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2486 ifp->if_start = wm_nq_start;
2487 else
2488 ifp->if_start = wm_start;
2489 ifp->if_watchdog = wm_watchdog;
2490 ifp->if_init = wm_init;
2491 ifp->if_stop = wm_stop;
2492 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2493 IFQ_SET_READY(&ifp->if_snd);
2494
2495 /* Check for jumbo frame */
2496 switch (sc->sc_type) {
2497 case WM_T_82573:
2498 /* XXX limited to 9234 if ASPM is disabled */
2499 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2500 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2501 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2502 break;
2503 case WM_T_82571:
2504 case WM_T_82572:
2505 case WM_T_82574:
2506 case WM_T_82575:
2507 case WM_T_82576:
2508 case WM_T_82580:
2509 case WM_T_I350:
2510 	case WM_T_I354: /* XXX ok? */
2511 case WM_T_I210:
2512 case WM_T_I211:
2513 case WM_T_80003:
2514 case WM_T_ICH9:
2515 case WM_T_ICH10:
2516 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2517 case WM_T_PCH_LPT:
2518 /* XXX limited to 9234 */
2519 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2520 break;
2521 case WM_T_PCH:
2522 /* XXX limited to 4096 */
2523 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2524 break;
2525 case WM_T_82542_2_0:
2526 case WM_T_82542_2_1:
2527 case WM_T_82583:
2528 case WM_T_ICH8:
2529 /* No support for jumbo frame */
2530 break;
2531 default:
2532 /* ETHER_MAX_LEN_JUMBO */
2533 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2534 break;
2535 }
2536
2537 	/* If we're an i82543 or greater, we can support VLANs. */
2538 if (sc->sc_type >= WM_T_82543)
2539 sc->sc_ethercom.ec_capabilities |=
2540 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2541
2542 /*
2543 	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2544 * on i82543 and later.
2545 */
2546 if (sc->sc_type >= WM_T_82543) {
2547 ifp->if_capabilities |=
2548 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2549 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2550 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2551 IFCAP_CSUM_TCPv6_Tx |
2552 IFCAP_CSUM_UDPv6_Tx;
2553 }
2554
2555 /*
2556 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2557 *
2558 * 82541GI (8086:1076) ... no
2559 * 82572EI (8086:10b9) ... yes
2560 */
2561 if (sc->sc_type >= WM_T_82571) {
2562 ifp->if_capabilities |=
2563 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2564 }
2565
2566 /*
2567 	 * If we're an i82544 or greater (except i82547), we can do
2568 * TCP segmentation offload.
2569 */
2570 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2571 ifp->if_capabilities |= IFCAP_TSOv4;
2572 }
2573
2574 if (sc->sc_type >= WM_T_82571) {
2575 ifp->if_capabilities |= IFCAP_TSOv6;
2576 }
2577
2578 #ifdef WM_MPSAFE
2579 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2580 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2581 #else
2582 sc->sc_tx_lock = NULL;
2583 sc->sc_rx_lock = NULL;
2584 #endif
2585
2586 /* Attach the interface. */
2587 if_attach(ifp);
2588 ether_ifattach(ifp, enaddr);
2589 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2590 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2591 RND_FLAG_DEFAULT);
2592
2593 #ifdef WM_EVENT_COUNTERS
2594 /* Attach event counters. */
2595 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2596 NULL, xname, "txsstall");
2597 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2598 NULL, xname, "txdstall");
2599 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2600 NULL, xname, "txfifo_stall");
2601 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2602 NULL, xname, "txdw");
2603 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2604 NULL, xname, "txqe");
2605 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2606 NULL, xname, "rxintr");
2607 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2608 NULL, xname, "linkintr");
2609
2610 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2611 NULL, xname, "rxipsum");
2612 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2613 NULL, xname, "rxtusum");
2614 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2615 NULL, xname, "txipsum");
2616 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2617 NULL, xname, "txtusum");
2618 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2619 NULL, xname, "txtusum6");
2620
2621 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2622 NULL, xname, "txtso");
2623 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2624 NULL, xname, "txtso6");
2625 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2626 NULL, xname, "txtsopain");
2627
2628 for (i = 0; i < WM_NTXSEGS; i++) {
2629 snprintf(wm_txseg_evcnt_names[i],
2630 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2631 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2632 NULL, xname, wm_txseg_evcnt_names[i]);
2633 }
2634
2635 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2636 NULL, xname, "txdrop");
2637
2638 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2639 NULL, xname, "tu");
2640
2641 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2642 NULL, xname, "tx_xoff");
2643 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2644 NULL, xname, "tx_xon");
2645 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2646 NULL, xname, "rx_xoff");
2647 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2648 NULL, xname, "rx_xon");
2649 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2650 NULL, xname, "rx_macctl");
2651 #endif /* WM_EVENT_COUNTERS */
2652
2653 if (pmf_device_register(self, wm_suspend, wm_resume))
2654 pmf_class_network_register(self, ifp);
2655 else
2656 aprint_error_dev(self, "couldn't establish power handler\n");
2657
2658 sc->sc_flags |= WM_F_ATTACHED;
2659 return;
2660
2661 /*
2662 * Free any resources we've allocated during the failed attach
2663 * attempt. Do this in reverse order and fall through.
2664 */
2665 fail_5:
2666 for (i = 0; i < WM_NRXDESC; i++) {
2667 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2668 bus_dmamap_destroy(sc->sc_dmat,
2669 sc->sc_rxsoft[i].rxs_dmamap);
2670 }
2671 fail_4:
2672 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2673 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2674 bus_dmamap_destroy(sc->sc_dmat,
2675 sc->sc_txsoft[i].txs_dmamap);
2676 }
2677 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2678 fail_3:
2679 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2680 fail_2:
2681 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2682 sc->sc_cd_size);
2683 fail_1:
2684 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2685 fail_0:
2686 return;
2687 }
2688
2689 /* The detach function (ca_detach) */
2690 static int
2691 wm_detach(device_t self, int flags __unused)
2692 {
2693 struct wm_softc *sc = device_private(self);
2694 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2695 int i;
2696 #ifndef WM_MPSAFE
2697 int s;
2698 #endif
2699
2700 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2701 return 0;
2702
2703 #ifndef WM_MPSAFE
2704 s = splnet();
2705 #endif
2706 	/* Stop the interface; callouts are halted inside wm_stop(). */
2707 wm_stop(ifp, 1);
2708
2709 #ifndef WM_MPSAFE
2710 splx(s);
2711 #endif
2712
2713 pmf_device_deregister(self);
2714
2715 /* Tell the firmware about the release */
2716 WM_BOTH_LOCK(sc);
2717 wm_release_manageability(sc);
2718 wm_release_hw_control(sc);
2719 WM_BOTH_UNLOCK(sc);
2720
2721 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2722
2723 /* Delete all remaining media. */
2724 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2725
2726 ether_ifdetach(ifp);
2727 if_detach(ifp);
2728
2730 /* Unload RX dmamaps and free mbufs */
2731 WM_RX_LOCK(sc);
2732 wm_rxdrain(sc);
2733 WM_RX_UNLOCK(sc);
2734 /* Must unlock here */
2735
2736 	/* Free DMA maps; this mirrors the failure path at the end of wm_attach() */
2737 for (i = 0; i < WM_NRXDESC; i++) {
2738 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2739 bus_dmamap_destroy(sc->sc_dmat,
2740 sc->sc_rxsoft[i].rxs_dmamap);
2741 }
2742 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2743 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2744 bus_dmamap_destroy(sc->sc_dmat,
2745 sc->sc_txsoft[i].txs_dmamap);
2746 }
2747 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2748 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2749 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2750 sc->sc_cd_size);
2751 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2752
2753 /* Disestablish the interrupt handler */
2754 for (i = 0; i < sc->sc_nintrs; i++) {
2755 if (sc->sc_ihs[i] != NULL) {
2756 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2757 sc->sc_ihs[i] = NULL;
2758 }
2759 }
2760 #ifdef WM_MSI_MSIX
2761 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2762 #endif /* WM_MSI_MSIX */
2763
2764 /* Unmap the registers */
2765 if (sc->sc_ss) {
2766 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2767 sc->sc_ss = 0;
2768 }
2769 if (sc->sc_ios) {
2770 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2771 sc->sc_ios = 0;
2772 }
2773 if (sc->sc_flashs) {
2774 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2775 sc->sc_flashs = 0;
2776 }
2777
2778 if (sc->sc_tx_lock)
2779 mutex_obj_free(sc->sc_tx_lock);
2780 if (sc->sc_rx_lock)
2781 mutex_obj_free(sc->sc_rx_lock);
2782
2783 return 0;
2784 }
2785
2786 static bool
2787 wm_suspend(device_t self, const pmf_qual_t *qual)
2788 {
2789 struct wm_softc *sc = device_private(self);
2790
2791 wm_release_manageability(sc);
2792 wm_release_hw_control(sc);
2793 #ifdef WM_WOL
2794 wm_enable_wakeup(sc);
2795 #endif
2796
2797 return true;
2798 }
2799
2800 static bool
2801 wm_resume(device_t self, const pmf_qual_t *qual)
2802 {
2803 struct wm_softc *sc = device_private(self);
2804
2805 wm_init_manageability(sc);
2806
2807 return true;
2808 }
2809
2810 /*
2811 * wm_watchdog: [ifnet interface function]
2812 *
2813 * Watchdog timer handler.
2814 */
2815 static void
2816 wm_watchdog(struct ifnet *ifp)
2817 {
2818 struct wm_softc *sc = ifp->if_softc;
2819
2820 /*
2821 * Since we're using delayed interrupts, sweep up
2822 * before we report an error.
2823 */
2824 WM_TX_LOCK(sc);
2825 wm_txeof(sc);
2826 WM_TX_UNLOCK(sc);
2827
2828 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2829 #ifdef WM_DEBUG
2830 int i, j;
2831 struct wm_txsoft *txs;
2832 #endif
2833 log(LOG_ERR,
2834 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2835 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2836 sc->sc_txnext);
2837 ifp->if_oerrors++;
2838 #ifdef WM_DEBUG
2839 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2840 i = WM_NEXTTXS(sc, i)) {
2841 txs = &sc->sc_txsoft[i];
2842 printf("txs %d tx %d -> %d\n",
2843 i, txs->txs_firstdesc, txs->txs_lastdesc);
2844 for (j = txs->txs_firstdesc; ;
2845 j = WM_NEXTTX(sc, j)) {
2846 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2847 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2848 printf("\t %#08x%08x\n",
2849 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2850 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2851 if (j == txs->txs_lastdesc)
2852 break;
2853 }
2854 }
2855 #endif
2856 /* Reset the interface. */
2857 (void) wm_init(ifp);
2858 }
2859
2860 /* Try to get more packets going. */
2861 ifp->if_start(ifp);
2862 }
2863
2864 /*
2865 * wm_tick:
2866 *
2867 * One second timer, used to check link status, sweep up
2868 * completed transmit jobs, etc.
2869 */
2870 static void
2871 wm_tick(void *arg)
2872 {
2873 struct wm_softc *sc = arg;
2874 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2875 #ifndef WM_MPSAFE
2876 int s;
2877
2878 s = splnet();
2879 #endif
2880
2881 WM_TX_LOCK(sc);
2882
2883 if (sc->sc_stopping)
2884 goto out;
2885
2886 if (sc->sc_type >= WM_T_82542_2_1) {
2887 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2888 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2889 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2890 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2891 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2892 }
2893
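	/* These statistics registers are clear-on-read; accumulate them. */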
2894 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2895 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2896 + CSR_READ(sc, WMREG_CRCERRS)
2897 + CSR_READ(sc, WMREG_ALGNERRC)
2898 + CSR_READ(sc, WMREG_SYMERRC)
2899 + CSR_READ(sc, WMREG_RXERRC)
2900 + CSR_READ(sc, WMREG_SEC)
2901 + CSR_READ(sc, WMREG_CEXTERR)
2902 + CSR_READ(sc, WMREG_RLEC);
2903 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2904
2905 if (sc->sc_flags & WM_F_HAS_MII)
2906 mii_tick(&sc->sc_mii);
2907 else if ((sc->sc_type >= WM_T_82575)
2908 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2909 wm_serdes_tick(sc);
2910 else
2911 wm_tbi_tick(sc);
2912
2913 out:
2914 WM_TX_UNLOCK(sc);
2915 #ifndef WM_MPSAFE
2916 splx(s);
2917 #endif
2918
2919 if (!sc->sc_stopping)
2920 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2921 }
2922
2923 static int
2924 wm_ifflags_cb(struct ethercom *ec)
2925 {
2926 struct ifnet *ifp = &ec->ec_if;
2927 struct wm_softc *sc = ifp->if_softc;
2928 int change = ifp->if_flags ^ sc->sc_if_flags;
2929 int rc = 0;
2930
2931 WM_BOTH_LOCK(sc);
2932
2933 if (change != 0)
2934 sc->sc_if_flags = ifp->if_flags;
2935
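	/* Flag changes other than IFF_CANTCHANGE and IFF_DEBUG need a reset. */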
2936 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2937 rc = ENETRESET;
2938 goto out;
2939 }
2940
2941 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2942 wm_set_filter(sc);
2943
2944 wm_set_vlan(sc);
2945
2946 out:
2947 WM_BOTH_UNLOCK(sc);
2948
2949 return rc;
2950 }
2951
2952 /*
2953 * wm_ioctl: [ifnet interface function]
2954 *
2955 * Handle control requests from the operator.
2956 */
2957 static int
2958 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2959 {
2960 struct wm_softc *sc = ifp->if_softc;
2961 struct ifreq *ifr = (struct ifreq *) data;
2962 struct ifaddr *ifa = (struct ifaddr *)data;
2963 struct sockaddr_dl *sdl;
2964 int s, error;
2965
2966 #ifndef WM_MPSAFE
2967 s = splnet();
2968 #endif
2969 switch (cmd) {
2970 case SIOCSIFMEDIA:
2971 case SIOCGIFMEDIA:
2972 WM_BOTH_LOCK(sc);
2973 /* Flow control requires full-duplex mode. */
2974 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2975 (ifr->ifr_media & IFM_FDX) == 0)
2976 ifr->ifr_media &= ~IFM_ETH_FMASK;
2977 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2978 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2979 /* We can do both TXPAUSE and RXPAUSE. */
2980 ifr->ifr_media |=
2981 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2982 }
2983 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2984 }
2985 WM_BOTH_UNLOCK(sc);
2986 #ifdef WM_MPSAFE
2987 s = splnet();
2988 #endif
2989 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2990 #ifdef WM_MPSAFE
2991 splx(s);
2992 #endif
2993 break;
2994 case SIOCINITIFADDR:
2995 WM_BOTH_LOCK(sc);
2996 if (ifa->ifa_addr->sa_family == AF_LINK) {
2997 sdl = satosdl(ifp->if_dl->ifa_addr);
2998 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2999 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3000 /* unicast address is first multicast entry */
3001 wm_set_filter(sc);
3002 error = 0;
3003 WM_BOTH_UNLOCK(sc);
3004 break;
3005 }
3006 WM_BOTH_UNLOCK(sc);
3007 /*FALLTHROUGH*/
3008 default:
3009 #ifdef WM_MPSAFE
3010 s = splnet();
3011 #endif
3012 /* It may call wm_start, so unlock here */
3013 error = ether_ioctl(ifp, cmd, data);
3014 #ifdef WM_MPSAFE
3015 splx(s);
3016 #endif
3017 if (error != ENETRESET)
3018 break;
3019
3020 error = 0;
3021
3022 if (cmd == SIOCSIFCAP) {
3023 error = (*ifp->if_init)(ifp);
3024 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3025 ;
3026 else if (ifp->if_flags & IFF_RUNNING) {
3027 /*
3028 * Multicast list has changed; set the hardware filter
3029 * accordingly.
3030 */
3031 WM_BOTH_LOCK(sc);
3032 wm_set_filter(sc);
3033 WM_BOTH_UNLOCK(sc);
3034 }
3035 break;
3036 }
3037
3038 #ifndef WM_MPSAFE
3039 splx(s);
3040 #endif
3041 return error;
3042 }
3043
3044 /* MAC address related */
3045
3046 /*
3047 	 * Get the offset of the MAC address and return it.
3048 	 * If an error occurred, return offset 0.
3049 */
3050 static uint16_t
3051 wm_check_alt_mac_addr(struct wm_softc *sc)
3052 {
3053 uint16_t myea[ETHER_ADDR_LEN / 2];
3054 uint16_t offset = NVM_OFF_MACADDR;
3055
3056 /* Try to read alternative MAC address pointer */
3057 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3058 return 0;
3059
3060 	/* Check whether the pointer is valid. */
3061 if ((offset == 0x0000) || (offset == 0xffff))
3062 return 0;
3063
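	/* Index this function's MAC address within the alternative block. */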
3064 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3065 /*
3066 	 * Check whether the alternative MAC address is valid. Some
3067 	 * cards have a non-0xffff pointer but don't actually use an
3068 	 * alternative MAC address.
3069 *
3070 * Check whether the broadcast bit is set or not.
3071 */
3072 if (wm_nvm_read(sc, offset, 1, myea) == 0)
3073 if (((myea[0] & 0xff) & 0x01) == 0)
3074 return offset; /* Found */
3075
3076 /* Not found */
3077 return 0;
3078 }
3079
3080 static int
3081 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3082 {
3083 uint16_t myea[ETHER_ADDR_LEN / 2];
3084 uint16_t offset = NVM_OFF_MACADDR;
3085 int do_invert = 0;
3086
3087 switch (sc->sc_type) {
3088 case WM_T_82580:
3089 case WM_T_I350:
3090 case WM_T_I354:
3091 /* EEPROM Top Level Partitioning */
3092 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3093 break;
3094 case WM_T_82571:
3095 case WM_T_82575:
3096 case WM_T_82576:
3097 case WM_T_80003:
3098 case WM_T_I210:
3099 case WM_T_I211:
3100 offset = wm_check_alt_mac_addr(sc);
3101 if (offset == 0)
3102 if ((sc->sc_funcid & 0x01) == 1)
3103 do_invert = 1;
3104 break;
3105 default:
3106 if ((sc->sc_funcid & 0x01) == 1)
3107 do_invert = 1;
3108 break;
3109 }
3110
3111 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
3112 myea) != 0)
3113 goto bad;
3114
3115 enaddr[0] = myea[0] & 0xff;
3116 enaddr[1] = myea[0] >> 8;
3117 enaddr[2] = myea[1] & 0xff;
3118 enaddr[3] = myea[1] >> 8;
3119 enaddr[4] = myea[2] & 0xff;
3120 enaddr[5] = myea[2] >> 8;
3121
3122 /*
3123 * Toggle the LSB of the MAC address on the second port
3124 * of some dual port cards.
3125 */
3126 if (do_invert != 0)
3127 enaddr[5] ^= 1;
3128
3129 return 0;
3130
3131 bad:
3132 return -1;
3133 }
3134
3135 /*
3136 * wm_set_ral:
3137 *
3138 	 *	Set an entry in the receive address list.
3139 */
3140 static void
3141 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3142 {
3143 uint32_t ral_lo, ral_hi;
3144
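	/*
	 * The first four octets go into the low register; the last two,
	 * plus the RAL_AV (address valid) bit, into the high one.
	 */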
3145 if (enaddr != NULL) {
3146 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3147 (enaddr[3] << 24);
3148 ral_hi = enaddr[4] | (enaddr[5] << 8);
3149 ral_hi |= RAL_AV;
3150 } else {
3151 ral_lo = 0;
3152 ral_hi = 0;
3153 }
3154
3155 if (sc->sc_type >= WM_T_82544) {
3156 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3157 ral_lo);
3158 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3159 ral_hi);
3160 } else {
3161 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3162 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3163 }
3164 }
3165
3166 /*
3167 * wm_mchash:
3168 *
3169 * Compute the hash of the multicast address for the 4096-bit
3170 * multicast filter.
3171 */
3172 static uint32_t
3173 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3174 {
3175 static const int lo_shift[4] = { 4, 3, 2, 0 };
3176 static const int hi_shift[4] = { 4, 5, 6, 8 };
3177 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3178 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3179 uint32_t hash;
3180
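	/*
	 * sc_mchash_type appears to select which bits of the two upper
	 * address octets feed the 12-bit (10-bit on ICH/PCH) hash.
	 */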
3181 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3182 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3183 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3184 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3185 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3186 return (hash & 0x3ff);
3187 }
3188 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3189 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3190
3191 return (hash & 0xfff);
3192 }
3193
3194 /*
3195 * wm_set_filter:
3196 *
3197 * Set up the receive filter.
3198 */
3199 static void
3200 wm_set_filter(struct wm_softc *sc)
3201 {
3202 struct ethercom *ec = &sc->sc_ethercom;
3203 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3204 struct ether_multi *enm;
3205 struct ether_multistep step;
3206 bus_addr_t mta_reg;
3207 uint32_t hash, reg, bit;
3208 int i, size;
3209
3210 if (sc->sc_type >= WM_T_82544)
3211 mta_reg = WMREG_CORDOVA_MTA;
3212 else
3213 mta_reg = WMREG_MTA;
3214
3215 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3216
3217 if (ifp->if_flags & IFF_BROADCAST)
3218 sc->sc_rctl |= RCTL_BAM;
3219 if (ifp->if_flags & IFF_PROMISC) {
3220 sc->sc_rctl |= RCTL_UPE;
3221 goto allmulti;
3222 }
3223
3224 /*
3225 * Set the station address in the first RAL slot, and
3226 * clear the remaining slots.
3227 */
3228 if (sc->sc_type == WM_T_ICH8)
3229 		size = WM_RAL_TABSIZE_ICH8 - 1;
3230 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3231 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3232 || (sc->sc_type == WM_T_PCH_LPT))
3233 size = WM_RAL_TABSIZE_ICH8;
3234 else if (sc->sc_type == WM_T_82575)
3235 size = WM_RAL_TABSIZE_82575;
3236 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3237 size = WM_RAL_TABSIZE_82576;
3238 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3239 size = WM_RAL_TABSIZE_I350;
3240 else
3241 size = WM_RAL_TABSIZE;
3242 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3243 for (i = 1; i < size; i++)
3244 wm_set_ral(sc, NULL, i);
3245
3246 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3247 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3248 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3249 size = WM_ICH8_MC_TABSIZE;
3250 else
3251 size = WM_MC_TABSIZE;
3252 /* Clear out the multicast table. */
3253 for (i = 0; i < size; i++)
3254 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3255
3256 ETHER_FIRST_MULTI(step, ec, enm);
3257 while (enm != NULL) {
3258 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3259 /*
3260 * We must listen to a range of multicast addresses.
3261 * For now, just accept all multicasts, rather than
3262 * trying to set only those filter bits needed to match
3263 * the range. (At this time, the only use of address
3264 * ranges is for IP multicast routing, for which the
3265 * range is big enough to require all bits set.)
3266 */
3267 goto allmulti;
3268 }
3269
3270 hash = wm_mchash(sc, enm->enm_addrlo);
3271
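		/*
		 * The upper hash bits select the 32-bit MTA word; the low
		 * five bits select the bit within it.
		 */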
3272 reg = (hash >> 5);
3273 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3274 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3275 || (sc->sc_type == WM_T_PCH2)
3276 || (sc->sc_type == WM_T_PCH_LPT))
3277 reg &= 0x1f;
3278 else
3279 reg &= 0x7f;
3280 bit = hash & 0x1f;
3281
3282 hash = CSR_READ(sc, mta_reg + (reg << 2));
3283 hash |= 1U << bit;
3284
3285 /* XXX Hardware bug?? */
3286 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3287 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3288 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3289 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3290 } else
3291 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3292
3293 ETHER_NEXT_MULTI(step, enm);
3294 }
3295
3296 ifp->if_flags &= ~IFF_ALLMULTI;
3297 goto setit;
3298
3299 allmulti:
3300 ifp->if_flags |= IFF_ALLMULTI;
3301 sc->sc_rctl |= RCTL_MPE;
3302
3303 setit:
3304 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3305 }
3306
3307 /* Reset and init related */
3308
3309 static void
3310 wm_set_vlan(struct wm_softc *sc)
3311 {
3312 /* Deal with VLAN enables. */
3313 if (VLAN_ATTACHED(&sc->sc_ethercom))
3314 sc->sc_ctrl |= CTRL_VME;
3315 else
3316 sc->sc_ctrl &= ~CTRL_VME;
3317
3318 /* Write the control registers. */
3319 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3320 }
3321
3322 static void
3323 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3324 {
3325 uint32_t gcr;
3326 pcireg_t ctrl2;
3327
3328 gcr = CSR_READ(sc, WMREG_GCR);
3329
3330 	/* Only take action if the timeout value is at its default of 0 */
3331 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3332 goto out;
3333
3334 if ((gcr & GCR_CAP_VER2) == 0) {
3335 gcr |= GCR_CMPL_TMOUT_10MS;
3336 goto out;
3337 }
3338
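	/*
	 * Capability version 2 devices set the completion timeout via
	 * the PCIe Device Control 2 register instead.
	 */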
3339 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3340 sc->sc_pcixe_capoff + PCIE_DCSR2);
3341 ctrl2 |= WM_PCIE_DCSR2_16MS;
3342 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3343 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3344
3345 out:
3346 /* Disable completion timeout resend */
3347 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3348
3349 CSR_WRITE(sc, WMREG_GCR, gcr);
3350 }
3351
3352 void
3353 wm_get_auto_rd_done(struct wm_softc *sc)
3354 {
3355 int i;
3356
3357 /* wait for eeprom to reload */
3358 switch (sc->sc_type) {
3359 case WM_T_82571:
3360 case WM_T_82572:
3361 case WM_T_82573:
3362 case WM_T_82574:
3363 case WM_T_82583:
3364 case WM_T_82575:
3365 case WM_T_82576:
3366 case WM_T_82580:
3367 case WM_T_I350:
3368 case WM_T_I354:
3369 case WM_T_I210:
3370 case WM_T_I211:
3371 case WM_T_80003:
3372 case WM_T_ICH8:
3373 case WM_T_ICH9:
3374 for (i = 0; i < 10; i++) {
3375 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3376 break;
3377 delay(1000);
3378 }
3379 if (i == 10) {
3380 log(LOG_ERR, "%s: auto read from eeprom failed to "
3381 "complete\n", device_xname(sc->sc_dev));
3382 }
3383 break;
3384 default:
3385 break;
3386 }
3387 }
3388
3389 void
3390 wm_lan_init_done(struct wm_softc *sc)
3391 {
3392 uint32_t reg = 0;
3393 int i;
3394
3395 /* wait for eeprom to reload */
3396 switch (sc->sc_type) {
3397 case WM_T_ICH10:
3398 case WM_T_PCH:
3399 case WM_T_PCH2:
3400 case WM_T_PCH_LPT:
3401 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3402 reg = CSR_READ(sc, WMREG_STATUS);
3403 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3404 break;
3405 delay(100);
3406 }
3407 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3408 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3409 "complete\n", device_xname(sc->sc_dev), __func__);
3410 }
3411 break;
3412 default:
3413 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3414 __func__);
3415 break;
3416 }
3417
3418 reg &= ~STATUS_LAN_INIT_DONE;
3419 CSR_WRITE(sc, WMREG_STATUS, reg);
3420 }
3421
3422 void
3423 wm_get_cfg_done(struct wm_softc *sc)
3424 {
3425 int mask;
3426 uint32_t reg;
3427 int i;
3428
3429 /* wait for eeprom to reload */
3430 switch (sc->sc_type) {
3431 case WM_T_82542_2_0:
3432 case WM_T_82542_2_1:
3433 /* null */
3434 break;
3435 case WM_T_82543:
3436 case WM_T_82544:
3437 case WM_T_82540:
3438 case WM_T_82545:
3439 case WM_T_82545_3:
3440 case WM_T_82546:
3441 case WM_T_82546_3:
3442 case WM_T_82541:
3443 case WM_T_82541_2:
3444 case WM_T_82547:
3445 case WM_T_82547_2:
3446 case WM_T_82573:
3447 case WM_T_82574:
3448 case WM_T_82583:
3449 /* generic */
3450 delay(10*1000);
3451 break;
3452 case WM_T_80003:
3453 case WM_T_82571:
3454 case WM_T_82572:
3455 case WM_T_82575:
3456 case WM_T_82576:
3457 case WM_T_82580:
3458 case WM_T_I350:
3459 case WM_T_I354:
3460 case WM_T_I210:
3461 case WM_T_I211:
3462 if (sc->sc_type == WM_T_82571) {
3463 /* Only 82571 shares port 0 */
3464 mask = EEMNGCTL_CFGDONE_0;
3465 } else
3466 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3467 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3468 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3469 break;
3470 delay(1000);
3471 }
3472 if (i >= WM_PHY_CFG_TIMEOUT) {
3473 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3474 device_xname(sc->sc_dev), __func__));
3475 }
3476 break;
3477 case WM_T_ICH8:
3478 case WM_T_ICH9:
3479 case WM_T_ICH10:
3480 case WM_T_PCH:
3481 case WM_T_PCH2:
3482 case WM_T_PCH_LPT:
3483 delay(10*1000);
3484 if (sc->sc_type >= WM_T_ICH10)
3485 wm_lan_init_done(sc);
3486 else
3487 wm_get_auto_rd_done(sc);
3488
3489 reg = CSR_READ(sc, WMREG_STATUS);
3490 if ((reg & STATUS_PHYRA) != 0)
3491 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3492 break;
3493 default:
3494 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3495 __func__);
3496 break;
3497 }
3498 }
3499
3500 /* Init hardware bits */
3501 void
3502 wm_initialize_hardware_bits(struct wm_softc *sc)
3503 {
3504 uint32_t tarc0, tarc1, reg;
3505
3506 /* For 82571 variant, 80003 and ICHs */
3507 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3508 || (sc->sc_type >= WM_T_80003)) {
3509
3510 /* Transmit Descriptor Control 0 */
3511 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3512 reg |= TXDCTL_COUNT_DESC;
3513 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3514
3515 /* Transmit Descriptor Control 1 */
3516 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3517 reg |= TXDCTL_COUNT_DESC;
3518 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3519
3520 /* TARC0 */
3521 tarc0 = CSR_READ(sc, WMREG_TARC0);
3522 switch (sc->sc_type) {
3523 case WM_T_82571:
3524 case WM_T_82572:
3525 case WM_T_82573:
3526 case WM_T_82574:
3527 case WM_T_82583:
3528 case WM_T_80003:
3529 /* Clear bits 30..27 */
3530 tarc0 &= ~__BITS(30, 27);
3531 break;
3532 default:
3533 break;
3534 }
3535
3536 switch (sc->sc_type) {
3537 case WM_T_82571:
3538 case WM_T_82572:
3539 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3540
3541 tarc1 = CSR_READ(sc, WMREG_TARC1);
3542 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3543 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3544 /* 8257[12] Errata No.7 */
3545 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
3546
3547 /* TARC1 bit 28 */
3548 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3549 tarc1 &= ~__BIT(28);
3550 else
3551 tarc1 |= __BIT(28);
3552 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3553
3554 /*
3555 * 8257[12] Errata No.13
3556 			 * Disable Dynamic Clock Gating.
3557 */
3558 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3559 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3560 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3561 break;
3562 case WM_T_82573:
3563 case WM_T_82574:
3564 case WM_T_82583:
3565 if ((sc->sc_type == WM_T_82574)
3566 || (sc->sc_type == WM_T_82583))
3567 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3568
3569 /* Extended Device Control */
3570 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3571 reg &= ~__BIT(23); /* Clear bit 23 */
3572 reg |= __BIT(22); /* Set bit 22 */
3573 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3574
3575 /* Device Control */
3576 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3577 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3578
3579 /* PCIe Control Register */
3580 if ((sc->sc_type == WM_T_82574)
3581 || (sc->sc_type == WM_T_82583)) {
3582 /*
3583 				 * The documentation says this bit must be
3584 				 * set for proper operation.
3585 */
3586 reg = CSR_READ(sc, WMREG_GCR);
3587 reg |= __BIT(22);
3588 CSR_WRITE(sc, WMREG_GCR, reg);
3589
3590 /*
3591 				 * Apply a workaround for the hardware errata
3592 				 * documented in the errata docs. It fixes an
3593 				 * issue where error-prone or unreliable PCIe
3594 				 * completions occur, particularly with ASPM
3595 				 * enabled. Without the fix, the issue can
3596 				 * cause Tx timeouts.
3597 */
3598 reg = CSR_READ(sc, WMREG_GCR2);
3599 reg |= __BIT(0);
3600 CSR_WRITE(sc, WMREG_GCR2, reg);
3601 }
3602 break;
3603 case WM_T_80003:
3604 /* TARC0 */
3605 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3606 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3607 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3608
3609 /* TARC1 bit 28 */
3610 tarc1 = CSR_READ(sc, WMREG_TARC1);
3611 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3612 tarc1 &= ~__BIT(28);
3613 else
3614 tarc1 |= __BIT(28);
3615 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3616 break;
3617 case WM_T_ICH8:
3618 case WM_T_ICH9:
3619 case WM_T_ICH10:
3620 case WM_T_PCH:
3621 case WM_T_PCH2:
3622 case WM_T_PCH_LPT:
3623 /* TARC 0 */
3624 if (sc->sc_type == WM_T_ICH8) {
3625 /* Set TARC0 bits 29 and 28 */
3626 tarc0 |= __BITS(29, 28);
3627 }
3628 /* Set TARC0 bits 23,24,26,27 */
3629 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3630
3631 /* CTRL_EXT */
3632 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3633 reg |= __BIT(22); /* Set bit 22 */
3634 /*
3635 * Enable PHY low-power state when MAC is at D3
3636 * w/o WoL
3637 */
3638 if (sc->sc_type >= WM_T_PCH)
3639 reg |= CTRL_EXT_PHYPDEN;
3640 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3641
3642 /* TARC1 */
3643 tarc1 = CSR_READ(sc, WMREG_TARC1);
3644 /* bit 28 */
3645 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3646 tarc1 &= ~__BIT(28);
3647 else
3648 tarc1 |= __BIT(28);
3649 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3650 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3651
3652 /* Device Status */
3653 if (sc->sc_type == WM_T_ICH8) {
3654 reg = CSR_READ(sc, WMREG_STATUS);
3655 reg &= ~__BIT(31);
3656 CSR_WRITE(sc, WMREG_STATUS, reg);
3657
3658 }
3659
3660 /*
3661 			 * To work around a descriptor data corruption issue
3662 			 * during NFS v2 UDP traffic, just disable the NFS
3663 			 * filtering capability.
3664 */
3665 reg = CSR_READ(sc, WMREG_RFCTL);
3666 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3667 CSR_WRITE(sc, WMREG_RFCTL, reg);
3668 break;
3669 default:
3670 break;
3671 }
3672 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3673
3674 /*
3675 * 8257[12] Errata No.52 and some others.
3676 * Avoid RSS Hash Value bug.
3677 */
3678 switch (sc->sc_type) {
3679 case WM_T_82571:
3680 case WM_T_82572:
3681 case WM_T_82573:
3682 case WM_T_80003:
3683 case WM_T_ICH8:
3684 reg = CSR_READ(sc, WMREG_RFCTL);
3685 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3686 CSR_WRITE(sc, WMREG_RFCTL, reg);
3687 break;
3688 default:
3689 break;
3690 }
3691 }
3692 }
3693
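/*
 * wm_rxpbs_adjust_82580:
 *
 *	Translate a raw RXPBS register value into a packet buffer size
 *	via a lookup table; out-of-range values yield 0.
 */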
3694 static uint32_t
3695 wm_rxpbs_adjust_82580(uint32_t val)
3696 {
3697 uint32_t rv = 0;
3698
3699 if (val < __arraycount(wm_82580_rxpbs_table))
3700 rv = wm_82580_rxpbs_table[val];
3701
3702 return rv;
3703 }
3704
3705 /*
3706 * wm_reset:
3707 *
3708 * Reset the i82542 chip.
3709 */
3710 static void
3711 wm_reset(struct wm_softc *sc)
3712 {
3713 int phy_reset = 0;
3714 int error = 0;
3715 uint32_t reg, mask;
3716
3717 /*
3718 * Allocate on-chip memory according to the MTU size.
3719 * The Packet Buffer Allocation register must be written
3720 * before the chip is reset.
3721 */
3722 switch (sc->sc_type) {
3723 case WM_T_82547:
3724 case WM_T_82547_2:
3725 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3726 PBA_22K : PBA_30K;
3727 sc->sc_txfifo_head = 0;
3728 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3729 sc->sc_txfifo_size =
3730 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3731 sc->sc_txfifo_stall = 0;
3732 break;
3733 case WM_T_82571:
3734 case WM_T_82572:
3735 	case WM_T_82575: /* XXX need special handling for jumbo frames */
3736 case WM_T_80003:
3737 sc->sc_pba = PBA_32K;
3738 break;
3739 case WM_T_82573:
3740 sc->sc_pba = PBA_12K;
3741 break;
3742 case WM_T_82574:
3743 case WM_T_82583:
3744 sc->sc_pba = PBA_20K;
3745 break;
3746 case WM_T_82576:
3747 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3748 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3749 break;
3750 case WM_T_82580:
3751 case WM_T_I350:
3752 case WM_T_I354:
3753 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3754 break;
3755 case WM_T_I210:
3756 case WM_T_I211:
3757 sc->sc_pba = PBA_34K;
3758 break;
3759 case WM_T_ICH8:
3760 /* Workaround for a bit corruption issue in FIFO memory */
3761 sc->sc_pba = PBA_8K;
3762 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3763 break;
3764 case WM_T_ICH9:
3765 case WM_T_ICH10:
3766 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3767 PBA_14K : PBA_10K;
3768 break;
3769 case WM_T_PCH:
3770 case WM_T_PCH2:
3771 case WM_T_PCH_LPT:
3772 sc->sc_pba = PBA_26K;
3773 break;
3774 default:
3775 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3776 PBA_40K : PBA_48K;
3777 break;
3778 }
3779 /*
3780 	 * Only old or non-multiqueue devices have the PBA register.
3781 * XXX Need special handling for 82575.
3782 */
3783 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3784 || (sc->sc_type == WM_T_82575))
3785 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3786
3787 /* Prevent the PCI-E bus from sticking */
3788 if (sc->sc_flags & WM_F_PCIE) {
3789 int timeout = 800;
3790
3791 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3792 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3793
3794 while (timeout--) {
3795 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3796 == 0)
3797 break;
3798 delay(100);
3799 }
3800 }
3801
3802 /* Set the completion timeout for interface */
3803 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3804 || (sc->sc_type == WM_T_82580)
3805 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3806 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3807 wm_set_pcie_completion_timeout(sc);
3808
3809 /* Clear interrupt */
3810 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3811 if (sc->sc_nintrs > 1) {
3812 if (sc->sc_type != WM_T_82574) {
3813 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3814 CSR_WRITE(sc, WMREG_EIAC, 0);
3815 } else {
3816 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3817 }
3818 }
3819
3820 /* Stop the transmit and receive processes. */
3821 CSR_WRITE(sc, WMREG_RCTL, 0);
3822 sc->sc_rctl &= ~RCTL_EN;
3823 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3824 CSR_WRITE_FLUSH(sc);
3825
3826 /* XXX set_tbi_sbp_82543() */
3827
3828 delay(10*1000);
3829
3830 /* Must acquire the MDIO ownership before MAC reset */
3831 switch (sc->sc_type) {
3832 case WM_T_82573:
3833 case WM_T_82574:
3834 case WM_T_82583:
3835 error = wm_get_hw_semaphore_82573(sc);
3836 break;
3837 default:
3838 break;
3839 }
3840
3841 /*
3842 * 82541 Errata 29? & 82547 Errata 28?
3843  * See also the description of the PHY_RST bit in the CTRL register
3844  * in 8254x_GBe_SDM.pdf.
3845 */
3846 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3847 CSR_WRITE(sc, WMREG_CTRL,
3848 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3849 CSR_WRITE_FLUSH(sc);
3850 delay(5000);
3851 }
3852
3853 switch (sc->sc_type) {
3854 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3855 case WM_T_82541:
3856 case WM_T_82541_2:
3857 case WM_T_82547:
3858 case WM_T_82547_2:
3859 /*
3860 * On some chipsets, a reset through a memory-mapped write
3861 * cycle can cause the chip to reset before completing the
3862 		 * write cycle. This causes a major headache that can be
3863 * avoided by issuing the reset via indirect register writes
3864 * through I/O space.
3865 *
3866 * So, if we successfully mapped the I/O BAR at attach time,
3867 * use that. Otherwise, try our luck with a memory-mapped
3868 * reset.
3869 */
3870 if (sc->sc_flags & WM_F_IOH_VALID)
3871 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3872 else
3873 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3874 break;
3875 case WM_T_82545_3:
3876 case WM_T_82546_3:
3877 /* Use the shadow control register on these chips. */
3878 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3879 break;
3880 case WM_T_80003:
3881 mask = swfwphysem[sc->sc_funcid];
3882 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3883 wm_get_swfw_semaphore(sc, mask);
3884 CSR_WRITE(sc, WMREG_CTRL, reg);
3885 wm_put_swfw_semaphore(sc, mask);
3886 break;
3887 case WM_T_ICH8:
3888 case WM_T_ICH9:
3889 case WM_T_ICH10:
3890 case WM_T_PCH:
3891 case WM_T_PCH2:
3892 case WM_T_PCH_LPT:
3893 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3894 if (wm_check_reset_block(sc) == 0) {
3895 /*
3896 * Gate automatic PHY configuration by hardware on
3897 * non-managed 82579
3898 */
3899 if ((sc->sc_type == WM_T_PCH2)
3900 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3901 != 0))
3902 wm_gate_hw_phy_config_ich8lan(sc, 1);
3903
3904
3905 reg |= CTRL_PHY_RESET;
3906 phy_reset = 1;
3907 }
3908 wm_get_swfwhw_semaphore(sc);
3909 CSR_WRITE(sc, WMREG_CTRL, reg);
3910 		/* Don't insert a completion barrier during the reset */
3911 delay(20*1000);
3912 wm_put_swfwhw_semaphore(sc);
3913 break;
3914 case WM_T_82580:
3915 case WM_T_I350:
3916 case WM_T_I354:
3917 case WM_T_I210:
3918 case WM_T_I211:
3919 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3920 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3921 CSR_WRITE_FLUSH(sc);
3922 delay(5000);
3923 break;
3924 case WM_T_82542_2_0:
3925 case WM_T_82542_2_1:
3926 case WM_T_82543:
3927 case WM_T_82540:
3928 case WM_T_82545:
3929 case WM_T_82546:
3930 case WM_T_82571:
3931 case WM_T_82572:
3932 case WM_T_82573:
3933 case WM_T_82574:
3934 case WM_T_82575:
3935 case WM_T_82576:
3936 case WM_T_82583:
3937 default:
3938 /* Everything else can safely use the documented method. */
3939 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3940 break;
3941 }
3942
3943 /* Must release the MDIO ownership after MAC reset */
3944 switch (sc->sc_type) {
3945 case WM_T_82573:
3946 case WM_T_82574:
3947 case WM_T_82583:
3948 if (error == 0)
3949 wm_put_hw_semaphore_82573(sc);
3950 break;
3951 default:
3952 break;
3953 }
3954
3955 if (phy_reset != 0)
3956 wm_get_cfg_done(sc);
3957
3958 /* reload EEPROM */
3959 switch (sc->sc_type) {
3960 case WM_T_82542_2_0:
3961 case WM_T_82542_2_1:
3962 case WM_T_82543:
3963 case WM_T_82544:
3964 delay(10);
3965 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3966 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3967 CSR_WRITE_FLUSH(sc);
3968 delay(2000);
3969 break;
3970 case WM_T_82540:
3971 case WM_T_82545:
3972 case WM_T_82545_3:
3973 case WM_T_82546:
3974 case WM_T_82546_3:
3975 delay(5*1000);
3976 /* XXX Disable HW ARPs on ASF enabled adapters */
3977 break;
3978 case WM_T_82541:
3979 case WM_T_82541_2:
3980 case WM_T_82547:
3981 case WM_T_82547_2:
3982 delay(20000);
3983 /* XXX Disable HW ARPs on ASF enabled adapters */
3984 break;
3985 case WM_T_82571:
3986 case WM_T_82572:
3987 case WM_T_82573:
3988 case WM_T_82574:
3989 case WM_T_82583:
3990 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3991 delay(10);
3992 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3993 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3994 CSR_WRITE_FLUSH(sc);
3995 }
3996 /* check EECD_EE_AUTORD */
3997 wm_get_auto_rd_done(sc);
3998 /*
3999 * Phy configuration from NVM just starts after EECD_AUTO_RD
4000 * is set.
4001 */
4002 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4003 || (sc->sc_type == WM_T_82583))
4004 delay(25*1000);
4005 break;
4006 case WM_T_82575:
4007 case WM_T_82576:
4008 case WM_T_82580:
4009 case WM_T_I350:
4010 case WM_T_I354:
4011 case WM_T_I210:
4012 case WM_T_I211:
4013 case WM_T_80003:
4014 /* check EECD_EE_AUTORD */
4015 wm_get_auto_rd_done(sc);
4016 break;
4017 case WM_T_ICH8:
4018 case WM_T_ICH9:
4019 case WM_T_ICH10:
4020 case WM_T_PCH:
4021 case WM_T_PCH2:
4022 case WM_T_PCH_LPT:
4023 break;
4024 default:
4025 panic("%s: unknown type\n", __func__);
4026 }
4027
4028 /* Check whether EEPROM is present or not */
4029 switch (sc->sc_type) {
4030 case WM_T_82575:
4031 case WM_T_82576:
4032 case WM_T_82580:
4033 case WM_T_I350:
4034 case WM_T_I354:
4035 case WM_T_ICH8:
4036 case WM_T_ICH9:
4037 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4038 /* Not found */
4039 sc->sc_flags |= WM_F_EEPROM_INVALID;
4040 if (sc->sc_type == WM_T_82575)
4041 wm_reset_init_script_82575(sc);
4042 }
4043 break;
4044 default:
4045 break;
4046 }
4047
4048 if ((sc->sc_type == WM_T_82580)
4049 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4050 /* clear global device reset status bit */
4051 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4052 }
4053
4054 /* Clear any pending interrupt events. */
4055 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4056 reg = CSR_READ(sc, WMREG_ICR);
4057 if (sc->sc_nintrs > 1) {
4058 if (sc->sc_type != WM_T_82574) {
4059 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4060 CSR_WRITE(sc, WMREG_EIAC, 0);
4061 } else
4062 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4063 }
4064
4065 /* reload sc_ctrl */
4066 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4067
4068 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4069 wm_set_eee_i350(sc);
4070
4071 /* dummy read from WUC */
4072 if (sc->sc_type == WM_T_PCH)
4073 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4074 /*
4075 * For PCH, this write will make sure that any noise will be detected
4076 * as a CRC error and be dropped rather than show up as a bad packet
4077 	 * to the DMA engine.
4078 */
4079 if (sc->sc_type == WM_T_PCH)
4080 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4081
4082 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4083 CSR_WRITE(sc, WMREG_WUC, 0);
4084
4085 wm_reset_mdicnfg_82580(sc);
4086
4087 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4088 wm_pll_workaround_i210(sc);
4089 }
4090
4091 /*
4092 * wm_add_rxbuf:
4093 *
4094  *	Add a receive buffer to the indicated descriptor.
4095 */
4096 static int
4097 wm_add_rxbuf(struct wm_softc *sc, int idx)
4098 {
4099 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4100 struct mbuf *m;
4101 int error;
4102
4103 KASSERT(WM_RX_LOCKED(sc));
4104
4105 MGETHDR(m, M_DONTWAIT, MT_DATA);
4106 if (m == NULL)
4107 return ENOBUFS;
4108
4109 MCLGET(m, M_DONTWAIT);
4110 if ((m->m_flags & M_EXT) == 0) {
4111 m_freem(m);
4112 return ENOBUFS;
4113 }
4114
4115 if (rxs->rxs_mbuf != NULL)
4116 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4117
4118 rxs->rxs_mbuf = m;
4119
4120 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4121 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4122 BUS_DMA_READ|BUS_DMA_NOWAIT);
4123 if (error) {
4124 /* XXX XXX XXX */
4125 aprint_error_dev(sc->sc_dev,
4126 "unable to load rx DMA map %d, error = %d\n",
4127 idx, error);
4128 panic("wm_add_rxbuf");
4129 }
4130
4131 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4132 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4133
4134 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4135 if ((sc->sc_rctl & RCTL_EN) != 0)
4136 WM_INIT_RXDESC(sc, idx);
4137 } else
4138 WM_INIT_RXDESC(sc, idx);
4139
4140 return 0;
4141 }
4142
4143 /*
4144 * wm_rxdrain:
4145 *
4146 * Drain the receive queue.
4147 */
4148 static void
4149 wm_rxdrain(struct wm_softc *sc)
4150 {
4151 struct wm_rxsoft *rxs;
4152 int i;
4153
4154 KASSERT(WM_RX_LOCKED(sc));
4155
4156 for (i = 0; i < WM_NRXDESC; i++) {
4157 rxs = &sc->sc_rxsoft[i];
4158 if (rxs->rxs_mbuf != NULL) {
4159 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4160 m_freem(rxs->rxs_mbuf);
4161 rxs->rxs_mbuf = NULL;
4162 }
4163 }
4164 }
4165
4166 /*
4167 * wm_init: [ifnet interface function]
4168 *
4169 * Initialize the interface.
4170 */
4171 static int
4172 wm_init(struct ifnet *ifp)
4173 {
4174 struct wm_softc *sc = ifp->if_softc;
4175 int ret;
4176
4177 WM_BOTH_LOCK(sc);
4178 ret = wm_init_locked(ifp);
4179 WM_BOTH_UNLOCK(sc);
4180
4181 return ret;
4182 }
4183
4184 static int
4185 wm_init_locked(struct ifnet *ifp)
4186 {
4187 struct wm_softc *sc = ifp->if_softc;
4188 struct wm_rxsoft *rxs;
4189 int i, j, trynum, error = 0;
4190 uint32_t reg;
4191
4192 KASSERT(WM_BOTH_LOCKED(sc));
4193 /*
4194 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4195 	 * There is a small but measurable benefit to avoiding the adjustment
4196 * of the descriptor so that the headers are aligned, for normal mtu,
4197 * on such platforms. One possibility is that the DMA itself is
4198 * slightly more efficient if the front of the entire packet (instead
4199 * of the front of the headers) is aligned.
4200 *
4201 * Note we must always set align_tweak to 0 if we are using
4202 * jumbo frames.
4203 */
4204 #ifdef __NO_STRICT_ALIGNMENT
4205 sc->sc_align_tweak = 0;
4206 #else
4207 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4208 sc->sc_align_tweak = 0;
4209 else
4210 sc->sc_align_tweak = 2;
4211 #endif /* __NO_STRICT_ALIGNMENT */
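	/*
	 * With the 2-byte tweak, the 14-byte Ethernet header ends on a
	 * 16-byte boundary, so the IP header that follows is 4-byte
	 * aligned. For jumbo frames the 2 extra bytes would not leave
	 * room for a full frame in the cluster, hence the MCLBYTES - 2
	 * check above.
	 */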
4212
4213 /* Cancel any pending I/O. */
4214 wm_stop_locked(ifp, 0);
4215
4216 /* update statistics before reset */
4217 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4218 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4219
4220 /* Reset the chip to a known state. */
4221 wm_reset(sc);
4222
4223 switch (sc->sc_type) {
4224 case WM_T_82571:
4225 case WM_T_82572:
4226 case WM_T_82573:
4227 case WM_T_82574:
4228 case WM_T_82583:
4229 case WM_T_80003:
4230 case WM_T_ICH8:
4231 case WM_T_ICH9:
4232 case WM_T_ICH10:
4233 case WM_T_PCH:
4234 case WM_T_PCH2:
4235 case WM_T_PCH_LPT:
4236 if (wm_check_mng_mode(sc) != 0)
4237 wm_get_hw_control(sc);
4238 break;
4239 default:
4240 break;
4241 }
4242
4243 /* Init hardware bits */
4244 wm_initialize_hardware_bits(sc);
4245
4246 /* Reset the PHY. */
4247 if (sc->sc_flags & WM_F_HAS_MII)
4248 wm_gmii_reset(sc);
4249
4250 /* Calculate (E)ITR value */
4251 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4252 sc->sc_itr = 450; /* For EITR */
4253 } else if (sc->sc_type >= WM_T_82543) {
4254 /*
4255 		 * Set up the interrupt throttling register (units of 256ns).
4256 		 * Note that a footnote in Intel's documentation says this
4257 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4258 		 * or 10Mbit mode. Empirically, it appears to be the case
4259 		 * that this is also true for the 1024ns units of the other
4260 * interrupt-related timer registers -- so, really, we ought
4261 * to divide this value by 4 when the link speed is low.
4262 *
4263 * XXX implement this division at link speed change!
4264 */
4265
4266 /*
4267 * For N interrupts/sec, set this value to:
4268 * 1000000000 / (N * 256). Note that we set the
4269 * absolute and packet timer values to this value
4270 * divided by 4 to get "simple timer" behavior.
4271 */
4272
4273 sc->sc_itr = 1500; /* 2604 ints/sec */
4274 }
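	/*
	 * Example: with sc_itr = 1500, the rate works out to
	 * 1000000000 / (1500 * 256) =~ 2604 interrupts/sec, as noted
	 * above.
	 */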
4275
4276 /* Initialize the transmit descriptor ring. */
4277 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4278 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4279 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4280 sc->sc_txfree = WM_NTXDESC(sc);
4281 sc->sc_txnext = 0;
4282
4283 if (sc->sc_type < WM_T_82543) {
4284 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4285 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4286 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4287 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4288 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4289 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4290 } else {
4291 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4292 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4293 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4294 CSR_WRITE(sc, WMREG_TDH, 0);
4295
4296 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4297 /*
4298 * Don't write TDT before TCTL.EN is set.
4299 * See the document.
4300 */
4301 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
4302 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4303 | TXDCTL_WTHRESH(0));
4304 else {
4305 /* ITR / 4 */
4306 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
4307 if (sc->sc_type >= WM_T_82540) {
4308 /* should be same */
4309 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
4310 }
4311
4312 CSR_WRITE(sc, WMREG_TDT, 0);
4313 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
4314 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4315 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4316 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4317 }
4318 }
4319
4320 /* Initialize the transmit job descriptors. */
4321 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4322 sc->sc_txsoft[i].txs_mbuf = NULL;
4323 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4324 sc->sc_txsnext = 0;
4325 sc->sc_txsdirty = 0;
4326
4327 /*
4328 * Initialize the receive descriptor and receive job
4329 * descriptor rings.
4330 */
4331 if (sc->sc_type < WM_T_82543) {
4332 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4333 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4334 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4335 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4336 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4337 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4338
4339 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4340 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4341 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4342 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4343 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4344 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4345 } else {
4346 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4347 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4348 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4349
4350 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4351 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4352 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4353 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4354 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4355 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4356 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4357 | RXDCTL_WTHRESH(1));
4358 } else {
4359 CSR_WRITE(sc, WMREG_RDH, 0);
4360 CSR_WRITE(sc, WMREG_RDT, 0);
4361 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4362 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4363 }
4364 }
4365 for (i = 0; i < WM_NRXDESC; i++) {
4366 rxs = &sc->sc_rxsoft[i];
4367 if (rxs->rxs_mbuf == NULL) {
4368 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4369 log(LOG_ERR, "%s: unable to allocate or map "
4370 "rx buffer %d, error = %d\n",
4371 device_xname(sc->sc_dev), i, error);
4372 /*
4373 * XXX Should attempt to run with fewer receive
4374 * XXX buffers instead of just failing.
4375 */
4376 wm_rxdrain(sc);
4377 goto out;
4378 }
4379 } else {
4380 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4381 WM_INIT_RXDESC(sc, i);
4382 /*
4383 			 * For 82575 and newer devices, the RX descriptors
4384 			 * must be initialized after RCTL.EN is set in
4385 			 * wm_set_filter().
4386 */
4387 }
4388 }
4389 sc->sc_rxptr = 0;
4390 sc->sc_rxdiscard = 0;
4391 WM_RXCHAIN_RESET(sc);
4392
4393 /*
4394 * Clear out the VLAN table -- we don't use it (yet).
4395 */
4396 CSR_WRITE(sc, WMREG_VET, 0);
4397 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4398 trynum = 10; /* Due to hw errata */
4399 else
4400 trynum = 1;
4401 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4402 for (j = 0; j < trynum; j++)
4403 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4404
4405 /*
4406 * Set up flow-control parameters.
4407 *
4408 * XXX Values could probably stand some tuning.
4409 */
4410 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4411 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4412 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4413 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4414 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4415 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4416 }
4417
4418 sc->sc_fcrtl = FCRTL_DFLT;
4419 if (sc->sc_type < WM_T_82543) {
4420 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4421 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4422 } else {
4423 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4424 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4425 }
4426
4427 if (sc->sc_type == WM_T_80003)
4428 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4429 else
4430 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4431
4432 	/* Write the control register. */
4433 wm_set_vlan(sc);
4434
4435 if (sc->sc_flags & WM_F_HAS_MII) {
4436 int val;
4437
4438 switch (sc->sc_type) {
4439 case WM_T_80003:
4440 case WM_T_ICH8:
4441 case WM_T_ICH9:
4442 case WM_T_ICH10:
4443 case WM_T_PCH:
4444 case WM_T_PCH2:
4445 case WM_T_PCH_LPT:
4446 /*
4447 			 * Set the MAC to wait the maximum time between each
4448 			 * iteration and increase the max iterations when
4449 			 * polling the PHY; this fixes erroneous timeouts at
4450 * 10Mbps.
4451 */
4452 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4453 0xFFFF);
4454 val = wm_kmrn_readreg(sc,
4455 KUMCTRLSTA_OFFSET_INB_PARAM);
4456 val |= 0x3F;
4457 wm_kmrn_writereg(sc,
4458 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4459 break;
4460 default:
4461 break;
4462 }
4463
4464 if (sc->sc_type == WM_T_80003) {
4465 val = CSR_READ(sc, WMREG_CTRL_EXT);
4466 val &= ~CTRL_EXT_LINK_MODE_MASK;
4467 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4468
4469 /* Bypass RX and TX FIFO's */
4470 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4471 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4472 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4473 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4474 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4475 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4476 }
4477 }
4478 #if 0
4479 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4480 #endif
4481
4482 /* Set up checksum offload parameters. */
4483 reg = CSR_READ(sc, WMREG_RXCSUM);
4484 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4485 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4486 reg |= RXCSUM_IPOFL;
4487 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4488 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4489 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4490 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4491 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4492
4493 /* Set up MSI-X */
4494 if (sc->sc_nintrs > 1) {
4495 uint32_t ivar;
4496
4497 if (sc->sc_type == WM_T_82575) {
4498 /* Interrupt control */
4499 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4500 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4501 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4502
4503 /* TX */
4504 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_TXINTR_IDX),
4505 EITR_TX_QUEUE0);
4506 /* RX */
4507 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_RXINTR_IDX),
4508 EITR_RX_QUEUE0);
4509 /* Link status */
4510 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_LINKINTR_IDX),
4511 EITR_OTHER);
4512 } else if (sc->sc_type == WM_T_82574) {
4513 /* Interrupt control */
4514 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4515 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4516 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4517
4518 /* TX, RX and Link status */
4519 ivar = __SHIFTIN((IVAR_VALID_82574|WM_MSIX_TXINTR_IDX),
4520 IVAR_TX_MASK_Q_82574(0));
4521 ivar |= __SHIFTIN((IVAR_VALID_82574
4522 | WM_MSIX_RXINTR_IDX),
4523 IVAR_RX_MASK_Q_82574(0));
4524 			ivar |= __SHIFTIN((IVAR_VALID_82574|WM_MSIX_LINKINTR_IDX),
4525 IVAR_OTHER_MASK);
4526 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4527 } else {
4528 /* Interrupt control */
4529 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
4530 | GPIE_MULTI_MSIX | GPIE_EIAME
4531 | GPIE_PBA);
4532
4533 switch (sc->sc_type) {
4534 case WM_T_82580:
4535 case WM_T_I350:
4536 case WM_T_I354:
4537 case WM_T_I210:
4538 case WM_T_I211:
4539 /* TX */
4540 ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
4541 ivar &= ~IVAR_TX_MASK_Q(0);
4542 ivar |= __SHIFTIN(
4543 (WM_MSIX_TXINTR_IDX | IVAR_VALID),
4544 IVAR_TX_MASK_Q(0));
4545 CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
4546
4547 /* RX */
4548 ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
4549 ivar &= ~IVAR_RX_MASK_Q(0);
4550 ivar |= __SHIFTIN(
4551 (WM_MSIX_RXINTR_IDX | IVAR_VALID),
4552 IVAR_RX_MASK_Q(0));
4553 CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
4554 break;
4555 case WM_T_82576:
4556 /* TX */
4557 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
4558 ivar &= ~IVAR_TX_MASK_Q_82576(0);
4559 ivar |= __SHIFTIN(
4560 (WM_MSIX_TXINTR_IDX | IVAR_VALID),
4561 IVAR_TX_MASK_Q_82576(0));
4562 CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
4563
4564 /* RX */
4565 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
4566 ivar &= ~IVAR_RX_MASK_Q_82576(0);
4567 ivar |= __SHIFTIN(
4568 (WM_MSIX_RXINTR_IDX | IVAR_VALID),
4569 IVAR_RX_MASK_Q_82576(0));
4570 CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
4571 break;
4572 default:
4573 break;
4574 }
4575
4576 /* Link status */
4577 ivar = __SHIFTIN((WM_MSIX_LINKINTR_IDX | IVAR_VALID),
4578 IVAR_MISC_OTHER);
4579 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4580 }
4581 }
4582
4583 /* Set up the interrupt registers. */
4584 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4585 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4586 ICR_RXO | ICR_RXT0;
4587 if (sc->sc_nintrs > 1) {
4588 uint32_t mask;
4589 switch (sc->sc_type) {
4590 case WM_T_82574:
4591 CSR_WRITE(sc, WMREG_EIAC_82574,
4592 WMREG_EIAC_82574_MSIX_MASK);
4593 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4594 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4595 break;
4596 default:
4597 if (sc->sc_type == WM_T_82575)
4598 				mask = EITR_RX_QUEUE0 | EITR_TX_QUEUE0
4599 | EITR_OTHER;
4600 else
4601 mask = (1 << WM_MSIX_RXINTR_IDX)
4602 | (1 << WM_MSIX_TXINTR_IDX)
4603 | (1 << WM_MSIX_LINKINTR_IDX);
4604 CSR_WRITE(sc, WMREG_EIAC, mask);
4605 CSR_WRITE(sc, WMREG_EIAM, mask);
4606 CSR_WRITE(sc, WMREG_EIMS, mask);
4607 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4608 break;
4609 }
4610 } else
4611 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4612
4613 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4614 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4615 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4616 reg = CSR_READ(sc, WMREG_KABGTXD);
4617 reg |= KABGTXD_BGSQLBIAS;
4618 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4619 }
4620
4621 /* Set up the inter-packet gap. */
4622 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4623
4624 if (sc->sc_type >= WM_T_82543) {
4625 /*
4626 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
4627 * the multi queue function with MSI-X.
4628 */
4629 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4630 CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
4631 else
4632 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4633 }
4634
4635 /* Set the VLAN ethernetype. */
4636 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4637
4638 /*
4639 * Set up the transmit control register; we start out with
4640 	 * a collision distance suitable for FDX, but update it when
4641 * we resolve the media type.
4642 */
4643 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4644 | TCTL_CT(TX_COLLISION_THRESHOLD)
4645 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4646 if (sc->sc_type >= WM_T_82571)
4647 sc->sc_tctl |= TCTL_MULR;
4648 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4649
4650 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4651 /* Write TDT after TCTL.EN is set. See the document. */
4652 CSR_WRITE(sc, WMREG_TDT, 0);
4653 }
4654
4655 if (sc->sc_type == WM_T_80003) {
4656 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4657 reg &= ~TCTL_EXT_GCEX_MASK;
4658 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4659 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4660 }
4661
4662 /* Set the media. */
4663 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4664 goto out;
4665
4666 /* Configure for OS presence */
4667 wm_init_manageability(sc);
4668
4669 /*
4670 * Set up the receive control register; we actually program
4671 * the register when we set the receive filter. Use multicast
4672 * address offset type 0.
4673 *
4674 * Only the i82544 has the ability to strip the incoming
4675 * CRC, so we don't enable that feature.
4676 */
4677 sc->sc_mchash_type = 0;
4678 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4679 | RCTL_MO(sc->sc_mchash_type);
4680
4681 /*
4682 * The I350 has a bug where it always strips the CRC whether
4683 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
4684 */
4685 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4686 || (sc->sc_type == WM_T_I210))
4687 sc->sc_rctl |= RCTL_SECRC;
4688
4689 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4690 && (ifp->if_mtu > ETHERMTU)) {
4691 sc->sc_rctl |= RCTL_LPE;
4692 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4693 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4694 }
4695
4696 if (MCLBYTES == 2048) {
4697 sc->sc_rctl |= RCTL_2k;
4698 } else {
4699 if (sc->sc_type >= WM_T_82543) {
4700 switch (MCLBYTES) {
4701 case 4096:
4702 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4703 break;
4704 case 8192:
4705 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4706 break;
4707 case 16384:
4708 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4709 break;
4710 default:
4711 panic("wm_init: MCLBYTES %d unsupported",
4712 MCLBYTES);
4713 break;
4714 }
4715 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4716 }
4717
4718 /* Set the receive filter. */
4719 wm_set_filter(sc);
4720
4721 /* Enable ECC */
4722 switch (sc->sc_type) {
4723 case WM_T_82571:
4724 reg = CSR_READ(sc, WMREG_PBA_ECC);
4725 reg |= PBA_ECC_CORR_EN;
4726 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4727 break;
4728 case WM_T_PCH_LPT:
4729 reg = CSR_READ(sc, WMREG_PBECCSTS);
4730 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4731 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4732
4733 reg = CSR_READ(sc, WMREG_CTRL);
4734 reg |= CTRL_MEHE;
4735 CSR_WRITE(sc, WMREG_CTRL, reg);
4736 break;
4737 default:
4738 break;
4739 }
4740
4741 /* On 575 and later set RDT only if RX enabled */
4742 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4743 for (i = 0; i < WM_NRXDESC; i++)
4744 WM_INIT_RXDESC(sc, i);
4745
4746 sc->sc_stopping = false;
4747
4748 /* Start the one second link check clock. */
4749 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4750
4751 /* ...all done! */
4752 ifp->if_flags |= IFF_RUNNING;
4753 ifp->if_flags &= ~IFF_OACTIVE;
4754
4755 out:
4756 sc->sc_if_flags = ifp->if_flags;
4757 if (error)
4758 log(LOG_ERR, "%s: interface not running\n",
4759 device_xname(sc->sc_dev));
4760 return error;
4761 }
4762
4763 /*
4764 * wm_stop: [ifnet interface function]
4765 *
4766 * Stop transmission on the interface.
4767 */
4768 static void
4769 wm_stop(struct ifnet *ifp, int disable)
4770 {
4771 struct wm_softc *sc = ifp->if_softc;
4772
4773 WM_BOTH_LOCK(sc);
4774 wm_stop_locked(ifp, disable);
4775 WM_BOTH_UNLOCK(sc);
4776 }
4777
4778 static void
4779 wm_stop_locked(struct ifnet *ifp, int disable)
4780 {
4781 struct wm_softc *sc = ifp->if_softc;
4782 struct wm_txsoft *txs;
4783 int i;
4784
4785 KASSERT(WM_BOTH_LOCKED(sc));
4786
4787 sc->sc_stopping = true;
4788
4789 /* Stop the one second clock. */
4790 callout_stop(&sc->sc_tick_ch);
4791
4792 /* Stop the 82547 Tx FIFO stall check timer. */
4793 if (sc->sc_type == WM_T_82547)
4794 callout_stop(&sc->sc_txfifo_ch);
4795
4796 if (sc->sc_flags & WM_F_HAS_MII) {
4797 /* Down the MII. */
4798 mii_down(&sc->sc_mii);
4799 } else {
4800 #if 0
4801 /* Should we clear PHY's status properly? */
4802 wm_reset(sc);
4803 #endif
4804 }
4805
4806 /* Stop the transmit and receive processes. */
4807 CSR_WRITE(sc, WMREG_TCTL, 0);
4808 CSR_WRITE(sc, WMREG_RCTL, 0);
4809 sc->sc_rctl &= ~RCTL_EN;
4810
4811 /*
4812 * Clear the interrupt mask to ensure the device cannot assert its
4813 * interrupt line.
4814 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
4815 * service any currently pending or shared interrupt.
4816 */
4817 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4818 sc->sc_icr = 0;
4819 if (sc->sc_nintrs > 1) {
4820 if (sc->sc_type != WM_T_82574) {
4821 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4822 CSR_WRITE(sc, WMREG_EIAC, 0);
4823 } else
4824 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4825 }
4826
4827 /* Release any queued transmit buffers. */
4828 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4829 txs = &sc->sc_txsoft[i];
4830 if (txs->txs_mbuf != NULL) {
4831 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4832 m_freem(txs->txs_mbuf);
4833 txs->txs_mbuf = NULL;
4834 }
4835 }
4836
4837 /* Mark the interface as down and cancel the watchdog timer. */
4838 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4839 ifp->if_timer = 0;
4840
4841 if (disable)
4842 wm_rxdrain(sc);
4843
4844 #if 0 /* notyet */
4845 if (sc->sc_type >= WM_T_82544)
4846 CSR_WRITE(sc, WMREG_WUC, 0);
4847 #endif
4848 }
4849
4850 /*
4851 * wm_tx_offload:
4852 *
4853 * Set up TCP/IP checksumming parameters for the
4854 * specified packet.
4855 */
4856 static int
4857 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4858 uint8_t *fieldsp)
4859 {
4860 struct mbuf *m0 = txs->txs_mbuf;
4861 struct livengood_tcpip_ctxdesc *t;
4862 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4863 uint32_t ipcse;
4864 struct ether_header *eh;
4865 int offset, iphl;
4866 uint8_t fields;
4867
4868 /*
4869 * XXX It would be nice if the mbuf pkthdr had offset
4870 * fields for the protocol headers.
4871 */
4872
4873 eh = mtod(m0, struct ether_header *);
4874 switch (htons(eh->ether_type)) {
4875 case ETHERTYPE_IP:
4876 case ETHERTYPE_IPV6:
4877 offset = ETHER_HDR_LEN;
4878 break;
4879
4880 case ETHERTYPE_VLAN:
4881 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4882 break;
4883
4884 default:
4885 /*
4886 * Don't support this protocol or encapsulation.
4887 */
4888 *fieldsp = 0;
4889 *cmdp = 0;
4890 return 0;
4891 }
4892
4893 if ((m0->m_pkthdr.csum_flags &
4894 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4895 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4896 } else {
4897 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4898 }
4899 ipcse = offset + iphl - 1;
4900
4901 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4902 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4903 seg = 0;
4904 fields = 0;
4905
4906 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4907 int hlen = offset + iphl;
4908 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4909
4910 if (__predict_false(m0->m_len <
4911 (hlen + sizeof(struct tcphdr)))) {
4912 /*
4913 * TCP/IP headers are not in the first mbuf; we need
4914 * to do this the slow and painful way. Let's just
4915 * hope this doesn't happen very often.
4916 */
4917 struct tcphdr th;
4918
4919 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4920
4921 m_copydata(m0, hlen, sizeof(th), &th);
4922 if (v4) {
4923 struct ip ip;
4924
4925 m_copydata(m0, offset, sizeof(ip), &ip);
4926 ip.ip_len = 0;
4927 m_copyback(m0,
4928 offset + offsetof(struct ip, ip_len),
4929 sizeof(ip.ip_len), &ip.ip_len);
4930 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4931 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4932 } else {
4933 struct ip6_hdr ip6;
4934
4935 m_copydata(m0, offset, sizeof(ip6), &ip6);
4936 ip6.ip6_plen = 0;
4937 m_copyback(m0,
4938 offset + offsetof(struct ip6_hdr, ip6_plen),
4939 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4940 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4941 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4942 }
4943 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4944 sizeof(th.th_sum), &th.th_sum);
4945
4946 hlen += th.th_off << 2;
4947 } else {
4948 /*
4949 * TCP/IP headers are in the first mbuf; we can do
4950 * this the easy way.
4951 */
4952 struct tcphdr *th;
4953
4954 if (v4) {
4955 struct ip *ip =
4956 (void *)(mtod(m0, char *) + offset);
4957 th = (void *)(mtod(m0, char *) + hlen);
4958
4959 ip->ip_len = 0;
4960 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4961 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4962 } else {
4963 struct ip6_hdr *ip6 =
4964 (void *)(mtod(m0, char *) + offset);
4965 th = (void *)(mtod(m0, char *) + hlen);
4966
4967 ip6->ip6_plen = 0;
4968 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4969 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4970 }
4971 hlen += th->th_off << 2;
4972 }
4973
4974 if (v4) {
4975 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4976 cmdlen |= WTX_TCPIP_CMD_IP;
4977 } else {
4978 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4979 ipcse = 0;
4980 }
4981 cmd |= WTX_TCPIP_CMD_TSE;
4982 cmdlen |= WTX_TCPIP_CMD_TSE |
4983 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4984 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4985 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4986 }
4987
4988 /*
4989 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4990 * offload feature, if we load the context descriptor, we
4991 * MUST provide valid values for IPCSS and TUCSS fields.
4992 */
4993
4994 ipcs = WTX_TCPIP_IPCSS(offset) |
4995 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4996 WTX_TCPIP_IPCSE(ipcse);
4997 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4998 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4999 fields |= WTX_IXSM;
5000 }
5001
5002 offset += iphl;
5003
5004 if (m0->m_pkthdr.csum_flags &
5005 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
5006 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5007 fields |= WTX_TXSM;
5008 tucs = WTX_TCPIP_TUCSS(offset) |
5009 WTX_TCPIP_TUCSO(offset +
5010 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
5011 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5012 } else if ((m0->m_pkthdr.csum_flags &
5013 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
5014 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5015 fields |= WTX_TXSM;
5016 tucs = WTX_TCPIP_TUCSS(offset) |
5017 WTX_TCPIP_TUCSO(offset +
5018 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
5019 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5020 } else {
5021 /* Just initialize it to a valid TCP context. */
5022 tucs = WTX_TCPIP_TUCSS(offset) |
5023 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
5024 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5025 }
5026
5027 /* Fill in the context descriptor. */
5028 t = (struct livengood_tcpip_ctxdesc *)
5029 &sc->sc_txdescs[sc->sc_txnext];
5030 t->tcpip_ipcs = htole32(ipcs);
5031 t->tcpip_tucs = htole32(tucs);
5032 t->tcpip_cmdlen = htole32(cmdlen);
5033 t->tcpip_seg = htole32(seg);
5034 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5035
5036 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5037 txs->txs_ndesc++;
5038
5039 *cmdp = cmd;
5040 *fieldsp = fields;
5041
5042 return 0;
5043 }
5044
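/*
 * wm_dump_mbuf_chain:
 *
 *	Log the layout of an mbuf chain, used by the
 *	too-many-DMA-segments error path in wm_start_locked().
 */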
5045 static void
5046 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5047 {
5048 struct mbuf *m;
5049 int i;
5050
5051 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5052 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5053 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5054 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5055 m->m_data, m->m_len, m->m_flags);
5056 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5057 i, i == 1 ? "" : "s");
5058 }
5059
5060 /*
5061 * wm_82547_txfifo_stall:
5062 *
5063 * Callout used to wait for the 82547 Tx FIFO to drain,
5064 * reset the FIFO pointers, and restart packet transmission.
5065 */
5066 static void
5067 wm_82547_txfifo_stall(void *arg)
5068 {
5069 struct wm_softc *sc = arg;
5070 #ifndef WM_MPSAFE
5071 int s;
5072
5073 s = splnet();
5074 #endif
5075 WM_TX_LOCK(sc);
5076
5077 if (sc->sc_stopping)
5078 goto out;
5079
5080 if (sc->sc_txfifo_stall) {
5081 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
5082 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5083 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5084 /*
5085 * Packets have drained. Stop transmitter, reset
5086 * FIFO pointers, restart transmitter, and kick
5087 * the packet queue.
5088 */
5089 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5090 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5091 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
5092 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
5093 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
5094 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
5095 CSR_WRITE(sc, WMREG_TCTL, tctl);
5096 CSR_WRITE_FLUSH(sc);
5097
5098 sc->sc_txfifo_head = 0;
5099 sc->sc_txfifo_stall = 0;
5100 wm_start_locked(&sc->sc_ethercom.ec_if);
5101 } else {
5102 /*
5103 * Still waiting for packets to drain; try again in
5104 * another tick.
5105 */
5106 callout_schedule(&sc->sc_txfifo_ch, 1);
5107 }
5108 }
5109
5110 out:
5111 WM_TX_UNLOCK(sc);
5112 #ifndef WM_MPSAFE
5113 splx(s);
5114 #endif
5115 }
5116
5117 /*
5118 * wm_82547_txfifo_bugchk:
5119 *
5120 * Check for bug condition in the 82547 Tx FIFO. We need to
5121 * prevent enqueueing a packet that would wrap around the end
5122  * of the Tx FIFO ring buffer, otherwise the chip will croak.
5123 *
5124 * We do this by checking the amount of space before the end
5125 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5126 * the Tx FIFO, wait for all remaining packets to drain, reset
5127 * the internal FIFO pointers to the beginning, and restart
5128 * transmission on the interface.
5129 */
5130 #define WM_FIFO_HDR 0x10
5131 #define WM_82547_PAD_LEN 0x3e0
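/*
 * WM_FIFO_HDR (0x10) is the granularity of the Tx FIFO accounting:
 * each packet occupies roundup(len + WM_FIFO_HDR, WM_FIFO_HDR) bytes,
 * e.g. 1536 bytes for a 1514-byte frame. WM_82547_PAD_LEN (0x3e0) is
 * the slack allowed in the wrap check below.
 */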
5132 static int
5133 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5134 {
5135 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
5136 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5137
5138 /* Just return if already stalled. */
5139 if (sc->sc_txfifo_stall)
5140 return 1;
5141
5142 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5143 /* Stall only occurs in half-duplex mode. */
5144 goto send_packet;
5145 }
5146
5147 if (len >= WM_82547_PAD_LEN + space) {
5148 sc->sc_txfifo_stall = 1;
5149 callout_schedule(&sc->sc_txfifo_ch, 1);
5150 return 1;
5151 }
5152
5153 send_packet:
5154 sc->sc_txfifo_head += len;
5155 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
5156 sc->sc_txfifo_head -= sc->sc_txfifo_size;
5157
5158 return 0;
5159 }
5160
5161 /*
5162 * wm_start: [ifnet interface function]
5163 *
5164 * Start packet transmission on the interface.
5165 */
5166 static void
5167 wm_start(struct ifnet *ifp)
5168 {
5169 struct wm_softc *sc = ifp->if_softc;
5170
5171 WM_TX_LOCK(sc);
5172 if (!sc->sc_stopping)
5173 wm_start_locked(ifp);
5174 WM_TX_UNLOCK(sc);
5175 }
5176
5177 static void
5178 wm_start_locked(struct ifnet *ifp)
5179 {
5180 struct wm_softc *sc = ifp->if_softc;
5181 struct mbuf *m0;
5182 struct m_tag *mtag;
5183 struct wm_txsoft *txs;
5184 bus_dmamap_t dmamap;
5185 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
5186 bus_addr_t curaddr;
5187 bus_size_t seglen, curlen;
5188 uint32_t cksumcmd;
5189 uint8_t cksumfields;
5190
5191 KASSERT(WM_TX_LOCKED(sc));
5192
5193 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5194 return;
5195
5196 /* Remember the previous number of free descriptors. */
5197 ofree = sc->sc_txfree;
5198
5199 /*
5200 * Loop through the send queue, setting up transmit descriptors
5201 * until we drain the queue, or use up all available transmit
5202 * descriptors.
5203 */
5204 for (;;) {
5205 m0 = NULL;
5206
5207 /* Get a work queue entry. */
5208 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5209 wm_txeof(sc);
5210 if (sc->sc_txsfree == 0) {
5211 DPRINTF(WM_DEBUG_TX,
5212 ("%s: TX: no free job descriptors\n",
5213 device_xname(sc->sc_dev)));
5214 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5215 break;
5216 }
5217 }
5218
5219 /* Grab a packet off the queue. */
5220 IFQ_DEQUEUE(&ifp->if_snd, m0);
5221 if (m0 == NULL)
5222 break;
5223
5224 DPRINTF(WM_DEBUG_TX,
5225 ("%s: TX: have packet to transmit: %p\n",
5226 device_xname(sc->sc_dev), m0));
5227
5228 txs = &sc->sc_txsoft[sc->sc_txsnext];
5229 dmamap = txs->txs_dmamap;
5230
5231 use_tso = (m0->m_pkthdr.csum_flags &
5232 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
5233
5234 /*
5235 * So says the Linux driver:
5236 * The controller does a simple calculation to make sure
5237 * there is enough room in the FIFO before initiating the
5238 * DMA for each buffer. The calc is:
5239 * 4 = ceil(buffer len / MSS)
5240 * To make sure we don't overrun the FIFO, adjust the max
5241 * buffer len if the MSS drops.
5242 */
5243 dmamap->dm_maxsegsz =
5244 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
5245 ? m0->m_pkthdr.segsz << 2
5246 : WTX_MAX_LEN;
5247
5248 /*
5249 * Load the DMA map. If this fails, the packet either
5250 * didn't fit in the allotted number of segments, or we
5251 * were short on resources. For the too-many-segments
5252 * case, we simply report an error and drop the packet,
5253 * since we can't sanely copy a jumbo packet to a single
5254 * buffer.
5255 */
5256 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5257 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5258 if (error) {
5259 if (error == EFBIG) {
5260 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5261 log(LOG_ERR, "%s: Tx packet consumes too many "
5262 "DMA segments, dropping...\n",
5263 device_xname(sc->sc_dev));
5264 wm_dump_mbuf_chain(sc, m0);
5265 m_freem(m0);
5266 continue;
5267 }
5268 /* Short on resources, just stop for now. */
5269 DPRINTF(WM_DEBUG_TX,
5270 ("%s: TX: dmamap load failed: %d\n",
5271 device_xname(sc->sc_dev), error));
5272 break;
5273 }
5274
5275 segs_needed = dmamap->dm_nsegs;
5276 if (use_tso) {
5277 /* For sentinel descriptor; see below. */
5278 segs_needed++;
5279 }
5280
5281 /*
5282 * Ensure we have enough descriptors free to describe
5283 * the packet. Note, we always reserve one descriptor
5284 * at the end of the ring due to the semantics of the
5285 * TDT register, plus one more in the event we need
5286 * to load offload context.
5287 */
5288 if (segs_needed > sc->sc_txfree - 2) {
5289 /*
5290 * Not enough free descriptors to transmit this
5291 * packet. We haven't committed anything yet,
5292 * so just unload the DMA map, put the packet
5293 * pack on the queue, and punt. Notify the upper
5294 			 * back on the queue, and punt. Notify the upper
5295 */
5296 DPRINTF(WM_DEBUG_TX,
5297 ("%s: TX: need %d (%d) descriptors, have %d\n",
5298 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5299 segs_needed, sc->sc_txfree - 1));
5300 ifp->if_flags |= IFF_OACTIVE;
5301 bus_dmamap_unload(sc->sc_dmat, dmamap);
5302 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5303 break;
5304 }
5305
5306 /*
5307 * Check for 82547 Tx FIFO bug. We need to do this
5308 * once we know we can transmit the packet, since we
5309 * do some internal FIFO space accounting here.
5310 */
5311 if (sc->sc_type == WM_T_82547 &&
5312 wm_82547_txfifo_bugchk(sc, m0)) {
5313 DPRINTF(WM_DEBUG_TX,
5314 ("%s: TX: 82547 Tx FIFO bug detected\n",
5315 device_xname(sc->sc_dev)));
5316 ifp->if_flags |= IFF_OACTIVE;
5317 bus_dmamap_unload(sc->sc_dmat, dmamap);
5318 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
5319 break;
5320 }
5321
5322 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5323
5324 DPRINTF(WM_DEBUG_TX,
5325 ("%s: TX: packet has %d (%d) DMA segments\n",
5326 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5327
5328 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5329
5330 /*
5331 * Store a pointer to the packet so that we can free it
5332 * later.
5333 *
5334 * Initially, we consider the number of descriptors the
5335 * packet uses the number of DMA segments. This may be
5336 * incremented by 1 if we do checksum offload (a descriptor
5337 * is used to set the checksum context).
5338 */
5339 txs->txs_mbuf = m0;
5340 txs->txs_firstdesc = sc->sc_txnext;
5341 txs->txs_ndesc = segs_needed;
5342
5343 /* Set up offload parameters for this packet. */
5344 if (m0->m_pkthdr.csum_flags &
5345 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5346 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5347 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5348 if (wm_tx_offload(sc, txs, &cksumcmd,
5349 &cksumfields) != 0) {
5350 /* Error message already displayed. */
5351 bus_dmamap_unload(sc->sc_dmat, dmamap);
5352 continue;
5353 }
5354 } else {
5355 cksumcmd = 0;
5356 cksumfields = 0;
5357 }
5358
5359 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
5360
5361 /* Sync the DMA map. */
5362 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5363 BUS_DMASYNC_PREWRITE);
5364
5365 /* Initialize the transmit descriptor. */
5366 for (nexttx = sc->sc_txnext, seg = 0;
5367 seg < dmamap->dm_nsegs; seg++) {
5368 for (seglen = dmamap->dm_segs[seg].ds_len,
5369 curaddr = dmamap->dm_segs[seg].ds_addr;
5370 seglen != 0;
5371 curaddr += curlen, seglen -= curlen,
5372 nexttx = WM_NEXTTX(sc, nexttx)) {
5373 curlen = seglen;
5374
5375 /*
5376 * So says the Linux driver:
5377 * Work around for premature descriptor
5378 * write-backs in TSO mode. Append a
5379 * 4-byte sentinel descriptor.
5380 */
5381 if (use_tso &&
5382 seg == dmamap->dm_nsegs - 1 &&
5383 curlen > 8)
5384 curlen -= 4;
5385
5386 wm_set_dma_addr(
5387 &sc->sc_txdescs[nexttx].wtx_addr,
5388 curaddr);
5389 sc->sc_txdescs[nexttx].wtx_cmdlen =
5390 htole32(cksumcmd | curlen);
5391 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
5392 0;
5393 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
5394 cksumfields;
5395 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5396 lasttx = nexttx;
5397
5398 DPRINTF(WM_DEBUG_TX,
5399 ("%s: TX: desc %d: low %#" PRIx64 ", "
5400 "len %#04zx\n",
5401 device_xname(sc->sc_dev), nexttx,
5402 (uint64_t)curaddr, curlen));
5403 }
5404 }
5405
5406 KASSERT(lasttx != -1);
5407
5408 /*
5409 * Set up the command byte on the last descriptor of
5410 * the packet. If we're in the interrupt delay window,
5411 * delay the interrupt.
5412 */
5413 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5414 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5415
5416 /*
5417 * If VLANs are enabled and the packet has a VLAN tag, set
5418 * up the descriptor to encapsulate the packet for us.
5419 *
5420 * This is only valid on the last descriptor of the packet.
5421 */
5422 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5423 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5424 htole32(WTX_CMD_VLE);
5425 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
5426 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5427 }
5428
5429 txs->txs_lastdesc = lasttx;
5430
5431 DPRINTF(WM_DEBUG_TX,
5432 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5433 device_xname(sc->sc_dev),
5434 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5435
5436 /* Sync the descriptors we're using. */
5437 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5438 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5439
5440 /* Give the packet to the chip. */
5441 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5442
5443 DPRINTF(WM_DEBUG_TX,
5444 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5445
5446 DPRINTF(WM_DEBUG_TX,
5447 ("%s: TX: finished transmitting packet, job %d\n",
5448 device_xname(sc->sc_dev), sc->sc_txsnext));
5449
5450 /* Advance the tx pointer. */
5451 sc->sc_txfree -= txs->txs_ndesc;
5452 sc->sc_txnext = nexttx;
5453
5454 sc->sc_txsfree--;
5455 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5456
5457 /* Pass the packet to any BPF listeners. */
5458 bpf_mtap(ifp, m0);
5459 }
5460
5461 if (m0 != NULL) {
5462 ifp->if_flags |= IFF_OACTIVE;
5463 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5464 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5465 m_freem(m0);
5466 }
5467
5468 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5469 /* No more slots; notify upper layer. */
5470 ifp->if_flags |= IFF_OACTIVE;
5471 }
5472
5473 if (sc->sc_txfree != ofree) {
5474 /* Set a watchdog timer in case the chip flakes out. */
5475 ifp->if_timer = 5;
5476 }
5477 }
5478
5479 /*
5480 * wm_nq_tx_offload:
5481 *
5482 * Set up TCP/IP checksumming parameters for the
5483 * specified packet, for NEWQUEUE devices
5484 */
5485 static int
5486 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
5487 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
5488 {
5489 struct mbuf *m0 = txs->txs_mbuf;
5490 struct m_tag *mtag;
5491 uint32_t vl_len, mssidx, cmdc;
5492 struct ether_header *eh;
5493 int offset, iphl;
5494
5495 /*
5496 * XXX It would be nice if the mbuf pkthdr had offset
5497 * fields for the protocol headers.
5498 */
5499 *cmdlenp = 0;
5500 *fieldsp = 0;
5501
5502 eh = mtod(m0, struct ether_header *);
5503 switch (htons(eh->ether_type)) {
5504 case ETHERTYPE_IP:
5505 case ETHERTYPE_IPV6:
5506 offset = ETHER_HDR_LEN;
5507 break;
5508
5509 case ETHERTYPE_VLAN:
5510 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5511 break;
5512
5513 default:
5514 /* Don't support this protocol or encapsulation. */
5515 *do_csum = false;
5516 return 0;
5517 }
5518 *do_csum = true;
5519 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
5520 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
5521
5522 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
5523 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
5524
5525 if ((m0->m_pkthdr.csum_flags &
5526 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
5527 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5528 } else {
5529 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5530 }
5531 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
5532 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
5533
5534 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5535 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
5536 << NQTXC_VLLEN_VLAN_SHIFT);
5537 *cmdlenp |= NQTX_CMD_VLE;
5538 }
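	/*
	 * At this point vl_len carries the packed L2/L3 header geometry
	 * for the context descriptor.  For example, for an untagged
	 * TCPv4 packet with a standard 20-byte IP header:
	 *
	 *	vl_len == (ETHER_HDR_LEN << NQTXC_VLLEN_MACLEN_SHIFT)
	 *	    | (20 << NQTXC_VLLEN_IPLEN_SHIFT)
	 *
	 * i.e. MACLEN = 14 and IPLEN = 20, with the VLAN field zero.
	 */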
5539
5540 mssidx = 0;
5541
5542 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5543 int hlen = offset + iphl;
5544 int tcp_hlen;
5545 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5546
5547 if (__predict_false(m0->m_len <
5548 (hlen + sizeof(struct tcphdr)))) {
5549 /*
5550 * TCP/IP headers are not in the first mbuf; we need
5551 * to do this the slow and painful way. Let's just
5552 * hope this doesn't happen very often.
5553 */
5554 struct tcphdr th;
5555
5556 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5557
5558 m_copydata(m0, hlen, sizeof(th), &th);
5559 if (v4) {
5560 struct ip ip;
5561
5562 m_copydata(m0, offset, sizeof(ip), &ip);
5563 ip.ip_len = 0;
5564 m_copyback(m0,
5565 offset + offsetof(struct ip, ip_len),
5566 sizeof(ip.ip_len), &ip.ip_len);
5567 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5568 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5569 } else {
5570 struct ip6_hdr ip6;
5571
5572 m_copydata(m0, offset, sizeof(ip6), &ip6);
5573 ip6.ip6_plen = 0;
5574 m_copyback(m0,
5575 offset + offsetof(struct ip6_hdr, ip6_plen),
5576 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5577 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5578 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5579 }
5580 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5581 sizeof(th.th_sum), &th.th_sum);
5582
5583 tcp_hlen = th.th_off << 2;
5584 } else {
5585 /*
5586 * TCP/IP headers are in the first mbuf; we can do
5587 * this the easy way.
5588 */
5589 struct tcphdr *th;
5590
5591 if (v4) {
5592 struct ip *ip =
5593 (void *)(mtod(m0, char *) + offset);
5594 th = (void *)(mtod(m0, char *) + hlen);
5595
5596 ip->ip_len = 0;
5597 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5598 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5599 } else {
5600 struct ip6_hdr *ip6 =
5601 (void *)(mtod(m0, char *) + offset);
5602 th = (void *)(mtod(m0, char *) + hlen);
5603
5604 ip6->ip6_plen = 0;
5605 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5606 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5607 }
5608 tcp_hlen = th->th_off << 2;
5609 }
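		/*
		 * In both paths above, th_sum is seeded with the checksum
		 * of a pseudo header whose length field is zero (ip_len /
		 * ip6_plen are cleared first): the one's complement sum
		 * of just the addresses and the protocol.  The hardware
		 * folds the actual per-segment payload length into this
		 * seed as it carves the packet into MSS-sized frames.
		 */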
5610 hlen += tcp_hlen;
5611 *cmdlenp |= NQTX_CMD_TSE;
5612
5613 if (v4) {
5614 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5615 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5616 } else {
5617 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5618 *fieldsp |= NQTXD_FIELDS_TUXSM;
5619 }
5620 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5621 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5622 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5623 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5624 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5625 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
5626 } else {
5627 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5628 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5629 }
5630
5631 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5632 *fieldsp |= NQTXD_FIELDS_IXSM;
5633 cmdc |= NQTXC_CMD_IP4;
5634 }
5635
5636 if (m0->m_pkthdr.csum_flags &
5637 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5638 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5639 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5640 cmdc |= NQTXC_CMD_TCP;
5641 } else {
5642 cmdc |= NQTXC_CMD_UDP;
5643 }
5644 cmdc |= NQTXC_CMD_IP4;
5645 *fieldsp |= NQTXD_FIELDS_TUXSM;
5646 }
5647 if (m0->m_pkthdr.csum_flags &
5648 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5649 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5650 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5651 cmdc |= NQTXC_CMD_TCP;
5652 } else {
5653 cmdc |= NQTXC_CMD_UDP;
5654 }
5655 cmdc |= NQTXC_CMD_IP6;
5656 *fieldsp |= NQTXD_FIELDS_TUXSM;
5657 }
5658
5659 /* Fill in the context descriptor. */
5660 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5661 htole32(vl_len);
5662 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5663 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5664 htole32(cmdc);
5665 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5666 htole32(mssidx);
5667 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5668 DPRINTF(WM_DEBUG_TX,
5669 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5670 sc->sc_txnext, 0, vl_len));
5671 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5672 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5673 txs->txs_ndesc++;
5674 return 0;
5675 }
5676
5677 /*
5678 * wm_nq_start: [ifnet interface function]
5679 *
5680 * Start packet transmission on the interface for NEWQUEUE devices
5681 */
5682 static void
5683 wm_nq_start(struct ifnet *ifp)
5684 {
5685 struct wm_softc *sc = ifp->if_softc;
5686
5687 WM_TX_LOCK(sc);
5688 if (!sc->sc_stopping)
5689 wm_nq_start_locked(ifp);
5690 WM_TX_UNLOCK(sc);
5691 }
5692
5693 static void
5694 wm_nq_start_locked(struct ifnet *ifp)
5695 {
5696 struct wm_softc *sc = ifp->if_softc;
5697 struct mbuf *m0;
5698 struct m_tag *mtag;
5699 struct wm_txsoft *txs;
5700 bus_dmamap_t dmamap;
5701 int error, nexttx, lasttx = -1, seg, segs_needed;
5702 bool do_csum, sent;
5703
5704 KASSERT(WM_TX_LOCKED(sc));
5705
5706 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5707 return;
5708
5709 sent = false;
5710
5711 /*
5712 * Loop through the send queue, setting up transmit descriptors
5713 * until we drain the queue, or use up all available transmit
5714 * descriptors.
5715 */
5716 for (;;) {
5717 m0 = NULL;
5718
5719 /* Get a work queue entry. */
5720 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5721 wm_txeof(sc);
5722 if (sc->sc_txsfree == 0) {
5723 DPRINTF(WM_DEBUG_TX,
5724 ("%s: TX: no free job descriptors\n",
5725 device_xname(sc->sc_dev)));
5726 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5727 break;
5728 }
5729 }
5730
5731 /* Grab a packet off the queue. */
5732 IFQ_DEQUEUE(&ifp->if_snd, m0);
5733 if (m0 == NULL)
5734 break;
5735
5736 DPRINTF(WM_DEBUG_TX,
5737 ("%s: TX: have packet to transmit: %p\n",
5738 device_xname(sc->sc_dev), m0));
5739
5740 txs = &sc->sc_txsoft[sc->sc_txsnext];
5741 dmamap = txs->txs_dmamap;
5742
5743 /*
5744 * Load the DMA map. If this fails, the packet either
5745 * didn't fit in the allotted number of segments, or we
5746 * were short on resources. For the too-many-segments
5747 * case, we simply report an error and drop the packet,
5748 * since we can't sanely copy a jumbo packet to a single
5749 * buffer.
5750 */
5751 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5752 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5753 if (error) {
5754 if (error == EFBIG) {
5755 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5756 log(LOG_ERR, "%s: Tx packet consumes too many "
5757 "DMA segments, dropping...\n",
5758 device_xname(sc->sc_dev));
5759 wm_dump_mbuf_chain(sc, m0);
5760 m_freem(m0);
5761 continue;
5762 }
5763 /* Short on resources, just stop for now. */
5764 DPRINTF(WM_DEBUG_TX,
5765 ("%s: TX: dmamap load failed: %d\n",
5766 device_xname(sc->sc_dev), error));
5767 break;
5768 }
5769
5770 segs_needed = dmamap->dm_nsegs;
5771
5772 /*
5773 * Ensure we have enough descriptors free to describe
5774 * the packet. Note, we always reserve one descriptor
5775 * at the end of the ring due to the semantics of the
5776 * TDT register, plus one more in the event we need
5777 * to load offload context.
5778 */
5779 if (segs_needed > sc->sc_txfree - 2) {
5780 /*
5781 * Not enough free descriptors to transmit this
5782 * packet. We haven't committed anything yet,
5784				 * so just unload the DMA map and punt; the
5784				 * packet is dropped after the loop.  Notify the
5785				 * upper layer that there are no more slots left.
5786 */
5787 DPRINTF(WM_DEBUG_TX,
5788 ("%s: TX: need %d (%d) descriptors, have %d\n",
5789 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5790			    segs_needed, sc->sc_txfree - 2));
5791 ifp->if_flags |= IFF_OACTIVE;
5792 bus_dmamap_unload(sc->sc_dmat, dmamap);
5793 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5794 break;
5795 }
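		/*
		 * Concretely: with sc_txfree == 10, a packet needing 9
		 * segments is deferred here, since 9 > 10 - 2 once the
		 * TDT slot and a possible context descriptor are both
		 * reserved.
		 */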
5796
5797 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5798
5799 DPRINTF(WM_DEBUG_TX,
5800 ("%s: TX: packet has %d (%d) DMA segments\n",
5801 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5802
5803 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5804
5805 /*
5806 * Store a pointer to the packet so that we can free it
5807 * later.
5808 *
5809		 * Initially, we consider the number of descriptors the
5810		 * packet uses to be the number of DMA segments.  This
5811		 * may be incremented by 1 if we do checksum offload (a
5812		 * descriptor is used to set the checksum context).
5813 */
5814 txs->txs_mbuf = m0;
5815 txs->txs_firstdesc = sc->sc_txnext;
5816 txs->txs_ndesc = segs_needed;
5817
5818 /* Set up offload parameters for this packet. */
5819 uint32_t cmdlen, fields, dcmdlen;
5820 if (m0->m_pkthdr.csum_flags &
5821 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5822 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5823 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5824 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5825 &do_csum) != 0) {
5826 /* Error message already displayed. */
5827 bus_dmamap_unload(sc->sc_dmat, dmamap);
5828 continue;
5829 }
5830 } else {
5831 do_csum = false;
5832 cmdlen = 0;
5833 fields = 0;
5834 }
5835
5836 /* Sync the DMA map. */
5837 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5838 BUS_DMASYNC_PREWRITE);
5839
5840 /* Initialize the first transmit descriptor. */
5841 nexttx = sc->sc_txnext;
5842 if (!do_csum) {
5843 /* setup a legacy descriptor */
5844 wm_set_dma_addr(
5845 &sc->sc_txdescs[nexttx].wtx_addr,
5846 dmamap->dm_segs[0].ds_addr);
5847 sc->sc_txdescs[nexttx].wtx_cmdlen =
5848 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5849 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5850 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5851 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5852 NULL) {
5853 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5854 htole32(WTX_CMD_VLE);
5855 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5856 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5857 } else {
5858				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5859 }
5860 dcmdlen = 0;
5861 } else {
5862 /* setup an advanced data descriptor */
5863 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5864 htole64(dmamap->dm_segs[0].ds_addr);
5865 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5866 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5867			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
5868 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5869 htole32(fields);
5870 DPRINTF(WM_DEBUG_TX,
5871 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5872 device_xname(sc->sc_dev), nexttx,
5873 (uint64_t)dmamap->dm_segs[0].ds_addr));
5874 DPRINTF(WM_DEBUG_TX,
5875 ("\t 0x%08x%08x\n", fields,
5876 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5877 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5878 }
5879
5880 lasttx = nexttx;
5881 nexttx = WM_NEXTTX(sc, nexttx);
5882 /*
5883		 * Fill in the next descriptors.  The legacy and advanced
5884		 * formats are the same here.
5885 */
5886 for (seg = 1; seg < dmamap->dm_nsegs;
5887 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5888 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5889 htole64(dmamap->dm_segs[seg].ds_addr);
5890 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5891 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5892 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5893 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5894 lasttx = nexttx;
5895
5896 DPRINTF(WM_DEBUG_TX,
5897 ("%s: TX: desc %d: %#" PRIx64 ", "
5898 "len %#04zx\n",
5899 device_xname(sc->sc_dev), nexttx,
5900 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5901 dmamap->dm_segs[seg].ds_len));
5902 }
5903
5904 KASSERT(lasttx != -1);
5905
5906 /*
5907 * Set up the command byte on the last descriptor of
5908 * the packet. If we're in the interrupt delay window,
5909 * delay the interrupt.
5910 */
5911 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5912 (NQTX_CMD_EOP | NQTX_CMD_RS));
5913 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5914 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5915
5916 txs->txs_lastdesc = lasttx;
5917
5918 DPRINTF(WM_DEBUG_TX,
5919 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5920 device_xname(sc->sc_dev),
5921 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5922
5923 /* Sync the descriptors we're using. */
5924 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5925 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5926
5927 /* Give the packet to the chip. */
5928 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5929 sent = true;
5930
5931 DPRINTF(WM_DEBUG_TX,
5932 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5933
5934 DPRINTF(WM_DEBUG_TX,
5935 ("%s: TX: finished transmitting packet, job %d\n",
5936 device_xname(sc->sc_dev), sc->sc_txsnext));
5937
5938 /* Advance the tx pointer. */
5939 sc->sc_txfree -= txs->txs_ndesc;
5940 sc->sc_txnext = nexttx;
5941
5942 sc->sc_txsfree--;
5943 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5944
5945 /* Pass the packet to any BPF listeners. */
5946 bpf_mtap(ifp, m0);
5947 }
5948
5949 if (m0 != NULL) {
5950 ifp->if_flags |= IFF_OACTIVE;
5951 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5952 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5953 m_freem(m0);
5954 }
5955
5956 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5957 /* No more slots; notify upper layer. */
5958 ifp->if_flags |= IFF_OACTIVE;
5959 }
5960
5961 if (sent) {
5962 /* Set a watchdog timer in case the chip flakes out. */
5963 ifp->if_timer = 5;
5964 }
5965 }
5966
5967 /* Interrupt */
5968
5969 /*
5970 * wm_txeof:
5971 *
5972 * Helper; handle transmit interrupts.
5973 */
5974 static int
5975 wm_txeof(struct wm_softc *sc)
5976 {
5977 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5978 struct wm_txsoft *txs;
5979 bool processed = false;
5980 int count = 0;
5981 int i;
5982 uint8_t status;
5983
5984 if (sc->sc_stopping)
5985 return 0;
5986
5987 ifp->if_flags &= ~IFF_OACTIVE;
5988
5989 /*
5990 * Go through the Tx list and free mbufs for those
5991 * frames which have been transmitted.
5992 */
5993 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5994 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5995 txs = &sc->sc_txsoft[i];
5996
5997 DPRINTF(WM_DEBUG_TX,
5998 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5999
6000 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
6001 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6002
6003 status =
6004 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
6005 if ((status & WTX_ST_DD) == 0) {
6006 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
6007 BUS_DMASYNC_PREREAD);
6008 break;
6009 }
6010
6011 processed = true;
6012 count++;
6013 DPRINTF(WM_DEBUG_TX,
6014 ("%s: TX: job %d done: descs %d..%d\n",
6015 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
6016 txs->txs_lastdesc));
6017
6018 /*
6019 * XXX We should probably be using the statistics
6020 * XXX registers, but I don't know if they exist
6021 * XXX on chips before the i82544.
6022 */
6023
6024 #ifdef WM_EVENT_COUNTERS
6025 if (status & WTX_ST_TU)
6026 WM_EVCNT_INCR(&sc->sc_ev_tu);
6027 #endif /* WM_EVENT_COUNTERS */
6028
6029 if (status & (WTX_ST_EC|WTX_ST_LC)) {
6030 ifp->if_oerrors++;
6031 if (status & WTX_ST_LC)
6032 log(LOG_WARNING, "%s: late collision\n",
6033 device_xname(sc->sc_dev));
6034 else if (status & WTX_ST_EC) {
6035 ifp->if_collisions += 16;
6036 log(LOG_WARNING, "%s: excessive collisions\n",
6037 device_xname(sc->sc_dev));
6038 }
6039 } else
6040 ifp->if_opackets++;
6041
6042 sc->sc_txfree += txs->txs_ndesc;
6043 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
6044 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6045 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
6046 m_freem(txs->txs_mbuf);
6047 txs->txs_mbuf = NULL;
6048 }
6049
6050 /* Update the dirty transmit buffer pointer. */
6051 sc->sc_txsdirty = i;
6052 DPRINTF(WM_DEBUG_TX,
6053 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
6054
6055 if (count != 0)
6056 rnd_add_uint32(&sc->rnd_source, count);
6057
6058 /*
6059 * If there are no more pending transmissions, cancel the watchdog
6060 * timer.
6061 */
6062 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
6063 ifp->if_timer = 0;
6064
6065 return processed;
6066 }
6067
6068 /*
6069 * wm_rxeof:
6070 *
6071 * Helper; handle receive interrupts.
6072 */
6073 static void
6074 wm_rxeof(struct wm_softc *sc)
6075 {
6076 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6077 struct wm_rxsoft *rxs;
6078 struct mbuf *m;
6079 int i, len;
6080 int count = 0;
6081 uint8_t status, errors;
6082 uint16_t vlantag;
6083
6084 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
6085 rxs = &sc->sc_rxsoft[i];
6086
6087 DPRINTF(WM_DEBUG_RX,
6088 ("%s: RX: checking descriptor %d\n",
6089 device_xname(sc->sc_dev), i));
6090
6091 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6092
6093 status = sc->sc_rxdescs[i].wrx_status;
6094 errors = sc->sc_rxdescs[i].wrx_errors;
6095 len = le16toh(sc->sc_rxdescs[i].wrx_len);
6096 vlantag = sc->sc_rxdescs[i].wrx_special;
6097
6098 if ((status & WRX_ST_DD) == 0) {
6099 /* We have processed all of the receive descriptors. */
6100 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
6101 break;
6102 }
6103
6104 count++;
6105 if (__predict_false(sc->sc_rxdiscard)) {
6106 DPRINTF(WM_DEBUG_RX,
6107 ("%s: RX: discarding contents of descriptor %d\n",
6108 device_xname(sc->sc_dev), i));
6109 WM_INIT_RXDESC(sc, i);
6110 if (status & WRX_ST_EOP) {
6111 /* Reset our state. */
6112 DPRINTF(WM_DEBUG_RX,
6113 ("%s: RX: resetting rxdiscard -> 0\n",
6114 device_xname(sc->sc_dev)));
6115 sc->sc_rxdiscard = 0;
6116 }
6117 continue;
6118 }
6119
6120 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6121 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
6122
6123 m = rxs->rxs_mbuf;
6124
6125 /*
6126 * Add a new receive buffer to the ring, unless of
6127 * course the length is zero. Treat the latter as a
6128 * failed mapping.
6129 */
6130 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
6131 /*
6132 * Failed, throw away what we've done so
6133 * far, and discard the rest of the packet.
6134 */
6135 ifp->if_ierrors++;
6136 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6137 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
6138 WM_INIT_RXDESC(sc, i);
6139 if ((status & WRX_ST_EOP) == 0)
6140 sc->sc_rxdiscard = 1;
6141 if (sc->sc_rxhead != NULL)
6142 m_freem(sc->sc_rxhead);
6143 WM_RXCHAIN_RESET(sc);
6144 DPRINTF(WM_DEBUG_RX,
6145 ("%s: RX: Rx buffer allocation failed, "
6146 "dropping packet%s\n", device_xname(sc->sc_dev),
6147 sc->sc_rxdiscard ? " (discard)" : ""));
6148 continue;
6149 }
6150
6151 m->m_len = len;
6152 sc->sc_rxlen += len;
6153 DPRINTF(WM_DEBUG_RX,
6154 ("%s: RX: buffer at %p len %d\n",
6155 device_xname(sc->sc_dev), m->m_data, len));
6156
6157 /* If this is not the end of the packet, keep looking. */
6158 if ((status & WRX_ST_EOP) == 0) {
6159 WM_RXCHAIN_LINK(sc, m);
6160 DPRINTF(WM_DEBUG_RX,
6161 ("%s: RX: not yet EOP, rxlen -> %d\n",
6162 device_xname(sc->sc_dev), sc->sc_rxlen));
6163 continue;
6164 }
6165
6166 /*
6167		 * Okay, we have the entire packet now.  The chip is
6168		 * configured to include the FCS except on the I350, I354
6169		 * and I21[01] (not all chips can be configured to strip
6170		 * it), so we need to trim it.  We may also need to adjust
6171		 * the length of the previous mbuf in the chain if the
6172		 * current mbuf is too short.  Due to an erratum, the
6173		 * RCTL_SECRC bit in RCTL is always set on the I350, so
6174		 * the FCS is already stripped and we must not trim it.
6175 */
6176 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
6177 && (sc->sc_type != WM_T_I210)
6178 && (sc->sc_type != WM_T_I211)) {
6179 if (m->m_len < ETHER_CRC_LEN) {
6180 sc->sc_rxtail->m_len
6181 -= (ETHER_CRC_LEN - m->m_len);
6182 m->m_len = 0;
6183 } else
6184 m->m_len -= ETHER_CRC_LEN;
6185 len = sc->sc_rxlen - ETHER_CRC_LEN;
6186 } else
6187 len = sc->sc_rxlen;
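		/*
		 * For example, if the final mbuf carries only 2 bytes,
		 * those are the last 2 bytes of the 4-byte FCS; the other
		 * 2 FCS bytes sit at the tail of the previous mbuf, so
		 * sc_rxtail is shortened by 2 and this mbuf's length is
		 * zeroed.
		 */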
6188
6189 WM_RXCHAIN_LINK(sc, m);
6190
6191 *sc->sc_rxtailp = NULL;
6192 m = sc->sc_rxhead;
6193
6194 WM_RXCHAIN_RESET(sc);
6195
6196 DPRINTF(WM_DEBUG_RX,
6197 ("%s: RX: have entire packet, len -> %d\n",
6198 device_xname(sc->sc_dev), len));
6199
6200 /* If an error occurred, update stats and drop the packet. */
6201 if (errors &
6202 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
6203 if (errors & WRX_ER_SE)
6204 log(LOG_WARNING, "%s: symbol error\n",
6205 device_xname(sc->sc_dev));
6206 else if (errors & WRX_ER_SEQ)
6207 log(LOG_WARNING, "%s: receive sequence error\n",
6208 device_xname(sc->sc_dev));
6209 else if (errors & WRX_ER_CE)
6210 log(LOG_WARNING, "%s: CRC error\n",
6211 device_xname(sc->sc_dev));
6212 m_freem(m);
6213 continue;
6214 }
6215
6216 /* No errors. Receive the packet. */
6217 m->m_pkthdr.rcvif = ifp;
6218 m->m_pkthdr.len = len;
6219
6220 /*
6221 * If VLANs are enabled, VLAN packets have been unwrapped
6222 * for us. Associate the tag with the packet.
6223 */
6224 /* XXXX should check for i350 and i354 */
6225 if ((status & WRX_ST_VP) != 0) {
6226 VLAN_INPUT_TAG(ifp, m,
6227 le16toh(vlantag),
6228 continue);
6229 }
6230
6231 /* Set up checksum info for this packet. */
6232 if ((status & WRX_ST_IXSM) == 0) {
6233 if (status & WRX_ST_IPCS) {
6234 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
6235 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
6236 if (errors & WRX_ER_IPE)
6237 m->m_pkthdr.csum_flags |=
6238 M_CSUM_IPv4_BAD;
6239 }
6240 if (status & WRX_ST_TCPCS) {
6241 /*
6242 * Note: we don't know if this was TCP or UDP,
6243 * so we just set both bits, and expect the
6244 * upper layers to deal.
6245 */
6246 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
6247 m->m_pkthdr.csum_flags |=
6248 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6249 M_CSUM_TCPv6 | M_CSUM_UDPv6;
6250 if (errors & WRX_ER_TCPE)
6251 m->m_pkthdr.csum_flags |=
6252 M_CSUM_TCP_UDP_BAD;
6253 }
6254 }
6255
6256 ifp->if_ipackets++;
6257
6258 WM_RX_UNLOCK(sc);
6259
6260 /* Pass this up to any BPF listeners. */
6261 bpf_mtap(ifp, m);
6262
6263 /* Pass it on. */
6264 (*ifp->if_input)(ifp, m);
6265
6266 WM_RX_LOCK(sc);
6267
6268 if (sc->sc_stopping)
6269 break;
6270 }
6271
6272 /* Update the receive pointer. */
6273 sc->sc_rxptr = i;
6274 if (count != 0)
6275 rnd_add_uint32(&sc->rnd_source, count);
6276
6277 DPRINTF(WM_DEBUG_RX,
6278 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
6279 }
6280
6281 /*
6282 * wm_linkintr_gmii:
6283 *
6284 * Helper; handle link interrupts for GMII.
6285 */
6286 static void
6287 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
6288 {
6289
6290 KASSERT(WM_TX_LOCKED(sc));
6291
6292 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6293 __func__));
6294
6295 if (icr & ICR_LSC) {
6296 DPRINTF(WM_DEBUG_LINK,
6297 ("%s: LINK: LSC -> mii_pollstat\n",
6298 device_xname(sc->sc_dev)));
6299 mii_pollstat(&sc->sc_mii);
6300 if (sc->sc_type == WM_T_82543) {
6301 int miistatus, active;
6302
6303 /*
6304 * With 82543, we need to force speed and
6305 * duplex on the MAC equal to what the PHY
6306 * speed and duplex configuration is.
6307 */
6308 miistatus = sc->sc_mii.mii_media_status;
6309
6310 if (miistatus & IFM_ACTIVE) {
6311 active = sc->sc_mii.mii_media_active;
6312 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6313 switch (IFM_SUBTYPE(active)) {
6314 case IFM_10_T:
6315 sc->sc_ctrl |= CTRL_SPEED_10;
6316 break;
6317 case IFM_100_TX:
6318 sc->sc_ctrl |= CTRL_SPEED_100;
6319 break;
6320 case IFM_1000_T:
6321 sc->sc_ctrl |= CTRL_SPEED_1000;
6322 break;
6323 default:
6324 /*
6325 * fiber?
6326					 * Should not enter here.
6327 */
6328 printf("unknown media (%x)\n",
6329 active);
6330 break;
6331 }
6332 if (active & IFM_FDX)
6333 sc->sc_ctrl |= CTRL_FD;
6334 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6335 }
6336 } else if ((sc->sc_type == WM_T_ICH8)
6337 && (sc->sc_phytype == WMPHY_IGP_3)) {
6338 wm_kmrn_lock_loss_workaround_ich8lan(sc);
6339 } else if (sc->sc_type == WM_T_PCH) {
6340 wm_k1_gig_workaround_hv(sc,
6341 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
6342 }
6343
6344 if ((sc->sc_phytype == WMPHY_82578)
6345 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
6346 == IFM_1000_T)) {
6347
6348 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
6349 delay(200*1000); /* XXX too big */
6350
6351 /* Link stall fix for link up */
6352 wm_gmii_hv_writereg(sc->sc_dev, 1,
6353 HV_MUX_DATA_CTRL,
6354 HV_MUX_DATA_CTRL_GEN_TO_MAC
6355 | HV_MUX_DATA_CTRL_FORCE_SPEED);
6356 wm_gmii_hv_writereg(sc->sc_dev, 1,
6357 HV_MUX_DATA_CTRL,
6358 HV_MUX_DATA_CTRL_GEN_TO_MAC);
6359 }
6360 }
6361 } else if (icr & ICR_RXSEQ) {
6362 DPRINTF(WM_DEBUG_LINK,
6363 ("%s: LINK Receive sequence error\n",
6364 device_xname(sc->sc_dev)));
6365 }
6366 }
6367
6368 /*
6369 * wm_linkintr_tbi:
6370 *
6371 * Helper; handle link interrupts for TBI mode.
6372 */
6373 static void
6374 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
6375 {
6376 uint32_t status;
6377
6378 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6379 __func__));
6380
6381 status = CSR_READ(sc, WMREG_STATUS);
6382 if (icr & ICR_LSC) {
6383 if (status & STATUS_LU) {
6384 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
6385 device_xname(sc->sc_dev),
6386 (status & STATUS_FD) ? "FDX" : "HDX"));
6387 /*
6388 * NOTE: CTRL will update TFCE and RFCE automatically,
6389 * so we should update sc->sc_ctrl
6390 */
6391
6392 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6393 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6394 sc->sc_fcrtl &= ~FCRTL_XONE;
6395 if (status & STATUS_FD)
6396 sc->sc_tctl |=
6397 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6398 else
6399 sc->sc_tctl |=
6400 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6401 if (sc->sc_ctrl & CTRL_TFCE)
6402 sc->sc_fcrtl |= FCRTL_XONE;
6403 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6404 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6405 WMREG_OLD_FCRTL : WMREG_FCRTL,
6406 sc->sc_fcrtl);
6407 sc->sc_tbi_linkup = 1;
6408 } else {
6409 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
6410 device_xname(sc->sc_dev)));
6411 sc->sc_tbi_linkup = 0;
6412 }
6413 /* Update LED */
6414 wm_tbi_serdes_set_linkled(sc);
6415 } else if (icr & ICR_RXSEQ) {
6416 DPRINTF(WM_DEBUG_LINK,
6417 ("%s: LINK: Receive sequence error\n",
6418 device_xname(sc->sc_dev)));
6419 }
6420 }
6421
6422 /*
6423 * wm_linkintr_serdes:
6424 *
6425  *	Helper; handle link interrupts for SERDES mode.
6426 */
6427 static void
6428 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
6429 {
6430 struct mii_data *mii = &sc->sc_mii;
6431 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6432 uint32_t pcs_adv, pcs_lpab, reg;
6433
6434 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6435 __func__));
6436
6437 if (icr & ICR_LSC) {
6438 /* Check PCS */
6439 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6440 if ((reg & PCS_LSTS_LINKOK) != 0) {
6441 mii->mii_media_status |= IFM_ACTIVE;
6442 sc->sc_tbi_linkup = 1;
6443 } else {
6444			mii->mii_media_active |= IFM_NONE;
6445 sc->sc_tbi_linkup = 0;
6446 wm_tbi_serdes_set_linkled(sc);
6447 return;
6448 }
6449 mii->mii_media_active |= IFM_1000_SX;
6450 if ((reg & PCS_LSTS_FDX) != 0)
6451 mii->mii_media_active |= IFM_FDX;
6452 else
6453 mii->mii_media_active |= IFM_HDX;
6454 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6455 /* Check flow */
6456 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6457 if ((reg & PCS_LSTS_AN_COMP) == 0) {
6458 DPRINTF(WM_DEBUG_LINK,
6459 ("XXX LINKOK but not ACOMP\n"));
6460 return;
6461 }
6462 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
6463 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
6464 DPRINTF(WM_DEBUG_LINK,
6465 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
6466 if ((pcs_adv & TXCW_SYM_PAUSE)
6467 && (pcs_lpab & TXCW_SYM_PAUSE)) {
6468 mii->mii_media_active |= IFM_FLOW
6469 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
6470 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
6471 && (pcs_adv & TXCW_ASYM_PAUSE)
6472 && (pcs_lpab & TXCW_SYM_PAUSE)
6473 && (pcs_lpab & TXCW_ASYM_PAUSE))
6474 mii->mii_media_active |= IFM_FLOW
6475 | IFM_ETH_TXPAUSE;
6476 else if ((pcs_adv & TXCW_SYM_PAUSE)
6477 && (pcs_adv & TXCW_ASYM_PAUSE)
6478 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
6479 && (pcs_lpab & TXCW_ASYM_PAUSE))
6480 mii->mii_media_active |= IFM_FLOW
6481 | IFM_ETH_RXPAUSE;
6482 }
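		/*
		 * The checks above implement the standard 802.3 pause
		 * resolution between our advertisement (pcs_adv) and the
		 * link partner's (pcs_lpab):
		 *
		 *	local SYM/ASYM	partner SYM/ASYM	result
		 *	    1    x	     1     x		TX + RX pause
		 *	    0    1	     1     1		TX pause only
		 *	    1    1	     0     1		RX pause only
		 *
		 * Any other combination resolves to no flow control.
		 */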
6483 /* Update LED */
6484 wm_tbi_serdes_set_linkled(sc);
6485 } else {
6486 DPRINTF(WM_DEBUG_LINK,
6487 ("%s: LINK: Receive sequence error\n",
6488 device_xname(sc->sc_dev)));
6489 }
6490 }
6491
6492 /*
6493 * wm_linkintr:
6494 *
6495 * Helper; handle link interrupts.
6496 */
6497 static void
6498 wm_linkintr(struct wm_softc *sc, uint32_t icr)
6499 {
6500
6501 if (sc->sc_flags & WM_F_HAS_MII)
6502 wm_linkintr_gmii(sc, icr);
6503 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
6504 && (sc->sc_type >= WM_T_82575))
6505 wm_linkintr_serdes(sc, icr);
6506 else
6507 wm_linkintr_tbi(sc, icr);
6508 }
6509
6510 /*
6511 * wm_intr_legacy:
6512 *
6513 * Interrupt service routine for INTx and MSI.
6514 */
6515 static int
6516 wm_intr_legacy(void *arg)
6517 {
6518 struct wm_softc *sc = arg;
6519 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6520 uint32_t icr, rndval = 0;
6521 int handled = 0;
6522
6523 DPRINTF(WM_DEBUG_TX,
6524 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
6525 while (1 /* CONSTCOND */) {
6526 icr = CSR_READ(sc, WMREG_ICR);
6527 if ((icr & sc->sc_icr) == 0)
6528 break;
6529 if (rndval == 0)
6530 rndval = icr;
6531
6532 WM_RX_LOCK(sc);
6533
6534 if (sc->sc_stopping) {
6535 WM_RX_UNLOCK(sc);
6536 break;
6537 }
6538
6539 handled = 1;
6540
6541 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6542 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
6543 DPRINTF(WM_DEBUG_RX,
6544 ("%s: RX: got Rx intr 0x%08x\n",
6545 device_xname(sc->sc_dev),
6546 icr & (ICR_RXDMT0|ICR_RXT0)));
6547 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6548 }
6549 #endif
6550 wm_rxeof(sc);
6551
6552 WM_RX_UNLOCK(sc);
6553 WM_TX_LOCK(sc);
6554
6555 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6556 if (icr & ICR_TXDW) {
6557 DPRINTF(WM_DEBUG_TX,
6558 ("%s: TX: got TXDW interrupt\n",
6559 device_xname(sc->sc_dev)));
6560 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6561 }
6562 #endif
6563 wm_txeof(sc);
6564
6565 if (icr & (ICR_LSC|ICR_RXSEQ)) {
6566 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6567 wm_linkintr(sc, icr);
6568 }
6569
6570 WM_TX_UNLOCK(sc);
6571
6572 if (icr & ICR_RXO) {
6573 #if defined(WM_DEBUG)
6574 log(LOG_WARNING, "%s: Receive overrun\n",
6575 device_xname(sc->sc_dev));
6576 #endif /* defined(WM_DEBUG) */
6577 }
6578 }
6579
6580 rnd_add_uint32(&sc->rnd_source, rndval);
6581
6582 if (handled) {
6583 /* Try to get more packets going. */
6584 ifp->if_start(ifp);
6585 }
6586
6587 return handled;
6588 }
6589
6590 #ifdef WM_MSI_MSIX
6591 /*
6592 * wm_txintr_msix:
6593 *
6594 * Interrupt service routine for TX complete interrupt for MSI-X.
6595 */
6596 static int
6597 wm_txintr_msix(void *arg)
6598 {
6599 struct wm_softc *sc = arg;
6600 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6601 int handled = 0;
6602
6603 DPRINTF(WM_DEBUG_TX,
6604 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
6605
6606 if (sc->sc_type == WM_T_82574)
6607 CSR_WRITE(sc, WMREG_IMC, ICR_TXQ0); /* 82574 only */
6608 else if (sc->sc_type == WM_T_82575)
6609 CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE0);
6610 else
6611 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_TXINTR_IDX);
6612
6613 WM_TX_LOCK(sc);
6614
6615 if (sc->sc_stopping)
6616 goto out;
6617
6618 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6619 handled = wm_txeof(sc);
6620
6621 out:
6622 WM_TX_UNLOCK(sc);
6623
6624 if (sc->sc_type == WM_T_82574)
6625 CSR_WRITE(sc, WMREG_IMS, ICR_TXQ0); /* 82574 only */
6626 else if (sc->sc_type == WM_T_82575)
6627 CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE0);
6628 else
6629 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_TXINTR_IDX);
6630
6631 if (handled) {
6632 /* Try to get more packets going. */
6633 ifp->if_start(ifp);
6634 }
6635
6636 return handled;
6637 }
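/*
 * Note the bracketing above: the Tx queue's interrupt cause is masked
 * (IMC/EIMC) on entry and unmasked (IMS/EIMS) on exit, so the handler
 * cannot be re-triggered by the same cause while it is still running.
 * The Rx and link handlers below follow the same pattern.
 */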
6638
6639 /*
6640 * wm_rxintr_msix:
6641 *
6642 * Interrupt service routine for RX interrupt for MSI-X.
6643 */
6644 static int
6645 wm_rxintr_msix(void *arg)
6646 {
6647 struct wm_softc *sc = arg;
6648
6649 	DPRINTF(WM_DEBUG_RX,
6650 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
6651
6652 if (sc->sc_type == WM_T_82574)
6653 CSR_WRITE(sc, WMREG_IMC, ICR_RXQ0); /* 82574 only */
6654 else if (sc->sc_type == WM_T_82575)
6655 CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE0);
6656 else
6657 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_RXINTR_IDX);
6658
6659 WM_RX_LOCK(sc);
6660
6661 if (sc->sc_stopping)
6662 goto out;
6663
6664 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6665 wm_rxeof(sc);
6666
6667 out:
6668 WM_RX_UNLOCK(sc);
6669
6670 if (sc->sc_type == WM_T_82574)
6671 		CSR_WRITE(sc, WMREG_IMS, ICR_RXQ0); /* 82574 only */
6672 else if (sc->sc_type == WM_T_82575)
6673 CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE0);
6674 else
6675 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_RXINTR_IDX);
6676
6677 return 1;
6678 }
6679
6680 /*
6681 * wm_linkintr_msix:
6682 *
6683 * Interrupt service routine for link status change for MSI-X.
6684 */
6685 static int
6686 wm_linkintr_msix(void *arg)
6687 {
6688 struct wm_softc *sc = arg;
6689
6690 	DPRINTF(WM_DEBUG_LINK,
6691 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
6692
6693 if (sc->sc_type == WM_T_82574)
6694 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER); /* 82574 only */
6695 else if (sc->sc_type == WM_T_82575)
6696 CSR_WRITE(sc, WMREG_EIMC, EITR_OTHER);
6697 else
6698 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_LINKINTR_IDX);
6699 WM_TX_LOCK(sc);
6700 if (sc->sc_stopping)
6701 goto out;
6702
6703 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6704 wm_linkintr(sc, ICR_LSC);
6705
6706 out:
6707 WM_TX_UNLOCK(sc);
6708
6709 if (sc->sc_type == WM_T_82574)
6710 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
6711 else if (sc->sc_type == WM_T_82575)
6712 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
6713 else
6714 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_LINKINTR_IDX);
6715
6716 return 1;
6717 }
6718 #endif /* WM_MSI_MSIX */
6719
6720 /*
6721 * Media related.
6722 * GMII, SGMII, TBI (and SERDES)
6723 */
6724
6725 /* Common */
6726
6727 /*
6728 * wm_tbi_serdes_set_linkled:
6729 *
6730 * Update the link LED on TBI and SERDES devices.
6731 */
6732 static void
6733 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
6734 {
6735
6736 if (sc->sc_tbi_linkup)
6737 sc->sc_ctrl |= CTRL_SWDPIN(0);
6738 else
6739 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6740
6741 /* 82540 or newer devices are active low */
6742 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6743
6744 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6745 }
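/*
 * The XOR above folds in the LED polarity: on an 82540 or newer chip
 * with the link up, for example, CTRL_SWDPIN(0) is first set and then
 * toggled back off, driving the pin low, which lights the active-low
 * LED on those devices.
 */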
6746
6747 /* GMII related */
6748
6749 /*
6750 * wm_gmii_reset:
6751 *
6752 * Reset the PHY.
6753 */
6754 static void
6755 wm_gmii_reset(struct wm_softc *sc)
6756 {
6757 uint32_t reg;
6758 int rv;
6759
6760 /* get phy semaphore */
6761 switch (sc->sc_type) {
6762 case WM_T_82571:
6763 case WM_T_82572:
6764 case WM_T_82573:
6765 case WM_T_82574:
6766 case WM_T_82583:
6767 /* XXX should get sw semaphore, too */
6768 rv = wm_get_swsm_semaphore(sc);
6769 break;
6770 case WM_T_82575:
6771 case WM_T_82576:
6772 case WM_T_82580:
6773 case WM_T_I350:
6774 case WM_T_I354:
6775 case WM_T_I210:
6776 case WM_T_I211:
6777 case WM_T_80003:
6778 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6779 break;
6780 case WM_T_ICH8:
6781 case WM_T_ICH9:
6782 case WM_T_ICH10:
6783 case WM_T_PCH:
6784 case WM_T_PCH2:
6785 case WM_T_PCH_LPT:
6786 rv = wm_get_swfwhw_semaphore(sc);
6787 break;
6788 default:
6789 		/* nothing to do */
6790 rv = 0;
6791 break;
6792 }
6793 if (rv != 0) {
6794 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6795 __func__);
6796 return;
6797 }
6798
6799 switch (sc->sc_type) {
6800 case WM_T_82542_2_0:
6801 case WM_T_82542_2_1:
6802 /* null */
6803 break;
6804 case WM_T_82543:
6805 /*
6806 * With 82543, we need to force speed and duplex on the MAC
6807 * equal to what the PHY speed and duplex configuration is.
6808 * In addition, we need to perform a hardware reset on the PHY
6809 * to take it out of reset.
6810 */
6811 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6812 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6813
6814 /* The PHY reset pin is active-low. */
6815 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6816 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6817 CTRL_EXT_SWDPIN(4));
6818 reg |= CTRL_EXT_SWDPIO(4);
6819
6820 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6821 CSR_WRITE_FLUSH(sc);
6822 delay(10*1000);
6823
6824 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6825 CSR_WRITE_FLUSH(sc);
6826 delay(150);
6827 #if 0
6828 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6829 #endif
6830 delay(20*1000); /* XXX extra delay to get PHY ID? */
6831 break;
6832 case WM_T_82544: /* reset 10000us */
6833 case WM_T_82540:
6834 case WM_T_82545:
6835 case WM_T_82545_3:
6836 case WM_T_82546:
6837 case WM_T_82546_3:
6838 case WM_T_82541:
6839 case WM_T_82541_2:
6840 case WM_T_82547:
6841 case WM_T_82547_2:
6842 case WM_T_82571: /* reset 100us */
6843 case WM_T_82572:
6844 case WM_T_82573:
6845 case WM_T_82574:
6846 case WM_T_82575:
6847 case WM_T_82576:
6848 case WM_T_82580:
6849 case WM_T_I350:
6850 case WM_T_I354:
6851 case WM_T_I210:
6852 case WM_T_I211:
6853 case WM_T_82583:
6854 case WM_T_80003:
6855 /* generic reset */
6856 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6857 CSR_WRITE_FLUSH(sc);
6858 delay(20000);
6859 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6860 CSR_WRITE_FLUSH(sc);
6861 delay(20000);
6862
6863 if ((sc->sc_type == WM_T_82541)
6864 || (sc->sc_type == WM_T_82541_2)
6865 || (sc->sc_type == WM_T_82547)
6866 || (sc->sc_type == WM_T_82547_2)) {
6867 			/* workarounds for igp are done in igp_reset() */
6868 /* XXX add code to set LED after phy reset */
6869 }
6870 break;
6871 case WM_T_ICH8:
6872 case WM_T_ICH9:
6873 case WM_T_ICH10:
6874 case WM_T_PCH:
6875 case WM_T_PCH2:
6876 case WM_T_PCH_LPT:
6877 /* generic reset */
6878 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6879 CSR_WRITE_FLUSH(sc);
6880 delay(100);
6881 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6882 CSR_WRITE_FLUSH(sc);
6883 delay(150);
6884 break;
6885 default:
6886 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6887 __func__);
6888 break;
6889 }
6890
6891 /* release PHY semaphore */
6892 switch (sc->sc_type) {
6893 case WM_T_82571:
6894 case WM_T_82572:
6895 case WM_T_82573:
6896 case WM_T_82574:
6897 case WM_T_82583:
6898 /* XXX should put sw semaphore, too */
6899 wm_put_swsm_semaphore(sc);
6900 break;
6901 case WM_T_82575:
6902 case WM_T_82576:
6903 case WM_T_82580:
6904 case WM_T_I350:
6905 case WM_T_I354:
6906 case WM_T_I210:
6907 case WM_T_I211:
6908 case WM_T_80003:
6909 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6910 break;
6911 case WM_T_ICH8:
6912 case WM_T_ICH9:
6913 case WM_T_ICH10:
6914 case WM_T_PCH:
6915 case WM_T_PCH2:
6916 case WM_T_PCH_LPT:
6917 wm_put_swfwhw_semaphore(sc);
6918 break;
6919 default:
6920 		/* nothing to do */
6921 rv = 0;
6922 break;
6923 }
6924
6925 /* get_cfg_done */
6926 wm_get_cfg_done(sc);
6927
6928 /* extra setup */
6929 switch (sc->sc_type) {
6930 case WM_T_82542_2_0:
6931 case WM_T_82542_2_1:
6932 case WM_T_82543:
6933 case WM_T_82544:
6934 case WM_T_82540:
6935 case WM_T_82545:
6936 case WM_T_82545_3:
6937 case WM_T_82546:
6938 case WM_T_82546_3:
6939 case WM_T_82541_2:
6940 case WM_T_82547_2:
6941 case WM_T_82571:
6942 case WM_T_82572:
6943 case WM_T_82573:
6944 case WM_T_82574:
6945 case WM_T_82575:
6946 case WM_T_82576:
6947 case WM_T_82580:
6948 case WM_T_I350:
6949 case WM_T_I354:
6950 case WM_T_I210:
6951 case WM_T_I211:
6952 case WM_T_82583:
6953 case WM_T_80003:
6954 /* null */
6955 break;
6956 case WM_T_82541:
6957 case WM_T_82547:
6958 		/* XXX Configure the activity LED after PHY reset */
6959 break;
6960 case WM_T_ICH8:
6961 case WM_T_ICH9:
6962 case WM_T_ICH10:
6963 case WM_T_PCH:
6964 case WM_T_PCH2:
6965 case WM_T_PCH_LPT:
6966 		/* Allow time for h/w to get to a quiescent state after reset */
6967 delay(10*1000);
6968
6969 if (sc->sc_type == WM_T_PCH)
6970 wm_hv_phy_workaround_ich8lan(sc);
6971
6972 if (sc->sc_type == WM_T_PCH2)
6973 wm_lv_phy_workaround_ich8lan(sc);
6974
6975 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6976 /*
6977 * dummy read to clear the phy wakeup bit after lcd
6978 * reset
6979 */
6980 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6981 }
6982
6983 /*
6984 		 * XXX Configure the LCD with the extended configuration region
6985 * in NVM
6986 */
6987
6988 /* Configure the LCD with the OEM bits in NVM */
6989 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6990 || (sc->sc_type == WM_T_PCH_LPT)) {
6991 /*
6992 * Disable LPLU.
6993 * XXX It seems that 82567 has LPLU, too.
6994 */
6995 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6996 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6997 reg |= HV_OEM_BITS_ANEGNOW;
6998 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6999 }
7000 break;
7001 default:
7002 panic("%s: unknown type\n", __func__);
7003 break;
7004 }
7005 }
7006
7007 /*
7008 * wm_get_phy_id_82575:
7009 *
7010 * Return PHY ID. Return -1 if it failed.
7011 */
7012 static int
7013 wm_get_phy_id_82575(struct wm_softc *sc)
7014 {
7015 uint32_t reg;
7016 int phyid = -1;
7017
7018 /* XXX */
7019 if ((sc->sc_flags & WM_F_SGMII) == 0)
7020 return -1;
7021
7022 if (wm_sgmii_uses_mdio(sc)) {
7023 switch (sc->sc_type) {
7024 case WM_T_82575:
7025 case WM_T_82576:
7026 reg = CSR_READ(sc, WMREG_MDIC);
7027 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
7028 break;
7029 case WM_T_82580:
7030 case WM_T_I350:
7031 case WM_T_I354:
7032 case WM_T_I210:
7033 case WM_T_I211:
7034 reg = CSR_READ(sc, WMREG_MDICNFG);
7035 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
7036 break;
7037 default:
7038 return -1;
7039 }
7040 }
7041
7042 return phyid;
7043 }
7044
7045
7046 /*
7047 * wm_gmii_mediainit:
7048 *
7049 * Initialize media for use on 1000BASE-T devices.
7050 */
7051 static void
7052 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
7053 {
7054 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7055 struct mii_data *mii = &sc->sc_mii;
7056 uint32_t reg;
7057
7058 /* We have GMII. */
7059 sc->sc_flags |= WM_F_HAS_MII;
7060
7061 if (sc->sc_type == WM_T_80003)
7062 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7063 else
7064 sc->sc_tipg = TIPG_1000T_DFLT;
7065
7066 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
7067 if ((sc->sc_type == WM_T_82580)
7068 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
7069 || (sc->sc_type == WM_T_I211)) {
7070 reg = CSR_READ(sc, WMREG_PHPM);
7071 reg &= ~PHPM_GO_LINK_D;
7072 CSR_WRITE(sc, WMREG_PHPM, reg);
7073 }
7074
7075 /*
7076 * Let the chip set speed/duplex on its own based on
7077 * signals from the PHY.
7078 * XXXbouyer - I'm not sure this is right for the 80003,
7079 * the em driver only sets CTRL_SLU here - but it seems to work.
7080 */
7081 sc->sc_ctrl |= CTRL_SLU;
7082 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7083
7084 /* Initialize our media structures and probe the GMII. */
7085 mii->mii_ifp = ifp;
7086
7087 /*
7088 * Determine the PHY access method.
7089 *
7090 * For SGMII, use SGMII specific method.
7091 *
7092 * For some devices, we can determine the PHY access method
7093 * from sc_type.
7094 *
7095 * For ICH and PCH variants, it's difficult to determine the PHY
7096 * access method by sc_type, so use the PCI product ID for some
7097 * devices.
7098 	 * For other ICH8 variants, try igp's method first.  If the PHY
7099 	 * can't be detected that way, fall back to bm's method.
7100 */
7101 switch (prodid) {
7102 case PCI_PRODUCT_INTEL_PCH_M_LM:
7103 case PCI_PRODUCT_INTEL_PCH_M_LC:
7104 /* 82577 */
7105 sc->sc_phytype = WMPHY_82577;
7106 break;
7107 case PCI_PRODUCT_INTEL_PCH_D_DM:
7108 case PCI_PRODUCT_INTEL_PCH_D_DC:
7109 /* 82578 */
7110 sc->sc_phytype = WMPHY_82578;
7111 break;
7112 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
7113 case PCI_PRODUCT_INTEL_PCH2_LV_V:
7114 /* 82579 */
7115 sc->sc_phytype = WMPHY_82579;
7116 break;
7117 case PCI_PRODUCT_INTEL_82801I_BM:
7118 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
7119 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
7120 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
7121 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
7122 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
7123 /* 82567 */
7124 sc->sc_phytype = WMPHY_BM;
7125 mii->mii_readreg = wm_gmii_bm_readreg;
7126 mii->mii_writereg = wm_gmii_bm_writereg;
7127 break;
7128 default:
7129 if (((sc->sc_flags & WM_F_SGMII) != 0)
7130 && !wm_sgmii_uses_mdio(sc)){
7131 /* SGMII */
7132 mii->mii_readreg = wm_sgmii_readreg;
7133 mii->mii_writereg = wm_sgmii_writereg;
7134 } else if (sc->sc_type >= WM_T_80003) {
7135 /* 80003 */
7136 mii->mii_readreg = wm_gmii_i80003_readreg;
7137 mii->mii_writereg = wm_gmii_i80003_writereg;
7138 } else if (sc->sc_type >= WM_T_I210) {
7139 /* I210 and I211 */
7140 mii->mii_readreg = wm_gmii_gs40g_readreg;
7141 mii->mii_writereg = wm_gmii_gs40g_writereg;
7142 } else if (sc->sc_type >= WM_T_82580) {
7143 /* 82580, I350 and I354 */
7144 sc->sc_phytype = WMPHY_82580;
7145 mii->mii_readreg = wm_gmii_82580_readreg;
7146 mii->mii_writereg = wm_gmii_82580_writereg;
7147 } else if (sc->sc_type >= WM_T_82544) {
7148 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
7149 mii->mii_readreg = wm_gmii_i82544_readreg;
7150 mii->mii_writereg = wm_gmii_i82544_writereg;
7151 } else {
7152 mii->mii_readreg = wm_gmii_i82543_readreg;
7153 mii->mii_writereg = wm_gmii_i82543_writereg;
7154 }
7155 break;
7156 }
7157 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
7158 /* All PCH* use _hv_ */
7159 mii->mii_readreg = wm_gmii_hv_readreg;
7160 mii->mii_writereg = wm_gmii_hv_writereg;
7161 }
7162 mii->mii_statchg = wm_gmii_statchg;
7163
7164 wm_gmii_reset(sc);
7165
7166 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7167 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
7168 wm_gmii_mediastatus);
7169
7170 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
7171 || (sc->sc_type == WM_T_82580)
7172 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
7173 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
7174 if ((sc->sc_flags & WM_F_SGMII) == 0) {
7175 /* Attach only one port */
7176 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
7177 MII_OFFSET_ANY, MIIF_DOPAUSE);
7178 } else {
7179 int i, id;
7180 uint32_t ctrl_ext;
7181
7182 id = wm_get_phy_id_82575(sc);
7183 if (id != -1) {
7184 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
7185 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
7186 }
7187 if ((id == -1)
7188 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
7189 /* Power on sgmii phy if it is disabled */
7190 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7191 CSR_WRITE(sc, WMREG_CTRL_EXT,
7192 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
7193 CSR_WRITE_FLUSH(sc);
7194 delay(300*1000); /* XXX too long */
7195
7196 				/* Probe PHY addresses 1 through 7 */
7197 for (i = 1; i < 8; i++)
7198 mii_attach(sc->sc_dev, &sc->sc_mii,
7199 0xffffffff, i, MII_OFFSET_ANY,
7200 MIIF_DOPAUSE);
7201
7202 /* restore previous sfp cage power state */
7203 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7204 }
7205 }
7206 } else {
7207 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7208 MII_OFFSET_ANY, MIIF_DOPAUSE);
7209 }
7210
7211 /*
7212 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
7213 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
7214 */
7215 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
7216 (LIST_FIRST(&mii->mii_phys) == NULL)) {
7217 wm_set_mdio_slow_mode_hv(sc);
7218 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7219 MII_OFFSET_ANY, MIIF_DOPAUSE);
7220 }
7221
7222 /*
7223 * (For ICH8 variants)
7224 * If PHY detection failed, use BM's r/w function and retry.
7225 */
7226 if (LIST_FIRST(&mii->mii_phys) == NULL) {
7227 /* if failed, retry with *_bm_* */
7228 mii->mii_readreg = wm_gmii_bm_readreg;
7229 mii->mii_writereg = wm_gmii_bm_writereg;
7230
7231 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7232 MII_OFFSET_ANY, MIIF_DOPAUSE);
7233 }
7234
7235 if (LIST_FIRST(&mii->mii_phys) == NULL) {
7236 		/* No PHY was found */
7237 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
7238 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
7239 sc->sc_phytype = WMPHY_NONE;
7240 } else {
7241 /*
7242 * PHY Found!
7243 * Check PHY type.
7244 */
7245 uint32_t model;
7246 struct mii_softc *child;
7247
7248 child = LIST_FIRST(&mii->mii_phys);
7249 if (device_is_a(child->mii_dev, "igphy")) {
7250 struct igphy_softc *isc = (struct igphy_softc *)child;
7251
7252 model = isc->sc_mii.mii_mpd_model;
7253 if (model == MII_MODEL_yyINTEL_I82566)
7254 sc->sc_phytype = WMPHY_IGP_3;
7255 }
7256
7257 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
7258 }
7259 }
7260
7261 /*
7262 * wm_gmii_mediachange: [ifmedia interface function]
7263 *
7264 * Set hardware to newly-selected media on a 1000BASE-T device.
7265 */
7266 static int
7267 wm_gmii_mediachange(struct ifnet *ifp)
7268 {
7269 struct wm_softc *sc = ifp->if_softc;
7270 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7271 int rc;
7272
7273 if ((ifp->if_flags & IFF_UP) == 0)
7274 return 0;
7275
7276 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7277 sc->sc_ctrl |= CTRL_SLU;
7278 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7279 || (sc->sc_type > WM_T_82543)) {
7280 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
7281 } else {
7282 sc->sc_ctrl &= ~CTRL_ASDE;
7283 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7284 if (ife->ifm_media & IFM_FDX)
7285 sc->sc_ctrl |= CTRL_FD;
7286 switch (IFM_SUBTYPE(ife->ifm_media)) {
7287 case IFM_10_T:
7288 sc->sc_ctrl |= CTRL_SPEED_10;
7289 break;
7290 case IFM_100_TX:
7291 sc->sc_ctrl |= CTRL_SPEED_100;
7292 break;
7293 case IFM_1000_T:
7294 sc->sc_ctrl |= CTRL_SPEED_1000;
7295 break;
7296 default:
7297 panic("wm_gmii_mediachange: bad media 0x%x",
7298 ife->ifm_media);
7299 }
7300 }
7301 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7302 if (sc->sc_type <= WM_T_82543)
7303 wm_gmii_reset(sc);
7304
7305 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
7306 return 0;
7307 return rc;
7308 }
7309
7310 /*
7311 * wm_gmii_mediastatus: [ifmedia interface function]
7312 *
7313 * Get the current interface media status on a 1000BASE-T device.
7314 */
7315 static void
7316 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7317 {
7318 struct wm_softc *sc = ifp->if_softc;
7319
7320 ether_mediastatus(ifp, ifmr);
7321 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
7322 | sc->sc_flowflags;
7323 }
7324
7325 #define MDI_IO CTRL_SWDPIN(2)
7326 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
7327 #define MDI_CLK CTRL_SWDPIN(3)
7328
7329 static void
7330 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
7331 {
7332 uint32_t i, v;
7333
7334 v = CSR_READ(sc, WMREG_CTRL);
7335 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
7336 v |= MDI_DIR | CTRL_SWDPIO(3);
7337
7338 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
7339 if (data & i)
7340 v |= MDI_IO;
7341 else
7342 v &= ~MDI_IO;
7343 CSR_WRITE(sc, WMREG_CTRL, v);
7344 CSR_WRITE_FLUSH(sc);
7345 delay(10);
7346 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7347 CSR_WRITE_FLUSH(sc);
7348 delay(10);
7349 CSR_WRITE(sc, WMREG_CTRL, v);
7350 CSR_WRITE_FLUSH(sc);
7351 delay(10);
7352 }
7353 }
7354
7355 static uint32_t
7356 wm_i82543_mii_recvbits(struct wm_softc *sc)
7357 {
7358 uint32_t v, i, data = 0;
7359
7360 v = CSR_READ(sc, WMREG_CTRL);
7361 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
7362 v |= CTRL_SWDPIO(3);
7363
7364 CSR_WRITE(sc, WMREG_CTRL, v);
7365 CSR_WRITE_FLUSH(sc);
7366 delay(10);
7367 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7368 CSR_WRITE_FLUSH(sc);
7369 delay(10);
7370 CSR_WRITE(sc, WMREG_CTRL, v);
7371 CSR_WRITE_FLUSH(sc);
7372 delay(10);
7373
7374 for (i = 0; i < 16; i++) {
7375 data <<= 1;
7376 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7377 CSR_WRITE_FLUSH(sc);
7378 delay(10);
7379 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
7380 data |= 1;
7381 CSR_WRITE(sc, WMREG_CTRL, v);
7382 CSR_WRITE_FLUSH(sc);
7383 delay(10);
7384 }
7385
7386 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7387 CSR_WRITE_FLUSH(sc);
7388 delay(10);
7389 CSR_WRITE(sc, WMREG_CTRL, v);
7390 CSR_WRITE_FLUSH(sc);
7391 delay(10);
7392
7393 return data;
7394 }
7395
7396 #undef MDI_IO
7397 #undef MDI_DIR
7398 #undef MDI_CLK
7399
7400 /*
7401 * wm_gmii_i82543_readreg: [mii interface function]
7402 *
7403 * Read a PHY register on the GMII (i82543 version).
7404 */
7405 static int
7406 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
7407 {
7408 struct wm_softc *sc = device_private(self);
7409 int rv;
7410
7411 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
7412 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
7413 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
7414 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
7415
7416 DPRINTF(WM_DEBUG_GMII,
7417 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
7418 device_xname(sc->sc_dev), phy, reg, rv));
7419
7420 return rv;
7421 }
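/*
 * The 14 bits shifted out above form the header of an IEEE 802.3
 * clause 22 management frame, most significant bit first:
 *
 *	<01> start | <10> read op | 5-bit PHY addr | 5-bit reg addr
 *
 * preceded by the 32-bit all-ones preamble and followed by the
 * turnaround and the 16 data bits clocked back in by
 * wm_i82543_mii_recvbits().
 */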
7422
7423 /*
7424 * wm_gmii_i82543_writereg: [mii interface function]
7425 *
7426 * Write a PHY register on the GMII (i82543 version).
7427 */
7428 static void
7429 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
7430 {
7431 struct wm_softc *sc = device_private(self);
7432
7433 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
7434 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
7435 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
7436 (MII_COMMAND_START << 30), 32);
7437 }
7438
7439 /*
7440 * wm_gmii_i82544_readreg: [mii interface function]
7441 *
7442 * Read a PHY register on the GMII.
7443 */
7444 static int
7445 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
7446 {
7447 struct wm_softc *sc = device_private(self);
7448 uint32_t mdic = 0;
7449 int i, rv;
7450
7451 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
7452 MDIC_REGADD(reg));
7453
7454 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7455 mdic = CSR_READ(sc, WMREG_MDIC);
7456 if (mdic & MDIC_READY)
7457 break;
7458 delay(50);
7459 }
7460
7461 if ((mdic & MDIC_READY) == 0) {
7462 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
7463 device_xname(sc->sc_dev), phy, reg);
7464 rv = 0;
7465 } else if (mdic & MDIC_E) {
7466 #if 0 /* This is normal if no PHY is present. */
7467 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
7468 device_xname(sc->sc_dev), phy, reg);
7469 #endif
7470 rv = 0;
7471 } else {
7472 rv = MDIC_DATA(mdic);
7473 if (rv == 0xffff)
7474 rv = 0;
7475 }
7476
7477 return rv;
7478 }
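/*
 * The MDIC register packs the whole management transaction into one
 * 32-bit word: the opcode (MDIC_OP_READ or MDIC_OP_WRITE), the PHY and
 * register addresses, and 16 bits of data.  The hardware sets
 * MDIC_READY when the transaction completes and MDIC_E on failure,
 * hence the polling loop above.  A read of 0xffff (all ones, as seen
 * when no PHY drives the bus) is normalized to 0.
 */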
7479
7480 /*
7481 * wm_gmii_i82544_writereg: [mii interface function]
7482 *
7483 * Write a PHY register on the GMII.
7484 */
7485 static void
7486 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
7487 {
7488 struct wm_softc *sc = device_private(self);
7489 uint32_t mdic = 0;
7490 int i;
7491
7492 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
7493 MDIC_REGADD(reg) | MDIC_DATA(val));
7494
7495 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7496 mdic = CSR_READ(sc, WMREG_MDIC);
7497 if (mdic & MDIC_READY)
7498 break;
7499 delay(50);
7500 }
7501
7502 if ((mdic & MDIC_READY) == 0)
7503 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
7504 device_xname(sc->sc_dev), phy, reg);
7505 else if (mdic & MDIC_E)
7506 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
7507 device_xname(sc->sc_dev), phy, reg);
7508 }
7509
7510 /*
7511 * wm_gmii_i80003_readreg: [mii interface function]
7512 *
7513  *	Read a PHY register on the Kumeran bus.
7514  *	This could be handled by the PHY layer if we didn't have to lock the
7515  *	resource ...
7516 */
7517 static int
7518 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
7519 {
7520 struct wm_softc *sc = device_private(self);
7521 int sem;
7522 int rv;
7523
7524 if (phy != 1) /* only one PHY on kumeran bus */
7525 return 0;
7526
7527 sem = swfwphysem[sc->sc_funcid];
7528 if (wm_get_swfw_semaphore(sc, sem)) {
7529 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7530 __func__);
7531 return 0;
7532 }
7533
7534 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7535 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7536 reg >> GG82563_PAGE_SHIFT);
7537 } else {
7538 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7539 reg >> GG82563_PAGE_SHIFT);
7540 }
7541 	/* Wait 200us more to work around a bug in the MDIC register's ready bit */
7542 delay(200);
7543 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7544 delay(200);
7545
7546 wm_put_swfw_semaphore(sc, sem);
7547 return rv;
7548 }
7549
7550 /*
7551 * wm_gmii_i80003_writereg: [mii interface function]
7552 *
7553  *	Write a PHY register on the Kumeran bus.
7554  *	This could be handled by the PHY layer if we didn't have to lock the
7555  *	resource ...
7556 */
7557 static void
7558 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
7559 {
7560 struct wm_softc *sc = device_private(self);
7561 int sem;
7562
7563 if (phy != 1) /* only one PHY on kumeran bus */
7564 return;
7565
7566 sem = swfwphysem[sc->sc_funcid];
7567 if (wm_get_swfw_semaphore(sc, sem)) {
7568 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7569 __func__);
7570 return;
7571 }
7572
7573 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7574 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7575 reg >> GG82563_PAGE_SHIFT);
7576 } else {
7577 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7578 reg >> GG82563_PAGE_SHIFT);
7579 }
7580 	/* Wait 200us more to work around a bug in the MDIC register's ready bit */
7581 delay(200);
7582 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7583 delay(200);
7584
7585 wm_put_swfw_semaphore(sc, sem);
7586 }
7587
7588 /*
7589 * wm_gmii_bm_readreg: [mii interface function]
7590 *
7591  *	Read a PHY register on the BM PHY.
7592  *	This could be handled by the PHY layer if we didn't have to lock the
7593  *	resource ...
7594 */
7595 static int
7596 wm_gmii_bm_readreg(device_t self, int phy, int reg)
7597 {
7598 struct wm_softc *sc = device_private(self);
7599 int sem;
7600 int rv;
7601
7602 sem = swfwphysem[sc->sc_funcid];
7603 if (wm_get_swfw_semaphore(sc, sem)) {
7604 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7605 __func__);
7606 return 0;
7607 }
7608
7609 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7610 if (phy == 1)
7611 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7612 reg);
7613 else
7614 wm_gmii_i82544_writereg(self, phy,
7615 GG82563_PHY_PAGE_SELECT,
7616 reg >> GG82563_PAGE_SHIFT);
7617 }
7618
7619 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7620 wm_put_swfw_semaphore(sc, sem);
7621 return rv;
7622 }
7623
7624 /*
7625 * wm_gmii_bm_writereg: [mii interface function]
7626 *
7627  *	Write a PHY register on the BM PHY.
7628  *	This could be handled by the PHY layer if we didn't have to lock the
7629  *	resource ...
7630 */
7631 static void
7632 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7633 {
7634 struct wm_softc *sc = device_private(self);
7635 int sem;
7636
7637 sem = swfwphysem[sc->sc_funcid];
7638 if (wm_get_swfw_semaphore(sc, sem)) {
7639 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7640 __func__);
7641 return;
7642 }
7643
7644 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7645 if (phy == 1)
7646 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7647 reg);
7648 else
7649 wm_gmii_i82544_writereg(self, phy,
7650 GG82563_PHY_PAGE_SELECT,
7651 reg >> GG82563_PAGE_SHIFT);
7652 }
7653
7654 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7655 wm_put_swfw_semaphore(sc, sem);
7656 }
7657
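/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register on page 800 (BM_WUC_PAGE).
 *	The wakeup page is reached indirectly: enable wakeup register access
 *	on page 769, write the register number through the address opcode
 *	register, move the data through the data opcode register (read into
 *	*val if rd is nonzero, otherwise write *val), and finally restore
 *	the original page 769 enable bits.
 */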
7658 static void
7659 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7660 {
7661 struct wm_softc *sc = device_private(self);
7662 uint16_t regnum = BM_PHY_REG_NUM(offset);
7663 uint16_t wuce;
7664
7665 /* XXX Gig must be disabled for MDIO accesses to page 800 */
7666 if (sc->sc_type == WM_T_PCH) {
7667 		/* XXX the e1000 driver does nothing here... why? */
7668 }
7669
7670 /* Set page 769 */
7671 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7672 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7673
7674 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7675
7676 wuce &= ~BM_WUC_HOST_WU_BIT;
7677 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7678 wuce | BM_WUC_ENABLE_BIT);
7679
7680 /* Select page 800 */
7681 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7682 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7683
7684 /* Write page 800 */
7685 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7686
7687 if (rd)
7688 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7689 else
7690 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7691
7692 /* Set page 769 */
7693 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7694 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7695
7696 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7697 }
7698
7699 /*
7700 * wm_gmii_hv_readreg: [mii interface function]
7701 *
7702  *	Read a PHY register on the HV (PCH and newer) PHY.
7703  *	This could be handled by the PHY layer if we didn't have to lock the
7704  *	resource ...
7705 */
7706 static int
7707 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7708 {
7709 struct wm_softc *sc = device_private(self);
7710 uint16_t page = BM_PHY_REG_PAGE(reg);
7711 uint16_t regnum = BM_PHY_REG_NUM(reg);
7712 uint16_t val;
7713 int rv;
7714
7715 if (wm_get_swfwhw_semaphore(sc)) {
7716 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7717 __func__);
7718 return 0;
7719 }
7720
7721 /* XXX Workaround failure in MDIO access while cable is disconnected */
7722 if (sc->sc_phytype == WMPHY_82577) {
7723 /* XXX must write */
7724 }
7725
7726 	/* Page 800 works differently from the rest, so it has its own function */
7727 	if (page == BM_WUC_PAGE) {
7728 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);
7729 		return val;
7730 	}
7731
7732 	/*
7733 	 * Pages lower than 768 work differently from the rest and would
7734 	 * need their own function.
7735 	 */
7736 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7737 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);
7738 		return 0;
7739 	}
7740
7741 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7742 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7743 page << BME1000_PAGE_SHIFT);
7744 }
7745
7746 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7747 wm_put_swfwhw_semaphore(sc);
7748 return rv;
7749 }
7750
7751 /*
7752 * wm_gmii_hv_writereg: [mii interface function]
7753 *
7754  *	Write a PHY register on the HV (PCH and newer) PHY.
7755  *	This could be handled by the PHY layer if we didn't have to lock the
7756  *	resource ...
7757 */
7758 static void
7759 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7760 {
7761 struct wm_softc *sc = device_private(self);
7762 uint16_t page = BM_PHY_REG_PAGE(reg);
7763 uint16_t regnum = BM_PHY_REG_NUM(reg);
7764
7765 if (wm_get_swfwhw_semaphore(sc)) {
7766 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7767 __func__);
7768 return;
7769 }
7770
7771 /* XXX Workaround failure in MDIO access while cable is disconnected */
7772
7773 	/* Page 800 works differently from the rest, so it has its own function */
7774 	if (page == BM_WUC_PAGE) {
7775 		uint16_t tmp;
7776
7777 		tmp = val;
7778 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);
7779 		return;
7780 	}
7781
7782 	/*
7783 	 * Pages lower than 768 work differently from the rest and would
7784 	 * need their own function.
7785 	 */
7786 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7787 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);
7788 		return;
7789 	}
7790
7791 /*
7792 * XXX Workaround MDIO accesses being disabled after entering IEEE
7793 * Power Down (whenever bit 11 of the PHY control register is set)
7794 */
7795
7796 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7797 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7798 page << BME1000_PAGE_SHIFT);
7799 }
7800
7801 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7802 wm_put_swfwhw_semaphore(sc);
7803 }
7804
7805 /*
7806 * wm_gmii_82580_readreg: [mii interface function]
7807 *
7808 * Read a PHY register on the 82580 and I350.
7809 * This could be handled by the PHY layer if we didn't have to lock the
7810  *	resource ...
7811 */
7812 static int
7813 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7814 {
7815 struct wm_softc *sc = device_private(self);
7816 int sem;
7817 int rv;
7818
7819 sem = swfwphysem[sc->sc_funcid];
7820 if (wm_get_swfw_semaphore(sc, sem)) {
7821 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7822 __func__);
7823 return 0;
7824 }
7825
7826 rv = wm_gmii_i82544_readreg(self, phy, reg);
7827
7828 wm_put_swfw_semaphore(sc, sem);
7829 return rv;
7830 }
7831
7832 /*
7833 * wm_gmii_82580_writereg: [mii interface function]
7834 *
7835 * Write a PHY register on the 82580 and I350.
7836 * This could be handled by the PHY layer if we didn't have to lock the
7837  *	resource ...
7838 */
7839 static void
7840 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7841 {
7842 struct wm_softc *sc = device_private(self);
7843 int sem;
7844
7845 sem = swfwphysem[sc->sc_funcid];
7846 if (wm_get_swfw_semaphore(sc, sem)) {
7847 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7848 __func__);
7849 return;
7850 }
7851
7852 wm_gmii_i82544_writereg(self, phy, reg, val);
7853
7854 wm_put_swfw_semaphore(sc, sem);
7855 }
7856
7857 /*
7858 * wm_gmii_gs40g_readreg: [mii interface function]
7859 *
7860  *	Read a PHY register on the I210 and I211.
7861  *	This could be handled by the PHY layer if we didn't have to lock the
7862  *	resource ...
7863 */
7864 static int
7865 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
7866 {
7867 struct wm_softc *sc = device_private(self);
7868 int sem;
7869 int page, offset;
7870 int rv;
7871
7872 /* Acquire semaphore */
7873 sem = swfwphysem[sc->sc_funcid];
7874 if (wm_get_swfw_semaphore(sc, sem)) {
7875 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7876 __func__);
7877 return 0;
7878 }
7879
7880 /* Page select */
7881 page = reg >> GS40G_PAGE_SHIFT;
7882 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
7883
7884 /* Read reg */
7885 offset = reg & GS40G_OFFSET_MASK;
7886 rv = wm_gmii_i82544_readreg(self, phy, offset);
7887
7888 wm_put_swfw_semaphore(sc, sem);
7889 return rv;
7890 }
7891
7892 /*
7893 * wm_gmii_gs40g_writereg: [mii interface function]
7894 *
7895 * Write a PHY register on the I210 and I211.
7896 * This could be handled by the PHY layer if we didn't have to lock the
7897  *	resource ...
7898 */
7899 static void
7900 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
7901 {
7902 struct wm_softc *sc = device_private(self);
7903 int sem;
7904 int page, offset;
7905
7906 /* Acquire semaphore */
7907 sem = swfwphysem[sc->sc_funcid];
7908 if (wm_get_swfw_semaphore(sc, sem)) {
7909 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7910 __func__);
7911 return;
7912 }
7913
7914 /* Page select */
7915 page = reg >> GS40G_PAGE_SHIFT;
7916 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
7917
7918 /* Write reg */
7919 offset = reg & GS40G_OFFSET_MASK;
7920 wm_gmii_i82544_writereg(self, phy, offset, val);
7921
7922 /* Release semaphore */
7923 wm_put_swfw_semaphore(sc, sem);
7924 }
7925
7926 /*
7927 * wm_gmii_statchg: [mii interface function]
7928 *
7929 * Callback from MII layer when media changes.
7930 */
7931 static void
7932 wm_gmii_statchg(struct ifnet *ifp)
7933 {
7934 struct wm_softc *sc = ifp->if_softc;
7935 struct mii_data *mii = &sc->sc_mii;
7936
7937 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7938 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7939 sc->sc_fcrtl &= ~FCRTL_XONE;
7940
7941 /*
7942 * Get flow control negotiation result.
7943 */
7944 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7945 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7946 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7947 mii->mii_media_active &= ~IFM_ETH_FMASK;
7948 }
7949
7950 if (sc->sc_flowflags & IFM_FLOW) {
7951 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7952 sc->sc_ctrl |= CTRL_TFCE;
7953 sc->sc_fcrtl |= FCRTL_XONE;
7954 }
7955 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7956 sc->sc_ctrl |= CTRL_RFCE;
7957 }
7958
7959 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7960 DPRINTF(WM_DEBUG_LINK,
7961 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7962 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7963 } else {
7964 DPRINTF(WM_DEBUG_LINK,
7965 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7966 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7967 }
7968
7969 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7970 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7971 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7972 : WMREG_FCRTL, sc->sc_fcrtl);
7973 if (sc->sc_type == WM_T_80003) {
7974 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7975 case IFM_1000_T:
7976 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7977 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7978 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7979 break;
7980 default:
7981 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7982 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7983 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7984 break;
7985 }
7986 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7987 }
7988 }
7989
7990 /*
7991 * wm_kmrn_readreg:
7992 *
7993 * Read a kumeran register
7994 */
7995 static int
7996 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7997 {
7998 int rv;
7999
8000 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8001 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8002 aprint_error_dev(sc->sc_dev,
8003 "%s: failed to get semaphore\n", __func__);
8004 return 0;
8005 }
8006 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8007 if (wm_get_swfwhw_semaphore(sc)) {
8008 aprint_error_dev(sc->sc_dev,
8009 "%s: failed to get semaphore\n", __func__);
8010 return 0;
8011 }
8012 }
8013
8014 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8015 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8016 KUMCTRLSTA_REN);
8017 CSR_WRITE_FLUSH(sc);
8018 delay(2);
8019
8020 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
8021
8022 if (sc->sc_flags & WM_F_LOCK_SWFW)
8023 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8024 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8025 wm_put_swfwhw_semaphore(sc);
8026
8027 return rv;
8028 }
8029
8030 /*
8031 * wm_kmrn_writereg:
8032 *
8033 * Write a kumeran register
8034 */
8035 static void
8036 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
8037 {
8038
8039 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8040 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8041 aprint_error_dev(sc->sc_dev,
8042 "%s: failed to get semaphore\n", __func__);
8043 return;
8044 }
8045 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8046 if (wm_get_swfwhw_semaphore(sc)) {
8047 aprint_error_dev(sc->sc_dev,
8048 "%s: failed to get semaphore\n", __func__);
8049 return;
8050 }
8051 }
8052
8053 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8054 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8055 (val & KUMCTRLSTA_MASK));
8056
8057 if (sc->sc_flags & WM_F_LOCK_SWFW)
8058 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8059 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8060 wm_put_swfwhw_semaphore(sc);
8061 }
8062
8063 /* SGMII related */
8064
8065 /*
8066 * wm_sgmii_uses_mdio
8067 *
8068 * Check whether the transaction is to the internal PHY or the external
8069 * MDIO interface. Return true if it's MDIO.
8070 */
8071 static bool
8072 wm_sgmii_uses_mdio(struct wm_softc *sc)
8073 {
8074 uint32_t reg;
8075 bool ismdio = false;
8076
8077 switch (sc->sc_type) {
8078 case WM_T_82575:
8079 case WM_T_82576:
8080 reg = CSR_READ(sc, WMREG_MDIC);
8081 ismdio = ((reg & MDIC_DEST) != 0);
8082 break;
8083 case WM_T_82580:
8084 case WM_T_I350:
8085 case WM_T_I354:
8086 case WM_T_I210:
8087 case WM_T_I211:
8088 reg = CSR_READ(sc, WMREG_MDICNFG);
8089 ismdio = ((reg & MDICNFG_DEST) != 0);
8090 break;
8091 default:
8092 break;
8093 }
8094
8095 return ismdio;
8096 }
8097
8098 /*
8099 * wm_sgmii_readreg: [mii interface function]
8100 *
8101 * Read a PHY register on the SGMII
8102 * This could be handled by the PHY layer if we didn't have to lock the
8103  *	resource ...
8104 */
8105 static int
8106 wm_sgmii_readreg(device_t self, int phy, int reg)
8107 {
8108 struct wm_softc *sc = device_private(self);
8109 uint32_t i2ccmd;
8110 int i, rv;
8111
8112 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8113 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8114 __func__);
8115 return 0;
8116 }
8117
8118 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8119 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8120 | I2CCMD_OPCODE_READ;
8121 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8122
8123 /* Poll the ready bit */
8124 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8125 delay(50);
8126 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8127 if (i2ccmd & I2CCMD_READY)
8128 break;
8129 }
8130 if ((i2ccmd & I2CCMD_READY) == 0)
8131 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
8132 if ((i2ccmd & I2CCMD_ERROR) != 0)
8133 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8134
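	/* Swap the data bytes back from I2C wire order */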
8135 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
8136
8137 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8138 return rv;
8139 }
8140
8141 /*
8142 * wm_sgmii_writereg: [mii interface function]
8143 *
8144 * Write a PHY register on the SGMII.
8145 * This could be handled by the PHY layer if we didn't have to lock the
8146  *	resource ...
8147 */
8148 static void
8149 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
8150 {
8151 struct wm_softc *sc = device_private(self);
8152 uint32_t i2ccmd;
8153 int i;
8154 int val_swapped;
8155
8156 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8157 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8158 __func__);
8159 return;
8160 }
8161 /* Swap the data bytes for the I2C interface */
8162 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
8163 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8164 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8165 | I2CCMD_OPCODE_WRITE | val_swapped;
8166 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8167
8168 /* Poll the ready bit */
8169 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8170 delay(50);
8171 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8172 if (i2ccmd & I2CCMD_READY)
8173 break;
8174 }
8175 if ((i2ccmd & I2CCMD_READY) == 0)
8176 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
8177 if ((i2ccmd & I2CCMD_ERROR) != 0)
8178 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8179
8180 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8181 }
8182
8183 /* TBI related */
8184
8185 /*
8186 * wm_tbi_mediainit:
8187 *
8188 * Initialize media for use on 1000BASE-X devices.
8189 */
8190 static void
8191 wm_tbi_mediainit(struct wm_softc *sc)
8192 {
8193 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8194 const char *sep = "";
8195
8196 if (sc->sc_type < WM_T_82543)
8197 sc->sc_tipg = TIPG_WM_DFLT;
8198 else
8199 sc->sc_tipg = TIPG_LG_DFLT;
8200
8201 sc->sc_tbi_serdes_anegticks = 5;
8202
8203 /* Initialize our media structures */
8204 sc->sc_mii.mii_ifp = ifp;
8205 sc->sc_ethercom.ec_mii = &sc->sc_mii;
8206
8207 if ((sc->sc_type >= WM_T_82575)
8208 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
8209 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
8210 wm_serdes_mediachange, wm_serdes_mediastatus);
8211 else
8212 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
8213 wm_tbi_mediachange, wm_tbi_mediastatus);
8214
8215 /*
8216 * SWD Pins:
8217 *
8218 * 0 = Link LED (output)
8219 * 1 = Loss Of Signal (input)
8220 */
8221 sc->sc_ctrl |= CTRL_SWDPIO(0);
8222
8223 /* XXX Perhaps this is only for TBI */
8224 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
8225 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
8226
8227 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
8228 sc->sc_ctrl &= ~CTRL_LRST;
8229
8230 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8231
8232 #define ADD(ss, mm, dd) \
8233 do { \
8234 aprint_normal("%s%s", sep, ss); \
8235 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
8236 sep = ", "; \
8237 } while (/*CONSTCOND*/0)
8238
8239 aprint_normal_dev(sc->sc_dev, "");
8240
8241 /* Only 82545 is LX */
8242 if (sc->sc_type == WM_T_82545) {
8243 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
8244 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
8245 } else {
8246 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
8247 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
8248 }
8249 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
8250 aprint_normal("\n");
8251
8252 #undef ADD
8253
8254 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
8255 }
8256
8257 /*
8258 * wm_tbi_mediachange: [ifmedia interface function]
8259 *
8260 * Set hardware to newly-selected media on a 1000BASE-X device.
8261 */
8262 static int
8263 wm_tbi_mediachange(struct ifnet *ifp)
8264 {
8265 struct wm_softc *sc = ifp->if_softc;
8266 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8267 uint32_t status;
8268 int i;
8269
8270 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
8271 /* XXX need some work for >= 82571 and < 82575 */
8272 if (sc->sc_type < WM_T_82575)
8273 return 0;
8274 }
8275
8276 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8277 || (sc->sc_type >= WM_T_82575))
8278 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8279
8280 sc->sc_ctrl &= ~CTRL_LRST;
8281 sc->sc_txcw = TXCW_ANE;
8282 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8283 sc->sc_txcw |= TXCW_FD | TXCW_HD;
8284 else if (ife->ifm_media & IFM_FDX)
8285 sc->sc_txcw |= TXCW_FD;
8286 else
8287 sc->sc_txcw |= TXCW_HD;
8288
8289 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
8290 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
8291
8292 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
8293 device_xname(sc->sc_dev), sc->sc_txcw));
8294 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8295 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8296 CSR_WRITE_FLUSH(sc);
8297 delay(1000);
8298
8299 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
8300 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
8301
8302 /*
8303 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
8304 * optics detect a signal, 0 if they don't.
8305 */
8306 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
8307 /* Have signal; wait for the link to come up. */
8308 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
8309 delay(10000);
8310 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
8311 break;
8312 }
8313
8314 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
8315 device_xname(sc->sc_dev),i));
8316
8317 status = CSR_READ(sc, WMREG_STATUS);
8318 DPRINTF(WM_DEBUG_LINK,
8319 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
8320 device_xname(sc->sc_dev),status, STATUS_LU));
8321 if (status & STATUS_LU) {
8322 /* Link is up. */
8323 DPRINTF(WM_DEBUG_LINK,
8324 ("%s: LINK: set media -> link up %s\n",
8325 device_xname(sc->sc_dev),
8326 (status & STATUS_FD) ? "FDX" : "HDX"));
8327
8328 /*
8329 * NOTE: CTRL will update TFCE and RFCE automatically,
8330 * so we should update sc->sc_ctrl
8331 */
8332 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
8333 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8334 sc->sc_fcrtl &= ~FCRTL_XONE;
8335 if (status & STATUS_FD)
8336 sc->sc_tctl |=
8337 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8338 else
8339 sc->sc_tctl |=
8340 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8341 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
8342 sc->sc_fcrtl |= FCRTL_XONE;
8343 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8344 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
8345 WMREG_OLD_FCRTL : WMREG_FCRTL,
8346 sc->sc_fcrtl);
8347 sc->sc_tbi_linkup = 1;
8348 } else {
8349 if (i == WM_LINKUP_TIMEOUT)
8350 wm_check_for_link(sc);
8351 /* Link is down. */
8352 DPRINTF(WM_DEBUG_LINK,
8353 ("%s: LINK: set media -> link down\n",
8354 device_xname(sc->sc_dev)));
8355 sc->sc_tbi_linkup = 0;
8356 }
8357 } else {
8358 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
8359 device_xname(sc->sc_dev)));
8360 sc->sc_tbi_linkup = 0;
8361 }
8362
8363 wm_tbi_serdes_set_linkled(sc);
8364
8365 return 0;
8366 }
8367
8368 /*
8369 * wm_tbi_mediastatus: [ifmedia interface function]
8370 *
8371 * Get the current interface media status on a 1000BASE-X device.
8372 */
8373 static void
8374 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8375 {
8376 struct wm_softc *sc = ifp->if_softc;
8377 uint32_t ctrl, status;
8378
8379 ifmr->ifm_status = IFM_AVALID;
8380 ifmr->ifm_active = IFM_ETHER;
8381
8382 status = CSR_READ(sc, WMREG_STATUS);
8383 if ((status & STATUS_LU) == 0) {
8384 ifmr->ifm_active |= IFM_NONE;
8385 return;
8386 }
8387
8388 ifmr->ifm_status |= IFM_ACTIVE;
8389 /* Only 82545 is LX */
8390 if (sc->sc_type == WM_T_82545)
8391 ifmr->ifm_active |= IFM_1000_LX;
8392 else
8393 ifmr->ifm_active |= IFM_1000_SX;
8394 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
8395 ifmr->ifm_active |= IFM_FDX;
8396 else
8397 ifmr->ifm_active |= IFM_HDX;
8398 ctrl = CSR_READ(sc, WMREG_CTRL);
8399 if (ctrl & CTRL_RFCE)
8400 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
8401 if (ctrl & CTRL_TFCE)
8402 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
8403 }
8404
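/*
 * wm_check_for_link:
 *
 *	Check and, if necessary, recover the TBI link. Following the
 *	SWDPIN/LU/RXCW table below, force the link up (full-duplex) when
 *	the optics see a signal but autonegotiation has not completed, and
 *	return to autonegotiation once /C/ ordered sets are seen again.
 */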
8405 /* XXX TBI only */
8406 static int
8407 wm_check_for_link(struct wm_softc *sc)
8408 {
8409 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8410 uint32_t rxcw;
8411 uint32_t ctrl;
8412 uint32_t status;
8413 uint32_t sig;
8414
8415 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
8416 /* XXX need some work for >= 82571 */
8417 if (sc->sc_type >= WM_T_82571) {
8418 sc->sc_tbi_linkup = 1;
8419 return 0;
8420 }
8421 }
8422
8423 rxcw = CSR_READ(sc, WMREG_RXCW);
8424 ctrl = CSR_READ(sc, WMREG_CTRL);
8425 status = CSR_READ(sc, WMREG_STATUS);
8426
8427 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8428
8429 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8430 device_xname(sc->sc_dev), __func__,
8431 ((ctrl & CTRL_SWDPIN(1)) == sig),
8432 ((status & STATUS_LU) != 0),
8433 ((rxcw & RXCW_C) != 0)
8434 ));
8435
8436 /*
8437 * SWDPIN LU RXCW
8438 * 0 0 0
8439 * 0 0 1 (should not happen)
8440 * 0 1 0 (should not happen)
8441 * 0 1 1 (should not happen)
8442 * 1 0 0 Disable autonego and force linkup
8443 * 1 0 1 got /C/ but not linkup yet
8444 * 1 1 0 (linkup)
8445 * 1 1 1 If IFM_AUTO, back to autonego
8446 *
8447 */
8448 if (((ctrl & CTRL_SWDPIN(1)) == sig)
8449 && ((status & STATUS_LU) == 0)
8450 && ((rxcw & RXCW_C) == 0)) {
8451 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8452 __func__));
8453 sc->sc_tbi_linkup = 0;
8454 /* Disable auto-negotiation in the TXCW register */
8455 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8456
8457 /*
8458 * Force link-up and also force full-duplex.
8459 *
8460 		 * NOTE: CTRL will have updated TFCE and RFCE automatically,
8461 		 * so we should update sc->sc_ctrl
8462 */
8463 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8464 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8465 } else if (((status & STATUS_LU) != 0)
8466 && ((rxcw & RXCW_C) != 0)
8467 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8468 sc->sc_tbi_linkup = 1;
8469 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8470 __func__));
8471 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8472 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8473 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8474 && ((rxcw & RXCW_C) != 0)) {
8475 DPRINTF(WM_DEBUG_LINK, ("/C/"));
8476 } else {
8477 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8478 status));
8479 }
8480
8481 return 0;
8482 }
8483
8484 /*
8485 * wm_tbi_tick:
8486 *
8487 * Check the link on TBI devices.
8488 * This function acts as mii_tick().
8489 */
8490 static void
8491 wm_tbi_tick(struct wm_softc *sc)
8492 {
8493 struct mii_data *mii = &sc->sc_mii;
8494 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8495 uint32_t status;
8496
8497 KASSERT(WM_TX_LOCKED(sc));
8498
8499 status = CSR_READ(sc, WMREG_STATUS);
8500
8501 /* XXX is this needed? */
8502 (void)CSR_READ(sc, WMREG_RXCW);
8503 (void)CSR_READ(sc, WMREG_CTRL);
8504
8505 /* set link status */
8506 if ((status & STATUS_LU) == 0) {
8507 DPRINTF(WM_DEBUG_LINK,
8508 ("%s: LINK: checklink -> down\n",
8509 device_xname(sc->sc_dev)));
8510 sc->sc_tbi_linkup = 0;
8511 } else if (sc->sc_tbi_linkup == 0) {
8512 DPRINTF(WM_DEBUG_LINK,
8513 ("%s: LINK: checklink -> up %s\n",
8514 device_xname(sc->sc_dev),
8515 (status & STATUS_FD) ? "FDX" : "HDX"));
8516 sc->sc_tbi_linkup = 1;
8517 sc->sc_tbi_serdes_ticks = 0;
8518 }
8519
8520 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
8521 goto setled;
8522
8523 if ((status & STATUS_LU) == 0) {
8524 sc->sc_tbi_linkup = 0;
8525 /* If the timer expired, retry autonegotiation */
8526 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8527 && (++sc->sc_tbi_serdes_ticks
8528 >= sc->sc_tbi_serdes_anegticks)) {
8529 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8530 sc->sc_tbi_serdes_ticks = 0;
8531 /*
8532 * Reset the link, and let autonegotiation do
8533 * its thing
8534 */
8535 sc->sc_ctrl |= CTRL_LRST;
8536 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8537 CSR_WRITE_FLUSH(sc);
8538 delay(1000);
8539 sc->sc_ctrl &= ~CTRL_LRST;
8540 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8541 CSR_WRITE_FLUSH(sc);
8542 delay(1000);
8543 CSR_WRITE(sc, WMREG_TXCW,
8544 sc->sc_txcw & ~TXCW_ANE);
8545 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8546 }
8547 }
8548
8549 setled:
8550 wm_tbi_serdes_set_linkled(sc);
8551 }
8552
8553 /* SERDES related */
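/*
 * wm_serdes_power_up_link_82575:
 *
 *	Power up the SERDES/SGMII link by enabling the PCS and clearing
 *	software-definable pin 3 in CTRL_EXT.
 */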
8554 static void
8555 wm_serdes_power_up_link_82575(struct wm_softc *sc)
8556 {
8557 uint32_t reg;
8558
8559 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
8560 && ((sc->sc_flags & WM_F_SGMII) == 0))
8561 return;
8562
8563 reg = CSR_READ(sc, WMREG_PCS_CFG);
8564 reg |= PCS_CFG_PCS_EN;
8565 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
8566
8567 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8568 reg &= ~CTRL_EXT_SWDPIN(3);
8569 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8570 CSR_WRITE_FLUSH(sc);
8571 }
8572
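/*
 * wm_serdes_mediachange: [ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a SERDES device.
 */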
8573 static int
8574 wm_serdes_mediachange(struct ifnet *ifp)
8575 {
8576 struct wm_softc *sc = ifp->if_softc;
8577 bool pcs_autoneg = true; /* XXX */
8578 uint32_t ctrl_ext, pcs_lctl, reg;
8579
8580 /* XXX Currently, this function is not called on 8257[12] */
8581 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8582 || (sc->sc_type >= WM_T_82575))
8583 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8584
8585 wm_serdes_power_up_link_82575(sc);
8586
8587 sc->sc_ctrl |= CTRL_SLU;
8588
8589 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
8590 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
8591
8592 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8593 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
8594 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
8595 case CTRL_EXT_LINK_MODE_SGMII:
8596 pcs_autoneg = true;
8597 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
8598 break;
8599 case CTRL_EXT_LINK_MODE_1000KX:
8600 pcs_autoneg = false;
8601 /* FALLTHROUGH */
8602 default:
8603 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
8604 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
8605 pcs_autoneg = false;
8606 }
8607 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
8608 | CTRL_FRCFDX;
8609 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
8610 }
8611 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8612
8613 if (pcs_autoneg) {
8614 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
8615 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
8616
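		/* Advertise both symmetric and asymmetric pause */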
8617 reg = CSR_READ(sc, WMREG_PCS_ANADV);
8618 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
8619 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
8620 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
8621 } else
8622 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
8623
8624 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
8625
8627 return 0;
8628 }
8629
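/*
 * wm_serdes_mediastatus: [ifmedia interface function]
 *
 *	Get the current interface media status on a SERDES device.
 */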
8630 static void
8631 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8632 {
8633 struct wm_softc *sc = ifp->if_softc;
8634 struct mii_data *mii = &sc->sc_mii;
8635 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8636 uint32_t pcs_adv, pcs_lpab, reg;
8637
8638 ifmr->ifm_status = IFM_AVALID;
8639 ifmr->ifm_active = IFM_ETHER;
8640
8641 /* Check PCS */
8642 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8643 if ((reg & PCS_LSTS_LINKOK) == 0) {
8644 ifmr->ifm_active |= IFM_NONE;
8645 sc->sc_tbi_linkup = 0;
8646 goto setled;
8647 }
8648
8649 sc->sc_tbi_linkup = 1;
8650 ifmr->ifm_status |= IFM_ACTIVE;
8651 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
8652 if ((reg & PCS_LSTS_FDX) != 0)
8653 ifmr->ifm_active |= IFM_FDX;
8654 else
8655 ifmr->ifm_active |= IFM_HDX;
8656 mii->mii_media_active &= ~IFM_ETH_FMASK;
8657 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
8658 /* Check flow */
8659 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8660 if ((reg & PCS_LSTS_AN_COMP) == 0) {
8661 printf("XXX LINKOK but not ACOMP\n");
8662 goto setled;
8663 }
8664 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
8665 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
8666 printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
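		/* Resolve flow control per the IEEE 802.3 annex 28B pause rules */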
8667 if ((pcs_adv & TXCW_SYM_PAUSE)
8668 && (pcs_lpab & TXCW_SYM_PAUSE)) {
8669 mii->mii_media_active |= IFM_FLOW
8670 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
8671 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
8672 && (pcs_adv & TXCW_ASYM_PAUSE)
8673 && (pcs_lpab & TXCW_SYM_PAUSE)
8674 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8675 mii->mii_media_active |= IFM_FLOW
8676 | IFM_ETH_TXPAUSE;
8677 } else if ((pcs_adv & TXCW_SYM_PAUSE)
8678 && (pcs_adv & TXCW_ASYM_PAUSE)
8679 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
8680 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8681 mii->mii_media_active |= IFM_FLOW
8682 | IFM_ETH_RXPAUSE;
8683 } else {
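			/* Otherwise, no flow control was negotiated */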
8684 }
8685 }
8686 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8687 | (mii->mii_media_active & IFM_ETH_FMASK);
8688 setled:
8689 wm_tbi_serdes_set_linkled(sc);
8690 }
8691
8692 /*
8693 * wm_serdes_tick:
8694 *
8695 * Check the link on serdes devices.
8696 */
8697 static void
8698 wm_serdes_tick(struct wm_softc *sc)
8699 {
8700 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8701 struct mii_data *mii = &sc->sc_mii;
8702 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8703 uint32_t reg;
8704
8705 KASSERT(WM_TX_LOCKED(sc));
8706
8707 mii->mii_media_status = IFM_AVALID;
8708 mii->mii_media_active = IFM_ETHER;
8709
8710 /* Check PCS */
8711 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8712 if ((reg & PCS_LSTS_LINKOK) != 0) {
8713 mii->mii_media_status |= IFM_ACTIVE;
8714 sc->sc_tbi_linkup = 1;
8715 sc->sc_tbi_serdes_ticks = 0;
8716 mii->mii_media_active |= IFM_1000_SX; /* XXX */
8717 if ((reg & PCS_LSTS_FDX) != 0)
8718 mii->mii_media_active |= IFM_FDX;
8719 else
8720 mii->mii_media_active |= IFM_HDX;
8721 } else {
8722 mii->mii_media_status |= IFM_NONE;
8723 sc->sc_tbi_linkup = 0;
8724 /* If the timer expired, retry autonegotiation */
8725 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8726 && (++sc->sc_tbi_serdes_ticks
8727 >= sc->sc_tbi_serdes_anegticks)) {
8728 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8729 sc->sc_tbi_serdes_ticks = 0;
8730 /* XXX */
8731 wm_serdes_mediachange(ifp);
8732 }
8733 }
8734
8735 wm_tbi_serdes_set_linkled(sc);
8736 }
8737
8738 /* SFP related */
8739
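/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte from the SFP module's EEPROM through the I2CCMD
 *	register. Returns 0 on success, -1 on timeout or I2C error.
 */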
8740 static int
8741 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
8742 {
8743 uint32_t i2ccmd;
8744 int i;
8745
8746 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
8747 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8748
8749 /* Poll the ready bit */
8750 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8751 delay(50);
8752 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8753 if (i2ccmd & I2CCMD_READY)
8754 break;
8755 }
8756 if ((i2ccmd & I2CCMD_READY) == 0)
8757 return -1;
8758 if ((i2ccmd & I2CCMD_ERROR) != 0)
8759 return -1;
8760
8761 *data = i2ccmd & 0x00ff;
8762
8763 return 0;
8764 }
8765
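/*
 * wm_sfp_get_media_type:
 *
 *	Identify the attached SFP module and derive the media type from its
 *	Ethernet compliance flags: SERDES for 1000BASE-SX/LX, SGMII copper
 *	for 1000BASE-T and SGMII SERDES for 100BASE-FX. The I2C interface
 *	is enabled for the duration of the probe and restored afterwards.
 */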
8766 static uint32_t
8767 wm_sfp_get_media_type(struct wm_softc *sc)
8768 {
8769 uint32_t ctrl_ext;
8770 uint8_t val = 0;
8771 int timeout = 3;
8772 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
8773 int rv = -1;
8774
8775 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8776 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
8777 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
8778 CSR_WRITE_FLUSH(sc);
8779
8780 /* Read SFP module data */
8781 while (timeout) {
8782 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
8783 if (rv == 0)
8784 break;
8785 delay(100*1000); /* XXX too big */
8786 timeout--;
8787 }
8788 if (rv != 0)
8789 goto out;
8790 switch (val) {
8791 case SFF_SFP_ID_SFF:
8792 aprint_normal_dev(sc->sc_dev,
8793 "Module/Connector soldered to board\n");
8794 break;
8795 case SFF_SFP_ID_SFP:
8796 aprint_normal_dev(sc->sc_dev, "SFP\n");
8797 break;
8798 case SFF_SFP_ID_UNKNOWN:
8799 goto out;
8800 default:
8801 break;
8802 }
8803
8804 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
8805 if (rv != 0) {
8806 goto out;
8807 }
8808
8809 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
8810 mediatype = WM_MEDIATYPE_SERDES;
8811 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
8812 sc->sc_flags |= WM_F_SGMII;
8813 mediatype = WM_MEDIATYPE_COPPER;
8814 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
8815 sc->sc_flags |= WM_F_SGMII;
8816 mediatype = WM_MEDIATYPE_SERDES;
8817 }
8818
8819 out:
8820 /* Restore I2C interface setting */
8821 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8822
8823 return mediatype;
8824 }

8825 /*
8826 * NVM related.
8827 * Microwire, SPI (w/wo EERD) and Flash.
8828 */
8829
8830 /* Both spi and uwire */
8831
8832 /*
8833 * wm_eeprom_sendbits:
8834 *
8835 * Send a series of bits to the EEPROM.
8836 */
8837 static void
8838 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
8839 {
8840 uint32_t reg;
8841 int x;
8842
8843 reg = CSR_READ(sc, WMREG_EECD);
8844
8845 for (x = nbits; x > 0; x--) {
8846 if (bits & (1U << (x - 1)))
8847 reg |= EECD_DI;
8848 else
8849 reg &= ~EECD_DI;
8850 CSR_WRITE(sc, WMREG_EECD, reg);
8851 CSR_WRITE_FLUSH(sc);
8852 delay(2);
8853 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8854 CSR_WRITE_FLUSH(sc);
8855 delay(2);
8856 CSR_WRITE(sc, WMREG_EECD, reg);
8857 CSR_WRITE_FLUSH(sc);
8858 delay(2);
8859 }
8860 }
8861
8862 /*
8863 * wm_eeprom_recvbits:
8864 *
8865 * Receive a series of bits from the EEPROM.
8866 */
8867 static void
8868 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
8869 {
8870 uint32_t reg, val;
8871 int x;
8872
8873 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
8874
8875 val = 0;
8876 for (x = nbits; x > 0; x--) {
8877 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8878 CSR_WRITE_FLUSH(sc);
8879 delay(2);
8880 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
8881 val |= (1U << (x - 1));
8882 CSR_WRITE(sc, WMREG_EECD, reg);
8883 CSR_WRITE_FLUSH(sc);
8884 delay(2);
8885 }
8886 *valp = val;
8887 }
8888
8889 /* Microwire */
8890
8891 /*
8892 * wm_nvm_read_uwire:
8893 *
8894 * Read a word from the EEPROM using the MicroWire protocol.
8895 */
8896 static int
8897 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8898 {
8899 uint32_t reg, val;
8900 int i;
8901
8902 for (i = 0; i < wordcnt; i++) {
8903 /* Clear SK and DI. */
8904 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
8905 CSR_WRITE(sc, WMREG_EECD, reg);
8906
8907 /*
8908 * XXX: workaround for a bug in qemu-0.12.x and prior
8909 * and Xen.
8910 *
8911 		 * We use this workaround only for 82540 because qemu's
8912 		 * e1000 acts as an 82540.
8913 */
8914 if (sc->sc_type == WM_T_82540) {
8915 reg |= EECD_SK;
8916 CSR_WRITE(sc, WMREG_EECD, reg);
8917 reg &= ~EECD_SK;
8918 CSR_WRITE(sc, WMREG_EECD, reg);
8919 CSR_WRITE_FLUSH(sc);
8920 delay(2);
8921 }
8922 /* XXX: end of workaround */
8923
8924 /* Set CHIP SELECT. */
8925 reg |= EECD_CS;
8926 CSR_WRITE(sc, WMREG_EECD, reg);
8927 CSR_WRITE_FLUSH(sc);
8928 delay(2);
8929
8930 /* Shift in the READ command. */
8931 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
8932
8933 /* Shift in address. */
8934 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
8935
8936 /* Shift out the data. */
8937 wm_eeprom_recvbits(sc, &val, 16);
8938 data[i] = val & 0xffff;
8939
8940 /* Clear CHIP SELECT. */
8941 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
8942 CSR_WRITE(sc, WMREG_EECD, reg);
8943 CSR_WRITE_FLUSH(sc);
8944 delay(2);
8945 }
8946
8947 return 0;
8948 }
8949
8950 /* SPI */
8951
8952 /*
8953 * Set SPI and FLASH related information from the EECD register.
8954 * For 82541 and 82547, the word size is taken from EEPROM.
8955 */
8956 static int
8957 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
8958 {
8959 int size;
8960 uint32_t reg;
8961 uint16_t data;
8962
8963 reg = CSR_READ(sc, WMREG_EECD);
8964 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
8965
8966 /* Read the size of NVM from EECD by default */
8967 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8968 switch (sc->sc_type) {
8969 case WM_T_82541:
8970 case WM_T_82541_2:
8971 case WM_T_82547:
8972 case WM_T_82547_2:
8973 /* Set dummy value to access EEPROM */
8974 sc->sc_nvm_wordsize = 64;
8975 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
8976 reg = data;
8977 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8978 if (size == 0)
8979 size = 6; /* 64 word size */
8980 else
8981 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
8982 break;
8983 case WM_T_80003:
8984 case WM_T_82571:
8985 case WM_T_82572:
8986 case WM_T_82573: /* SPI case */
8987 case WM_T_82574: /* SPI case */
8988 case WM_T_82583: /* SPI case */
8989 size += NVM_WORD_SIZE_BASE_SHIFT;
8990 if (size > 14)
8991 size = 14;
8992 break;
8993 case WM_T_82575:
8994 case WM_T_82576:
8995 case WM_T_82580:
8996 case WM_T_I350:
8997 case WM_T_I354:
8998 case WM_T_I210:
8999 case WM_T_I211:
9000 size += NVM_WORD_SIZE_BASE_SHIFT;
9001 if (size > 15)
9002 size = 15;
9003 break;
9004 default:
9005 aprint_error_dev(sc->sc_dev,
9006 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
9007 return -1;
9009 }
9010
9011 sc->sc_nvm_wordsize = 1 << size;
9012
9013 return 0;
9014 }
9015
9016 /*
9017 * wm_nvm_ready_spi:
9018 *
9019 * Wait for a SPI EEPROM to be ready for commands.
9020 */
9021 static int
9022 wm_nvm_ready_spi(struct wm_softc *sc)
9023 {
9024 uint32_t val;
9025 int usec;
9026
9027 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
9028 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
9029 wm_eeprom_recvbits(sc, &val, 8);
9030 if ((val & SPI_SR_RDY) == 0)
9031 break;
9032 }
9033 if (usec >= SPI_MAX_RETRIES) {
9034 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
9035 return 1;
9036 }
9037 return 0;
9038 }
9039
9040 /*
9041 * wm_nvm_read_spi:
9042 *
9043  *	Read a word from the EEPROM using the SPI protocol.
9044 */
9045 static int
9046 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9047 {
9048 uint32_t reg, val;
9049 int i;
9050 uint8_t opc;
9051
9052 /* Clear SK and CS. */
9053 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
9054 CSR_WRITE(sc, WMREG_EECD, reg);
9055 CSR_WRITE_FLUSH(sc);
9056 delay(2);
9057
9058 if (wm_nvm_ready_spi(sc))
9059 return 1;
9060
9061 /* Toggle CS to flush commands. */
9062 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
9063 CSR_WRITE_FLUSH(sc);
9064 delay(2);
9065 CSR_WRITE(sc, WMREG_EECD, reg);
9066 CSR_WRITE_FLUSH(sc);
9067 delay(2);
9068
9069 opc = SPI_OPC_READ;
9070 if (sc->sc_nvm_addrbits == 8 && word >= 128)
9071 opc |= SPI_OPC_A8;
9072
9073 wm_eeprom_sendbits(sc, opc, 8);
9074 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
9075
9076 for (i = 0; i < wordcnt; i++) {
9077 wm_eeprom_recvbits(sc, &val, 16);
9078 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
9079 }
9080
9081 /* Raise CS and clear SK. */
9082 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
9083 CSR_WRITE(sc, WMREG_EECD, reg);
9084 CSR_WRITE_FLUSH(sc);
9085 delay(2);
9086
9087 return 0;
9088 }
9089
9090 /* Using with EERD */
9091
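/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the EERD or EEWR register (selected by "rw") until the DONE
 *	bit is set. Returns 0 on completion, -1 on timeout.
 */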
9092 static int
9093 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
9094 {
9095 uint32_t attempts = 100000;
9096 uint32_t i, reg = 0;
9097 int32_t done = -1;
9098
9099 for (i = 0; i < attempts; i++) {
9100 reg = CSR_READ(sc, rw);
9101
9102 if (reg & EERD_DONE) {
9103 done = 0;
9104 break;
9105 }
9106 delay(5);
9107 }
9108
9109 return done;
9110 }
9111
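/*
 * wm_nvm_read_eerd:
 *
 *	Read words from the NVM one at a time through the EERD register.
 */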
9112 static int
9113 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
9114 uint16_t *data)
9115 {
9116 int i, eerd = 0;
9117 int error = 0;
9118
9119 for (i = 0; i < wordcnt; i++) {
9120 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
9121
9122 CSR_WRITE(sc, WMREG_EERD, eerd);
9123 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
9124 if (error != 0)
9125 break;
9126
9127 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
9128 }
9129
9130 return error;
9131 }
9132
9133 /* Flash */
9134
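/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Determine which NVM bank carries valid data: on ICH8/ICH9 trust the
 *	EECD SEC1VAL bit when its valid bits are set, otherwise look for the
 *	NVM signature in each bank. Returns 0 on success, -1 if neither bank
 *	has a valid signature.
 */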
9135 static int
9136 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
9137 {
9138 uint32_t eecd;
9139 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
9140 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
9141 uint8_t sig_byte = 0;
9142
9143 switch (sc->sc_type) {
9144 case WM_T_ICH8:
9145 case WM_T_ICH9:
9146 eecd = CSR_READ(sc, WMREG_EECD);
9147 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
9148 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
9149 return 0;
9150 }
9151 /* FALLTHROUGH */
9152 default:
9153 /* Default to 0 */
9154 *bank = 0;
9155
9156 /* Check bank 0 */
9157 wm_read_ich8_byte(sc, act_offset, &sig_byte);
9158 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9159 *bank = 0;
9160 return 0;
9161 }
9162
9163 /* Check bank 1 */
9164 wm_read_ich8_byte(sc, act_offset + bank1_offset,
9165 &sig_byte);
9166 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9167 *bank = 1;
9168 return 0;
9169 }
9170 }
9171
9172 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
9173 device_xname(sc->sc_dev)));
9174 return -1;
9175 }
9176
9177 /******************************************************************************
9178 * This function does initial flash setup so that a new read/write/erase cycle
9179 * can be started.
9180 *
9181 * sc - The pointer to the hw structure
9182 ****************************************************************************/
9183 static int32_t
9184 wm_ich8_cycle_init(struct wm_softc *sc)
9185 {
9186 uint16_t hsfsts;
9187 int32_t error = 1;
9188 int32_t i = 0;
9189
9190 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9191
9192 	/* Maybe we should check the Flash Descriptor Valid bit in HW status */
9193 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
9194 return error;
9195 }
9196
9197 /* Clear FCERR in Hw status by writing 1 */
9198 /* Clear DAEL in Hw status by writing a 1 */
9199 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
9200
9201 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9202
9203 	/*
9204 	 * Either we should have a hardware SPI cycle-in-progress bit to check
9205 	 * against, in order to start a new cycle, or the FDONE bit should be
9206 	 * changed in the hardware so that it is 1 after a hardware reset,
9207 	 * which can then be used as an indication whether a cycle is in
9208 	 * progress or has been completed.  We should also have some software
9209 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit so
9210 	 * that accesses to those bits from two threads are serialized, or a
9211 	 * way so that two threads don't start the cycle at the same time.
9212 	 */
9213
9214 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
9215 /*
9216 * There is no cycle running at present, so we can start a
9217 * cycle
9218 */
9219
9220 /* Begin by setting Flash Cycle Done. */
9221 hsfsts |= HSFSTS_DONE;
9222 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9223 error = 0;
9224 } else {
9225 /*
9226 		 * otherwise poll for some time so the current cycle has a
9227 * chance to end before giving up.
9228 */
9229 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
9230 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9231 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
9232 error = 0;
9233 break;
9234 }
9235 delay(1);
9236 }
9237 if (error == 0) {
9238 /*
9239 			 * Successfully waited for the previous cycle to
9240 			 * finish; now set the Flash Cycle Done bit.
9241 */
9242 hsfsts |= HSFSTS_DONE;
9243 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9244 }
9245 }
9246 return error;
9247 }
9248
9249 /******************************************************************************
9250 * This function starts a flash cycle and waits for its completion
9251 *
9252 * sc - The pointer to the hw structure
9253 ****************************************************************************/
9254 static int32_t
9255 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
9256 {
9257 uint16_t hsflctl;
9258 uint16_t hsfsts;
9259 int32_t error = 1;
9260 uint32_t i = 0;
9261
9262 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
9263 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
9264 hsflctl |= HSFCTL_GO;
9265 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
9266
9267 /* Wait till FDONE bit is set to 1 */
9268 do {
9269 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9270 if (hsfsts & HSFSTS_DONE)
9271 break;
9272 delay(1);
9273 i++;
9274 } while (i < timeout);
9275 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
9276 error = 0;
9277
9278 return error;
9279 }
9280
9281 /******************************************************************************
9282 * Reads a byte or word from the NVM using the ICH8 flash access registers.
9283 *
9284 * sc - The pointer to the hw structure
9285 * index - The index of the byte or word to read.
9286 * size - Size of data to read, 1=byte 2=word
9287 * data - Pointer to the word to store the value read.
9288 *****************************************************************************/
9289 static int32_t
9290 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
9291 uint32_t size, uint16_t *data)
9292 {
9293 uint16_t hsfsts;
9294 uint16_t hsflctl;
9295 uint32_t flash_linear_address;
9296 uint32_t flash_data = 0;
9297 int32_t error = 1;
9298 int32_t count = 0;
9299
9300 	if (size < 1 || size > 2 || data == NULL ||
9301 index > ICH_FLASH_LINEAR_ADDR_MASK)
9302 return error;
9303
9304 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
9305 sc->sc_ich8_flash_base;
9306
9307 do {
9308 delay(1);
9309 /* Steps */
9310 error = wm_ich8_cycle_init(sc);
9311 if (error)
9312 break;
9313
9314 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
9315 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
9316 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
9317 & HSFCTL_BCOUNT_MASK;
9318 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
9319 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
9320
9321 /*
9322 * Write the last 24 bits of index into Flash Linear address
9323 * field in Flash Address
9324 */
9325 /* TODO: TBD maybe check the index against the size of flash */
9326
9327 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
9328
9329 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
9330
9331 /*
9332 		 * Check if FCERR is set to 1.  If so, clear it and try the
9333 		 * whole sequence a few more times; otherwise read in (shift
9334 		 * in) the Flash Data0, least significant byte first.
9336 */
9337 if (error == 0) {
9338 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
9339 if (size == 1)
9340 *data = (uint8_t)(flash_data & 0x000000FF);
9341 else if (size == 2)
9342 *data = (uint16_t)(flash_data & 0x0000FFFF);
9343 break;
9344 } else {
9345 /*
9346 * If we've gotten here, then things are probably
9347 * completely hosed, but if the error condition is
9348 * detected, it won't hurt to give it another try...
9349 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
9350 */
9351 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9352 if (hsfsts & HSFSTS_ERR) {
9353 /* Repeat for some time before giving up. */
9354 continue;
9355 } else if ((hsfsts & HSFSTS_DONE) == 0)
9356 break;
9357 }
9358 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
9359
9360 return error;
9361 }
9362
9363 /******************************************************************************
9364 * Reads a single byte from the NVM using the ICH8 flash access registers.
9365 *
9366 * sc - pointer to wm_hw structure
9367 * index - The index of the byte to read.
9368 * data - Pointer to a byte to store the value read.
9369 *****************************************************************************/
9370 static int32_t
9371 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
9372 {
9373 int32_t status;
9374 uint16_t word = 0;
9375
9376 status = wm_read_ich8_data(sc, index, 1, &word);
9377 if (status == 0)
9378 *data = (uint8_t)word;
9379 else
9380 *data = 0;
9381
9382 return status;
9383 }
9384
9385 /******************************************************************************
9386 * Reads a word from the NVM using the ICH8 flash access registers.
9387 *
9388 * sc - pointer to wm_hw structure
9389 * index - The starting byte index of the word to read.
9390 * data - Pointer to a word to store the value read.
9391 *****************************************************************************/
9392 static int32_t
9393 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
9394 {
9395 int32_t status;
9396
9397 status = wm_read_ich8_data(sc, index, 2, data);
9398 return status;
9399 }
9400
9401 /******************************************************************************
9402 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
9403 * register.
9404 *
9405 * sc - Struct containing variables accessed by shared code
9406 * offset - offset of word in the EEPROM to read
9407 * data - word read from the EEPROM
9408 * words - number of words to read
9409 *****************************************************************************/
9410 static int
9411 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
9412 {
9413 int32_t error = 0;
9414 uint32_t flash_bank = 0;
9415 uint32_t act_offset = 0;
9416 uint32_t bank_offset = 0;
9417 uint16_t word = 0;
9418 uint16_t i = 0;
9419
9420 /*
9421 * We need to know which is the valid flash bank. In the event
9422 * that we didn't allocate eeprom_shadow_ram, we may not be
9423 * managing flash_bank. So it cannot be trusted and needs
9424 * to be updated with each read.
9425 */
9426 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
9427 if (error) {
9428 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
9429 device_xname(sc->sc_dev)));
9430 flash_bank = 0;
9431 }
9432
9433 /*
9434 * Adjust offset appropriately if we're on bank 1 - adjust for word
9435 * size
9436 */
9437 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
9438
9439 error = wm_get_swfwhw_semaphore(sc);
9440 if (error) {
9441 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9442 __func__);
9443 return error;
9444 }
9445
9446 for (i = 0; i < words; i++) {
9447 /* The NVM part needs a byte offset, hence * 2 */
9448 act_offset = bank_offset + ((offset + i) * 2);
9449 error = wm_read_ich8_word(sc, act_offset, &word);
9450 if (error) {
9451 aprint_error_dev(sc->sc_dev,
9452 "%s: failed to read NVM\n", __func__);
9453 break;
9454 }
9455 data[i] = word;
9456 }
9457
9458 wm_put_swfwhw_semaphore(sc);
9459 return error;
9460 }
9461
9462 /* iNVM */
9463
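/*
 * wm_nvm_read_word_invm:
 *
 *	Scan the iNVM (integrated NVM) records for a word autoload record
 *	matching the given address, skipping over CSR and RSA key autoload
 *	structures. Returns 0 if the word was found.
 */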
9464 static int
9465 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
9466 {
9467 	int32_t rv = -1;	/* assume failure until the word is found */
9468 uint32_t invm_dword;
9469 uint16_t i;
9470 uint8_t record_type, word_address;
9471
9472 for (i = 0; i < INVM_SIZE; i++) {
9473 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
9474 /* Get record type */
9475 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
9476 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
9477 break;
9478 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
9479 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
9480 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
9481 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
9482 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
9483 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
9484 if (word_address == address) {
9485 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
9486 rv = 0;
9487 break;
9488 }
9489 }
9490 }
9491
9492 return rv;
9493 }
9494
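/*
 * wm_nvm_read_invm:
 *
 *	Read words from the iNVM, substituting documented default values
 *	for configuration words that were never programmed; unmapped words
 *	read back as NVM_RESERVED_WORD.
 */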
9495 static int
9496 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
9497 {
9498 int rv = 0;
9499 int i;
9500
9501 for (i = 0; i < words; i++) {
9502 switch (offset + i) {
9503 case NVM_OFF_MACADDR:
9504 case NVM_OFF_MACADDR1:
9505 case NVM_OFF_MACADDR2:
9506 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
9507 if (rv != 0) {
9508 data[i] = 0xffff;
9509 rv = -1;
9510 }
9511 break;
9512 case NVM_OFF_CFG2:
9513 rv = wm_nvm_read_word_invm(sc, offset, data);
9514 if (rv != 0) {
9515 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
9516 rv = 0;
9517 }
9518 break;
9519 case NVM_OFF_CFG4:
9520 rv = wm_nvm_read_word_invm(sc, offset, data);
9521 if (rv != 0) {
9522 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
9523 rv = 0;
9524 }
9525 break;
9526 case NVM_OFF_LED_1_CFG:
9527 rv = wm_nvm_read_word_invm(sc, offset, data);
9528 if (rv != 0) {
9529 *data = NVM_LED_1_CFG_DEFAULT_I211;
9530 rv = 0;
9531 }
9532 break;
9533 case NVM_OFF_LED_0_2_CFG:
9534 rv = wm_nvm_read_word_invm(sc, offset, data);
9535 if (rv != 0) {
9536 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
9537 rv = 0;
9538 }
9539 break;
9540 case NVM_OFF_ID_LED_SETTINGS:
9541 rv = wm_nvm_read_word_invm(sc, offset, data);
9542 if (rv != 0) {
9543 *data = ID_LED_RESERVED_FFFF;
9544 rv = 0;
9545 }
9546 break;
9547 default:
9548 DPRINTF(WM_DEBUG_NVM,
9549 ("NVM word 0x%02x is not mapped.\n", offset));
9550 *data = NVM_RESERVED_WORD;
9551 break;
9552 }
9553 }
9554
9555 return rv;
9556 }
9557
9558 /* Locking, NVM type detection, checksum validation, version and read */
9559
9560 /*
9561 * wm_nvm_acquire:
9562 *
9563 * Perform the EEPROM handshake required on some chips.
9564 */
9565 static int
9566 wm_nvm_acquire(struct wm_softc *sc)
9567 {
9568 uint32_t reg;
9569 int x;
9570 int ret = 0;
9571
9572 	/* Flash-type NVM needs no handshake; always succeeds */
9573 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9574 return 0;
9575
9576 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
9577 ret = wm_get_swfwhw_semaphore(sc);
9578 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
9579 /* This will also do wm_get_swsm_semaphore() if needed */
9580 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
9581 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
9582 ret = wm_get_swsm_semaphore(sc);
9583 }
9584
9585 if (ret) {
9586 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9587 __func__);
9588 return 1;
9589 }
9590
9591 if (sc->sc_flags & WM_F_LOCK_EECD) {
9592 reg = CSR_READ(sc, WMREG_EECD);
9593
9594 /* Request EEPROM access. */
9595 reg |= EECD_EE_REQ;
9596 CSR_WRITE(sc, WMREG_EECD, reg);
9597
9598 /* ..and wait for it to be granted. */
9599 for (x = 0; x < 1000; x++) {
9600 reg = CSR_READ(sc, WMREG_EECD);
9601 if (reg & EECD_EE_GNT)
9602 break;
9603 delay(5);
9604 }
9605 if ((reg & EECD_EE_GNT) == 0) {
9606 aprint_error_dev(sc->sc_dev,
9607 "could not acquire EEPROM GNT\n");
9608 reg &= ~EECD_EE_REQ;
9609 CSR_WRITE(sc, WMREG_EECD, reg);
9610 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9611 wm_put_swfwhw_semaphore(sc);
9612 if (sc->sc_flags & WM_F_LOCK_SWFW)
9613 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9614 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9615 wm_put_swsm_semaphore(sc);
9616 return 1;
9617 }
9618 }
9619
9620 return 0;
9621 }
9622
9623 /*
9624 * wm_nvm_release:
9625 *
9626 * Release the EEPROM mutex.
9627 */
9628 static void
9629 wm_nvm_release(struct wm_softc *sc)
9630 {
9631 uint32_t reg;
9632
9633 	/* Flash-type NVM needs no handshake; nothing to release */
9634 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9635 return;
9636
9637 if (sc->sc_flags & WM_F_LOCK_EECD) {
9638 reg = CSR_READ(sc, WMREG_EECD);
9639 reg &= ~EECD_EE_REQ;
9640 CSR_WRITE(sc, WMREG_EECD, reg);
9641 }
9642
9643 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9644 wm_put_swfwhw_semaphore(sc);
9645 if (sc->sc_flags & WM_F_LOCK_SWFW)
9646 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9647 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9648 wm_put_swsm_semaphore(sc);
9649 }
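
/*
 * Illustrative sketch, not part of the driver: every NVM access is
 * bracketed by the acquire/release pair above, exactly as
 * wm_nvm_read() does below.
 */
#if 0
	if (wm_nvm_acquire(sc) == 0) {
		/* ... NVM commands go here ... */
		wm_nvm_release(sc);
	}
#endif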
9650
9651 static int
9652 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
9653 {
9654 uint32_t eecd = 0;
9655
9656 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
9657 || sc->sc_type == WM_T_82583) {
9658 eecd = CSR_READ(sc, WMREG_EECD);
9659
9660 /* Isolate bits 15 & 16 */
9661 eecd = ((eecd >> 15) & 0x03);
9662
9663 /* If both bits are set, device is Flash type */
9664 if (eecd == 0x03)
9665 return 0;
9666 }
9667 return 1;
9668 }
9669
9670 static int
9671 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
9672 {
9673 uint32_t eec;
9674
9675 eec = CSR_READ(sc, WMREG_EEC);
9676 if ((eec & EEC_FLASH_DETECTED) != 0)
9677 return 1;
9678
9679 return 0;
9680 }
9681
9682 /*
9683 * wm_nvm_validate_checksum
9684 *
9685 * The checksum is defined as the sum of the first 64 (16 bit) words.
9686 */
9687 static int
9688 wm_nvm_validate_checksum(struct wm_softc *sc)
9689 {
9690 uint16_t checksum;
9691 uint16_t eeprom_data;
9692 #ifdef WM_DEBUG
9693 uint16_t csum_wordaddr, valid_checksum;
9694 #endif
9695 int i;
9696
9697 checksum = 0;
9698
9699 /* Don't check for I211 */
9700 if (sc->sc_type == WM_T_I211)
9701 return 0;
9702
9703 #ifdef WM_DEBUG
9704 if (sc->sc_type == WM_T_PCH_LPT) {
9705 csum_wordaddr = NVM_OFF_COMPAT;
9706 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
9707 } else {
9708 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
9709 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
9710 }
9711
9712 /* Dump EEPROM image for debug */
9713 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9714 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9715 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
9716 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
9717 if ((eeprom_data & valid_checksum) == 0) {
9718 DPRINTF(WM_DEBUG_NVM,
9719 ("%s: NVM need to be updated (%04x != %04x)\n",
9720 device_xname(sc->sc_dev), eeprom_data,
9721 valid_checksum));
9722 }
9723 }
9724
9725 if ((wm_debug & WM_DEBUG_NVM) != 0) {
9726 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
9727 for (i = 0; i < NVM_SIZE; i++) {
9728 if (wm_nvm_read(sc, i, 1, &eeprom_data))
9729 printf("XXXX ");
9730 else
9731 printf("%04hx ", eeprom_data);
9732 if (i % 8 == 7)
9733 printf("\n");
9734 }
9735 }
9736
9737 #endif /* WM_DEBUG */
9738
9739 for (i = 0; i < NVM_SIZE; i++) {
9740 if (wm_nvm_read(sc, i, 1, &eeprom_data))
9741 return 1;
9742 checksum += eeprom_data;
9743 }
9744
9745 if (checksum != (uint16_t) NVM_CHECKSUM) {
9746 #ifdef WM_DEBUG
9747 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
9748 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
9749 #endif
9750 }
9751
9752 return 0;
9753 }
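
/*
 * Illustrative sketch, not part of the driver: the last of the 64
 * words is chosen by the NVM image so that the sum of words
 * 0x00-0x3f equals NVM_CHECKSUM (0xbaba).  image[] here is a
 * hypothetical host-side copy of the NVM words; a provisioning tool
 * would compute the checksum word like this.
 */
#if 0
	uint16_t sum = 0, i;

	for (i = 0; i < NVM_SIZE - 1; i++)
		sum += image[i];			/* words 0x00..0x3e */
	image[NVM_SIZE - 1] = NVM_CHECKSUM - sum;	/* word 0x3f */
#endif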
9754
9755 static void
9756 wm_nvm_version(struct wm_softc *sc)
9757 {
9758 uint16_t major, minor, build, patch;
9759 uint16_t uid0, uid1;
9760 uint16_t nvm_data;
9761 uint16_t off;
9762 bool check_version = false;
9763 bool check_optionrom = false;
9764 bool have_build = false;
9765
9766 /*
9767 * Version format:
9768 *
9769 * XYYZ
9770 * X0YZ
9771 * X0YY
9772 *
9773 * Example:
9774 *
9775 	 *	82571	0x50a2	5.10.2?	(the spec update mentions 5.6-5.10)
9776 	 *	82571	0x50a6	5.10.6?
9777 	 *	82572	0x506a	5.6.10?
9778 	 *	82572EI	0x5069	5.6.9?
9779 	 *	82574L	0x1080	1.8.0?	(the spec update mentions 2.1.4)
9780 	 *		0x2013	2.1.3?
9781 	 *	82583	0x10a0	1.10.0? (the document says it is the default value)
9782 */
9783 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
9784 switch (sc->sc_type) {
9785 case WM_T_82571:
9786 case WM_T_82572:
9787 case WM_T_82574:
9788 check_version = true;
9789 check_optionrom = true;
9790 have_build = true;
9791 break;
9792 case WM_T_82575:
9793 case WM_T_82576:
9794 case WM_T_82580:
9795 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
9796 check_version = true;
9797 break;
9798 case WM_T_I211:
9799 /* XXX wm_nvm_version_invm(sc); */
9800 return;
9801 case WM_T_I210:
9802 if (!wm_nvm_get_flash_presence_i210(sc)) {
9803 /* XXX wm_nvm_version_invm(sc); */
9804 return;
9805 }
9806 /* FALLTHROUGH */
9807 case WM_T_I350:
9808 case WM_T_I354:
9809 check_version = true;
9810 check_optionrom = true;
9811 break;
9812 default:
9813 return;
9814 }
9815 if (check_version) {
9816 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
9817 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
9818 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
9819 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
9820 build = nvm_data & NVM_BUILD_MASK;
9821 have_build = true;
9822 } else
9823 minor = nvm_data & 0x00ff;
9824
9825 		/* Convert hex-coded minor (e.g. 0x10) to decimal (10) */
9826 minor = (minor / 16) * 10 + (minor % 16);
9827
9828 aprint_verbose(", version %d.%d", major, minor);
9829 if (have_build)
9830 aprint_verbose(".%d", build);
9831 sc->sc_nvm_ver_major = major;
9832 sc->sc_nvm_ver_minor = minor;
9833 }
9834 if (check_optionrom) {
9835 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
9836 /* Option ROM Version */
9837 if ((off != 0x0000) && (off != 0xffff)) {
9838 off += NVM_COMBO_VER_OFF;
9839 wm_nvm_read(sc, off + 1, 1, &uid1);
9840 wm_nvm_read(sc, off, 1, &uid0);
9841 if ((uid0 != 0) && (uid0 != 0xffff)
9842 && (uid1 != 0) && (uid1 != 0xffff)) {
9843 /* 16bits */
9844 major = uid0 >> 8;
9845 build = (uid0 << 8) | (uid1 >> 8);
9846 patch = uid1 & 0x00ff;
9847 aprint_verbose(", option ROM Version %d.%d.%d",
9848 major, build, patch);
9849 }
9850 }
9851 }
9852
9853 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
9854 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
9855 }
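
/*
 * Illustrative sketch, not part of the driver: decoding the sample
 * 82571 word 0x50a2 with the masks used above, under the XYYZ layout
 * documented in the comment (4-bit major, 8-bit minor, 4-bit build),
 * yields "5.10.2".
 */
#if 0
	uint16_t nvm_data = 0x50a2;	/* sample 82571 image */
	uint16_t major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT; /* 5 */
	uint16_t minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT; /* 0x0a */
	uint16_t build = nvm_data & NVM_BUILD_MASK;			 /* 2 */

	minor = (minor / 16) * 10 + (minor % 16);	/* 0x0a -> 10 */
#endif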
9856
9857 /*
9858 * wm_nvm_read:
9859 *
9860 * Read data from the serial EEPROM.
9861 */
9862 static int
9863 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9864 {
9865 int rv;
9866
9867 if (sc->sc_flags & WM_F_EEPROM_INVALID)
9868 return 1;
9869
9870 if (wm_nvm_acquire(sc))
9871 return 1;
9872
9873 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9874 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9875 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
9876 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
9877 else if (sc->sc_flags & WM_F_EEPROM_INVM)
9878 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
9879 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
9880 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
9881 else if (sc->sc_flags & WM_F_EEPROM_SPI)
9882 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
9883 else
9884 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
9885
9886 wm_nvm_release(sc);
9887 return rv;
9888 }
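
/*
 * Illustrative sketch, not part of the driver: wm_nvm_read() is the
 * single front end the rest of the driver uses, e.g. for the 82574
 * management-mode word checked further below.
 */
#if 0
	uint16_t cfg2;

	if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2) != 0)
		cfg2 = 0xffff;		/* treat as unprogrammed */
#endif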
9889
9890 /*
9891 * Hardware semaphores.
9892  * Very complex...
9893 */
9894
9895 static int
9896 wm_get_swsm_semaphore(struct wm_softc *sc)
9897 {
9898 int32_t timeout;
9899 uint32_t swsm;
9900
9901 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9902 /* Get the SW semaphore. */
9903 timeout = sc->sc_nvm_wordsize + 1;
9904 while (timeout) {
9905 swsm = CSR_READ(sc, WMREG_SWSM);
9906
9907 if ((swsm & SWSM_SMBI) == 0)
9908 break;
9909
9910 delay(50);
9911 timeout--;
9912 }
9913
9914 if (timeout == 0) {
9915 aprint_error_dev(sc->sc_dev,
9916 "could not acquire SWSM SMBI\n");
9917 return 1;
9918 }
9919 }
9920
9921 /* Get the FW semaphore. */
9922 timeout = sc->sc_nvm_wordsize + 1;
9923 while (timeout) {
9924 swsm = CSR_READ(sc, WMREG_SWSM);
9925 swsm |= SWSM_SWESMBI;
9926 CSR_WRITE(sc, WMREG_SWSM, swsm);
9927 /* If we managed to set the bit we got the semaphore. */
9928 swsm = CSR_READ(sc, WMREG_SWSM);
9929 if (swsm & SWSM_SWESMBI)
9930 break;
9931
9932 delay(50);
9933 timeout--;
9934 }
9935
9936 if (timeout == 0) {
9937 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
9938 /* Release semaphores */
9939 wm_put_swsm_semaphore(sc);
9940 return 1;
9941 }
9942 return 0;
9943 }
9944
9945 static void
9946 wm_put_swsm_semaphore(struct wm_softc *sc)
9947 {
9948 uint32_t swsm;
9949
9950 swsm = CSR_READ(sc, WMREG_SWSM);
9951 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
9952 CSR_WRITE(sc, WMREG_SWSM, swsm);
9953 }
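
/*
 * Illustrative sketch, not part of the driver: SMBI arbitrates among
 * software agents and SWESMBI between software and firmware; the
 * get/put pair above takes and drops both in order.
 */
#if 0
	if (wm_get_swsm_semaphore(sc) == 0) {
		/* ... exclusive access to the shared resource ... */
		wm_put_swsm_semaphore(sc);
	}
#endif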
9954
9955 static int
9956 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
9957 {
9958 uint32_t swfw_sync;
9959 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
9960 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
9961 	int timeout;
9962
9963 for (timeout = 0; timeout < 200; timeout++) {
9964 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9965 if (wm_get_swsm_semaphore(sc)) {
9966 aprint_error_dev(sc->sc_dev,
9967 "%s: failed to get semaphore\n",
9968 __func__);
9969 return 1;
9970 }
9971 }
9972 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
9973 if ((swfw_sync & (swmask | fwmask)) == 0) {
9974 swfw_sync |= swmask;
9975 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
9976 if (sc->sc_flags & WM_F_LOCK_SWSM)
9977 wm_put_swsm_semaphore(sc);
9978 return 0;
9979 }
9980 if (sc->sc_flags & WM_F_LOCK_SWSM)
9981 wm_put_swsm_semaphore(sc);
9982 delay(5000);
9983 }
9984 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
9985 device_xname(sc->sc_dev), mask, swfw_sync);
9986 return 1;
9987 }
9988
9989 static void
9990 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
9991 {
9992 uint32_t swfw_sync;
9993
9994 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9995 while (wm_get_swsm_semaphore(sc) != 0)
9996 continue;
9997 }
9998 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
9999 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
10000 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10001 if (sc->sc_flags & WM_F_LOCK_SWSM)
10002 wm_put_swsm_semaphore(sc);
10003 }
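
/*
 * Illustrative sketch, not part of the driver: SW_FW_SYNC carries one
 * software and one firmware bit per shared resource, so a PHY is
 * claimed with its own mask just as the EEPROM uses SWFW_EEP_SM.
 */
#if 0
	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM) == 0) {
		/* ... MDIC register traffic ... */
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
	}
#endif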
10004
10005 static int
10006 wm_get_swfwhw_semaphore(struct wm_softc *sc)
10007 {
10008 uint32_t ext_ctrl;
10009 	int timeout;
10010
10011 for (timeout = 0; timeout < 200; timeout++) {
10012 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10013 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
10014 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10015
10016 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10017 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
10018 return 0;
10019 delay(5000);
10020 }
10021 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
10022 device_xname(sc->sc_dev), ext_ctrl);
10023 return 1;
10024 }
10025
10026 static void
10027 wm_put_swfwhw_semaphore(struct wm_softc *sc)
10028 {
10029 uint32_t ext_ctrl;
10030 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10031 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10032 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10033 }
10034
10035 static int
10036 wm_get_hw_semaphore_82573(struct wm_softc *sc)
10037 {
10038 int i = 0;
10039 uint32_t reg;
10040
10041 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10042 do {
10043 CSR_WRITE(sc, WMREG_EXTCNFCTR,
10044 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
10045 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10046 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
10047 break;
10048 delay(2*1000);
10049 i++;
10050 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
10051
10052 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
10053 wm_put_hw_semaphore_82573(sc);
10054 log(LOG_ERR, "%s: Driver can't access the PHY\n",
10055 device_xname(sc->sc_dev));
10056 return -1;
10057 }
10058
10059 return 0;
10060 }
10061
10062 static void
10063 wm_put_hw_semaphore_82573(struct wm_softc *sc)
10064 {
10065 uint32_t reg;
10066
10067 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10068 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10069 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10070 }
10071
10072 /*
10073 * Management mode and power management related subroutines.
10074 * BMC, AMT, suspend/resume and EEE.
10075 */
10076
10077 static int
10078 wm_check_mng_mode(struct wm_softc *sc)
10079 {
10080 int rv;
10081
10082 switch (sc->sc_type) {
10083 case WM_T_ICH8:
10084 case WM_T_ICH9:
10085 case WM_T_ICH10:
10086 case WM_T_PCH:
10087 case WM_T_PCH2:
10088 case WM_T_PCH_LPT:
10089 rv = wm_check_mng_mode_ich8lan(sc);
10090 break;
10091 case WM_T_82574:
10092 case WM_T_82583:
10093 rv = wm_check_mng_mode_82574(sc);
10094 break;
10095 case WM_T_82571:
10096 case WM_T_82572:
10097 case WM_T_82573:
10098 case WM_T_80003:
10099 rv = wm_check_mng_mode_generic(sc);
10100 break;
10101 default:
10102 		/* nothing to do */
10103 rv = 0;
10104 break;
10105 }
10106
10107 return rv;
10108 }
10109
10110 static int
10111 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
10112 {
10113 uint32_t fwsm;
10114
10115 fwsm = CSR_READ(sc, WMREG_FWSM);
10116
10117 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
10118 return 1;
10119
10120 return 0;
10121 }
10122
10123 static int
10124 wm_check_mng_mode_82574(struct wm_softc *sc)
10125 {
10126 uint16_t data;
10127
10128 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
10129
10130 if ((data & NVM_CFG2_MNGM_MASK) != 0)
10131 return 1;
10132
10133 return 0;
10134 }
10135
10136 static int
10137 wm_check_mng_mode_generic(struct wm_softc *sc)
10138 {
10139 uint32_t fwsm;
10140
10141 fwsm = CSR_READ(sc, WMREG_FWSM);
10142
10143 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
10144 return 1;
10145
10146 return 0;
10147 }
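
/*
 * Illustrative sketch, not part of the driver: the FWSM tests above
 * are field comparisons; extracting the mode field first makes the
 * shifted-constant pattern explicit.
 */
#if 0
	uint32_t fwsm = CSR_READ(sc, WMREG_FWSM);
	uint32_t mode = (fwsm & FWSM_MODE_MASK) >> FWSM_MODE_SHIFT;
	int iamt = (mode == MNG_IAMT_MODE);	/* same test as above */
#endif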
10148
10149 static int
10150 wm_enable_mng_pass_thru(struct wm_softc *sc)
10151 {
10152 uint32_t manc, fwsm, factps;
10153
10154 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
10155 return 0;
10156
10157 manc = CSR_READ(sc, WMREG_MANC);
10158
10159 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
10160 device_xname(sc->sc_dev), manc));
10161 if ((manc & MANC_RECV_TCO_EN) == 0)
10162 return 0;
10163
10164 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
10165 fwsm = CSR_READ(sc, WMREG_FWSM);
10166 factps = CSR_READ(sc, WMREG_FACTPS);
10167 if (((factps & FACTPS_MNGCG) == 0)
10168 && ((fwsm & FWSM_MODE_MASK)
10169 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
10170 return 1;
10171 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
10172 uint16_t data;
10173
10174 factps = CSR_READ(sc, WMREG_FACTPS);
10175 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
10176 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
10177 device_xname(sc->sc_dev), factps, data));
10178 if (((factps & FACTPS_MNGCG) == 0)
10179 && ((data & NVM_CFG2_MNGM_MASK)
10180 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
10181 return 1;
10182 } else if (((manc & MANC_SMBUS_EN) != 0)
10183 && ((manc & MANC_ASF_EN) == 0))
10184 return 1;
10185
10186 return 0;
10187 }
10188
10189 static int
10190 wm_check_reset_block(struct wm_softc *sc)
10191 {
10192 uint32_t reg;
10193
10194 switch (sc->sc_type) {
10195 case WM_T_ICH8:
10196 case WM_T_ICH9:
10197 case WM_T_ICH10:
10198 case WM_T_PCH:
10199 case WM_T_PCH2:
10200 case WM_T_PCH_LPT:
10201 reg = CSR_READ(sc, WMREG_FWSM);
10202 if ((reg & FWSM_RSPCIPHY) != 0)
10203 return 0;
10204 else
10205 return -1;
10206 break;
10207 case WM_T_82571:
10208 case WM_T_82572:
10209 case WM_T_82573:
10210 case WM_T_82574:
10211 case WM_T_82583:
10212 case WM_T_80003:
10213 reg = CSR_READ(sc, WMREG_MANC);
10214 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
10215 return -1;
10216 else
10217 return 0;
10218 break;
10219 default:
10220 /* no problem */
10221 break;
10222 }
10223
10224 return 0;
10225 }
10226
10227 static void
10228 wm_get_hw_control(struct wm_softc *sc)
10229 {
10230 uint32_t reg;
10231
10232 switch (sc->sc_type) {
10233 case WM_T_82573:
10234 reg = CSR_READ(sc, WMREG_SWSM);
10235 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
10236 break;
10237 case WM_T_82571:
10238 case WM_T_82572:
10239 case WM_T_82574:
10240 case WM_T_82583:
10241 case WM_T_80003:
10242 case WM_T_ICH8:
10243 case WM_T_ICH9:
10244 case WM_T_ICH10:
10245 case WM_T_PCH:
10246 case WM_T_PCH2:
10247 case WM_T_PCH_LPT:
10248 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10249 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
10250 break;
10251 default:
10252 break;
10253 }
10254 }
10255
10256 static void
10257 wm_release_hw_control(struct wm_softc *sc)
10258 {
10259 uint32_t reg;
10260
10261 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
10262 return;
10263
10264 if (sc->sc_type == WM_T_82573) {
10265 reg = CSR_READ(sc, WMREG_SWSM);
10266 reg &= ~SWSM_DRV_LOAD;
10267 		CSR_WRITE(sc, WMREG_SWSM, reg);	/* DRV_LOAD already cleared */
10268 } else {
10269 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10270 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
10271 }
10272 }
10273
10274 static void
10275 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
10276 {
10277 uint32_t reg;
10278
10279 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10280
10281 if (on != 0)
10282 reg |= EXTCNFCTR_GATE_PHY_CFG;
10283 else
10284 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
10285
10286 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10287 }
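
/*
 * Illustrative sketch, not part of the driver: the gate brackets PHY
 * reconfiguration on an unmanaged 82579; wm_smbustopci() below only
 * performs the "on" half.
 */
#if 0
	wm_gate_hw_phy_config_ich8lan(sc, 1);	/* block HW auto-config */
	/* ... reprogram the PHY ... */
	wm_gate_hw_phy_config_ich8lan(sc, 0);	/* allow it again */
#endif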
10288
10289 static void
10290 wm_smbustopci(struct wm_softc *sc)
10291 {
10292 uint32_t fwsm;
10293
10294 fwsm = CSR_READ(sc, WMREG_FWSM);
10295 if (((fwsm & FWSM_FW_VALID) == 0)
10296 && ((wm_check_reset_block(sc) == 0))) {
10297 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
10298 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
10299 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10300 CSR_WRITE_FLUSH(sc);
10301 delay(10);
10302 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
10303 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10304 CSR_WRITE_FLUSH(sc);
10305 delay(50*1000);
10306
10307 /*
10308 * Gate automatic PHY configuration by hardware on non-managed
10309 * 82579
10310 */
10311 if (sc->sc_type == WM_T_PCH2)
10312 wm_gate_hw_phy_config_ich8lan(sc, 1);
10313 }
10314 }
10315
10316 static void
10317 wm_init_manageability(struct wm_softc *sc)
10318 {
10319
10320 if (sc->sc_flags & WM_F_HAS_MANAGE) {
10321 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
10322 uint32_t manc = CSR_READ(sc, WMREG_MANC);
10323
10324 /* Disable hardware interception of ARP */
10325 manc &= ~MANC_ARP_EN;
10326
10327 /* Enable receiving management packets to the host */
10328 if (sc->sc_type >= WM_T_82571) {
10329 manc |= MANC_EN_MNG2HOST;
10330 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
10331 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
10332 }
10333
10334 CSR_WRITE(sc, WMREG_MANC, manc);
10335 }
10336 }
10337
10338 static void
10339 wm_release_manageability(struct wm_softc *sc)
10340 {
10341
10342 if (sc->sc_flags & WM_F_HAS_MANAGE) {
10343 uint32_t manc = CSR_READ(sc, WMREG_MANC);
10344
10345 manc |= MANC_ARP_EN;
10346 if (sc->sc_type >= WM_T_82571)
10347 manc &= ~MANC_EN_MNG2HOST;
10348
10349 CSR_WRITE(sc, WMREG_MANC, manc);
10350 }
10351 }
10352
10353 static void
10354 wm_get_wakeup(struct wm_softc *sc)
10355 {
10356
10357 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
10358 switch (sc->sc_type) {
10359 case WM_T_82573:
10360 case WM_T_82583:
10361 sc->sc_flags |= WM_F_HAS_AMT;
10362 /* FALLTHROUGH */
10363 case WM_T_80003:
10364 case WM_T_82541:
10365 case WM_T_82547:
10366 case WM_T_82571:
10367 case WM_T_82572:
10368 case WM_T_82574:
10369 case WM_T_82575:
10370 case WM_T_82576:
10371 case WM_T_82580:
10372 case WM_T_I350:
10373 case WM_T_I354:
10374 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
10375 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
10376 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
10377 break;
10378 case WM_T_ICH8:
10379 case WM_T_ICH9:
10380 case WM_T_ICH10:
10381 case WM_T_PCH:
10382 case WM_T_PCH2:
10383 case WM_T_PCH_LPT:
10384 sc->sc_flags |= WM_F_HAS_AMT;
10385 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
10386 break;
10387 default:
10388 break;
10389 }
10390
10391 /* 1: HAS_MANAGE */
10392 if (wm_enable_mng_pass_thru(sc) != 0)
10393 sc->sc_flags |= WM_F_HAS_MANAGE;
10394
10395 #ifdef WM_DEBUG
10396 printf("\n");
10397 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
10398 printf("HAS_AMT,");
10399 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
10400 printf("ARC_SUBSYS_VALID,");
10401 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
10402 printf("ASF_FIRMWARE_PRES,");
10403 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
10404 printf("HAS_MANAGE,");
10405 printf("\n");
10406 #endif
10407 	/*
10408 	 * Note that the WOL flags are set later, after the EEPROM-related
10409 	 * reset and read have completed.
10410 	 */
10411 }
10412
10413 #ifdef WM_WOL
10414 /* WOL in the newer chipset interfaces (pchlan) */
10415 static void
10416 wm_enable_phy_wakeup(struct wm_softc *sc)
10417 {
10418 #if 0
10419 uint16_t preg;
10420
10421 /* Copy MAC RARs to PHY RARs */
10422
10423 /* Copy MAC MTA to PHY MTA */
10424
10425 /* Configure PHY Rx Control register */
10426
10427 /* Enable PHY wakeup in MAC register */
10428
10429 /* Configure and enable PHY wakeup in PHY registers */
10430
10431 /* Activate PHY wakeup */
10432
10433 /* XXX */
10434 #endif
10435 }
10436
10437 /* Power down workaround on D3 */
10438 static void
10439 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
10440 {
10441 uint32_t reg;
10442 int i;
10443
10444 for (i = 0; i < 2; i++) {
10445 /* Disable link */
10446 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10447 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
10448 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10449
10450 /*
10451 * Call gig speed drop workaround on Gig disable before
10452 * accessing any PHY registers
10453 */
10454 if (sc->sc_type == WM_T_ICH8)
10455 wm_gig_downshift_workaround_ich8lan(sc);
10456
10457 /* Write VR power-down enable */
10458 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
10459 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
10460 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
10461 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
10462
10463 /* Read it back and test */
10464 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
10465 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
10466 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
10467 break;
10468
10469 /* Issue PHY reset and repeat at most one more time */
10470 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10471 }
10472 }
10473
10474 static void
10475 wm_enable_wakeup(struct wm_softc *sc)
10476 {
10477 uint32_t reg, pmreg;
10478 pcireg_t pmode;
10479
10480 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
10481 &pmreg, NULL) == 0)
10482 return;
10483
10484 /* Advertise the wakeup capability */
10485 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
10486 | CTRL_SWDPIN(3));
10487 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
10488
10489 /* ICH workaround */
10490 switch (sc->sc_type) {
10491 case WM_T_ICH8:
10492 case WM_T_ICH9:
10493 case WM_T_ICH10:
10494 case WM_T_PCH:
10495 case WM_T_PCH2:
10496 case WM_T_PCH_LPT:
10497 /* Disable gig during WOL */
10498 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10499 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
10500 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10501 if (sc->sc_type == WM_T_PCH)
10502 wm_gmii_reset(sc);
10503
10504 /* Power down workaround */
10505 if (sc->sc_phytype == WMPHY_82577) {
10506 struct mii_softc *child;
10507
10508 /* Assume that the PHY is copper */
10509 child = LIST_FIRST(&sc->sc_mii.mii_phys);
10510 if (child->mii_mpd_rev <= 2)
10511 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
10512 (768 << 5) | 25, 0x0444); /* magic num */
10513 }
10514 break;
10515 default:
10516 break;
10517 }
10518
10519 /* Keep the laser running on fiber adapters */
10520 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
10521 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
10522 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10523 reg |= CTRL_EXT_SWDPIN(3);
10524 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10525 }
10526
10527 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
10528 #if 0 /* for the multicast packet */
10529 reg |= WUFC_MC;
10530 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
10531 #endif
10532
10533 if (sc->sc_type == WM_T_PCH) {
10534 wm_enable_phy_wakeup(sc);
10535 } else {
10536 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
10537 CSR_WRITE(sc, WMREG_WUFC, reg);
10538 }
10539
10540 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10541 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10542 || (sc->sc_type == WM_T_PCH2))
10543 && (sc->sc_phytype == WMPHY_IGP_3))
10544 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
10545
10546 /* Request PME */
10547 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
10548 #if 0
10549 /* Disable WOL */
10550 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
10551 #else
10552 /* For WOL */
10553 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
10554 #endif
10555 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
10556 }
10557 #endif /* WM_WOL */
10558
10559 /* EEE */
10560
10561 static void
10562 wm_set_eee_i350(struct wm_softc *sc)
10563 {
10564 uint32_t ipcnfg, eeer;
10565
10566 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
10567 eeer = CSR_READ(sc, WMREG_EEER);
10568
10569 if ((sc->sc_flags & WM_F_EEE) != 0) {
10570 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
10571 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
10572 | EEER_LPI_FC);
10573 } else {
10574 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
10575 ipcnfg &= ~IPCNFG_10BASE_TE;
10576 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
10577 | EEER_LPI_FC);
10578 }
10579
10580 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
10581 CSR_WRITE(sc, WMREG_EEER, eeer);
10582 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
10583 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
10584 }
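
/*
 * Illustrative sketch, not part of the driver: EEE is driven entirely
 * by the WM_F_EEE flag, so enabling or disabling it at runtime is a
 * flag flip plus a call to the routine above.
 */
#if 0
	sc->sc_flags &= ~WM_F_EEE;	/* or |= WM_F_EEE to enable */
	wm_set_eee_i350(sc);
#endif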
10585
10586 /*
10587 * Workarounds (mainly PHY related).
10588 * Basically, PHY's workarounds are in the PHY drivers.
10589 */
10590
10591 /* Work-around for 82566 Kumeran PCS lock loss */
10592 static void
10593 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
10594 {
10595 int miistatus, active, i;
10596 int reg;
10597
10598 miistatus = sc->sc_mii.mii_media_status;
10599
10600 /* If the link is not up, do nothing */
10601 	if ((miistatus & IFM_ACTIVE) == 0)
10602 return;
10603
10604 active = sc->sc_mii.mii_media_active;
10605
10606 	/* Nothing to do unless the link speed is 1Gbps */
10607 if (IFM_SUBTYPE(active) != IFM_1000_T)
10608 return;
10609
10610 for (i = 0; i < 10; i++) {
10611 /* read twice */
10612 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
10613 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
10614 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
10615 goto out; /* GOOD! */
10616
10617 /* Reset the PHY */
10618 wm_gmii_reset(sc);
10619 delay(5*1000);
10620 }
10621
10622 /* Disable GigE link negotiation */
10623 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10624 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
10625 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10626
10627 /*
10628 * Call gig speed drop workaround on Gig disable before accessing
10629 * any PHY registers.
10630 */
10631 wm_gig_downshift_workaround_ich8lan(sc);
10632
10633 out:
10634 return;
10635 }
10636
10637 /* Workaround: without this, WOL from S5 stops working */
10638 static void
10639 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
10640 {
10641 uint16_t kmrn_reg;
10642
10643 /* Only for igp3 */
10644 if (sc->sc_phytype == WMPHY_IGP_3) {
10645 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
10646 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
10647 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
10648 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
10649 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
10650 }
10651 }
10652
10653 /*
10654 * Workaround for pch's PHYs
10655 * XXX should be moved to new PHY driver?
10656 */
10657 static void
10658 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
10659 {
10660 if (sc->sc_phytype == WMPHY_82577)
10661 wm_set_mdio_slow_mode_hv(sc);
10662
10663 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
10664
10665 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
10666
10667 /* 82578 */
10668 if (sc->sc_phytype == WMPHY_82578) {
10669 /* PCH rev. < 3 */
10670 if (sc->sc_rev < 3) {
10671 /* XXX 6 bit shift? Why? Is it page2? */
10672 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
10673 0x66c0);
10674 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
10675 0xffff);
10676 }
10677
10678 /* XXX phy rev. < 2 */
10679 }
10680
10681 /* Select page 0 */
10682
10683 /* XXX acquire semaphore */
10684 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
10685 /* XXX release semaphore */
10686
10687 /*
10688 * Configure the K1 Si workaround during phy reset assuming there is
10689 * link so that it disables K1 if link is in 1Gbps.
10690 */
10691 wm_k1_gig_workaround_hv(sc, 1);
10692 }
10693
10694 static void
10695 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
10696 {
10697
10698 wm_set_mdio_slow_mode_hv(sc);
10699 }
10700
10701 static void
10702 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
10703 {
10704 int k1_enable = sc->sc_nvm_k1_enabled;
10705
10706 /* XXX acquire semaphore */
10707
10708 if (link) {
10709 k1_enable = 0;
10710
10711 /* Link stall fix for link up */
10712 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
10713 } else {
10714 /* Link stall fix for link down */
10715 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
10716 }
10717
10718 wm_configure_k1_ich8lan(sc, k1_enable);
10719
10720 /* XXX release semaphore */
10721 }
10722
10723 static void
10724 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
10725 {
10726 uint32_t reg;
10727
10728 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
10729 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
10730 reg | HV_KMRN_MDIO_SLOW);
10731 }
10732
10733 static void
10734 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
10735 {
10736 uint32_t ctrl, ctrl_ext, tmp;
10737 uint16_t kmrn_reg;
10738
10739 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
10740
10741 if (k1_enable)
10742 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
10743 else
10744 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
10745
10746 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
10747
10748 delay(20);
10749
10750 ctrl = CSR_READ(sc, WMREG_CTRL);
10751 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10752
10753 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
10754 tmp |= CTRL_FRCSPD;
10755
10756 CSR_WRITE(sc, WMREG_CTRL, tmp);
10757 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
10758 CSR_WRITE_FLUSH(sc);
10759 delay(20);
10760
10761 CSR_WRITE(sc, WMREG_CTRL, ctrl);
10762 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10763 CSR_WRITE_FLUSH(sc);
10764 delay(20);
10765 }
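
/*
 * Illustrative sketch, not part of the driver: K1 must stay off while
 * a 1Gbps link is up, which is why wm_k1_gig_workaround_hv() above
 * forces k1_enable to 0 in the link-up path.
 */
#if 0
	wm_configure_k1_ich8lan(sc, 0);			    /* link up at 1G */
	wm_configure_k1_ich8lan(sc, sc->sc_nvm_k1_enabled); /* link down */
#endif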
10766
10767 /* Special case: the 82575 needs manual post-reset init ... */
10768 static void
10769 wm_reset_init_script_82575(struct wm_softc *sc)
10770 {
10771 /*
10772 	 * Remark: this is untested code - we have no board without EEPROM;
10773 	 * it is the same setup as in the FreeBSD driver for the i82575.
10774 */
10775
10776 /* SerDes configuration via SERDESCTRL */
10777 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
10778 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
10779 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
10780 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
10781
10782 /* CCM configuration via CCMCTL register */
10783 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
10784 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
10785
10786 /* PCIe lanes configuration */
10787 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
10788 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
10789 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
10790 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
10791
10792 /* PCIe PLL Configuration */
10793 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
10794 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
10795 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
10796 }
10797
10798 static void
10799 wm_reset_mdicnfg_82580(struct wm_softc *sc)
10800 {
10801 uint32_t reg;
10802 uint16_t nvmword;
10803 int rv;
10804
10805 if ((sc->sc_flags & WM_F_SGMII) == 0)
10806 return;
10807
10808 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
10809 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
10810 if (rv != 0) {
10811 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
10812 __func__);
10813 return;
10814 }
10815
10816 reg = CSR_READ(sc, WMREG_MDICNFG);
10817 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
10818 reg |= MDICNFG_DEST;
10819 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
10820 reg |= MDICNFG_COM_MDIO;
10821 CSR_WRITE(sc, WMREG_MDICNFG, reg);
10822 }
10823
10824 /*
10825 * I210 Errata 25 and I211 Errata 10
10826 * Slow System Clock.
10827 */
10828 static void
10829 wm_pll_workaround_i210(struct wm_softc *sc)
10830 {
10831 uint32_t mdicnfg, wuc;
10832 uint32_t reg;
10833 pcireg_t pcireg;
10834 uint32_t pmreg;
10835 uint16_t nvmword, tmp_nvmword;
10836 int phyval;
10837 bool wa_done = false;
10838 int i;
10839
10840 /* Save WUC and MDICNFG registers */
10841 wuc = CSR_READ(sc, WMREG_WUC);
10842 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
10843
10844 reg = mdicnfg & ~MDICNFG_DEST;
10845 CSR_WRITE(sc, WMREG_MDICNFG, reg);
10846
10847 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
10848 nvmword = INVM_DEFAULT_AL;
10849 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
10850
10851 /* Get Power Management cap offset */
10852 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
10853 &pmreg, NULL) == 0)
10854 return;
10855 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
10856 phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
10857 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
10858
10859 if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
10860 break; /* OK */
10861 }
10862
10863 wa_done = true;
10864 /* Directly reset the internal PHY */
10865 reg = CSR_READ(sc, WMREG_CTRL);
10866 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
10867
10868 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10869 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
10870 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10871
10872 CSR_WRITE(sc, WMREG_WUC, 0);
10873 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
10874 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
10875
10876 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
10877 pmreg + PCI_PMCSR);
10878 pcireg |= PCI_PMCSR_STATE_D3;
10879 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
10880 pmreg + PCI_PMCSR, pcireg);
10881 delay(1000);
10882 pcireg &= ~PCI_PMCSR_STATE_D3;
10883 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
10884 pmreg + PCI_PMCSR, pcireg);
10885
10886 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
10887 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
10888
10889 /* Restore WUC register */
10890 CSR_WRITE(sc, WMREG_WUC, wuc);
10891 }
10892
10893 /* Restore MDICNFG setting */
10894 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
10895 if (wa_done)
10896 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
10897 }
10898