1 /* $NetBSD: if_wm.c,v 1.350 2015/09/30 04:28:04 msaitoh Exp $ */
2
3 /*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*******************************************************************************
39
40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved.
42
43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met:
45
46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer.
48
49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution.
52
53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission.
56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE.
68
69 *******************************************************************************/
70 /*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 *
73 * TODO (in order of importance):
74 *
75 * - Check XXX'ed comments
76 * - EEE (Energy Efficient Ethernet)
77 * - Multi queue
78 * - Image Unique ID
79 * - LPLU other than PCH*
80 * - Virtual Function
81 * - Set LED correctly (based on contents in EEPROM)
82 * - Rework how parameters are loaded from the EEPROM.
83 */
84
85 #include <sys/cdefs.h>
86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.350 2015/09/30 04:28:04 msaitoh Exp $");
87
88 #ifdef _KERNEL_OPT
89 #include "opt_net_mpsafe.h"
90 #endif
91
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/callout.h>
95 #include <sys/mbuf.h>
96 #include <sys/malloc.h>
97 #include <sys/kernel.h>
98 #include <sys/socket.h>
99 #include <sys/ioctl.h>
100 #include <sys/errno.h>
101 #include <sys/device.h>
102 #include <sys/queue.h>
103 #include <sys/syslog.h>
104 #include <sys/interrupt.h>
105
106 #include <sys/rndsource.h>
107
108 #include <net/if.h>
109 #include <net/if_dl.h>
110 #include <net/if_media.h>
111 #include <net/if_ether.h>
112
113 #include <net/bpf.h>
114
115 #include <netinet/in.h> /* XXX for struct ip */
116 #include <netinet/in_systm.h> /* XXX for struct ip */
117 #include <netinet/ip.h> /* XXX for struct ip */
118 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */
119 #include <netinet/tcp.h> /* XXX for struct tcphdr */
120
121 #include <sys/bus.h>
122 #include <sys/intr.h>
123 #include <machine/endian.h>
124
125 #include <dev/mii/mii.h>
126 #include <dev/mii/miivar.h>
127 #include <dev/mii/miidevs.h>
128 #include <dev/mii/mii_bitbang.h>
129 #include <dev/mii/ikphyreg.h>
130 #include <dev/mii/igphyreg.h>
131 #include <dev/mii/igphyvar.h>
132 #include <dev/mii/inbmphyreg.h>
133
134 #include <dev/pci/pcireg.h>
135 #include <dev/pci/pcivar.h>
136 #include <dev/pci/pcidevs.h>
137
138 #include <dev/pci/if_wmreg.h>
139 #include <dev/pci/if_wmvar.h>
140
141 #ifdef WM_DEBUG
142 #define WM_DEBUG_LINK 0x01
143 #define WM_DEBUG_TX 0x02
144 #define WM_DEBUG_RX 0x04
145 #define WM_DEBUG_GMII 0x08
146 #define WM_DEBUG_MANAGE 0x10
147 #define WM_DEBUG_NVM 0x20
148 int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
149 | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
150
151 #define DPRINTF(x, y) if (wm_debug & (x)) printf y
152 #else
153 #define DPRINTF(x, y) /* nothing */
154 #endif /* WM_DEBUG */
155
156 #ifdef NET_MPSAFE
157 #define WM_MPSAFE 1
158 #endif
159
160 #ifdef __HAVE_PCI_MSI_MSIX
161 #define WM_MSI_MSIX 1 /* Enable by default */
162 #endif
163
164 /*
165 * This driver splits the interrupts into TX, RX and link state.
166 * The MSI-X vector index for each of them is given below.
167 */
168 #define WM_MSIX_NINTR 3
169 #define WM_MSIX_TXINTR_IDX 0
170 #define WM_MSIX_RXINTR_IDX 1
171 #define WM_MSIX_LINKINTR_IDX 2
172 #define WM_MAX_NINTR WM_MSIX_NINTR
173
174 /*
175 * This driver assigns each interrupt to a CPU as below (round-robin).
176 * If the number of CPUs is less than the number of interrupts, the
177 * driver uses the same CPU for multiple interrupts.
178 */
179 #define WM_MSIX_TXINTR_CPUID 0
180 #define WM_MSIX_RXINTR_CPUID 1
181 #define WM_MSIX_LINKINTR_CPUID 2
182
183 /*
184 * Transmit descriptor list size. Due to errata, we can only have
185 * 256 hardware descriptors in the ring on < 82544, but we use 4096
186 * on >= 82544. We tell the upper layers that they can queue a lot
187 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
188 * of them at a time.
189 *
190 * We allow up to 256 (!) DMA segments per packet. Pathological packet
191 * chains containing many small mbufs have been observed in zero-copy
192 * situations with jumbo frames.
193 */
194 #define WM_NTXSEGS 256
195 #define WM_IFQUEUELEN 256
196 #define WM_TXQUEUELEN_MAX 64
197 #define WM_TXQUEUELEN_MAX_82547 16
198 #define WM_TXQUEUELEN(sc) ((sc)->sc_txnum)
199 #define WM_TXQUEUELEN_MASK(sc) (WM_TXQUEUELEN(sc) - 1)
200 #define WM_TXQUEUE_GC(sc) (WM_TXQUEUELEN(sc) / 8)
201 #define WM_NTXDESC_82542 256
202 #define WM_NTXDESC_82544 4096
203 #define WM_NTXDESC(sc) ((sc)->sc_ntxdesc)
204 #define WM_NTXDESC_MASK(sc) (WM_NTXDESC(sc) - 1)
205 #define WM_TXDESCSIZE(sc) (WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
206 #define WM_NEXTTX(sc, x) (((x) + 1) & WM_NTXDESC_MASK(sc))
207 #define WM_NEXTTXS(sc, x) (((x) + 1) & WM_TXQUEUELEN_MASK(sc))
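/*
 * Because WM_NTXDESC(sc) and WM_TXQUEUELEN(sc) are powers of two, the
 * WM_NEXTTX()/WM_NEXTTXS() macros can wrap a ring index with a cheap
 * AND instead of a modulo. A minimal sketch of a producer walking the
 * descriptor ring with these macros (illustrative only, not driver
 * code):
 *
 *	int idx = sc->sc_txnext;
 *	for (int seg = 0; seg < nsegs; seg++) {
 *		// fill sc->sc_txdescs[idx] from dmamap->dm_segs[seg]
 *		idx = WM_NEXTTX(sc, idx);  // (idx + 1) & (ntxdesc - 1)
 *	}
 *	sc->sc_txfree -= nsegs;
 *	sc->sc_txnext = idx;
 */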
208
209 #define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */
210
211 /*
212 * Receive descriptor list size. We have one Rx buffer for normal
213 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
214 * packet. We allocate 256 receive descriptors, each with a 2k
215 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
216 */
217 #define WM_NRXDESC 256
218 #define WM_NRXDESC_MASK (WM_NRXDESC - 1)
219 #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
220 #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
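/*
 * The "room for 50 jumbo packets" above is plain buffer arithmetic
 * (assuming the usual ~9k jumbo frame size): ceil(9018 / 2048) = 5
 * MCLBYTES buffers per frame, and 256 descriptors / 5 buffers per
 * packet is roughly 50 packets.
 */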
221
222 /*
223 * Control structures are DMA'd to the i82542 chip. We allocate them in
224 * a single clump that maps to a single DMA segment to make several things
225 * easier.
226 */
227 struct wm_control_data_82544 {
228 /*
229 * The receive descriptors.
230 */
231 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
232
233 /*
234 * The transmit descriptors. Put these at the end, because
235 * we might use a smaller number of them.
236 */
237 union {
238 wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
239 nq_txdesc_t wcdu_nq_txdescs[WM_NTXDESC_82544];
240 } wdc_u;
241 };
242
243 struct wm_control_data_82542 {
244 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
245 wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
246 };
247
248 #define WM_CDOFF(x) offsetof(struct wm_control_data_82544, x)
249 #define WM_CDTXOFF(x) WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
250 #define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)])
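/*
 * All descriptors live in one wm_control_data_82544 clump behind a
 * single DMA map, so a descriptor's bus address is just the base
 * address of that map plus the structure offset computed here. A
 * hypothetical helper showing the computation these offsets feed into
 * (a sketch; WM_CDTXADDR() further below is the real equivalent):
 *
 *	bus_addr_t
 *	wm_txdesc_busaddr(struct wm_softc *sc, int idx)
 *	{
 *		// sc_cddma is the DMA segment base address
 *		return sc->sc_cddma + WM_CDTXOFF(idx);
 *	}
 */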
251
252 /*
253 * Software state for transmit jobs.
254 */
255 struct wm_txsoft {
256 struct mbuf *txs_mbuf; /* head of our mbuf chain */
257 bus_dmamap_t txs_dmamap; /* our DMA map */
258 int txs_firstdesc; /* first descriptor in packet */
259 int txs_lastdesc; /* last descriptor in packet */
260 int txs_ndesc; /* # of descriptors used */
261 };
262
263 /*
264 * Software state for receive buffers. Each descriptor gets a
265 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
266 * more than one buffer, we chain them together.
267 */
268 struct wm_rxsoft {
269 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
270 bus_dmamap_t rxs_dmamap; /* our DMA map */
271 };
272
273 #define WM_LINKUP_TIMEOUT 50
274
275 static uint16_t swfwphysem[] = {
276 SWFW_PHY0_SM,
277 SWFW_PHY1_SM,
278 SWFW_PHY2_SM,
279 SWFW_PHY3_SM
280 };
281
282 static const uint32_t wm_82580_rxpbs_table[] = {
283 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
284 };
285
286 /*
287 * Software state per device.
288 */
289 struct wm_softc {
290 device_t sc_dev; /* generic device information */
291 bus_space_tag_t sc_st; /* bus space tag */
292 bus_space_handle_t sc_sh; /* bus space handle */
293 bus_size_t sc_ss; /* bus space size */
294 bus_space_tag_t sc_iot; /* I/O space tag */
295 bus_space_handle_t sc_ioh; /* I/O space handle */
296 bus_size_t sc_ios; /* I/O space size */
297 bus_space_tag_t sc_flasht; /* flash registers space tag */
298 bus_space_handle_t sc_flashh; /* flash registers space handle */
299 bus_size_t sc_flashs; /* flash registers space size */
300 bus_dma_tag_t sc_dmat; /* bus DMA tag */
301
302 struct ethercom sc_ethercom; /* ethernet common data */
303 struct mii_data sc_mii; /* MII/media information */
304
305 pci_chipset_tag_t sc_pc;
306 pcitag_t sc_pcitag;
307 int sc_bus_speed; /* PCI/PCIX bus speed */
308 int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */
309
310 uint16_t sc_pcidevid; /* PCI device ID */
311 wm_chip_type sc_type; /* MAC type */
312 int sc_rev; /* MAC revision */
313 wm_phy_type sc_phytype; /* PHY type */
314 uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/
315 #define WM_MEDIATYPE_UNKNOWN 0x00
316 #define WM_MEDIATYPE_FIBER 0x01
317 #define WM_MEDIATYPE_COPPER 0x02
318 #define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */
319 int sc_funcid; /* unit number of the chip (0 to 3) */
320 int sc_flags; /* flags; see below */
321 int sc_if_flags; /* last if_flags */
322 int sc_flowflags; /* 802.3x flow control flags */
323 int sc_align_tweak;
324
325 void *sc_ihs[WM_MAX_NINTR]; /*
326 * interrupt cookie.
327 * legacy and msi use sc_ihs[0].
328 */
329 pci_intr_handle_t *sc_intrs; /* legacy and msi use sc_intrs[0] */
330 int sc_nintrs; /* number of interrupts */
331
332 callout_t sc_tick_ch; /* tick callout */
333 bool sc_stopping;
334
335 int sc_nvm_ver_major;
336 int sc_nvm_ver_minor;
337 int sc_nvm_ver_build;
338 int sc_nvm_addrbits; /* NVM address bits */
339 unsigned int sc_nvm_wordsize; /* NVM word size */
340 int sc_ich8_flash_base;
341 int sc_ich8_flash_bank_size;
342 int sc_nvm_k1_enabled;
343
344 /* Software state for the transmit and receive descriptors. */
345 int sc_txnum; /* must be a power of two */
346 struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
347 struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
348
349 /* Control data structures. */
350 int sc_ntxdesc; /* must be a power of two */
351 struct wm_control_data_82544 *sc_control_data;
352 bus_dmamap_t sc_cddmamap; /* control data DMA map */
353 bus_dma_segment_t sc_cd_seg; /* control data segment */
354 int sc_cd_rseg; /* actual number of control data segments */
355 size_t sc_cd_size; /* control data size */
356 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
357 #define sc_txdescs sc_control_data->wdc_u.wcdu_txdescs
358 #define sc_nq_txdescs sc_control_data->wdc_u.wcdu_nq_txdescs
359 #define sc_rxdescs sc_control_data->wcd_rxdescs
360
361 #ifdef WM_EVENT_COUNTERS
362 /* Event counters. */
363 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
364 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
365 struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
366 struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */
367 struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */
368 struct evcnt sc_ev_rxintr; /* Rx interrupts */
369 struct evcnt sc_ev_linkintr; /* Link interrupts */
370
371 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
372 struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */
373 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
374 struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */
375 struct evcnt sc_ev_txtusum6; /* TCP/UDP v6 cksums comp. out-bound */
376 struct evcnt sc_ev_txtso; /* TCP seg offload out-bound (IPv4) */
377 struct evcnt sc_ev_txtso6; /* TCP seg offload out-bound (IPv6) */
378 struct evcnt sc_ev_txtsopain; /* painful header manip. for TSO */
379
380 struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
381 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */
382
383 struct evcnt sc_ev_tu; /* Tx underrun */
384
385 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */
386 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */
387 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */
388 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */
389 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */
390 #endif /* WM_EVENT_COUNTERS */
391
392 bus_addr_t sc_tdt_reg; /* offset of TDT register */
393
394 int sc_txfree; /* number of free Tx descriptors */
395 int sc_txnext; /* next ready Tx descriptor */
396
397 int sc_txsfree; /* number of free Tx jobs */
398 int sc_txsnext; /* next free Tx job */
399 int sc_txsdirty; /* dirty Tx jobs */
400
401 /* These 5 variables are used only on the 82547. */
402 int sc_txfifo_size; /* Tx FIFO size */
403 int sc_txfifo_head; /* current head of FIFO */
404 uint32_t sc_txfifo_addr; /* internal address of start of FIFO */
405 int sc_txfifo_stall; /* Tx FIFO is stalled */
406 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */
407
408 bus_addr_t sc_rdt_reg; /* offset of RDT register */
409
410 int sc_rxptr; /* next ready Rx descriptor/queue ent */
411 int sc_rxdiscard;
412 int sc_rxlen;
413 struct mbuf *sc_rxhead;
414 struct mbuf *sc_rxtail;
415 struct mbuf **sc_rxtailp;
416
417 uint32_t sc_ctrl; /* prototype CTRL register */
418 #if 0
419 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
420 #endif
421 uint32_t sc_icr; /* prototype interrupt bits */
422 uint32_t sc_itr; /* prototype intr throttling reg */
423 uint32_t sc_tctl; /* prototype TCTL register */
424 uint32_t sc_rctl; /* prototype RCTL register */
425 uint32_t sc_txcw; /* prototype TXCW register */
426 uint32_t sc_tipg; /* prototype TIPG register */
427 uint32_t sc_fcrtl; /* prototype FCRTL register */
428 uint32_t sc_pba; /* prototype PBA register */
429
430 int sc_tbi_linkup; /* TBI link status */
431 int sc_tbi_serdes_anegticks; /* autonegotiation ticks */
432 int sc_tbi_serdes_ticks; /* tbi ticks */
433
434 int sc_mchash_type; /* multicast filter offset */
435
436 krndsource_t rnd_source; /* random source */
437
438 kmutex_t *sc_tx_lock; /* lock for tx operations */
439 kmutex_t *sc_rx_lock; /* lock for rx operations */
440 };
441
442 #define WM_TX_LOCK(_sc) if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
443 #define WM_TX_UNLOCK(_sc) if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
444 #define WM_TX_LOCKED(_sc) (!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
445 #define WM_RX_LOCK(_sc) if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
446 #define WM_RX_UNLOCK(_sc) if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
447 #define WM_RX_LOCKED(_sc) (!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
448 #define WM_BOTH_LOCK(_sc) do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
449 #define WM_BOTH_UNLOCK(_sc) do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
450 #define WM_BOTH_LOCKED(_sc) (WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
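/*
 * These degrade to no-ops when the mutexes were never allocated (the
 * non-WM_MPSAFE case). The intended usage pattern, sketched here
 * (illustrative, not an actual driver entry point):
 *
 *	WM_TX_LOCK(sc);
 *	if (!sc->sc_stopping)
 *		wm_start_locked(ifp);	// caller must hold the Tx lock
 *	WM_TX_UNLOCK(sc);
 *
 * Note that WM_BOTH_LOCK() takes Tx before Rx and WM_BOTH_UNLOCK()
 * releases in the reverse order, keeping one consistent lock ordering.
 */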
451
452 #ifdef WM_MPSAFE
453 #define CALLOUT_FLAGS CALLOUT_MPSAFE
454 #else
455 #define CALLOUT_FLAGS 0
456 #endif
457
458 #define WM_RXCHAIN_RESET(sc) \
459 do { \
460 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
461 *(sc)->sc_rxtailp = NULL; \
462 (sc)->sc_rxlen = 0; \
463 } while (/*CONSTCOND*/0)
464
465 #define WM_RXCHAIN_LINK(sc, m) \
466 do { \
467 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
468 (sc)->sc_rxtailp = &(m)->m_next; \
469 } while (/*CONSTCOND*/0)
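/*
 * sc_rxtailp always points at the m_next field that should receive
 * the next mbuf, so WM_RXCHAIN_LINK() appends in O(1) without walking
 * the chain. How a multi-buffer packet accretes (illustrative only):
 *
 *	WM_RXCHAIN_RESET(sc);		// sc_rxhead = NULL, sc_rxlen = 0
 *	WM_RXCHAIN_LINK(sc, m1);	// sc_rxhead = m1
 *	WM_RXCHAIN_LINK(sc, m2);	// m1->m_next = m2
 *	// on EOP, hand sc->sc_rxhead up the stack, then reset again
 */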
470
471 #ifdef WM_EVENT_COUNTERS
472 #define WM_EVCNT_INCR(ev) (ev)->ev_count++
473 #define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val)
474 #else
475 #define WM_EVCNT_INCR(ev) /* nothing */
476 #define WM_EVCNT_ADD(ev, val) /* nothing */
477 #endif
478
479 #define CSR_READ(sc, reg) \
480 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
481 #define CSR_WRITE(sc, reg, val) \
482 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
483 #define CSR_WRITE_FLUSH(sc) \
484 (void) CSR_READ((sc), WMREG_STATUS)
485
486 #define ICH8_FLASH_READ32(sc, reg) \
487 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
488 #define ICH8_FLASH_WRITE32(sc, reg, data) \
489 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
490
491 #define ICH8_FLASH_READ16(sc, reg) \
492 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
493 #define ICH8_FLASH_WRITE16(sc, reg, data) \
494 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
495
496 #define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x)))
497 #define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x)))
498
499 #define WM_CDTXADDR_LO(sc, x) (WM_CDTXADDR((sc), (x)) & 0xffffffffU)
500 #define WM_CDTXADDR_HI(sc, x) \
501 (sizeof(bus_addr_t) == 8 ? \
502 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
503
504 #define WM_CDRXADDR_LO(sc, x) (WM_CDRXADDR((sc), (x)) & 0xffffffffU)
505 #define WM_CDRXADDR_HI(sc, x) \
506 (sizeof(bus_addr_t) == 8 ? \
507 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
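/*
 * On platforms with a 64-bit bus_addr_t the _HI macros carry the
 * upper 32 bits into the chip's 64-bit address fields; on 32-bit
 * platforms they compile to a constant 0 and the sizeof() test folds
 * away. For example, if WM_CDTXADDR(sc, 0) were 0x123456000,
 * WM_CDTXADDR_HI would yield 0x1 and WM_CDTXADDR_LO 0x23456000.
 */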
508
509 #define WM_CDTXSYNC(sc, x, n, ops) \
510 do { \
511 int __x, __n; \
512 \
513 __x = (x); \
514 __n = (n); \
515 \
516 /* If it will wrap around, sync to the end of the ring. */ \
517 if ((__x + __n) > WM_NTXDESC(sc)) { \
518 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
519 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
520 (WM_NTXDESC(sc) - __x), (ops)); \
521 __n -= (WM_NTXDESC(sc) - __x); \
522 __x = 0; \
523 } \
524 \
525 /* Now sync whatever is left. */ \
526 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
527 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
528 } while (/*CONSTCOND*/0)
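/*
 * Example of the wrap case handled above, assuming a 256-descriptor
 * ring: WM_CDTXSYNC(sc, 250, 10, ops) issues two bus_dmamap_sync()
 * calls, one covering descriptors 250-255 and a second covering
 * descriptors 0-3, since a single sync region cannot wrap past the
 * end of the ring.
 */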
529
530 #define WM_CDRXSYNC(sc, x, ops) \
531 do { \
532 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
533 WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
534 } while (/*CONSTCOND*/0)
535
536 #define WM_INIT_RXDESC(sc, x) \
537 do { \
538 struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
539 wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
540 struct mbuf *__m = __rxs->rxs_mbuf; \
541 \
542 /* \
543 * Note: We scoot the packet forward 2 bytes in the buffer \
544 * so that the payload after the Ethernet header is aligned \
545 * to a 4-byte boundary. \
546 * \
547 * XXX BRAINDAMAGE ALERT! \
548 * The stupid chip uses the same size for every buffer, which \
549 * is set in the Receive Control register. We are using the 2K \
550 * size option, but what we REALLY want is (2K - 2)! For this \
551 * reason, we can't "scoot" packets longer than the standard \
552 * Ethernet MTU. On strict-alignment platforms, if the total \
553 * size exceeds (2K - 2) we set align_tweak to 0 and let \
554 * the upper layer copy the headers. \
555 */ \
556 __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
557 \
558 wm_set_dma_addr(&__rxd->wrx_addr, \
559 __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
560 __rxd->wrx_len = 0; \
561 __rxd->wrx_cksum = 0; \
562 __rxd->wrx_status = 0; \
563 __rxd->wrx_errors = 0; \
564 __rxd->wrx_special = 0; \
565 WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
566 \
567 CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
568 } while (/*CONSTCOND*/0)
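/*
 * Why the 2-byte "scoot": the Ethernet header is 14 bytes long, so
 * offsetting the frame by 2 makes the IP header behind it land on a
 * 4-byte boundary:
 *
 *	buffer + 0:  2 bytes of padding (sc_align_tweak)
 *	buffer + 2:  14-byte Ethernet header
 *	buffer + 16: IP header, now 4-byte aligned
 *
 * sc_align_tweak is set to 0 instead whenever a full frame might not
 * fit in 2K - 2 bytes, as the comment above explains.
 */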
569
570 /*
571 * Register read/write functions
572 * other than CSR_{READ|WRITE}().
573 */
574 #if 0
575 static inline uint32_t wm_io_read(struct wm_softc *, int);
576 #endif
577 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
578 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
579 uint32_t, uint32_t);
580 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
581
582 /*
583 * Device driver interface functions and commonly used functions.
584 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
585 */
586 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
587 static int wm_match(device_t, cfdata_t, void *);
588 static void wm_attach(device_t, device_t, void *);
589 static int wm_detach(device_t, int);
590 static bool wm_suspend(device_t, const pmf_qual_t *);
591 static bool wm_resume(device_t, const pmf_qual_t *);
592 static void wm_watchdog(struct ifnet *);
593 static void wm_tick(void *);
594 static int wm_ifflags_cb(struct ethercom *);
595 static int wm_ioctl(struct ifnet *, u_long, void *);
596 /* MAC address related */
597 static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
598 static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
599 static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
600 static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
601 static void wm_set_filter(struct wm_softc *);
602 /* Reset and init related */
603 static void wm_set_vlan(struct wm_softc *);
604 static void wm_set_pcie_completion_timeout(struct wm_softc *);
605 static void wm_get_auto_rd_done(struct wm_softc *);
606 static void wm_lan_init_done(struct wm_softc *);
607 static void wm_get_cfg_done(struct wm_softc *);
608 static void wm_initialize_hardware_bits(struct wm_softc *);
609 static uint32_t wm_rxpbs_adjust_82580(uint32_t);
610 static void wm_reset(struct wm_softc *);
611 static int wm_add_rxbuf(struct wm_softc *, int);
612 static void wm_rxdrain(struct wm_softc *);
613 static int wm_init(struct ifnet *);
614 static int wm_init_locked(struct ifnet *);
615 static void wm_stop(struct ifnet *, int);
616 static void wm_stop_locked(struct ifnet *, int);
617 static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
618 uint32_t *, uint8_t *);
619 static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
620 static void wm_82547_txfifo_stall(void *);
621 static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
622 /* Start */
623 static void wm_start(struct ifnet *);
624 static void wm_start_locked(struct ifnet *);
625 static int wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
626 uint32_t *, uint32_t *, bool *);
627 static void wm_nq_start(struct ifnet *);
628 static void wm_nq_start_locked(struct ifnet *);
629 /* Interrupt */
630 static int wm_txeof(struct wm_softc *);
631 static void wm_rxeof(struct wm_softc *);
632 static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
633 static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
634 static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
635 static void wm_linkintr(struct wm_softc *, uint32_t);
636 static int wm_intr_legacy(void *);
637 #ifdef WM_MSI_MSIX
638 static int wm_txintr_msix(void *);
639 static int wm_rxintr_msix(void *);
640 static int wm_linkintr_msix(void *);
641 #endif
642
643 /*
644 * Media related.
645 * GMII, SGMII, TBI, SERDES and SFP.
646 */
647 /* Common */
648 static void wm_tbi_serdes_set_linkled(struct wm_softc *);
649 /* GMII related */
650 static void wm_gmii_reset(struct wm_softc *);
651 static int wm_get_phy_id_82575(struct wm_softc *);
652 static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
653 static int wm_gmii_mediachange(struct ifnet *);
654 static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
655 static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
656 static uint32_t wm_i82543_mii_recvbits(struct wm_softc *);
657 static int wm_gmii_i82543_readreg(device_t, int, int);
658 static void wm_gmii_i82543_writereg(device_t, int, int, int);
659 static int wm_gmii_i82544_readreg(device_t, int, int);
660 static void wm_gmii_i82544_writereg(device_t, int, int, int);
661 static int wm_gmii_i80003_readreg(device_t, int, int);
662 static void wm_gmii_i80003_writereg(device_t, int, int, int);
663 static int wm_gmii_bm_readreg(device_t, int, int);
664 static void wm_gmii_bm_writereg(device_t, int, int, int);
665 static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
666 static int wm_gmii_hv_readreg(device_t, int, int);
667 static void wm_gmii_hv_writereg(device_t, int, int, int);
668 static int wm_gmii_82580_readreg(device_t, int, int);
669 static void wm_gmii_82580_writereg(device_t, int, int, int);
670 static int wm_gmii_gs40g_readreg(device_t, int, int);
671 static void wm_gmii_gs40g_writereg(device_t, int, int, int);
672 static void wm_gmii_statchg(struct ifnet *);
673 static int wm_kmrn_readreg(struct wm_softc *, int);
674 static void wm_kmrn_writereg(struct wm_softc *, int, int);
675 /* SGMII */
676 static bool wm_sgmii_uses_mdio(struct wm_softc *);
677 static int wm_sgmii_readreg(device_t, int, int);
678 static void wm_sgmii_writereg(device_t, int, int, int);
679 /* TBI related */
680 static void wm_tbi_mediainit(struct wm_softc *);
681 static int wm_tbi_mediachange(struct ifnet *);
682 static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
683 static int wm_check_for_link(struct wm_softc *);
684 static void wm_tbi_tick(struct wm_softc *);
685 /* SERDES related */
686 static void wm_serdes_power_up_link_82575(struct wm_softc *);
687 static int wm_serdes_mediachange(struct ifnet *);
688 static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
689 static void wm_serdes_tick(struct wm_softc *);
690 /* SFP related */
691 static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
692 static uint32_t wm_sfp_get_media_type(struct wm_softc *);
693
694 /*
695 * NVM related.
696 * Microwire, SPI (w/wo EERD) and Flash.
697 */
698 /* Misc functions */
699 static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
700 static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
701 static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
702 /* Microwire */
703 static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
704 /* SPI */
705 static int wm_nvm_ready_spi(struct wm_softc *);
706 static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
707 /* Reading via the EERD register */
708 static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
709 static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
710 /* Flash */
711 static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
712 unsigned int *);
713 static int32_t wm_ich8_cycle_init(struct wm_softc *);
714 static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
715 static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
716 uint16_t *);
717 static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
718 static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
719 static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
720 /* iNVM */
721 static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
722 static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
723 /* Locking, NVM type detection, checksum validation and reading */
724 static int wm_nvm_acquire(struct wm_softc *);
725 static void wm_nvm_release(struct wm_softc *);
726 static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
727 static int wm_nvm_get_flash_presence_i210(struct wm_softc *);
728 static int wm_nvm_validate_checksum(struct wm_softc *);
729 static void wm_nvm_version_invm(struct wm_softc *);
730 static void wm_nvm_version(struct wm_softc *);
731 static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
732
733 /*
734 * Hardware semaphores.
735 * Very complex...
736 */
737 static int wm_get_swsm_semaphore(struct wm_softc *);
738 static void wm_put_swsm_semaphore(struct wm_softc *);
739 static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
740 static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
741 static int wm_get_swfwhw_semaphore(struct wm_softc *);
742 static void wm_put_swfwhw_semaphore(struct wm_softc *);
743 static int wm_get_hw_semaphore_82573(struct wm_softc *);
744 static void wm_put_hw_semaphore_82573(struct wm_softc *);
745
746 /*
747 * Management mode and power management related subroutines.
748 * BMC, AMT, suspend/resume and EEE.
749 */
750 static int wm_check_mng_mode(struct wm_softc *);
751 static int wm_check_mng_mode_ich8lan(struct wm_softc *);
752 static int wm_check_mng_mode_82574(struct wm_softc *);
753 static int wm_check_mng_mode_generic(struct wm_softc *);
754 static int wm_enable_mng_pass_thru(struct wm_softc *);
755 static int wm_check_reset_block(struct wm_softc *);
756 static void wm_get_hw_control(struct wm_softc *);
757 static void wm_release_hw_control(struct wm_softc *);
758 static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
759 static void wm_smbustopci(struct wm_softc *);
760 static void wm_init_manageability(struct wm_softc *);
761 static void wm_release_manageability(struct wm_softc *);
762 static void wm_get_wakeup(struct wm_softc *);
763 #ifdef WM_WOL
764 static void wm_enable_phy_wakeup(struct wm_softc *);
765 static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
766 static void wm_enable_wakeup(struct wm_softc *);
767 #endif
768 /* EEE */
769 static void wm_set_eee_i350(struct wm_softc *);
770
771 /*
772 * Workarounds (mainly PHY related).
773 * Basically, the PHY workarounds live in the PHY drivers themselves.
774 */
775 static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
776 static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
777 static void wm_hv_phy_workaround_ich8lan(struct wm_softc *);
778 static void wm_lv_phy_workaround_ich8lan(struct wm_softc *);
779 static void wm_k1_gig_workaround_hv(struct wm_softc *, int);
780 static void wm_set_mdio_slow_mode_hv(struct wm_softc *);
781 static void wm_configure_k1_ich8lan(struct wm_softc *, int);
782 static void wm_reset_init_script_82575(struct wm_softc *);
783 static void wm_reset_mdicnfg_82580(struct wm_softc *);
784 static void wm_pll_workaround_i210(struct wm_softc *);
785
786 #ifdef WM_MSI_MSIX
787 struct _msix_matrix {
788 const char *intrname;
789 int(*func)(void *);
790 int intridx;
791 int cpuid;
792 } msix_matrix[WM_MSIX_NINTR] = {
793 { "TX", wm_txintr_msix, WM_MSIX_TXINTR_IDX, WM_MSIX_TXINTR_CPUID },
794 { "RX", wm_rxintr_msix, WM_MSIX_RXINTR_IDX, WM_MSIX_RXINTR_CPUID },
795 { "LINK", wm_linkintr_msix, WM_MSIX_LINKINTR_IDX,
796 WM_MSIX_LINKINTR_CPUID },
797 };
798 #endif
799
800 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
801 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
802
803 /*
804 * Devices supported by this driver.
805 */
806 static const struct wm_product {
807 pci_vendor_id_t wmp_vendor;
808 pci_product_id_t wmp_product;
809 const char *wmp_name;
810 wm_chip_type wmp_type;
811 uint32_t wmp_flags;
812 #define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN
813 #define WMP_F_FIBER WM_MEDIATYPE_FIBER
814 #define WMP_F_COPPER WM_MEDIATYPE_COPPER
815 #define WMP_F_SERDES WM_MEDIATYPE_SERDES
816 #define WMP_MEDIATYPE(x) ((x) & 0x03)
817 } wm_products[] = {
818 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
819 "Intel i82542 1000BASE-X Ethernet",
820 WM_T_82542_2_1, WMP_F_FIBER },
821
822 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
823 "Intel i82543GC 1000BASE-X Ethernet",
824 WM_T_82543, WMP_F_FIBER },
825
826 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
827 "Intel i82543GC 1000BASE-T Ethernet",
828 WM_T_82543, WMP_F_COPPER },
829
830 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
831 "Intel i82544EI 1000BASE-T Ethernet",
832 WM_T_82544, WMP_F_COPPER },
833
834 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
835 "Intel i82544EI 1000BASE-X Ethernet",
836 WM_T_82544, WMP_F_FIBER },
837
838 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
839 "Intel i82544GC 1000BASE-T Ethernet",
840 WM_T_82544, WMP_F_COPPER },
841
842 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
843 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
844 WM_T_82544, WMP_F_COPPER },
845
846 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
847 "Intel i82540EM 1000BASE-T Ethernet",
848 WM_T_82540, WMP_F_COPPER },
849
850 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
851 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
852 WM_T_82540, WMP_F_COPPER },
853
854 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
855 "Intel i82540EP 1000BASE-T Ethernet",
856 WM_T_82540, WMP_F_COPPER },
857
858 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
859 "Intel i82540EP 1000BASE-T Ethernet",
860 WM_T_82540, WMP_F_COPPER },
861
862 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
863 "Intel i82540EP 1000BASE-T Ethernet",
864 WM_T_82540, WMP_F_COPPER },
865
866 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
867 "Intel i82545EM 1000BASE-T Ethernet",
868 WM_T_82545, WMP_F_COPPER },
869
870 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
871 "Intel i82545GM 1000BASE-T Ethernet",
872 WM_T_82545_3, WMP_F_COPPER },
873
874 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
875 "Intel i82545GM 1000BASE-X Ethernet",
876 WM_T_82545_3, WMP_F_FIBER },
877
878 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
879 "Intel i82545GM Gigabit Ethernet (SERDES)",
880 WM_T_82545_3, WMP_F_SERDES },
881
882 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
883 "Intel i82546EB 1000BASE-T Ethernet",
884 WM_T_82546, WMP_F_COPPER },
885
886 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
887 "Intel i82546EB 1000BASE-T Ethernet",
888 WM_T_82546, WMP_F_COPPER },
889
890 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
891 "Intel i82545EM 1000BASE-X Ethernet",
892 WM_T_82545, WMP_F_FIBER },
893
894 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
895 "Intel i82546EB 1000BASE-X Ethernet",
896 WM_T_82546, WMP_F_FIBER },
897
898 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
899 "Intel i82546GB 1000BASE-T Ethernet",
900 WM_T_82546_3, WMP_F_COPPER },
901
902 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
903 "Intel i82546GB 1000BASE-X Ethernet",
904 WM_T_82546_3, WMP_F_FIBER },
905
906 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
907 "Intel i82546GB Gigabit Ethernet (SERDES)",
908 WM_T_82546_3, WMP_F_SERDES },
909
910 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
911 "i82546GB quad-port Gigabit Ethernet",
912 WM_T_82546_3, WMP_F_COPPER },
913
914 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
915 "i82546GB quad-port Gigabit Ethernet (KSP3)",
916 WM_T_82546_3, WMP_F_COPPER },
917
918 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
919 "Intel PRO/1000MT (82546GB)",
920 WM_T_82546_3, WMP_F_COPPER },
921
922 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
923 "Intel i82541EI 1000BASE-T Ethernet",
924 WM_T_82541, WMP_F_COPPER },
925
926 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
927 "Intel i82541ER (LOM) 1000BASE-T Ethernet",
928 WM_T_82541, WMP_F_COPPER },
929
930 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
931 "Intel i82541EI Mobile 1000BASE-T Ethernet",
932 WM_T_82541, WMP_F_COPPER },
933
934 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
935 "Intel i82541ER 1000BASE-T Ethernet",
936 WM_T_82541_2, WMP_F_COPPER },
937
938 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
939 "Intel i82541GI 1000BASE-T Ethernet",
940 WM_T_82541_2, WMP_F_COPPER },
941
942 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
943 "Intel i82541GI Mobile 1000BASE-T Ethernet",
944 WM_T_82541_2, WMP_F_COPPER },
945
946 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
947 "Intel i82541PI 1000BASE-T Ethernet",
948 WM_T_82541_2, WMP_F_COPPER },
949
950 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
951 "Intel i82547EI 1000BASE-T Ethernet",
952 WM_T_82547, WMP_F_COPPER },
953
954 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
955 "Intel i82547EI Mobile 1000BASE-T Ethernet",
956 WM_T_82547, WMP_F_COPPER },
957
958 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
959 "Intel i82547GI 1000BASE-T Ethernet",
960 WM_T_82547_2, WMP_F_COPPER },
961
962 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
963 "Intel PRO/1000 PT (82571EB)",
964 WM_T_82571, WMP_F_COPPER },
965
966 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
967 "Intel PRO/1000 PF (82571EB)",
968 WM_T_82571, WMP_F_FIBER },
969
970 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
971 "Intel PRO/1000 PB (82571EB)",
972 WM_T_82571, WMP_F_SERDES },
973
974 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
975 "Intel PRO/1000 QT (82571EB)",
976 WM_T_82571, WMP_F_COPPER },
977
978 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
979 "Intel PRO/1000 PT Quad Port Server Adapter",
980 WM_T_82571, WMP_F_COPPER, },
981
982 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
983 "Intel Gigabit PT Quad Port Server ExpressModule",
984 WM_T_82571, WMP_F_COPPER, },
985
986 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
987 "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
988 WM_T_82571, WMP_F_SERDES, },
989
990 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
991 "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
992 WM_T_82571, WMP_F_SERDES, },
993
994 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
995 "Intel 82571EB Quad 1000baseX Ethernet",
996 WM_T_82571, WMP_F_FIBER, },
997
998 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
999 "Intel i82572EI 1000baseT Ethernet",
1000 WM_T_82572, WMP_F_COPPER },
1001
1002 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
1003 "Intel i82572EI 1000baseX Ethernet",
1004 WM_T_82572, WMP_F_FIBER },
1005
1006 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
1007 "Intel i82572EI Gigabit Ethernet (SERDES)",
1008 WM_T_82572, WMP_F_SERDES },
1009
1010 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
1011 "Intel i82572EI 1000baseT Ethernet",
1012 WM_T_82572, WMP_F_COPPER },
1013
1014 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
1015 "Intel i82573E",
1016 WM_T_82573, WMP_F_COPPER },
1017
1018 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
1019 "Intel i82573E IAMT",
1020 WM_T_82573, WMP_F_COPPER },
1021
1022 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
1023 "Intel i82573L Gigabit Ethernet",
1024 WM_T_82573, WMP_F_COPPER },
1025
1026 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
1027 "Intel i82574L",
1028 WM_T_82574, WMP_F_COPPER },
1029
1030 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA,
1031 "Intel i82574L",
1032 WM_T_82574, WMP_F_COPPER },
1033
1034 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
1035 "Intel i82583V",
1036 WM_T_82583, WMP_F_COPPER },
1037
1038 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1039 "i80003 dual 1000baseT Ethernet",
1040 WM_T_80003, WMP_F_COPPER },
1041
1042 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1043 "i80003 dual 1000baseX Ethernet",
1044 WM_T_80003, WMP_F_COPPER },
1045
1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1047 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1048 WM_T_80003, WMP_F_SERDES },
1049
1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1051 "Intel i80003 1000baseT Ethernet",
1052 WM_T_80003, WMP_F_COPPER },
1053
1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1055 "Intel i80003 Gigabit Ethernet (SERDES)",
1056 WM_T_80003, WMP_F_SERDES },
1057
1058 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
1059 "Intel i82801H (M_AMT) LAN Controller",
1060 WM_T_ICH8, WMP_F_COPPER },
1061 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
1062 "Intel i82801H (AMT) LAN Controller",
1063 WM_T_ICH8, WMP_F_COPPER },
1064 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
1065 "Intel i82801H LAN Controller",
1066 WM_T_ICH8, WMP_F_COPPER },
1067 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1068 "Intel i82801H (IFE) LAN Controller",
1069 WM_T_ICH8, WMP_F_COPPER },
1070 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
1071 "Intel i82801H (M) LAN Controller",
1072 WM_T_ICH8, WMP_F_COPPER },
1073 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
1074 "Intel i82801H IFE (GT) LAN Controller",
1075 WM_T_ICH8, WMP_F_COPPER },
1076 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
1077 "Intel i82801H IFE (G) LAN Controller",
1078 WM_T_ICH8, WMP_F_COPPER },
1079 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1080 "82801I (AMT) LAN Controller",
1081 WM_T_ICH9, WMP_F_COPPER },
1082 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
1083 "82801I LAN Controller",
1084 WM_T_ICH9, WMP_F_COPPER },
1085 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
1086 "82801I (G) LAN Controller",
1087 WM_T_ICH9, WMP_F_COPPER },
1088 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
1089 "82801I (GT) LAN Controller",
1090 WM_T_ICH9, WMP_F_COPPER },
1091 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
1092 "82801I (C) LAN Controller",
1093 WM_T_ICH9, WMP_F_COPPER },
1094 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
1095 "82801I mobile LAN Controller",
1096 WM_T_ICH9, WMP_F_COPPER },
1097 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
1098 "82801I mobile (V) LAN Controller",
1099 WM_T_ICH9, WMP_F_COPPER },
1100 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1101 "82801I mobile (AMT) LAN Controller",
1102 WM_T_ICH9, WMP_F_COPPER },
1103 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
1104 "82567LM-4 LAN Controller",
1105 WM_T_ICH9, WMP_F_COPPER },
1106 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
1107 "82567V-3 LAN Controller",
1108 WM_T_ICH9, WMP_F_COPPER },
1109 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1110 "82567LM-2 LAN Controller",
1111 WM_T_ICH10, WMP_F_COPPER },
1112 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1113 "82567LF-2 LAN Controller",
1114 WM_T_ICH10, WMP_F_COPPER },
1115 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1116 "82567LM-3 LAN Controller",
1117 WM_T_ICH10, WMP_F_COPPER },
1118 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1119 "82567LF-3 LAN Controller",
1120 WM_T_ICH10, WMP_F_COPPER },
1121 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
1122 "82567V-2 LAN Controller",
1123 WM_T_ICH10, WMP_F_COPPER },
1124 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
1125 "82567V-3? LAN Controller",
1126 WM_T_ICH10, WMP_F_COPPER },
1127 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
1128 "HANKSVILLE LAN Controller",
1129 WM_T_ICH10, WMP_F_COPPER },
1130 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
1131 "PCH LAN (82577LM) Controller",
1132 WM_T_PCH, WMP_F_COPPER },
1133 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
1134 "PCH LAN (82577LC) Controller",
1135 WM_T_PCH, WMP_F_COPPER },
1136 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
1137 "PCH LAN (82578DM) Controller",
1138 WM_T_PCH, WMP_F_COPPER },
1139 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
1140 "PCH LAN (82578DC) Controller",
1141 WM_T_PCH, WMP_F_COPPER },
1142 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
1143 "PCH2 LAN (82579LM) Controller",
1144 WM_T_PCH2, WMP_F_COPPER },
1145 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
1146 "PCH2 LAN (82579V) Controller",
1147 WM_T_PCH2, WMP_F_COPPER },
1148 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
1149 "82575EB dual-1000baseT Ethernet",
1150 WM_T_82575, WMP_F_COPPER },
1151 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1152 "82575EB dual-1000baseX Ethernet (SERDES)",
1153 WM_T_82575, WMP_F_SERDES },
1154 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1155 "82575GB quad-1000baseT Ethernet",
1156 WM_T_82575, WMP_F_COPPER },
1157 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1158 "82575GB quad-1000baseT Ethernet (PM)",
1159 WM_T_82575, WMP_F_COPPER },
1160 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
1161 "82576 1000BaseT Ethernet",
1162 WM_T_82576, WMP_F_COPPER },
1163 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
1164 "82576 1000BaseX Ethernet",
1165 WM_T_82576, WMP_F_FIBER },
1166
1167 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
1168 "82576 gigabit Ethernet (SERDES)",
1169 WM_T_82576, WMP_F_SERDES },
1170
1171 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1172 "82576 quad-1000BaseT Ethernet",
1173 WM_T_82576, WMP_F_COPPER },
1174
1175 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1176 "82576 Gigabit ET2 Quad Port Server Adapter",
1177 WM_T_82576, WMP_F_COPPER },
1178
1179 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
1180 "82576 gigabit Ethernet",
1181 WM_T_82576, WMP_F_COPPER },
1182
1183 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
1184 "82576 gigabit Ethernet (SERDES)",
1185 WM_T_82576, WMP_F_SERDES },
1186 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1187 "82576 quad-gigabit Ethernet (SERDES)",
1188 WM_T_82576, WMP_F_SERDES },
1189
1190 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
1191 "82580 1000BaseT Ethernet",
1192 WM_T_82580, WMP_F_COPPER },
1193 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
1194 "82580 1000BaseX Ethernet",
1195 WM_T_82580, WMP_F_FIBER },
1196
1197 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
1198 "82580 1000BaseT Ethernet (SERDES)",
1199 WM_T_82580, WMP_F_SERDES },
1200
1201 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
1202 "82580 gigabit Ethernet (SGMII)",
1203 WM_T_82580, WMP_F_COPPER },
1204 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1205 "82580 dual-1000BaseT Ethernet",
1206 WM_T_82580, WMP_F_COPPER },
1207
1208 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1209 "82580 quad-1000BaseX Ethernet",
1210 WM_T_82580, WMP_F_FIBER },
1211
1212 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1213 "DH89XXCC Gigabit Ethernet (SGMII)",
1214 WM_T_82580, WMP_F_COPPER },
1215
1216 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1217 "DH89XXCC Gigabit Ethernet (SERDES)",
1218 WM_T_82580, WMP_F_SERDES },
1219
1220 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1221 "DH89XXCC 1000BASE-KX Ethernet",
1222 WM_T_82580, WMP_F_SERDES },
1223
1224 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1225 "DH89XXCC Gigabit Ethernet (SFP)",
1226 WM_T_82580, WMP_F_SERDES },
1227
1228 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1229 "I350 Gigabit Network Connection",
1230 WM_T_I350, WMP_F_COPPER },
1231
1232 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1233 "I350 Gigabit Fiber Network Connection",
1234 WM_T_I350, WMP_F_FIBER },
1235
1236 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1237 "I350 Gigabit Backplane Connection",
1238 WM_T_I350, WMP_F_SERDES },
1239
1240 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
1241 "I350 Quad Port Gigabit Ethernet",
1242 WM_T_I350, WMP_F_SERDES },
1243
1244 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1245 "I350 Gigabit Connection",
1246 WM_T_I350, WMP_F_COPPER },
1247
1248 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
1249 "I354 Gigabit Ethernet (KX)",
1250 WM_T_I354, WMP_F_SERDES },
1251
1252 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1253 "I354 Gigabit Ethernet (SGMII)",
1254 WM_T_I354, WMP_F_COPPER },
1255
1256 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
1257 "I354 Gigabit Ethernet (2.5G)",
1258 WM_T_I354, WMP_F_COPPER },
1259
1260 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1261 "I210-T1 Ethernet Server Adapter",
1262 WM_T_I210, WMP_F_COPPER },
1263
1264 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1265 "I210 Ethernet (Copper OEM)",
1266 WM_T_I210, WMP_F_COPPER },
1267
1268 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1269 "I210 Ethernet (Copper IT)",
1270 WM_T_I210, WMP_F_COPPER },
1271
1272 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1273 "I210 Ethernet (FLASH less)",
1274 WM_T_I210, WMP_F_COPPER },
1275
1276 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1277 "I210 Gigabit Ethernet (Fiber)",
1278 WM_T_I210, WMP_F_FIBER },
1279
1280 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1281 "I210 Gigabit Ethernet (SERDES)",
1282 WM_T_I210, WMP_F_SERDES },
1283
1284 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1285 "I210 Gigabit Ethernet (FLASH less)",
1286 WM_T_I210, WMP_F_SERDES },
1287
1288 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1289 "I210 Gigabit Ethernet (SGMII)",
1290 WM_T_I210, WMP_F_COPPER },
1291
1292 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1293 "I211 Ethernet (COPPER)",
1294 WM_T_I211, WMP_F_COPPER },
1295 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1296 "I217 V Ethernet Connection",
1297 WM_T_PCH_LPT, WMP_F_COPPER },
1298 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1299 "I217 LM Ethernet Connection",
1300 WM_T_PCH_LPT, WMP_F_COPPER },
1301 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1302 "I218 V Ethernet Connection",
1303 WM_T_PCH_LPT, WMP_F_COPPER },
1304 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
1305 "I218 V Ethernet Connection",
1306 WM_T_PCH_LPT, WMP_F_COPPER },
1307 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
1308 "I218 V Ethernet Connection",
1309 WM_T_PCH_LPT, WMP_F_COPPER },
1310 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1311 "I218 LM Ethernet Connection",
1312 WM_T_PCH_LPT, WMP_F_COPPER },
1313 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
1314 "I218 LM Ethernet Connection",
1315 WM_T_PCH_LPT, WMP_F_COPPER },
1316 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
1317 "I218 LM Ethernet Connection",
1318 WM_T_PCH_LPT, WMP_F_COPPER },
1319 { 0, 0,
1320 NULL,
1321 0, 0 },
1322 };
1323
1324 #ifdef WM_EVENT_COUNTERS
1325 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
1326 #endif /* WM_EVENT_COUNTERS */
1327
1328
1329 /*
1330 * Register read/write functions
1331 * other than CSR_{READ|WRITE}().
1332 */
1333
1334 #if 0 /* Not currently used */
1335 static inline uint32_t
1336 wm_io_read(struct wm_softc *sc, int reg)
1337 {
1338
1339 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1340 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1341 }
1342 #endif
1343
1344 static inline void
1345 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1346 {
1347
1348 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1349 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1350 }
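/*
 * The I/O BAR is an indirect register window: offset 0 selects the
 * register, offset 4 carries the data (wm_io_read() above uses the
 * same pairing). Usage sketch, using the CTRL register as an example:
 *
 *	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
 */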
1351
1352 static inline void
1353 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1354 uint32_t data)
1355 {
1356 uint32_t regval;
1357 int i;
1358
1359 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1360
1361 CSR_WRITE(sc, reg, regval);
1362
1363 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1364 delay(5);
1365 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1366 break;
1367 }
1368 if (i == SCTL_CTL_POLL_TIMEOUT) {
1369 aprint_error("%s: WARNING:"
1370 " i82575 reg 0x%08x setup did not indicate ready\n",
1371 device_xname(sc->sc_dev), reg);
1372 }
1373 }
1374
1375 static inline void
1376 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1377 {
1378 wa->wa_low = htole32(v & 0xffffffffU);
1379 if (sizeof(bus_addr_t) == 8)
1380 wa->wa_high = htole32((uint64_t) v >> 32);
1381 else
1382 wa->wa_high = 0;
1383 }
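/*
 * Note the htole32(): descriptors are little-endian regardless of
 * host byte order, and on 32-bit bus_addr_t platforms the high word
 * is a constant 0. WM_INIT_RXDESC() above is the typical caller:
 *
 *	wm_set_dma_addr(&rxd->wrx_addr,
 *	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
 */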
1384
1385 /*
1386 * Device driver interface functions and commonly used functions.
1387 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1388 */
1389
1390 /* Look up a device in the supported-device table */
1391 static const struct wm_product *
1392 wm_lookup(const struct pci_attach_args *pa)
1393 {
1394 const struct wm_product *wmp;
1395
1396 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1397 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1398 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1399 return wmp;
1400 }
1401 return NULL;
1402 }
1403
1404 /* The match function (ca_match) */
1405 static int
1406 wm_match(device_t parent, cfdata_t cf, void *aux)
1407 {
1408 struct pci_attach_args *pa = aux;
1409
1410 if (wm_lookup(pa) != NULL)
1411 return 1;
1412
1413 return 0;
1414 }
1415
1416 /* The attach function (ca_attach) */
1417 static void
1418 wm_attach(device_t parent, device_t self, void *aux)
1419 {
1420 struct wm_softc *sc = device_private(self);
1421 struct pci_attach_args *pa = aux;
1422 prop_dictionary_t dict;
1423 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1424 pci_chipset_tag_t pc = pa->pa_pc;
1425 #ifndef WM_MSI_MSIX
1426 pci_intr_handle_t ih;
1427 #else
1428 int counts[PCI_INTR_TYPE_SIZE];
1429 pci_intr_type_t max_type;
1430 #endif
1431 const char *intrstr = NULL;
1432 const char *eetype, *xname;
1433 bus_space_tag_t memt;
1434 bus_space_handle_t memh;
1435 bus_size_t memsize;
1436 int memh_valid;
1437 int i, error;
1438 const struct wm_product *wmp;
1439 prop_data_t ea;
1440 prop_number_t pn;
1441 uint8_t enaddr[ETHER_ADDR_LEN];
1442 uint16_t cfg1, cfg2, swdpin, nvmword;
1443 pcireg_t preg, memtype;
1444 uint16_t eeprom_data, apme_mask;
1445 bool force_clear_smbi;
1446 uint32_t link_mode;
1447 uint32_t reg;
1448 char intrbuf[PCI_INTRSTR_LEN];
1449
1450 sc->sc_dev = self;
1451 callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1452 sc->sc_stopping = false;
1453
1454 wmp = wm_lookup(pa);
1455 #ifdef DIAGNOSTIC
1456 if (wmp == NULL) {
1457 printf("\n");
1458 panic("wm_attach: impossible");
1459 }
1460 #endif
1461 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1462
1463 sc->sc_pc = pa->pa_pc;
1464 sc->sc_pcitag = pa->pa_tag;
1465
1466 if (pci_dma64_available(pa))
1467 sc->sc_dmat = pa->pa_dmat64;
1468 else
1469 sc->sc_dmat = pa->pa_dmat;
1470
1471 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1472 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1473 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1474
1475 sc->sc_type = wmp->wmp_type;
1476 if (sc->sc_type < WM_T_82543) {
1477 if (sc->sc_rev < 2) {
1478 aprint_error_dev(sc->sc_dev,
1479 "i82542 must be at least rev. 2\n");
1480 return;
1481 }
1482 if (sc->sc_rev < 3)
1483 sc->sc_type = WM_T_82542_2_0;
1484 }
1485
1486 /*
1487 * Disable MSI for Errata:
1488 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1489 *
1490 * 82544: Errata 25
1491 * 82540: Errata 6 (easy to reproduce device timeout)
1492 * 82545: Errata 4 (easy to reproduce device timeout)
1493 * 82546: Errata 26 (easy to reproduce device timeout)
1494 * 82541: Errata 7 (easy to reproduce device timeout)
1495 *
1496 * "Byte Enables 2 and 3 are not set on MSI writes"
1497 *
1498 * 82571 & 82572: Errata 63
1499 */
1500 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1501 || (sc->sc_type == WM_T_82572))
1502 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1503
1504 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1505 || (sc->sc_type == WM_T_82580)
1506 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1507 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1508 sc->sc_flags |= WM_F_NEWQUEUE;
1509
1510 /* Set device properties (mactype) */
1511 dict = device_properties(sc->sc_dev);
1512 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1513
1514 /*
1515 * Map the device. All devices support memory-mapped access,
1516 * and it is really required for normal operation.
1517 */
1518 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1519 switch (memtype) {
1520 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1521 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1522 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1523 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1524 break;
1525 default:
1526 memh_valid = 0;
1527 break;
1528 }
1529
1530 if (memh_valid) {
1531 sc->sc_st = memt;
1532 sc->sc_sh = memh;
1533 sc->sc_ss = memsize;
1534 } else {
1535 aprint_error_dev(sc->sc_dev,
1536 "unable to map device registers\n");
1537 return;
1538 }
1539
1540 /*
1541 * In addition, i82544 and later support I/O mapped indirect
1542 * register access. It is not desirable (nor supported in
1543 * this driver) to use it for normal operation, though it is
1544 * required to work around bugs in some chip versions.
1545 */
1546 if (sc->sc_type >= WM_T_82544) {
1547 /* First we have to find the I/O BAR. */
1548 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1549 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1550 if (memtype == PCI_MAPREG_TYPE_IO)
1551 break;
1552 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1553 PCI_MAPREG_MEM_TYPE_64BIT)
1554 i += 4; /* skip high bits, too */
1555 }
1556 if (i < PCI_MAPREG_END) {
1557 			/*
1558 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1559 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
1560 			 * that's fine, because those chips don't have the
1561 			 * bug this I/O mapping works around.
1562 			 *
1563 			 * The i8254x apparently doesn't respond when the
1564 			 * I/O BAR is 0, which looks as if it hasn't been
1565 			 * configured.
1566 			 */
1567 preg = pci_conf_read(pc, pa->pa_tag, i);
1568 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1569 aprint_error_dev(sc->sc_dev,
1570 "WARNING: I/O BAR at zero.\n");
1571 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1572 0, &sc->sc_iot, &sc->sc_ioh,
1573 NULL, &sc->sc_ios) == 0) {
1574 sc->sc_flags |= WM_F_IOH_VALID;
1575 } else {
1576 aprint_error_dev(sc->sc_dev,
1577 "WARNING: unable to map I/O space\n");
1578 }
1579 }
1580
1581 }
1582
1583 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1584 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1585 preg |= PCI_COMMAND_MASTER_ENABLE;
1586 if (sc->sc_type < WM_T_82542_2_1)
1587 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1588 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1589
1590 /* power up chip */
1591 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1592 NULL)) && error != EOPNOTSUPP) {
1593 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1594 return;
1595 }
1596
1597 #ifndef WM_MSI_MSIX
1598 /*
1599 * Map and establish our interrupt.
1600 */
1601 if (pci_intr_map(pa, &ih)) {
1602 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1603 return;
1604 }
1605 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
1606 #ifdef WM_MPSAFE
1607 pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
1608 #endif
1609 sc->sc_ihs[0] = pci_intr_establish_xname(pc, ih, IPL_NET,
1610 wm_intr_legacy, sc, device_xname(sc->sc_dev));
1611 if (sc->sc_ihs[0] == NULL) {
1612 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1613 if (intrstr != NULL)
1614 aprint_error(" at %s", intrstr);
1615 aprint_error("\n");
1616 return;
1617 }
1618 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1619 sc->sc_nintrs = 1;
1620 #else /* WM_MSI_MSIX */
1621 /* Allocation settings */
1622 max_type = PCI_INTR_TYPE_MSIX;
1623 counts[PCI_INTR_TYPE_MSIX] = WM_MAX_NINTR;
1624 counts[PCI_INTR_TYPE_MSI] = 1;
1625 counts[PCI_INTR_TYPE_INTX] = 1;
1626
1627 alloc_retry:
1628 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1629 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1630 return;
1631 }
1632
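	/*
	 * pci_intr_alloc() returns the best interrupt type allowed by
	 * max_type; if establishing a handler fails below, the vectors
	 * are released, max_type is lowered and we retry via the
	 * alloc_retry label.
	 */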
1633 if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1634 void *vih;
1635 kcpuset_t *affinity;
1636 char intr_xname[INTRDEVNAMEBUF];
1637
1638 kcpuset_create(&affinity, false);
1639
1640 for (i = 0; i < WM_MSIX_NINTR; i++) {
1641 intrstr = pci_intr_string(pc,
1642 sc->sc_intrs[msix_matrix[i].intridx], intrbuf,
1643 sizeof(intrbuf));
1644 #ifdef WM_MPSAFE
1645 pci_intr_setattr(pc,
1646 &sc->sc_intrs[msix_matrix[i].intridx],
1647 PCI_INTR_MPSAFE, true);
1648 #endif
1649 memset(intr_xname, 0, sizeof(intr_xname));
1650 strlcat(intr_xname, device_xname(sc->sc_dev),
1651 sizeof(intr_xname));
1652 strlcat(intr_xname, msix_matrix[i].intrname,
1653 sizeof(intr_xname));
1654 vih = pci_intr_establish_xname(pc,
1655 sc->sc_intrs[msix_matrix[i].intridx], IPL_NET,
1656 msix_matrix[i].func, sc, intr_xname);
1657 if (vih == NULL) {
1658 aprint_error_dev(sc->sc_dev,
1659 "unable to establish MSI-X(for %s)%s%s\n",
1660 msix_matrix[i].intrname,
1661 intrstr ? " at " : "",
1662 intrstr ? intrstr : "");
1663 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1664 WM_MSIX_NINTR);
1665 kcpuset_destroy(affinity);
1666
1667 /* Setup for MSI: Disable MSI-X */
1668 max_type = PCI_INTR_TYPE_MSI;
1669 counts[PCI_INTR_TYPE_MSI] = 1;
1670 counts[PCI_INTR_TYPE_INTX] = 1;
1671 goto alloc_retry;
1672 }
1673 kcpuset_zero(affinity);
1674 /* Round-robin affinity */
1675 kcpuset_set(affinity, msix_matrix[i].cpuid % ncpu);
1676 error = interrupt_distribute(vih, affinity, NULL);
1677 if (error == 0) {
1678 aprint_normal_dev(sc->sc_dev,
1679 "for %s interrupting at %s affinity to %u\n",
1680 msix_matrix[i].intrname, intrstr,
1681 msix_matrix[i].cpuid % ncpu);
1682 } else {
1683 aprint_normal_dev(sc->sc_dev,
1684 "for %s interrupting at %s\n",
1685 msix_matrix[i].intrname, intrstr);
1686 }
1687 sc->sc_ihs[msix_matrix[i].intridx] = vih;
1688 }
1689
1690 sc->sc_nintrs = WM_MSIX_NINTR;
1691 kcpuset_destroy(affinity);
1692 } else {
1693 /* MSI or INTx */
1694 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
1695 sizeof(intrbuf));
1696 #ifdef WM_MPSAFE
1697 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
1698 #endif
1699 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
1700 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
1701 if (sc->sc_ihs[0] == NULL) {
1702 			aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
1703 (pci_intr_type(sc->sc_intrs[0])
1704 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
1705 pci_intr_release(sc->sc_pc, sc->sc_intrs, 1);
1706 switch (pci_intr_type(sc->sc_intrs[0])) {
1707 case PCI_INTR_TYPE_MSI:
1708 /* The next try is for INTx: Disable MSI */
1709 max_type = PCI_INTR_TYPE_INTX;
1710 counts[PCI_INTR_TYPE_INTX] = 1;
1711 goto alloc_retry;
1712 case PCI_INTR_TYPE_INTX:
1713 default:
1714 return;
1715 }
1716 }
1717 aprint_normal_dev(sc->sc_dev, "%s at %s\n",
1718 (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI)
1719 ? "MSI" : "interrupting", intrstr);
1720
1721 sc->sc_nintrs = 1;
1722 }
1723 #endif /* WM_MSI_MSIX */
1724
1725 /*
1726 * Check the function ID (unit number of the chip).
1727 */
1728 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1729 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1730 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1731 || (sc->sc_type == WM_T_82580)
1732 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1733 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1734 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1735 else
1736 sc->sc_funcid = 0;
1737
1738 /*
1739 * Determine a few things about the bus we're connected to.
1740 */
1741 if (sc->sc_type < WM_T_82543) {
1742 /* We don't really know the bus characteristics here. */
1743 sc->sc_bus_speed = 33;
1744 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1745 /*
1746 * CSA (Communication Streaming Architecture) is about as fast
1747 		 * as a 32-bit 66MHz PCI bus.
1748 */
1749 sc->sc_flags |= WM_F_CSA;
1750 sc->sc_bus_speed = 66;
1751 aprint_verbose_dev(sc->sc_dev,
1752 "Communication Streaming Architecture\n");
1753 if (sc->sc_type == WM_T_82547) {
1754 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1755 callout_setfunc(&sc->sc_txfifo_ch,
1756 wm_82547_txfifo_stall, sc);
1757 aprint_verbose_dev(sc->sc_dev,
1758 "using 82547 Tx FIFO stall work-around\n");
1759 }
1760 } else if (sc->sc_type >= WM_T_82571) {
1761 sc->sc_flags |= WM_F_PCIE;
1762 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1763 && (sc->sc_type != WM_T_ICH10)
1764 && (sc->sc_type != WM_T_PCH)
1765 && (sc->sc_type != WM_T_PCH2)
1766 && (sc->sc_type != WM_T_PCH_LPT)) {
1767 /* ICH* and PCH* have no PCIe capability registers */
1768 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1769 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1770 NULL) == 0)
1771 aprint_error_dev(sc->sc_dev,
1772 "unable to find PCIe capability\n");
1773 }
1774 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1775 } else {
1776 reg = CSR_READ(sc, WMREG_STATUS);
1777 if (reg & STATUS_BUS64)
1778 sc->sc_flags |= WM_F_BUS64;
1779 if ((reg & STATUS_PCIX_MODE) != 0) {
1780 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1781
1782 sc->sc_flags |= WM_F_PCIX;
1783 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1784 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1785 aprint_error_dev(sc->sc_dev,
1786 "unable to find PCIX capability\n");
1787 else if (sc->sc_type != WM_T_82545_3 &&
1788 sc->sc_type != WM_T_82546_3) {
1789 /*
1790 * Work around a problem caused by the BIOS
1791 * setting the max memory read byte count
1792 * incorrectly.
1793 */
1794 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1795 sc->sc_pcixe_capoff + PCIX_CMD);
1796 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1797 sc->sc_pcixe_capoff + PCIX_STATUS);
1798
1799 bytecnt =
1800 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1801 PCIX_CMD_BYTECNT_SHIFT;
1802 maxb =
1803 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1804 PCIX_STATUS_MAXB_SHIFT;
1805 if (bytecnt > maxb) {
1806 aprint_verbose_dev(sc->sc_dev,
1807 "resetting PCI-X MMRBC: %d -> %d\n",
1808 512 << bytecnt, 512 << maxb);
1809 pcix_cmd = (pcix_cmd &
1810 ~PCIX_CMD_BYTECNT_MASK) |
1811 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1812 pci_conf_write(pa->pa_pc, pa->pa_tag,
1813 sc->sc_pcixe_capoff + PCIX_CMD,
1814 pcix_cmd);
1815 }
1816 }
1817 }
1818 /*
1819 * The quad port adapter is special; it has a PCIX-PCIX
1820 * bridge on the board, and can run the secondary bus at
1821 * a higher speed.
1822 */
1823 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1824 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1825 : 66;
1826 } else if (sc->sc_flags & WM_F_PCIX) {
1827 switch (reg & STATUS_PCIXSPD_MASK) {
1828 case STATUS_PCIXSPD_50_66:
1829 sc->sc_bus_speed = 66;
1830 break;
1831 case STATUS_PCIXSPD_66_100:
1832 sc->sc_bus_speed = 100;
1833 break;
1834 case STATUS_PCIXSPD_100_133:
1835 sc->sc_bus_speed = 133;
1836 break;
1837 default:
1838 aprint_error_dev(sc->sc_dev,
1839 "unknown PCIXSPD %d; assuming 66MHz\n",
1840 reg & STATUS_PCIXSPD_MASK);
1841 sc->sc_bus_speed = 66;
1842 break;
1843 }
1844 } else
1845 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1846 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1847 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1848 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1849 }
1850
1851 /*
1852 * Allocate the control data structures, and create and load the
1853 * DMA map for it.
1854 *
1855 * NOTE: All Tx descriptors must be in the same 4G segment of
1856 * memory. So must Rx descriptors. We simplify by allocating
1857 * both sets within the same 4G segment.
1858 */
1859 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1860 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1861 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1862 sizeof(struct wm_control_data_82542) :
1863 sizeof(struct wm_control_data_82544);
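	/*
	 * The 0x100000000ULL boundary argument below keeps the whole
	 * allocation from crossing a 4GB boundary, enforcing the
	 * same-4G-segment rule noted above.
	 */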
1864 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1865 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1866 &sc->sc_cd_rseg, 0)) != 0) {
1867 aprint_error_dev(sc->sc_dev,
1868 "unable to allocate control data, error = %d\n",
1869 error);
1870 goto fail_0;
1871 }
1872
1873 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1874 sc->sc_cd_rseg, sc->sc_cd_size,
1875 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1876 aprint_error_dev(sc->sc_dev,
1877 "unable to map control data, error = %d\n", error);
1878 goto fail_1;
1879 }
1880
1881 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1882 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1883 aprint_error_dev(sc->sc_dev,
1884 "unable to create control data DMA map, error = %d\n",
1885 error);
1886 goto fail_2;
1887 }
1888
1889 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1890 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1891 aprint_error_dev(sc->sc_dev,
1892 "unable to load control data DMA map, error = %d\n",
1893 error);
1894 goto fail_3;
1895 }
1896
1897 /* Create the transmit buffer DMA maps. */
1898 WM_TXQUEUELEN(sc) =
1899 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1900 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1901 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1902 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1903 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1904 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1905 aprint_error_dev(sc->sc_dev,
1906 "unable to create Tx DMA map %d, error = %d\n",
1907 i, error);
1908 goto fail_4;
1909 }
1910 }
1911
1912 /* Create the receive buffer DMA maps. */
1913 for (i = 0; i < WM_NRXDESC; i++) {
1914 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1915 MCLBYTES, 0, 0,
1916 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1917 aprint_error_dev(sc->sc_dev,
1918 "unable to create Rx DMA map %d error = %d\n",
1919 i, error);
1920 goto fail_5;
1921 }
1922 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1923 }
1924
1925 /* clear interesting stat counters */
1926 CSR_READ(sc, WMREG_COLC);
1927 CSR_READ(sc, WMREG_RXERRC);
1928
1929 /* get PHY control from SMBus to PCIe */
1930 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1931 || (sc->sc_type == WM_T_PCH_LPT))
1932 wm_smbustopci(sc);
1933
1934 /* Reset the chip to a known state. */
1935 wm_reset(sc);
1936
1937 /* Get some information about the EEPROM. */
1938 switch (sc->sc_type) {
1939 case WM_T_82542_2_0:
1940 case WM_T_82542_2_1:
1941 case WM_T_82543:
1942 case WM_T_82544:
1943 /* Microwire */
1944 sc->sc_nvm_wordsize = 64;
1945 sc->sc_nvm_addrbits = 6;
1946 break;
1947 case WM_T_82540:
1948 case WM_T_82545:
1949 case WM_T_82545_3:
1950 case WM_T_82546:
1951 case WM_T_82546_3:
1952 /* Microwire */
1953 reg = CSR_READ(sc, WMREG_EECD);
1954 if (reg & EECD_EE_SIZE) {
1955 sc->sc_nvm_wordsize = 256;
1956 sc->sc_nvm_addrbits = 8;
1957 } else {
1958 sc->sc_nvm_wordsize = 64;
1959 sc->sc_nvm_addrbits = 6;
1960 }
1961 sc->sc_flags |= WM_F_LOCK_EECD;
1962 break;
1963 case WM_T_82541:
1964 case WM_T_82541_2:
1965 case WM_T_82547:
1966 case WM_T_82547_2:
1967 sc->sc_flags |= WM_F_LOCK_EECD;
1968 reg = CSR_READ(sc, WMREG_EECD);
1969 if (reg & EECD_EE_TYPE) {
1970 /* SPI */
1971 sc->sc_flags |= WM_F_EEPROM_SPI;
1972 wm_nvm_set_addrbits_size_eecd(sc);
1973 } else {
1974 /* Microwire */
1975 if ((reg & EECD_EE_ABITS) != 0) {
1976 sc->sc_nvm_wordsize = 256;
1977 sc->sc_nvm_addrbits = 8;
1978 } else {
1979 sc->sc_nvm_wordsize = 64;
1980 sc->sc_nvm_addrbits = 6;
1981 }
1982 }
1983 break;
1984 case WM_T_82571:
1985 case WM_T_82572:
1986 /* SPI */
1987 sc->sc_flags |= WM_F_EEPROM_SPI;
1988 wm_nvm_set_addrbits_size_eecd(sc);
1989 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1990 break;
1991 case WM_T_82573:
1992 sc->sc_flags |= WM_F_LOCK_SWSM;
1993 /* FALLTHROUGH */
1994 case WM_T_82574:
1995 case WM_T_82583:
1996 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1997 sc->sc_flags |= WM_F_EEPROM_FLASH;
1998 sc->sc_nvm_wordsize = 2048;
1999 } else {
2000 /* SPI */
2001 sc->sc_flags |= WM_F_EEPROM_SPI;
2002 wm_nvm_set_addrbits_size_eecd(sc);
2003 }
2004 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2005 break;
2006 case WM_T_82575:
2007 case WM_T_82576:
2008 case WM_T_82580:
2009 case WM_T_I350:
2010 case WM_T_I354:
2011 case WM_T_80003:
2012 /* SPI */
2013 sc->sc_flags |= WM_F_EEPROM_SPI;
2014 wm_nvm_set_addrbits_size_eecd(sc);
2015 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
2016 | WM_F_LOCK_SWSM;
2017 break;
2018 case WM_T_ICH8:
2019 case WM_T_ICH9:
2020 case WM_T_ICH10:
2021 case WM_T_PCH:
2022 case WM_T_PCH2:
2023 case WM_T_PCH_LPT:
2024 /* FLASH */
2025 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2026 sc->sc_nvm_wordsize = 2048;
2027 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
2028 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2029 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2030 aprint_error_dev(sc->sc_dev,
2031 "can't map FLASH registers\n");
2032 goto fail_5;
2033 }
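		/*
		 * Compute the flash geometry from GFPREG: the low bits
		 * hold the first sector of the NVM region and bits
		 * 28:16 the last one, in ICH_FLASH_SECTOR_SIZE units.
		 * The region holds two banks, so the size is halved and
		 * converted from bytes to 16-bit words below.
		 */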
2034 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2035 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2036 ICH_FLASH_SECTOR_SIZE;
2037 sc->sc_ich8_flash_bank_size =
2038 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2039 sc->sc_ich8_flash_bank_size -=
2040 (reg & ICH_GFPREG_BASE_MASK);
2041 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2042 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2043 break;
2044 case WM_T_I210:
2045 case WM_T_I211:
2046 if (wm_nvm_get_flash_presence_i210(sc)) {
2047 wm_nvm_set_addrbits_size_eecd(sc);
2048 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2049 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
2050 } else {
2051 sc->sc_nvm_wordsize = INVM_SIZE;
2052 sc->sc_flags |= WM_F_EEPROM_INVM;
2053 sc->sc_flags |= WM_F_LOCK_SWFW;
2054 }
2055 break;
2056 default:
2057 break;
2058 }
2059
2060 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2061 switch (sc->sc_type) {
2062 case WM_T_82571:
2063 case WM_T_82572:
2064 reg = CSR_READ(sc, WMREG_SWSM2);
2065 if ((reg & SWSM2_LOCK) == 0) {
2066 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2067 force_clear_smbi = true;
2068 } else
2069 force_clear_smbi = false;
2070 break;
2071 case WM_T_82573:
2072 case WM_T_82574:
2073 case WM_T_82583:
2074 force_clear_smbi = true;
2075 break;
2076 default:
2077 force_clear_smbi = false;
2078 break;
2079 }
2080 if (force_clear_smbi) {
2081 reg = CSR_READ(sc, WMREG_SWSM);
2082 if ((reg & SWSM_SMBI) != 0)
2083 aprint_error_dev(sc->sc_dev,
2084 "Please update the Bootagent\n");
2085 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2086 }
2087
2088 /*
2089 	 * Defer printing the EEPROM type until after verifying the checksum.
2090 * This allows the EEPROM type to be printed correctly in the case
2091 * that no EEPROM is attached.
2092 */
2093 /*
2094 * Validate the EEPROM checksum. If the checksum fails, flag
2095 * this for later, so we can fail future reads from the EEPROM.
2096 */
2097 if (wm_nvm_validate_checksum(sc)) {
2098 /*
2099 		 * Validate the checksum again, because some PCI-e parts
2100 		 * fail the first check due to the link being in a sleep state.
2101 */
2102 if (wm_nvm_validate_checksum(sc))
2103 sc->sc_flags |= WM_F_EEPROM_INVALID;
2104 }
2105
2106 /* Set device properties (macflags) */
2107 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2108
2109 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2110 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2111 else {
2112 aprint_verbose_dev(sc->sc_dev, "%u words ",
2113 sc->sc_nvm_wordsize);
2114 if (sc->sc_flags & WM_F_EEPROM_INVM)
2115 aprint_verbose("iNVM");
2116 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2117 aprint_verbose("FLASH(HW)");
2118 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2119 aprint_verbose("FLASH");
2120 else {
2121 if (sc->sc_flags & WM_F_EEPROM_SPI)
2122 eetype = "SPI";
2123 else
2124 eetype = "MicroWire";
2125 aprint_verbose("(%d address bits) %s EEPROM",
2126 sc->sc_nvm_addrbits, eetype);
2127 }
2128 }
2129 wm_nvm_version(sc);
2130 aprint_verbose("\n");
2131
2132 /* Check for I21[01] PLL workaround */
2133 if (sc->sc_type == WM_T_I210)
2134 sc->sc_flags |= WM_F_PLL_WA_I210;
2135 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2136 /* NVM image release 3.25 has a workaround */
2137 if ((sc->sc_nvm_ver_major < 3)
2138 || ((sc->sc_nvm_ver_major == 3)
2139 && (sc->sc_nvm_ver_minor < 25))) {
2140 aprint_verbose_dev(sc->sc_dev,
2141 "ROM image version %d.%d is older than 3.25\n",
2142 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2143 sc->sc_flags |= WM_F_PLL_WA_I210;
2144 }
2145 }
2146 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2147 wm_pll_workaround_i210(sc);
2148
2149 switch (sc->sc_type) {
2150 case WM_T_82571:
2151 case WM_T_82572:
2152 case WM_T_82573:
2153 case WM_T_82574:
2154 case WM_T_82583:
2155 case WM_T_80003:
2156 case WM_T_ICH8:
2157 case WM_T_ICH9:
2158 case WM_T_ICH10:
2159 case WM_T_PCH:
2160 case WM_T_PCH2:
2161 case WM_T_PCH_LPT:
2162 if (wm_check_mng_mode(sc) != 0)
2163 wm_get_hw_control(sc);
2164 break;
2165 default:
2166 break;
2167 }
2168 wm_get_wakeup(sc);
2169 /*
2170 	 * Read the Ethernet address from the EEPROM, unless it was
2171 	 * already found in the device properties.
2172 */
2173 ea = prop_dictionary_get(dict, "mac-address");
2174 if (ea != NULL) {
2175 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2176 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2177 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2178 } else {
2179 if (wm_read_mac_addr(sc, enaddr) != 0) {
2180 aprint_error_dev(sc->sc_dev,
2181 "unable to read Ethernet address\n");
2182 goto fail_5;
2183 }
2184 }
2185
2186 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2187 ether_sprintf(enaddr));
2188
2189 /*
2190 * Read the config info from the EEPROM, and set up various
2191 * bits in the control registers based on their contents.
2192 */
2193 pn = prop_dictionary_get(dict, "i82543-cfg1");
2194 if (pn != NULL) {
2195 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2196 cfg1 = (uint16_t) prop_number_integer_value(pn);
2197 } else {
2198 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2199 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2200 goto fail_5;
2201 }
2202 }
2203
2204 pn = prop_dictionary_get(dict, "i82543-cfg2");
2205 if (pn != NULL) {
2206 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2207 cfg2 = (uint16_t) prop_number_integer_value(pn);
2208 } else {
2209 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2210 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2211 goto fail_5;
2212 }
2213 }
2214
2215 /* check for WM_F_WOL */
2216 switch (sc->sc_type) {
2217 case WM_T_82542_2_0:
2218 case WM_T_82542_2_1:
2219 case WM_T_82543:
2220 /* dummy? */
2221 eeprom_data = 0;
2222 apme_mask = NVM_CFG3_APME;
2223 break;
2224 case WM_T_82544:
2225 apme_mask = NVM_CFG2_82544_APM_EN;
2226 eeprom_data = cfg2;
2227 break;
2228 case WM_T_82546:
2229 case WM_T_82546_3:
2230 case WM_T_82571:
2231 case WM_T_82572:
2232 case WM_T_82573:
2233 case WM_T_82574:
2234 case WM_T_82583:
2235 case WM_T_80003:
2236 default:
2237 apme_mask = NVM_CFG3_APME;
2238 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2239 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2240 break;
2241 case WM_T_82575:
2242 case WM_T_82576:
2243 case WM_T_82580:
2244 case WM_T_I350:
2245 case WM_T_I354: /* XXX ok? */
2246 case WM_T_ICH8:
2247 case WM_T_ICH9:
2248 case WM_T_ICH10:
2249 case WM_T_PCH:
2250 case WM_T_PCH2:
2251 case WM_T_PCH_LPT:
2252 /* XXX The funcid should be checked on some devices */
2253 apme_mask = WUC_APME;
2254 eeprom_data = CSR_READ(sc, WMREG_WUC);
2255 break;
2256 }
2257
2258 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2259 if ((eeprom_data & apme_mask) != 0)
2260 sc->sc_flags |= WM_F_WOL;
2261 #ifdef WM_DEBUG
2262 if ((sc->sc_flags & WM_F_WOL) != 0)
2263 printf("WOL\n");
2264 #endif
2265
2266 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2267 /* Check NVM for autonegotiation */
2268 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2269 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2270 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2271 }
2272 }
2273
2274 /*
2275 	 * XXX Some multi-port cards need special handling to
2276 	 * disable a particular port.
2277 */
2278
2279 if (sc->sc_type >= WM_T_82544) {
2280 pn = prop_dictionary_get(dict, "i82543-swdpin");
2281 if (pn != NULL) {
2282 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2283 swdpin = (uint16_t) prop_number_integer_value(pn);
2284 } else {
2285 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2286 aprint_error_dev(sc->sc_dev,
2287 "unable to read SWDPIN\n");
2288 goto fail_5;
2289 }
2290 }
2291 }
2292
2293 if (cfg1 & NVM_CFG1_ILOS)
2294 sc->sc_ctrl |= CTRL_ILOS;
2295
2296 /*
2297 * XXX
2298 	 * This code isn't correct for newer chips because pins 2 and 3
2299 	 * are located in different positions there. Check all datasheets.
2300 	 *
2301 	 * Until this is resolved, only apply it to chips <= 82580.
2302 */
2303 if (sc->sc_type <= WM_T_82580) {
2304 if (sc->sc_type >= WM_T_82544) {
2305 sc->sc_ctrl |=
2306 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2307 CTRL_SWDPIO_SHIFT;
2308 sc->sc_ctrl |=
2309 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2310 CTRL_SWDPINS_SHIFT;
2311 } else {
2312 sc->sc_ctrl |=
2313 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2314 CTRL_SWDPIO_SHIFT;
2315 }
2316 }
2317
2318 /* XXX For other than 82580? */
2319 if (sc->sc_type == WM_T_82580) {
2320 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2321 printf("CFG3 = %08x\n", (uint32_t)nvmword);
2322 if (nvmword & __BIT(13)) {
2323 printf("SET ILOS\n");
2324 sc->sc_ctrl |= CTRL_ILOS;
2325 }
2326 }
2327
2328 #if 0
2329 if (sc->sc_type >= WM_T_82544) {
2330 if (cfg1 & NVM_CFG1_IPS0)
2331 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2332 if (cfg1 & NVM_CFG1_IPS1)
2333 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2334 sc->sc_ctrl_ext |=
2335 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2336 CTRL_EXT_SWDPIO_SHIFT;
2337 sc->sc_ctrl_ext |=
2338 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2339 CTRL_EXT_SWDPINS_SHIFT;
2340 } else {
2341 sc->sc_ctrl_ext |=
2342 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2343 CTRL_EXT_SWDPIO_SHIFT;
2344 }
2345 #endif
2346
2347 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2348 #if 0
2349 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2350 #endif
2351
2352 /*
2353 * Set up some register offsets that are different between
2354 * the i82542 and the i82543 and later chips.
2355 */
2356 if (sc->sc_type < WM_T_82543) {
2357 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2358 sc->sc_tdt_reg = WMREG_OLD_TDT;
2359 } else {
2360 sc->sc_rdt_reg = WMREG_RDT;
2361 sc->sc_tdt_reg = WMREG_TDT;
2362 }
2363
2364 if (sc->sc_type == WM_T_PCH) {
2365 uint16_t val;
2366
2367 /* Save the NVM K1 bit setting */
2368 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2369
2370 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2371 sc->sc_nvm_k1_enabled = 1;
2372 else
2373 sc->sc_nvm_k1_enabled = 0;
2374 }
2375
2376 /*
2377 * Determine if we're TBI,GMII or SGMII mode, and initialize the
2378 * media structures accordingly.
2379 */
2380 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2381 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2382 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2383 || sc->sc_type == WM_T_82573
2384 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2385 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2386 wm_gmii_mediainit(sc, wmp->wmp_product);
2387 } else if (sc->sc_type < WM_T_82543 ||
2388 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2389 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2390 aprint_error_dev(sc->sc_dev,
2391 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2392 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2393 }
2394 wm_tbi_mediainit(sc);
2395 } else {
2396 switch (sc->sc_type) {
2397 case WM_T_82575:
2398 case WM_T_82576:
2399 case WM_T_82580:
2400 case WM_T_I350:
2401 case WM_T_I354:
2402 case WM_T_I210:
2403 case WM_T_I211:
2404 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2405 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2406 switch (link_mode) {
2407 case CTRL_EXT_LINK_MODE_1000KX:
2408 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2409 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2410 break;
2411 case CTRL_EXT_LINK_MODE_SGMII:
2412 if (wm_sgmii_uses_mdio(sc)) {
2413 aprint_verbose_dev(sc->sc_dev,
2414 "SGMII(MDIO)\n");
2415 sc->sc_flags |= WM_F_SGMII;
2416 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2417 break;
2418 }
2419 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2420 /*FALLTHROUGH*/
2421 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2422 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2423 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2424 if (link_mode
2425 == CTRL_EXT_LINK_MODE_SGMII) {
2426 sc->sc_mediatype
2427 = WM_MEDIATYPE_COPPER;
2428 sc->sc_flags |= WM_F_SGMII;
2429 } else {
2430 sc->sc_mediatype
2431 = WM_MEDIATYPE_SERDES;
2432 aprint_verbose_dev(sc->sc_dev,
2433 "SERDES\n");
2434 }
2435 break;
2436 }
2437 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2438 aprint_verbose_dev(sc->sc_dev,
2439 "SERDES\n");
2440
2441 /* Change current link mode setting */
2442 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2443 switch (sc->sc_mediatype) {
2444 case WM_MEDIATYPE_COPPER:
2445 reg |= CTRL_EXT_LINK_MODE_SGMII;
2446 break;
2447 case WM_MEDIATYPE_SERDES:
2448 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2449 break;
2450 default:
2451 break;
2452 }
2453 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2454 break;
2455 case CTRL_EXT_LINK_MODE_GMII:
2456 default:
2457 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2458 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2459 break;
2460 }
2461
2463 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2464 				reg |= CTRL_EXT_I2C_ENA;
2465 			else
2466 				reg &= ~CTRL_EXT_I2C_ENA;
2467 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2468
2469 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2470 wm_gmii_mediainit(sc, wmp->wmp_product);
2471 else
2472 wm_tbi_mediainit(sc);
2473 break;
2474 default:
2475 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2476 aprint_error_dev(sc->sc_dev,
2477 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2478 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2479 wm_gmii_mediainit(sc, wmp->wmp_product);
2480 }
2481 }
2482
2483 ifp = &sc->sc_ethercom.ec_if;
2484 xname = device_xname(sc->sc_dev);
2485 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2486 ifp->if_softc = sc;
2487 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2488 ifp->if_ioctl = wm_ioctl;
2489 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2490 ifp->if_start = wm_nq_start;
2491 else
2492 ifp->if_start = wm_start;
2493 ifp->if_watchdog = wm_watchdog;
2494 ifp->if_init = wm_init;
2495 ifp->if_stop = wm_stop;
2496 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2497 IFQ_SET_READY(&ifp->if_snd);
2498
2499 /* Check for jumbo frame */
2500 switch (sc->sc_type) {
2501 case WM_T_82573:
2502 /* XXX limited to 9234 if ASPM is disabled */
2503 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2504 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2505 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2506 break;
2507 case WM_T_82571:
2508 case WM_T_82572:
2509 case WM_T_82574:
2510 case WM_T_82575:
2511 case WM_T_82576:
2512 case WM_T_82580:
2513 case WM_T_I350:
2514 case WM_T_I354: /* XXXX ok? */
2515 case WM_T_I210:
2516 case WM_T_I211:
2517 case WM_T_80003:
2518 case WM_T_ICH9:
2519 case WM_T_ICH10:
2520 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2521 case WM_T_PCH_LPT:
2522 /* XXX limited to 9234 */
2523 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2524 break;
2525 case WM_T_PCH:
2526 /* XXX limited to 4096 */
2527 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2528 break;
2529 case WM_T_82542_2_0:
2530 case WM_T_82542_2_1:
2531 case WM_T_82583:
2532 case WM_T_ICH8:
2533 		/* No support for jumbo frames */
2534 break;
2535 default:
2536 /* ETHER_MAX_LEN_JUMBO */
2537 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2538 break;
2539 }
2540
2541 	/* If we're an i82543 or greater, we can support VLANs. */
2542 if (sc->sc_type >= WM_T_82543)
2543 sc->sc_ethercom.ec_capabilities |=
2544 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2545
2546 /*
2547 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but
2548 	 * only on the i82543 and later.
2549 */
2550 if (sc->sc_type >= WM_T_82543) {
2551 ifp->if_capabilities |=
2552 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2553 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2554 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2555 IFCAP_CSUM_TCPv6_Tx |
2556 IFCAP_CSUM_UDPv6_Tx;
2557 }
2558
2559 /*
2560 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2561 *
2562 * 82541GI (8086:1076) ... no
2563 * 82572EI (8086:10b9) ... yes
2564 */
2565 if (sc->sc_type >= WM_T_82571) {
2566 ifp->if_capabilities |=
2567 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2568 }
2569
2570 /*
2571 	 * If we're an i82544 or greater (except the i82547), we can do
2572 * TCP segmentation offload.
2573 */
2574 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2575 ifp->if_capabilities |= IFCAP_TSOv4;
2576 }
2577
2578 if (sc->sc_type >= WM_T_82571) {
2579 ifp->if_capabilities |= IFCAP_TSOv6;
2580 }
2581
2582 #ifdef WM_MPSAFE
2583 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2584 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2585 #else
2586 sc->sc_tx_lock = NULL;
2587 sc->sc_rx_lock = NULL;
2588 #endif
2589
2590 /* Attach the interface. */
2591 if_attach(ifp);
2592 ether_ifattach(ifp, enaddr);
2593 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2594 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2595 RND_FLAG_DEFAULT);
2596
2597 #ifdef WM_EVENT_COUNTERS
2598 /* Attach event counters. */
2599 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2600 NULL, xname, "txsstall");
2601 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2602 NULL, xname, "txdstall");
2603 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2604 NULL, xname, "txfifo_stall");
2605 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2606 NULL, xname, "txdw");
2607 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2608 NULL, xname, "txqe");
2609 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2610 NULL, xname, "rxintr");
2611 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2612 NULL, xname, "linkintr");
2613
2614 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2615 NULL, xname, "rxipsum");
2616 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2617 NULL, xname, "rxtusum");
2618 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2619 NULL, xname, "txipsum");
2620 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2621 NULL, xname, "txtusum");
2622 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2623 NULL, xname, "txtusum6");
2624
2625 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2626 NULL, xname, "txtso");
2627 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2628 NULL, xname, "txtso6");
2629 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2630 NULL, xname, "txtsopain");
2631
2632 for (i = 0; i < WM_NTXSEGS; i++) {
2633 snprintf(wm_txseg_evcnt_names[i],
2634 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2635 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2636 NULL, xname, wm_txseg_evcnt_names[i]);
2637 }
2638
2639 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2640 NULL, xname, "txdrop");
2641
2642 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2643 NULL, xname, "tu");
2644
2645 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2646 NULL, xname, "tx_xoff");
2647 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2648 NULL, xname, "tx_xon");
2649 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2650 NULL, xname, "rx_xoff");
2651 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2652 NULL, xname, "rx_xon");
2653 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2654 NULL, xname, "rx_macctl");
2655 #endif /* WM_EVENT_COUNTERS */
2656
2657 if (pmf_device_register(self, wm_suspend, wm_resume))
2658 pmf_class_network_register(self, ifp);
2659 else
2660 aprint_error_dev(self, "couldn't establish power handler\n");
2661
2662 sc->sc_flags |= WM_F_ATTACHED;
2663 return;
2664
2665 /*
2666 * Free any resources we've allocated during the failed attach
2667 * attempt. Do this in reverse order and fall through.
2668 */
2669 fail_5:
2670 for (i = 0; i < WM_NRXDESC; i++) {
2671 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2672 bus_dmamap_destroy(sc->sc_dmat,
2673 sc->sc_rxsoft[i].rxs_dmamap);
2674 }
2675 fail_4:
2676 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2677 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2678 bus_dmamap_destroy(sc->sc_dmat,
2679 sc->sc_txsoft[i].txs_dmamap);
2680 }
2681 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2682 fail_3:
2683 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2684 fail_2:
2685 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2686 sc->sc_cd_size);
2687 fail_1:
2688 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2689 fail_0:
2690 return;
2691 }
2692
2693 /* The detach function (ca_detach) */
2694 static int
2695 wm_detach(device_t self, int flags __unused)
2696 {
2697 struct wm_softc *sc = device_private(self);
2698 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2699 int i;
2700 #ifndef WM_MPSAFE
2701 int s;
2702 #endif
2703
2704 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2705 return 0;
2706
2707 #ifndef WM_MPSAFE
2708 s = splnet();
2709 #endif
2710 	/* Stop the interface; callouts are stopped inside wm_stop(). */
2711 wm_stop(ifp, 1);
2712
2713 #ifndef WM_MPSAFE
2714 splx(s);
2715 #endif
2716
2717 pmf_device_deregister(self);
2718
2719 /* Tell the firmware about the release */
2720 WM_BOTH_LOCK(sc);
2721 wm_release_manageability(sc);
2722 wm_release_hw_control(sc);
2723 WM_BOTH_UNLOCK(sc);
2724
2725 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2726
2727 /* Delete all remaining media. */
2728 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2729
2730 ether_ifdetach(ifp);
2731 if_detach(ifp);
2732
2734 /* Unload RX dmamaps and free mbufs */
2735 WM_RX_LOCK(sc);
2736 wm_rxdrain(sc);
2737 WM_RX_UNLOCK(sc);
2738 /* Must unlock here */
2739
2740 	/* Free DMA maps; this mirrors the failure path at the end of wm_attach() */
2741 for (i = 0; i < WM_NRXDESC; i++) {
2742 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2743 bus_dmamap_destroy(sc->sc_dmat,
2744 sc->sc_rxsoft[i].rxs_dmamap);
2745 }
2746 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2747 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2748 bus_dmamap_destroy(sc->sc_dmat,
2749 sc->sc_txsoft[i].txs_dmamap);
2750 }
2751 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2752 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2753 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2754 sc->sc_cd_size);
2755 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2756
2757 /* Disestablish the interrupt handler */
2758 for (i = 0; i < sc->sc_nintrs; i++) {
2759 if (sc->sc_ihs[i] != NULL) {
2760 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2761 sc->sc_ihs[i] = NULL;
2762 }
2763 }
2764 #ifdef WM_MSI_MSIX
2765 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2766 #endif /* WM_MSI_MSIX */
2767
2768 /* Unmap the registers */
2769 if (sc->sc_ss) {
2770 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2771 sc->sc_ss = 0;
2772 }
2773 if (sc->sc_ios) {
2774 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2775 sc->sc_ios = 0;
2776 }
2777 if (sc->sc_flashs) {
2778 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2779 sc->sc_flashs = 0;
2780 }
2781
2782 if (sc->sc_tx_lock)
2783 mutex_obj_free(sc->sc_tx_lock);
2784 if (sc->sc_rx_lock)
2785 mutex_obj_free(sc->sc_rx_lock);
2786
2787 return 0;
2788 }
2789
2790 static bool
2791 wm_suspend(device_t self, const pmf_qual_t *qual)
2792 {
2793 struct wm_softc *sc = device_private(self);
2794
2795 wm_release_manageability(sc);
2796 wm_release_hw_control(sc);
2797 #ifdef WM_WOL
2798 wm_enable_wakeup(sc);
2799 #endif
2800
2801 return true;
2802 }
2803
2804 static bool
2805 wm_resume(device_t self, const pmf_qual_t *qual)
2806 {
2807 struct wm_softc *sc = device_private(self);
2808
2809 wm_init_manageability(sc);
2810
2811 return true;
2812 }
2813
2814 /*
2815 * wm_watchdog: [ifnet interface function]
2816 *
2817 * Watchdog timer handler.
2818 */
2819 static void
2820 wm_watchdog(struct ifnet *ifp)
2821 {
2822 struct wm_softc *sc = ifp->if_softc;
2823
2824 /*
2825 * Since we're using delayed interrupts, sweep up
2826 * before we report an error.
2827 */
2828 WM_TX_LOCK(sc);
2829 wm_txeof(sc);
2830 WM_TX_UNLOCK(sc);
2831
2832 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2833 #ifdef WM_DEBUG
2834 int i, j;
2835 struct wm_txsoft *txs;
2836 #endif
2837 log(LOG_ERR,
2838 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2839 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2840 sc->sc_txnext);
2841 ifp->if_oerrors++;
2842 #ifdef WM_DEBUG
2843 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext;
2844 i = WM_NEXTTXS(sc, i)) {
2845 txs = &sc->sc_txsoft[i];
2846 printf("txs %d tx %d -> %d\n",
2847 i, txs->txs_firstdesc, txs->txs_lastdesc);
2848 for (j = txs->txs_firstdesc; ;
2849 j = WM_NEXTTX(sc, j)) {
2850 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2851 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2852 printf("\t %#08x%08x\n",
2853 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2854 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2855 if (j == txs->txs_lastdesc)
2856 break;
2857 }
2858 }
2859 #endif
2860 /* Reset the interface. */
2861 (void) wm_init(ifp);
2862 }
2863
2864 /* Try to get more packets going. */
2865 ifp->if_start(ifp);
2866 }
2867
2868 /*
2869 * wm_tick:
2870 *
2871 * One second timer, used to check link status, sweep up
2872 * completed transmit jobs, etc.
2873 */
2874 static void
2875 wm_tick(void *arg)
2876 {
2877 struct wm_softc *sc = arg;
2878 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2879 #ifndef WM_MPSAFE
2880 int s;
2881
2882 s = splnet();
2883 #endif
2884
2885 WM_TX_LOCK(sc);
2886
2887 if (sc->sc_stopping)
2888 goto out;
2889
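	/*
	 * The MAC statistics registers are clear-on-read, so just
	 * accumulate their current values into the event counters
	 * and interface statistics.
	 */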
2890 if (sc->sc_type >= WM_T_82542_2_1) {
2891 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2892 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2893 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2894 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2895 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2896 }
2897
2898 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2899 	ifp->if_ierrors += 0ULL /* ensure quad_t */
2900 + CSR_READ(sc, WMREG_CRCERRS)
2901 + CSR_READ(sc, WMREG_ALGNERRC)
2902 + CSR_READ(sc, WMREG_SYMERRC)
2903 + CSR_READ(sc, WMREG_RXERRC)
2904 + CSR_READ(sc, WMREG_SEC)
2905 + CSR_READ(sc, WMREG_CEXTERR)
2906 + CSR_READ(sc, WMREG_RLEC);
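	/* MPC (missed packets) and RNBC (Rx no buffers) count dropped frames */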
2907 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2908
2909 if (sc->sc_flags & WM_F_HAS_MII)
2910 mii_tick(&sc->sc_mii);
2911 else if ((sc->sc_type >= WM_T_82575)
2912 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2913 wm_serdes_tick(sc);
2914 else
2915 wm_tbi_tick(sc);
2916
2917 out:
2918 WM_TX_UNLOCK(sc);
2919 #ifndef WM_MPSAFE
2920 splx(s);
2921 #endif
2922
2923 if (!sc->sc_stopping)
2924 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2925 }
2926
2927 static int
2928 wm_ifflags_cb(struct ethercom *ec)
2929 {
2930 struct ifnet *ifp = &ec->ec_if;
2931 struct wm_softc *sc = ifp->if_softc;
2932 int change = ifp->if_flags ^ sc->sc_if_flags;
2933 int rc = 0;
2934
2935 WM_BOTH_LOCK(sc);
2936
2937 if (change != 0)
2938 sc->sc_if_flags = ifp->if_flags;
2939
2940 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2941 rc = ENETRESET;
2942 goto out;
2943 }
2944
2945 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2946 wm_set_filter(sc);
2947
2948 wm_set_vlan(sc);
2949
2950 out:
2951 WM_BOTH_UNLOCK(sc);
2952
2953 return rc;
2954 }
2955
2956 /*
2957 * wm_ioctl: [ifnet interface function]
2958 *
2959 * Handle control requests from the operator.
2960 */
2961 static int
2962 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2963 {
2964 struct wm_softc *sc = ifp->if_softc;
2965 struct ifreq *ifr = (struct ifreq *) data;
2966 struct ifaddr *ifa = (struct ifaddr *)data;
2967 struct sockaddr_dl *sdl;
2968 int s, error;
2969
2970 #ifndef WM_MPSAFE
2971 s = splnet();
2972 #endif
2973 switch (cmd) {
2974 case SIOCSIFMEDIA:
2975 case SIOCGIFMEDIA:
2976 WM_BOTH_LOCK(sc);
2977 /* Flow control requires full-duplex mode. */
2978 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2979 (ifr->ifr_media & IFM_FDX) == 0)
2980 ifr->ifr_media &= ~IFM_ETH_FMASK;
2981 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2982 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2983 /* We can do both TXPAUSE and RXPAUSE. */
2984 ifr->ifr_media |=
2985 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2986 }
2987 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2988 }
2989 WM_BOTH_UNLOCK(sc);
2990 #ifdef WM_MPSAFE
2991 s = splnet();
2992 #endif
2993 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2994 #ifdef WM_MPSAFE
2995 splx(s);
2996 #endif
2997 break;
2998 case SIOCINITIFADDR:
2999 WM_BOTH_LOCK(sc);
3000 if (ifa->ifa_addr->sa_family == AF_LINK) {
3001 sdl = satosdl(ifp->if_dl->ifa_addr);
3002 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3003 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3004 /* unicast address is first multicast entry */
3005 wm_set_filter(sc);
3006 error = 0;
3007 WM_BOTH_UNLOCK(sc);
3008 break;
3009 }
3010 WM_BOTH_UNLOCK(sc);
3011 /*FALLTHROUGH*/
3012 default:
3013 #ifdef WM_MPSAFE
3014 s = splnet();
3015 #endif
3016 /* It may call wm_start, so unlock here */
3017 error = ether_ioctl(ifp, cmd, data);
3018 #ifdef WM_MPSAFE
3019 splx(s);
3020 #endif
3021 if (error != ENETRESET)
3022 break;
3023
3024 error = 0;
3025
3026 if (cmd == SIOCSIFCAP) {
3027 error = (*ifp->if_init)(ifp);
3028 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3029 ;
3030 else if (ifp->if_flags & IFF_RUNNING) {
3031 /*
3032 * Multicast list has changed; set the hardware filter
3033 * accordingly.
3034 */
3035 WM_BOTH_LOCK(sc);
3036 wm_set_filter(sc);
3037 WM_BOTH_UNLOCK(sc);
3038 }
3039 break;
3040 }
3041
3042 #ifndef WM_MPSAFE
3043 splx(s);
3044 #endif
3045 return error;
3046 }
3047
3048 /* MAC address related */
3049
3050 /*
3051  * Get the offset of the MAC address and return it.
3052  * If an error occurs, offset 0 is used.
3053 */
3054 static uint16_t
3055 wm_check_alt_mac_addr(struct wm_softc *sc)
3056 {
3057 uint16_t myea[ETHER_ADDR_LEN / 2];
3058 uint16_t offset = NVM_OFF_MACADDR;
3059
3060 /* Try to read alternative MAC address pointer */
3061 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3062 return 0;
3063
3064 	/* Check whether the pointer is valid. */
3065 if ((offset == 0x0000) || (offset == 0xffff))
3066 return 0;
3067
3068 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3069 /*
3070 	 * Check whether the alternative MAC address is valid.
3071 	 * Some cards have a non-0xffff pointer but don't actually
3072 	 * use an alternative MAC address.
3073 	 *
3074 	 * A valid address must not have the broadcast bit set.
3075 */
3076 if (wm_nvm_read(sc, offset, 1, myea) == 0)
3077 if (((myea[0] & 0xff) & 0x01) == 0)
3078 return offset; /* Found */
3079
3080 /* Not found */
3081 return 0;
3082 }
3083
3084 static int
3085 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3086 {
3087 uint16_t myea[ETHER_ADDR_LEN / 2];
3088 uint16_t offset = NVM_OFF_MACADDR;
3089 int do_invert = 0;
3090
3091 switch (sc->sc_type) {
3092 case WM_T_82580:
3093 case WM_T_I350:
3094 case WM_T_I354:
3095 /* EEPROM Top Level Partitioning */
3096 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3097 break;
3098 case WM_T_82571:
3099 case WM_T_82575:
3100 case WM_T_82576:
3101 case WM_T_80003:
3102 case WM_T_I210:
3103 case WM_T_I211:
3104 offset = wm_check_alt_mac_addr(sc);
3105 if (offset == 0)
3106 if ((sc->sc_funcid & 0x01) == 1)
3107 do_invert = 1;
3108 break;
3109 default:
3110 if ((sc->sc_funcid & 0x01) == 1)
3111 do_invert = 1;
3112 break;
3113 }
3114
3115 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
3116 myea) != 0)
3117 goto bad;
3118
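	/* Each 16-bit NVM word holds two octets, first octet in the low byte */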
3119 enaddr[0] = myea[0] & 0xff;
3120 enaddr[1] = myea[0] >> 8;
3121 enaddr[2] = myea[1] & 0xff;
3122 enaddr[3] = myea[1] >> 8;
3123 enaddr[4] = myea[2] & 0xff;
3124 enaddr[5] = myea[2] >> 8;
3125
3126 /*
3127 * Toggle the LSB of the MAC address on the second port
3128 * of some dual port cards.
3129 */
3130 if (do_invert != 0)
3131 enaddr[5] ^= 1;
3132
3133 return 0;
3134
3135 bad:
3136 return -1;
3137 }
3138
3139 /*
3140 * wm_set_ral:
3141 *
3142  *	Set an entry in the receive address list.
3143 */
3144 static void
3145 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3146 {
3147 uint32_t ral_lo, ral_hi;
3148
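	/* An all-zero entry (with RAL_AV clear) disables the slot */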
3149 if (enaddr != NULL) {
3150 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3151 (enaddr[3] << 24);
3152 ral_hi = enaddr[4] | (enaddr[5] << 8);
3153 ral_hi |= RAL_AV;
3154 } else {
3155 ral_lo = 0;
3156 ral_hi = 0;
3157 }
3158
3159 if (sc->sc_type >= WM_T_82544) {
3160 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3161 ral_lo);
3162 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3163 ral_hi);
3164 } else {
3165 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3166 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3167 }
3168 }
3169
3170 /*
3171 * wm_mchash:
3172 *
3173 * Compute the hash of the multicast address for the 4096-bit
3174 * multicast filter.
3175 */
3176 static uint32_t
3177 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3178 {
3179 static const int lo_shift[4] = { 4, 3, 2, 0 };
3180 static const int hi_shift[4] = { 4, 5, 6, 8 };
3181 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3182 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3183 uint32_t hash;
3184
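	/*
	 * sc_mchash_type selects which bits of the two high address
	 * bytes form the filter hash: 12 bits (4096-bit table) on
	 * most chips, 10 bits (1024-bit table) on ICH/PCH.
	 */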
3185 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3186 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3187 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3188 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3189 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3190 return (hash & 0x3ff);
3191 }
3192 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3193 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3194
3195 return (hash & 0xfff);
3196 }
3197
3198 /*
3199 * wm_set_filter:
3200 *
3201 * Set up the receive filter.
3202 */
3203 static void
3204 wm_set_filter(struct wm_softc *sc)
3205 {
3206 struct ethercom *ec = &sc->sc_ethercom;
3207 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3208 struct ether_multi *enm;
3209 struct ether_multistep step;
3210 bus_addr_t mta_reg;
3211 uint32_t hash, reg, bit;
3212 int i, size;
3213
3214 if (sc->sc_type >= WM_T_82544)
3215 mta_reg = WMREG_CORDOVA_MTA;
3216 else
3217 mta_reg = WMREG_MTA;
3218
3219 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3220
3221 if (ifp->if_flags & IFF_BROADCAST)
3222 sc->sc_rctl |= RCTL_BAM;
3223 if (ifp->if_flags & IFF_PROMISC) {
3224 sc->sc_rctl |= RCTL_UPE;
3225 goto allmulti;
3226 }
3227
3228 /*
3229 * Set the station address in the first RAL slot, and
3230 * clear the remaining slots.
3231 */
3232 if (sc->sc_type == WM_T_ICH8)
3233 		size = WM_RAL_TABSIZE_ICH8 - 1;
3234 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3235 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3236 || (sc->sc_type == WM_T_PCH_LPT))
3237 size = WM_RAL_TABSIZE_ICH8;
3238 else if (sc->sc_type == WM_T_82575)
3239 size = WM_RAL_TABSIZE_82575;
3240 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3241 size = WM_RAL_TABSIZE_82576;
3242 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3243 size = WM_RAL_TABSIZE_I350;
3244 else
3245 size = WM_RAL_TABSIZE;
3246 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3247 for (i = 1; i < size; i++)
3248 wm_set_ral(sc, NULL, i);
3249
3250 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3251 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3252 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3253 size = WM_ICH8_MC_TABSIZE;
3254 else
3255 size = WM_MC_TABSIZE;
3256 /* Clear out the multicast table. */
3257 for (i = 0; i < size; i++)
3258 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3259
3260 ETHER_FIRST_MULTI(step, ec, enm);
3261 while (enm != NULL) {
3262 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3263 /*
3264 * We must listen to a range of multicast addresses.
3265 * For now, just accept all multicasts, rather than
3266 * trying to set only those filter bits needed to match
3267 * the range. (At this time, the only use of address
3268 * ranges is for IP multicast routing, for which the
3269 * range is big enough to require all bits set.)
3270 */
3271 goto allmulti;
3272 }
3273
3274 hash = wm_mchash(sc, enm->enm_addrlo);
3275
3276 reg = (hash >> 5);
3277 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3278 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3279 || (sc->sc_type == WM_T_PCH2)
3280 || (sc->sc_type == WM_T_PCH_LPT))
3281 reg &= 0x1f;
3282 else
3283 reg &= 0x7f;
3284 bit = hash & 0x1f;
3285
3286 hash = CSR_READ(sc, mta_reg + (reg << 2));
3287 hash |= 1U << bit;
3288
3289 		/* XXX Hardware bug(?): on the 82544, writes to some MTA offsets apparently must be followed by a rewrite of the previous register, hence the double write */
3290 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3291 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3292 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3293 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3294 } else
3295 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3296
3297 ETHER_NEXT_MULTI(step, enm);
3298 }
3299
3300 ifp->if_flags &= ~IFF_ALLMULTI;
3301 goto setit;
3302
3303 allmulti:
3304 ifp->if_flags |= IFF_ALLMULTI;
3305 sc->sc_rctl |= RCTL_MPE;
3306
3307 setit:
3308 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3309 }
3310
3311 /* Reset and init related */
3312
3313 static void
3314 wm_set_vlan(struct wm_softc *sc)
3315 {
3316 /* Deal with VLAN enables. */
3317 if (VLAN_ATTACHED(&sc->sc_ethercom))
3318 sc->sc_ctrl |= CTRL_VME;
3319 else
3320 sc->sc_ctrl &= ~CTRL_VME;
3321
3322 /* Write the control registers. */
3323 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3324 }
3325
3326 static void
3327 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3328 {
3329 uint32_t gcr;
3330 pcireg_t ctrl2;
3331
3332 gcr = CSR_READ(sc, WMREG_GCR);
3333
3334 /* Only take action if timeout value is defaulted to 0 */
3335 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3336 goto out;
3337
3338 if ((gcr & GCR_CAP_VER2) == 0) {
3339 gcr |= GCR_CMPL_TMOUT_10MS;
3340 goto out;
3341 }
3342
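	/*
	 * Capability version 2 parts program the timeout through the
	 * standard PCIe Device Control 2 register instead of the GCR
	 * shadow used above.
	 */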
3343 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3344 sc->sc_pcixe_capoff + PCIE_DCSR2);
3345 ctrl2 |= WM_PCIE_DCSR2_16MS;
3346 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3347 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3348
3349 out:
3350 /* Disable completion timeout resend */
3351 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3352
3353 CSR_WRITE(sc, WMREG_GCR, gcr);
3354 }
3355
3356 void
3357 wm_get_auto_rd_done(struct wm_softc *sc)
3358 {
3359 int i;
3360
3361 /* wait for eeprom to reload */
3362 switch (sc->sc_type) {
3363 case WM_T_82571:
3364 case WM_T_82572:
3365 case WM_T_82573:
3366 case WM_T_82574:
3367 case WM_T_82583:
3368 case WM_T_82575:
3369 case WM_T_82576:
3370 case WM_T_82580:
3371 case WM_T_I350:
3372 case WM_T_I354:
3373 case WM_T_I210:
3374 case WM_T_I211:
3375 case WM_T_80003:
3376 case WM_T_ICH8:
3377 case WM_T_ICH9:
3378 for (i = 0; i < 10; i++) {
3379 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3380 break;
3381 delay(1000);
3382 }
3383 if (i == 10) {
3384 log(LOG_ERR, "%s: auto read from eeprom failed to "
3385 "complete\n", device_xname(sc->sc_dev));
3386 }
3387 break;
3388 default:
3389 break;
3390 }
3391 }
3392
3393 void
3394 wm_lan_init_done(struct wm_softc *sc)
3395 {
3396 uint32_t reg = 0;
3397 int i;
3398
3399 /* wait for eeprom to reload */
3400 switch (sc->sc_type) {
3401 case WM_T_ICH10:
3402 case WM_T_PCH:
3403 case WM_T_PCH2:
3404 case WM_T_PCH_LPT:
3405 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3406 reg = CSR_READ(sc, WMREG_STATUS);
3407 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3408 break;
3409 delay(100);
3410 }
3411 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3412 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3413 "complete\n", device_xname(sc->sc_dev), __func__);
3414 }
3415 break;
3416 default:
3417 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3418 __func__);
3419 break;
3420 }
3421
3422 reg &= ~STATUS_LAN_INIT_DONE;
3423 CSR_WRITE(sc, WMREG_STATUS, reg);
3424 }
3425
3426 void
3427 wm_get_cfg_done(struct wm_softc *sc)
3428 {
3429 int mask;
3430 uint32_t reg;
3431 int i;
3432
3433 /* wait for eeprom to reload */
3434 switch (sc->sc_type) {
3435 case WM_T_82542_2_0:
3436 case WM_T_82542_2_1:
3437 /* null */
3438 break;
3439 case WM_T_82543:
3440 case WM_T_82544:
3441 case WM_T_82540:
3442 case WM_T_82545:
3443 case WM_T_82545_3:
3444 case WM_T_82546:
3445 case WM_T_82546_3:
3446 case WM_T_82541:
3447 case WM_T_82541_2:
3448 case WM_T_82547:
3449 case WM_T_82547_2:
3450 case WM_T_82573:
3451 case WM_T_82574:
3452 case WM_T_82583:
3453 /* generic */
3454 delay(10*1000);
3455 break;
3456 case WM_T_80003:
3457 case WM_T_82571:
3458 case WM_T_82572:
3459 case WM_T_82575:
3460 case WM_T_82576:
3461 case WM_T_82580:
3462 case WM_T_I350:
3463 case WM_T_I354:
3464 case WM_T_I210:
3465 case WM_T_I211:
3466 if (sc->sc_type == WM_T_82571) {
3467 			/* The 82571 reports config-done only via the port 0 bit */
3468 mask = EEMNGCTL_CFGDONE_0;
3469 } else
3470 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3471 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3472 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3473 break;
3474 delay(1000);
3475 }
3476 if (i >= WM_PHY_CFG_TIMEOUT) {
3477 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3478 device_xname(sc->sc_dev), __func__));
3479 }
3480 break;
3481 case WM_T_ICH8:
3482 case WM_T_ICH9:
3483 case WM_T_ICH10:
3484 case WM_T_PCH:
3485 case WM_T_PCH2:
3486 case WM_T_PCH_LPT:
3487 delay(10*1000);
3488 if (sc->sc_type >= WM_T_ICH10)
3489 wm_lan_init_done(sc);
3490 else
3491 wm_get_auto_rd_done(sc);
3492
3493 reg = CSR_READ(sc, WMREG_STATUS);
3494 if ((reg & STATUS_PHYRA) != 0)
3495 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3496 break;
3497 default:
3498 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3499 __func__);
3500 break;
3501 }
3502 }
3503
3504 /* Init hardware bits */
3505 void
3506 wm_initialize_hardware_bits(struct wm_softc *sc)
3507 {
3508 uint32_t tarc0, tarc1, reg;
3509
3510 /* For 82571 variant, 80003 and ICHs */
3511 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3512 || (sc->sc_type >= WM_T_80003)) {
3513
3514 /* Transmit Descriptor Control 0 */
3515 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3516 reg |= TXDCTL_COUNT_DESC;
3517 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3518
3519 /* Transmit Descriptor Control 1 */
3520 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3521 reg |= TXDCTL_COUNT_DESC;
3522 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3523
3524 /* TARC0 */
3525 tarc0 = CSR_READ(sc, WMREG_TARC0);
3526 switch (sc->sc_type) {
3527 case WM_T_82571:
3528 case WM_T_82572:
3529 case WM_T_82573:
3530 case WM_T_82574:
3531 case WM_T_82583:
3532 case WM_T_80003:
3533 /* Clear bits 30..27 */
3534 tarc0 &= ~__BITS(30, 27);
3535 break;
3536 default:
3537 break;
3538 }
3539
3540 switch (sc->sc_type) {
3541 case WM_T_82571:
3542 case WM_T_82572:
3543 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3544
3545 tarc1 = CSR_READ(sc, WMREG_TARC1);
3546 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3547 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3548 /* 8257[12] Errata No.7 */
3549 			tarc1 |= __BIT(22);	/* TARC1 bit 22 */
3550
3551 /* TARC1 bit 28 */
3552 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3553 tarc1 &= ~__BIT(28);
3554 else
3555 tarc1 |= __BIT(28);
3556 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3557
3558 /*
3559 * 8257[12] Errata No.13
3560 			 * Disable Dynamic Clock Gating.
3561 */
3562 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3563 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3564 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3565 break;
3566 case WM_T_82573:
3567 case WM_T_82574:
3568 case WM_T_82583:
3569 if ((sc->sc_type == WM_T_82574)
3570 || (sc->sc_type == WM_T_82583))
3571 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3572
3573 /* Extended Device Control */
3574 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3575 reg &= ~__BIT(23); /* Clear bit 23 */
3576 reg |= __BIT(22); /* Set bit 22 */
3577 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3578
3579 /* Device Control */
3580 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3581 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3582
3583 /* PCIe Control Register */
3584 /*
3585 * 82573 Errata (unknown).
3586 *
3587 * 82574 Errata 25 and 82583 Errata 12
3588 * "Dropped Rx Packets":
3589 			 * NVM image version 2.1.4 and newer do not have this bug.
3590 */
3591 reg = CSR_READ(sc, WMREG_GCR);
3592 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3593 CSR_WRITE(sc, WMREG_GCR, reg);
3594
3595 if ((sc->sc_type == WM_T_82574)
3596 || (sc->sc_type == WM_T_82583)) {
3597 /*
3598 * Document says this bit must be set for
3599 * proper operation.
3600 */
3601 reg = CSR_READ(sc, WMREG_GCR);
3602 reg |= __BIT(22);
3603 CSR_WRITE(sc, WMREG_GCR, reg);
3604
3605 /*
3606 				 * Apply the workaround for the hardware errata
3607 				 * documented in the errata sheets: it fixes an
3608 				 * issue where some error-prone or unreliable
3609 				 * PCIe completions occur, particularly with
3610 				 * ASPM enabled. Without the fix, the issue can
3611 				 * cause Tx timeouts.
3612 */
3613 reg = CSR_READ(sc, WMREG_GCR2);
3614 reg |= __BIT(0);
3615 CSR_WRITE(sc, WMREG_GCR2, reg);
3616 }
3617 break;
3618 case WM_T_80003:
3619 /* TARC0 */
3620 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3621 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3622 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3623
3624 /* TARC1 bit 28 */
3625 tarc1 = CSR_READ(sc, WMREG_TARC1);
3626 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3627 tarc1 &= ~__BIT(28);
3628 else
3629 tarc1 |= __BIT(28);
3630 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3631 break;
3632 case WM_T_ICH8:
3633 case WM_T_ICH9:
3634 case WM_T_ICH10:
3635 case WM_T_PCH:
3636 case WM_T_PCH2:
3637 case WM_T_PCH_LPT:
3638 /* TARC 0 */
3639 if (sc->sc_type == WM_T_ICH8) {
3640 /* Set TARC0 bits 29 and 28 */
3641 tarc0 |= __BITS(29, 28);
3642 }
3643 /* Set TARC0 bits 23,24,26,27 */
3644 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3645
3646 /* CTRL_EXT */
3647 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3648 reg |= __BIT(22); /* Set bit 22 */
3649 /*
3650 * Enable PHY low-power state when MAC is at D3
3651 * w/o WoL
3652 */
3653 if (sc->sc_type >= WM_T_PCH)
3654 reg |= CTRL_EXT_PHYPDEN;
3655 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3656
3657 /* TARC1 */
3658 tarc1 = CSR_READ(sc, WMREG_TARC1);
3659 /* bit 28 */
3660 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3661 tarc1 &= ~__BIT(28);
3662 else
3663 tarc1 |= __BIT(28);
3664 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3665 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3666
3667 /* Device Status */
3668 if (sc->sc_type == WM_T_ICH8) {
3669 reg = CSR_READ(sc, WMREG_STATUS);
3670 reg &= ~__BIT(31);
3671 CSR_WRITE(sc, WMREG_STATUS, reg);
3672
3673 }
3674
3675 /*
3676 			 * To work around a descriptor data corruption issue
3677 			 * seen with NFS v2 UDP traffic, simply disable the
3678 			 * NFS filtering capability.
3679 */
3680 reg = CSR_READ(sc, WMREG_RFCTL);
3681 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3682 CSR_WRITE(sc, WMREG_RFCTL, reg);
3683 break;
3684 default:
3685 break;
3686 }
3687 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3688
3689 /*
3690 * 8257[12] Errata No.52 and some others.
3691 * Avoid RSS Hash Value bug.
3692 */
3693 switch (sc->sc_type) {
3694 case WM_T_82571:
3695 case WM_T_82572:
3696 case WM_T_82573:
3697 case WM_T_80003:
3698 case WM_T_ICH8:
3699 reg = CSR_READ(sc, WMREG_RFCTL);
3700 reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3701 CSR_WRITE(sc, WMREG_RFCTL, reg);
3702 break;
3703 default:
3704 break;
3705 }
3706 }
3707 }
3708
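/*
 * wm_rxpbs_adjust_82580:
 *
 * Map a raw RXPBS register value to a packet buffer size via the
 * 82580 lookup table; out-of-range values map to 0.
 */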
3709 static uint32_t
3710 wm_rxpbs_adjust_82580(uint32_t val)
3711 {
3712 uint32_t rv = 0;
3713
3714 if (val < __arraycount(wm_82580_rxpbs_table))
3715 rv = wm_82580_rxpbs_table[val];
3716
3717 return rv;
3718 }
3719
3720 /*
3721 * wm_reset:
3722 *
3723 * Reset the i82542 chip.
3724 */
3725 static void
3726 wm_reset(struct wm_softc *sc)
3727 {
3728 int phy_reset = 0;
3729 int error = 0;
3730 uint32_t reg, mask;
3731
3732 /*
3733 * Allocate on-chip memory according to the MTU size.
3734 * The Packet Buffer Allocation register must be written
3735 * before the chip is reset.
3736 */
3737 switch (sc->sc_type) {
3738 case WM_T_82547:
3739 case WM_T_82547_2:
3740 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3741 PBA_22K : PBA_30K;
3742 sc->sc_txfifo_head = 0;
3743 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3744 sc->sc_txfifo_size =
3745 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3746 sc->sc_txfifo_stall = 0;
3747 break;
3748 case WM_T_82571:
3749 case WM_T_82572:
3750 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3751 case WM_T_80003:
3752 sc->sc_pba = PBA_32K;
3753 break;
3754 case WM_T_82573:
3755 sc->sc_pba = PBA_12K;
3756 break;
3757 case WM_T_82574:
3758 case WM_T_82583:
3759 sc->sc_pba = PBA_20K;
3760 break;
3761 case WM_T_82576:
3762 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3763 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3764 break;
3765 case WM_T_82580:
3766 case WM_T_I350:
3767 case WM_T_I354:
3768 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3769 break;
3770 case WM_T_I210:
3771 case WM_T_I211:
3772 sc->sc_pba = PBA_34K;
3773 break;
3774 case WM_T_ICH8:
3775 /* Workaround for a bit corruption issue in FIFO memory */
3776 sc->sc_pba = PBA_8K;
3777 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3778 break;
3779 case WM_T_ICH9:
3780 case WM_T_ICH10:
3781 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3782 PBA_14K : PBA_10K;
3783 break;
3784 case WM_T_PCH:
3785 case WM_T_PCH2:
3786 case WM_T_PCH_LPT:
3787 sc->sc_pba = PBA_26K;
3788 break;
3789 default:
3790 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3791 PBA_40K : PBA_48K;
3792 break;
3793 }
3794 /*
3795 * Only old or non-multiqueue devices have the PBA register
3796 * XXX Need special handling for 82575.
3797 */
3798 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3799 || (sc->sc_type == WM_T_82575))
3800 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3801
3802 /* Prevent the PCI-E bus from sticking */
3803 if (sc->sc_flags & WM_F_PCIE) {
3804 int timeout = 800;
3805
3806 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3807 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3808
3809 while (timeout--) {
3810 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3811 == 0)
3812 break;
3813 delay(100);
3814 }
3815 }
3816
3817 /* Set the completion timeout for interface */
3818 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3819 || (sc->sc_type == WM_T_82580)
3820 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3821 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3822 wm_set_pcie_completion_timeout(sc);
3823
3824 /* Clear interrupt */
3825 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3826 if (sc->sc_nintrs > 1) {
3827 if (sc->sc_type != WM_T_82574) {
3828 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3829 CSR_WRITE(sc, WMREG_EIAC, 0);
3830 } else {
3831 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3832 }
3833 }
3834
3835 /* Stop the transmit and receive processes. */
3836 CSR_WRITE(sc, WMREG_RCTL, 0);
3837 sc->sc_rctl &= ~RCTL_EN;
3838 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3839 CSR_WRITE_FLUSH(sc);
3840
3841 /* XXX set_tbi_sbp_82543() */
3842
3843 delay(10*1000);
3844
3845 /* Must acquire the MDIO ownership before MAC reset */
3846 switch (sc->sc_type) {
3847 case WM_T_82573:
3848 case WM_T_82574:
3849 case WM_T_82583:
3850 error = wm_get_hw_semaphore_82573(sc);
3851 break;
3852 default:
3853 break;
3854 }
3855
3856 /*
3857 * 82541 Errata 29? & 82547 Errata 28?
3858 * See also the description about PHY_RST bit in CTRL register
3859 * in 8254x_GBe_SDM.pdf.
3860 */
3861 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3862 CSR_WRITE(sc, WMREG_CTRL,
3863 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3864 CSR_WRITE_FLUSH(sc);
3865 delay(5000);
3866 }
3867
3868 switch (sc->sc_type) {
3869 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3870 case WM_T_82541:
3871 case WM_T_82541_2:
3872 case WM_T_82547:
3873 case WM_T_82547_2:
3874 /*
3875 * On some chipsets, a reset through a memory-mapped write
3876 * cycle can cause the chip to reset before completing the
3877 		 * write cycle. This causes a major headache that can be
3878 * avoided by issuing the reset via indirect register writes
3879 * through I/O space.
3880 *
3881 * So, if we successfully mapped the I/O BAR at attach time,
3882 * use that. Otherwise, try our luck with a memory-mapped
3883 * reset.
3884 */
3885 if (sc->sc_flags & WM_F_IOH_VALID)
3886 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3887 else
3888 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3889 break;
3890 case WM_T_82545_3:
3891 case WM_T_82546_3:
3892 /* Use the shadow control register on these chips. */
3893 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3894 break;
3895 case WM_T_80003:
3896 mask = swfwphysem[sc->sc_funcid];
3897 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3898 wm_get_swfw_semaphore(sc, mask);
3899 CSR_WRITE(sc, WMREG_CTRL, reg);
3900 wm_put_swfw_semaphore(sc, mask);
3901 break;
3902 case WM_T_ICH8:
3903 case WM_T_ICH9:
3904 case WM_T_ICH10:
3905 case WM_T_PCH:
3906 case WM_T_PCH2:
3907 case WM_T_PCH_LPT:
3908 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3909 if (wm_check_reset_block(sc) == 0) {
3910 /*
3911 * Gate automatic PHY configuration by hardware on
3912 * non-managed 82579
3913 */
3914 if ((sc->sc_type == WM_T_PCH2)
3915 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3916 != 0))
3917 wm_gate_hw_phy_config_ich8lan(sc, 1);
3918
3919
3920 reg |= CTRL_PHY_RESET;
3921 phy_reset = 1;
3922 }
3923 wm_get_swfwhw_semaphore(sc);
3924 CSR_WRITE(sc, WMREG_CTRL, reg);
3925 		/* Don't insert a completion barrier during reset */
3926 delay(20*1000);
3927 wm_put_swfwhw_semaphore(sc);
3928 break;
3929 case WM_T_82580:
3930 case WM_T_I350:
3931 case WM_T_I354:
3932 case WM_T_I210:
3933 case WM_T_I211:
3934 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3935 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3936 CSR_WRITE_FLUSH(sc);
3937 delay(5000);
3938 break;
3939 case WM_T_82542_2_0:
3940 case WM_T_82542_2_1:
3941 case WM_T_82543:
3942 case WM_T_82540:
3943 case WM_T_82545:
3944 case WM_T_82546:
3945 case WM_T_82571:
3946 case WM_T_82572:
3947 case WM_T_82573:
3948 case WM_T_82574:
3949 case WM_T_82575:
3950 case WM_T_82576:
3951 case WM_T_82583:
3952 default:
3953 /* Everything else can safely use the documented method. */
3954 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3955 break;
3956 }
3957
3958 /* Must release the MDIO ownership after MAC reset */
3959 switch (sc->sc_type) {
3960 case WM_T_82573:
3961 case WM_T_82574:
3962 case WM_T_82583:
3963 if (error == 0)
3964 wm_put_hw_semaphore_82573(sc);
3965 break;
3966 default:
3967 break;
3968 }
3969
3970 if (phy_reset != 0)
3971 wm_get_cfg_done(sc);
3972
3973 /* reload EEPROM */
3974 switch (sc->sc_type) {
3975 case WM_T_82542_2_0:
3976 case WM_T_82542_2_1:
3977 case WM_T_82543:
3978 case WM_T_82544:
3979 delay(10);
3980 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3981 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3982 CSR_WRITE_FLUSH(sc);
3983 delay(2000);
3984 break;
3985 case WM_T_82540:
3986 case WM_T_82545:
3987 case WM_T_82545_3:
3988 case WM_T_82546:
3989 case WM_T_82546_3:
3990 delay(5*1000);
3991 /* XXX Disable HW ARPs on ASF enabled adapters */
3992 break;
3993 case WM_T_82541:
3994 case WM_T_82541_2:
3995 case WM_T_82547:
3996 case WM_T_82547_2:
3997 delay(20000);
3998 /* XXX Disable HW ARPs on ASF enabled adapters */
3999 break;
4000 case WM_T_82571:
4001 case WM_T_82572:
4002 case WM_T_82573:
4003 case WM_T_82574:
4004 case WM_T_82583:
4005 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4006 delay(10);
4007 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4008 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4009 CSR_WRITE_FLUSH(sc);
4010 }
4011 /* check EECD_EE_AUTORD */
4012 wm_get_auto_rd_done(sc);
4013 /*
4014 		 * PHY configuration from NVM starts just after EECD_AUTO_RD
4015 * is set.
4016 */
4017 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4018 || (sc->sc_type == WM_T_82583))
4019 delay(25*1000);
4020 break;
4021 case WM_T_82575:
4022 case WM_T_82576:
4023 case WM_T_82580:
4024 case WM_T_I350:
4025 case WM_T_I354:
4026 case WM_T_I210:
4027 case WM_T_I211:
4028 case WM_T_80003:
4029 /* check EECD_EE_AUTORD */
4030 wm_get_auto_rd_done(sc);
4031 break;
4032 case WM_T_ICH8:
4033 case WM_T_ICH9:
4034 case WM_T_ICH10:
4035 case WM_T_PCH:
4036 case WM_T_PCH2:
4037 case WM_T_PCH_LPT:
4038 break;
4039 default:
4040 panic("%s: unknown type\n", __func__);
4041 }
4042
4043 /* Check whether EEPROM is present or not */
4044 switch (sc->sc_type) {
4045 case WM_T_82575:
4046 case WM_T_82576:
4047 case WM_T_82580:
4048 case WM_T_I350:
4049 case WM_T_I354:
4050 case WM_T_ICH8:
4051 case WM_T_ICH9:
4052 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4053 /* Not found */
4054 sc->sc_flags |= WM_F_EEPROM_INVALID;
4055 if (sc->sc_type == WM_T_82575)
4056 wm_reset_init_script_82575(sc);
4057 }
4058 break;
4059 default:
4060 break;
4061 }
4062
4063 if ((sc->sc_type == WM_T_82580)
4064 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4065 /* clear global device reset status bit */
4066 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4067 }
4068
4069 /* Clear any pending interrupt events. */
4070 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4071 reg = CSR_READ(sc, WMREG_ICR);
4072 if (sc->sc_nintrs > 1) {
4073 if (sc->sc_type != WM_T_82574) {
4074 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4075 CSR_WRITE(sc, WMREG_EIAC, 0);
4076 } else
4077 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4078 }
4079
4080 /* reload sc_ctrl */
4081 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4082
4083 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4084 wm_set_eee_i350(sc);
4085
4086 /* dummy read from WUC */
4087 if (sc->sc_type == WM_T_PCH)
4088 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4089 /*
4090 * For PCH, this write will make sure that any noise will be detected
4091 * as a CRC error and be dropped rather than show up as a bad packet
4092 * to the DMA engine
4093 */
4094 if (sc->sc_type == WM_T_PCH)
4095 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4096
4097 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4098 CSR_WRITE(sc, WMREG_WUC, 0);
4099
4100 wm_reset_mdicnfg_82580(sc);
4101
4102 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4103 wm_pll_workaround_i210(sc);
4104 }
4105
4106 /*
4107 * wm_add_rxbuf:
4108 *
4109  *	Add a receive buffer to the indicated descriptor.
4110 */
4111 static int
4112 wm_add_rxbuf(struct wm_softc *sc, int idx)
4113 {
4114 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4115 struct mbuf *m;
4116 int error;
4117
4118 KASSERT(WM_RX_LOCKED(sc));
4119
4120 MGETHDR(m, M_DONTWAIT, MT_DATA);
4121 if (m == NULL)
4122 return ENOBUFS;
4123
4124 MCLGET(m, M_DONTWAIT);
4125 if ((m->m_flags & M_EXT) == 0) {
4126 m_freem(m);
4127 return ENOBUFS;
4128 }
4129
4130 if (rxs->rxs_mbuf != NULL)
4131 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4132
4133 rxs->rxs_mbuf = m;
4134
4135 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4136 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4137 BUS_DMA_READ|BUS_DMA_NOWAIT);
4138 if (error) {
4139 /* XXX XXX XXX */
4140 aprint_error_dev(sc->sc_dev,
4141 "unable to load rx DMA map %d, error = %d\n",
4142 idx, error);
4143 panic("wm_add_rxbuf");
4144 }
4145
4146 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4147 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4148
4149 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4150 if ((sc->sc_rctl & RCTL_EN) != 0)
4151 WM_INIT_RXDESC(sc, idx);
4152 } else
4153 WM_INIT_RXDESC(sc, idx);
4154
4155 return 0;
4156 }
4157
4158 /*
4159 * wm_rxdrain:
4160 *
4161 * Drain the receive queue.
4162 */
4163 static void
4164 wm_rxdrain(struct wm_softc *sc)
4165 {
4166 struct wm_rxsoft *rxs;
4167 int i;
4168
4169 KASSERT(WM_RX_LOCKED(sc));
4170
4171 for (i = 0; i < WM_NRXDESC; i++) {
4172 rxs = &sc->sc_rxsoft[i];
4173 if (rxs->rxs_mbuf != NULL) {
4174 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4175 m_freem(rxs->rxs_mbuf);
4176 rxs->rxs_mbuf = NULL;
4177 }
4178 }
4179 }
4180
4181 /*
4182 * wm_init: [ifnet interface function]
4183 *
4184 * Initialize the interface.
4185 */
4186 static int
4187 wm_init(struct ifnet *ifp)
4188 {
4189 struct wm_softc *sc = ifp->if_softc;
4190 int ret;
4191
4192 WM_BOTH_LOCK(sc);
4193 ret = wm_init_locked(ifp);
4194 WM_BOTH_UNLOCK(sc);
4195
4196 return ret;
4197 }
4198
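/*
 * wm_init_locked:
 *
 * Guts of wm_init(). Runs with both the TX and RX locks held.
 */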
4199 static int
4200 wm_init_locked(struct ifnet *ifp)
4201 {
4202 struct wm_softc *sc = ifp->if_softc;
4203 struct wm_rxsoft *rxs;
4204 int i, j, trynum, error = 0;
4205 uint32_t reg;
4206
4207 KASSERT(WM_BOTH_LOCKED(sc));
4208 /*
4209 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4210 	 * There is a small but measurable benefit to avoiding the adjustment
4211 	 * of the descriptor so that the headers are aligned, for normal MTU,
4212 * on such platforms. One possibility is that the DMA itself is
4213 * slightly more efficient if the front of the entire packet (instead
4214 * of the front of the headers) is aligned.
4215 *
4216 * Note we must always set align_tweak to 0 if we are using
4217 * jumbo frames.
4218 */
4219 #ifdef __NO_STRICT_ALIGNMENT
4220 sc->sc_align_tweak = 0;
4221 #else
4222 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4223 sc->sc_align_tweak = 0;
4224 else
4225 sc->sc_align_tweak = 2;
4226 #endif /* __NO_STRICT_ALIGNMENT */
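/*
 * For illustration: an Ethernet header is ETHER_HDR_LEN (14) bytes,
 * so with sc_align_tweak = 2 the IP header that follows it starts at
 * offset 2 + 14 = 16 into the receive buffer, i.e. 4-byte aligned,
 * which is what strict-alignment platforms require.
 */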
4227
4228 /* Cancel any pending I/O. */
4229 wm_stop_locked(ifp, 0);
4230
4231 /* update statistics before reset */
4232 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4233 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4234
4235 /* Reset the chip to a known state. */
4236 wm_reset(sc);
4237
4238 switch (sc->sc_type) {
4239 case WM_T_82571:
4240 case WM_T_82572:
4241 case WM_T_82573:
4242 case WM_T_82574:
4243 case WM_T_82583:
4244 case WM_T_80003:
4245 case WM_T_ICH8:
4246 case WM_T_ICH9:
4247 case WM_T_ICH10:
4248 case WM_T_PCH:
4249 case WM_T_PCH2:
4250 case WM_T_PCH_LPT:
4251 if (wm_check_mng_mode(sc) != 0)
4252 wm_get_hw_control(sc);
4253 break;
4254 default:
4255 break;
4256 }
4257
4258 /* Init hardware bits */
4259 wm_initialize_hardware_bits(sc);
4260
4261 /* Reset the PHY. */
4262 if (sc->sc_flags & WM_F_HAS_MII)
4263 wm_gmii_reset(sc);
4264
4265 /* Calculate (E)ITR value */
4266 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4267 sc->sc_itr = 450; /* For EITR */
4268 } else if (sc->sc_type >= WM_T_82543) {
4269 /*
4270 * Set up the interrupt throttling register (units of 256ns)
4271 * Note that a footnote in Intel's documentation says this
4272 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4273 * or 10Mbit mode. Empirically, it appears to be the case
4274 * that that is also true for the 1024ns units of the other
4275 * interrupt-related timer registers -- so, really, we ought
4276 * to divide this value by 4 when the link speed is low.
4277 *
4278 * XXX implement this division at link speed change!
4279 */
4280
4281 /*
4282 * For N interrupts/sec, set this value to:
4283 * 1000000000 / (N * 256). Note that we set the
4284 * absolute and packet timer values to this value
4285 * divided by 4 to get "simple timer" behavior.
4286 */
4287
4288 sc->sc_itr = 1500; /* 2604 ints/sec */
4289 }
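	/*
	 * A sketch of the arithmetic above: sc_itr = 1500 gives
	 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, and for a
	 * target of N interrupts/sec one would program
	 * sc_itr = 1000000000 / (N * 256).
	 */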
4290
4291 /* Initialize the transmit descriptor ring. */
4292 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4293 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4294 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4295 sc->sc_txfree = WM_NTXDESC(sc);
4296 sc->sc_txnext = 0;
4297
4298 if (sc->sc_type < WM_T_82543) {
4299 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4300 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4301 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4302 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4303 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4304 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4305 } else {
4306 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4307 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4308 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4309 CSR_WRITE(sc, WMREG_TDH, 0);
4310
4311 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4312 /*
4313 * Don't write TDT before TCTL.EN is set.
4314 			 * See the documentation.
4315 */
4316 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
4317 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4318 | TXDCTL_WTHRESH(0));
4319 else {
4320 /* ITR / 4 */
4321 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
4322 if (sc->sc_type >= WM_T_82540) {
4323 /* should be same */
4324 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
4325 }
4326
4327 CSR_WRITE(sc, WMREG_TDT, 0);
4328 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
4329 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4330 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4331 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4332 }
4333 }
4334
4335 /* Initialize the transmit job descriptors. */
4336 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4337 sc->sc_txsoft[i].txs_mbuf = NULL;
4338 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4339 sc->sc_txsnext = 0;
4340 sc->sc_txsdirty = 0;
4341
4342 /*
4343 * Initialize the receive descriptor and receive job
4344 * descriptor rings.
4345 */
4346 if (sc->sc_type < WM_T_82543) {
4347 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4348 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4349 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4350 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4351 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4352 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4353
4354 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4355 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4356 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4357 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4358 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4359 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4360 } else {
4361 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4362 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4363 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4364
4365 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4366 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4367 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
4368 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4369 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4370 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4371 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4372 | RXDCTL_WTHRESH(1));
4373 } else {
4374 CSR_WRITE(sc, WMREG_RDH, 0);
4375 CSR_WRITE(sc, WMREG_RDT, 0);
4376 /* ITR/4 */
4377 CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
4378 /* MUST be same */
4379 CSR_WRITE(sc, WMREG_RADV, sc->sc_itr);
4380 }
4381 }
4382 for (i = 0; i < WM_NRXDESC; i++) {
4383 rxs = &sc->sc_rxsoft[i];
4384 if (rxs->rxs_mbuf == NULL) {
4385 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4386 log(LOG_ERR, "%s: unable to allocate or map "
4387 "rx buffer %d, error = %d\n",
4388 device_xname(sc->sc_dev), i, error);
4389 /*
4390 * XXX Should attempt to run with fewer receive
4391 * XXX buffers instead of just failing.
4392 */
4393 wm_rxdrain(sc);
4394 goto out;
4395 }
4396 } else {
4397 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4398 WM_INIT_RXDESC(sc, i);
4399 /*
4400 			 * For 82575 and newer devices, the RX descriptors
4401 * must be initialized after the setting of RCTL.EN in
4402 * wm_set_filter()
4403 */
4404 }
4405 }
4406 sc->sc_rxptr = 0;
4407 sc->sc_rxdiscard = 0;
4408 WM_RXCHAIN_RESET(sc);
4409
4410 /*
4411 * Clear out the VLAN table -- we don't use it (yet).
4412 */
4413 CSR_WRITE(sc, WMREG_VET, 0);
4414 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4415 trynum = 10; /* Due to hw errata */
4416 else
4417 trynum = 1;
4418 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4419 for (j = 0; j < trynum; j++)
4420 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4421
4422 /*
4423 * Set up flow-control parameters.
4424 *
4425 * XXX Values could probably stand some tuning.
4426 */
4427 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4428 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4429 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4430 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4431 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4432 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4433 }
4434
4435 sc->sc_fcrtl = FCRTL_DFLT;
4436 if (sc->sc_type < WM_T_82543) {
4437 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4438 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4439 } else {
4440 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4441 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4442 }
4443
4444 if (sc->sc_type == WM_T_80003)
4445 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4446 else
4447 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4448
4449 /* Writes the control register. */
4450 wm_set_vlan(sc);
4451
4452 if (sc->sc_flags & WM_F_HAS_MII) {
4453 int val;
4454
4455 switch (sc->sc_type) {
4456 case WM_T_80003:
4457 case WM_T_ICH8:
4458 case WM_T_ICH9:
4459 case WM_T_ICH10:
4460 case WM_T_PCH:
4461 case WM_T_PCH2:
4462 case WM_T_PCH_LPT:
4463 /*
4464 * Set the mac to wait the maximum time between each
4465 * iteration and increase the max iterations when
4466 * polling the phy; this fixes erroneous timeouts at
4467 * 10Mbps.
4468 */
4469 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4470 0xFFFF);
4471 val = wm_kmrn_readreg(sc,
4472 KUMCTRLSTA_OFFSET_INB_PARAM);
4473 val |= 0x3F;
4474 wm_kmrn_writereg(sc,
4475 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4476 break;
4477 default:
4478 break;
4479 }
4480
4481 if (sc->sc_type == WM_T_80003) {
4482 val = CSR_READ(sc, WMREG_CTRL_EXT);
4483 val &= ~CTRL_EXT_LINK_MODE_MASK;
4484 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4485
4486 /* Bypass RX and TX FIFO's */
4487 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4488 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4489 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4490 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4491 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4492 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4493 }
4494 }
4495 #if 0
4496 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4497 #endif
4498
4499 /* Set up checksum offload parameters. */
4500 reg = CSR_READ(sc, WMREG_RXCSUM);
4501 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4502 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4503 reg |= RXCSUM_IPOFL;
4504 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4505 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4506 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4507 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4508 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4509
4510 /* Set up MSI-X */
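	/*
	 * Route TX queue 0, RX queue 0 and link-status events to their
	 * MSI-X vectors. The mechanism is chip dependent: the 82575 uses
	 * the MSIXBM bitmap registers, the 82574 packs all three into the
	 * single IVAR register, and the newer chips use per-queue IVAR
	 * registers plus the GPIE setup.
	 */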
4511 if (sc->sc_nintrs > 1) {
4512 uint32_t ivar;
4513
4514 if (sc->sc_type == WM_T_82575) {
4515 /* Interrupt control */
4516 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4517 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4518 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4519
4520 /* TX */
4521 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_TXINTR_IDX),
4522 EITR_TX_QUEUE0);
4523 /* RX */
4524 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_RXINTR_IDX),
4525 EITR_RX_QUEUE0);
4526 /* Link status */
4527 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_LINKINTR_IDX),
4528 EITR_OTHER);
4529 } else if (sc->sc_type == WM_T_82574) {
4530 /* Interrupt control */
4531 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4532 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4533 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4534
4535 /* TX, RX and Link status */
4536 ivar = __SHIFTIN((IVAR_VALID_82574|WM_MSIX_TXINTR_IDX),
4537 IVAR_TX_MASK_Q_82574(0));
4538 ivar |= __SHIFTIN((IVAR_VALID_82574
4539 | WM_MSIX_RXINTR_IDX),
4540 IVAR_RX_MASK_Q_82574(0));
4541 ivar |=__SHIFTIN((IVAR_VALID_82574|WM_MSIX_LINKINTR_IDX),
4542 IVAR_OTHER_MASK);
4543 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4544 } else {
4545 /* Interrupt control */
4546 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
4547 | GPIE_MULTI_MSIX | GPIE_EIAME
4548 | GPIE_PBA);
4549
4550 switch (sc->sc_type) {
4551 case WM_T_82580:
4552 case WM_T_I350:
4553 case WM_T_I354:
4554 case WM_T_I210:
4555 case WM_T_I211:
4556 /* TX */
4557 ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
4558 ivar &= ~IVAR_TX_MASK_Q(0);
4559 ivar |= __SHIFTIN(
4560 (WM_MSIX_TXINTR_IDX | IVAR_VALID),
4561 IVAR_TX_MASK_Q(0));
4562 CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
4563
4564 /* RX */
4565 ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
4566 ivar &= ~IVAR_RX_MASK_Q(0);
4567 ivar |= __SHIFTIN(
4568 (WM_MSIX_RXINTR_IDX | IVAR_VALID),
4569 IVAR_RX_MASK_Q(0));
4570 CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
4571 break;
4572 case WM_T_82576:
4573 /* TX */
4574 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
4575 ivar &= ~IVAR_TX_MASK_Q_82576(0);
4576 ivar |= __SHIFTIN(
4577 (WM_MSIX_TXINTR_IDX | IVAR_VALID),
4578 IVAR_TX_MASK_Q_82576(0));
4579 CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
4580
4581 /* RX */
4582 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
4583 ivar &= ~IVAR_RX_MASK_Q_82576(0);
4584 ivar |= __SHIFTIN(
4585 (WM_MSIX_RXINTR_IDX | IVAR_VALID),
4586 IVAR_RX_MASK_Q_82576(0));
4587 CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
4588 break;
4589 default:
4590 break;
4591 }
4592
4593 /* Link status */
4594 ivar = __SHIFTIN((WM_MSIX_LINKINTR_IDX | IVAR_VALID),
4595 IVAR_MISC_OTHER);
4596 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4597 }
4598 }
4599
4600 /* Set up the interrupt registers. */
4601 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4602 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4603 ICR_RXO | ICR_RXT0;
4604 if (sc->sc_nintrs > 1) {
4605 uint32_t mask;
4606 switch (sc->sc_type) {
4607 case WM_T_82574:
4608 CSR_WRITE(sc, WMREG_EIAC_82574,
4609 WMREG_EIAC_82574_MSIX_MASK);
4610 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4611 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4612 break;
4613 default:
4614 if (sc->sc_type == WM_T_82575)
4615 mask = EITR_RX_QUEUE0 |EITR_TX_QUEUE0
4616 | EITR_OTHER;
4617 else
4618 mask = (1 << WM_MSIX_RXINTR_IDX)
4619 | (1 << WM_MSIX_TXINTR_IDX)
4620 | (1 << WM_MSIX_LINKINTR_IDX);
4621 CSR_WRITE(sc, WMREG_EIAC, mask);
4622 CSR_WRITE(sc, WMREG_EIAM, mask);
4623 CSR_WRITE(sc, WMREG_EIMS, mask);
4624 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4625 break;
4626 }
4627 } else
4628 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4629
4630 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4631 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4632 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4633 reg = CSR_READ(sc, WMREG_KABGTXD);
4634 reg |= KABGTXD_BGSQLBIAS;
4635 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4636 }
4637
4638 /* Set up the inter-packet gap. */
4639 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4640
4641 if (sc->sc_type >= WM_T_82543) {
4642 /*
4643 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
4644 * the multi queue function with MSI-X.
4645 */
4646 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4647 if (sc->sc_nintrs > 1) {
4648 CSR_WRITE(sc, WMREG_EITR(WM_MSIX_RXINTR_IDX),
4649 sc->sc_itr);
4650 CSR_WRITE(sc, WMREG_EITR(WM_MSIX_TXINTR_IDX),
4651 sc->sc_itr);
4652 /*
4653 				 * Link interrupts occur much less often than TX
4654 * interrupts and RX interrupts. So, we don't
4655 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
4656 * FreeBSD's if_igb.
4657 */
4658 } else
4659 CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
4660 } else
4661 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4662 }
4663
4664 /* Set the VLAN ethernetype. */
4665 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4666
4667 /*
4668 * Set up the transmit control register; we start out with
4669 	 * a collision distance suitable for FDX, but update it when
4670 * we resolve the media type.
4671 */
4672 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4673 | TCTL_CT(TX_COLLISION_THRESHOLD)
4674 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4675 if (sc->sc_type >= WM_T_82571)
4676 sc->sc_tctl |= TCTL_MULR;
4677 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4678
4679 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4680 		/* Write TDT after TCTL.EN is set. See the documentation. */
4681 CSR_WRITE(sc, WMREG_TDT, 0);
4682 }
4683
4684 if (sc->sc_type == WM_T_80003) {
4685 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4686 reg &= ~TCTL_EXT_GCEX_MASK;
4687 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4688 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4689 }
4690
4691 /* Set the media. */
4692 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4693 goto out;
4694
4695 /* Configure for OS presence */
4696 wm_init_manageability(sc);
4697
4698 /*
4699 * Set up the receive control register; we actually program
4700 * the register when we set the receive filter. Use multicast
4701 * address offset type 0.
4702 *
4703 * Only the i82544 has the ability to strip the incoming
4704 * CRC, so we don't enable that feature.
4705 */
4706 sc->sc_mchash_type = 0;
4707 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4708 | RCTL_MO(sc->sc_mchash_type);
4709
4710 /*
4711 * The I350 has a bug where it always strips the CRC whether
4712 	 * asked to or not. So ask for stripped CRC here and cope with it in rxeof.
4713 */
4714 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4715 || (sc->sc_type == WM_T_I210))
4716 sc->sc_rctl |= RCTL_SECRC;
4717
4718 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4719 && (ifp->if_mtu > ETHERMTU)) {
4720 sc->sc_rctl |= RCTL_LPE;
4721 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4722 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4723 }
4724
4725 if (MCLBYTES == 2048) {
4726 sc->sc_rctl |= RCTL_2k;
4727 } else {
4728 if (sc->sc_type >= WM_T_82543) {
4729 switch (MCLBYTES) {
4730 case 4096:
4731 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4732 break;
4733 case 8192:
4734 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4735 break;
4736 case 16384:
4737 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4738 break;
4739 default:
4740 panic("wm_init: MCLBYTES %d unsupported",
4741 MCLBYTES);
4742 break;
4743 }
4744 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4745 }
4746
4747 /* Set the receive filter. */
4748 wm_set_filter(sc);
4749
4750 /* Enable ECC */
4751 switch (sc->sc_type) {
4752 case WM_T_82571:
4753 reg = CSR_READ(sc, WMREG_PBA_ECC);
4754 reg |= PBA_ECC_CORR_EN;
4755 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4756 break;
4757 case WM_T_PCH_LPT:
4758 reg = CSR_READ(sc, WMREG_PBECCSTS);
4759 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4760 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4761
4762 reg = CSR_READ(sc, WMREG_CTRL);
4763 reg |= CTRL_MEHE;
4764 CSR_WRITE(sc, WMREG_CTRL, reg);
4765 break;
4766 default:
4767 break;
4768 }
4769
4770 /* On 575 and later set RDT only if RX enabled */
4771 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4772 for (i = 0; i < WM_NRXDESC; i++)
4773 WM_INIT_RXDESC(sc, i);
4774
4775 sc->sc_stopping = false;
4776
4777 /* Start the one second link check clock. */
4778 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4779
4780 /* ...all done! */
4781 ifp->if_flags |= IFF_RUNNING;
4782 ifp->if_flags &= ~IFF_OACTIVE;
4783
4784 out:
4785 sc->sc_if_flags = ifp->if_flags;
4786 if (error)
4787 log(LOG_ERR, "%s: interface not running\n",
4788 device_xname(sc->sc_dev));
4789 return error;
4790 }
4791
4792 /*
4793 * wm_stop: [ifnet interface function]
4794 *
4795 * Stop transmission on the interface.
4796 */
4797 static void
4798 wm_stop(struct ifnet *ifp, int disable)
4799 {
4800 struct wm_softc *sc = ifp->if_softc;
4801
4802 WM_BOTH_LOCK(sc);
4803 wm_stop_locked(ifp, disable);
4804 WM_BOTH_UNLOCK(sc);
4805 }
4806
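/*
 * wm_stop_locked:
 *
 * Guts of wm_stop(). Runs with both the TX and RX locks held.
 */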
4807 static void
4808 wm_stop_locked(struct ifnet *ifp, int disable)
4809 {
4810 struct wm_softc *sc = ifp->if_softc;
4811 struct wm_txsoft *txs;
4812 int i;
4813
4814 KASSERT(WM_BOTH_LOCKED(sc));
4815
4816 sc->sc_stopping = true;
4817
4818 /* Stop the one second clock. */
4819 callout_stop(&sc->sc_tick_ch);
4820
4821 /* Stop the 82547 Tx FIFO stall check timer. */
4822 if (sc->sc_type == WM_T_82547)
4823 callout_stop(&sc->sc_txfifo_ch);
4824
4825 if (sc->sc_flags & WM_F_HAS_MII) {
4826 /* Down the MII. */
4827 mii_down(&sc->sc_mii);
4828 } else {
4829 #if 0
4830 /* Should we clear PHY's status properly? */
4831 wm_reset(sc);
4832 #endif
4833 }
4834
4835 /* Stop the transmit and receive processes. */
4836 CSR_WRITE(sc, WMREG_TCTL, 0);
4837 CSR_WRITE(sc, WMREG_RCTL, 0);
4838 sc->sc_rctl &= ~RCTL_EN;
4839
4840 /*
4841 * Clear the interrupt mask to ensure the device cannot assert its
4842 * interrupt line.
4843 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
4844 * service any currently pending or shared interrupt.
4845 */
4846 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4847 sc->sc_icr = 0;
4848 if (sc->sc_nintrs > 1) {
4849 if (sc->sc_type != WM_T_82574) {
4850 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4851 CSR_WRITE(sc, WMREG_EIAC, 0);
4852 } else
4853 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4854 }
4855
4856 /* Release any queued transmit buffers. */
4857 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4858 txs = &sc->sc_txsoft[i];
4859 if (txs->txs_mbuf != NULL) {
4860 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4861 m_freem(txs->txs_mbuf);
4862 txs->txs_mbuf = NULL;
4863 }
4864 }
4865
4866 /* Mark the interface as down and cancel the watchdog timer. */
4867 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4868 ifp->if_timer = 0;
4869
4870 if (disable)
4871 wm_rxdrain(sc);
4872
4873 #if 0 /* notyet */
4874 if (sc->sc_type >= WM_T_82544)
4875 CSR_WRITE(sc, WMREG_WUC, 0);
4876 #endif
4877 }
4878
4879 /*
4880 * wm_tx_offload:
4881 *
4882 * Set up TCP/IP checksumming parameters for the
4883 * specified packet.
4884 */
4885 static int
4886 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4887 uint8_t *fieldsp)
4888 {
4889 struct mbuf *m0 = txs->txs_mbuf;
4890 struct livengood_tcpip_ctxdesc *t;
4891 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4892 uint32_t ipcse;
4893 struct ether_header *eh;
4894 int offset, iphl;
4895 uint8_t fields;
4896
4897 /*
4898 * XXX It would be nice if the mbuf pkthdr had offset
4899 * fields for the protocol headers.
4900 */
4901
4902 eh = mtod(m0, struct ether_header *);
4903 switch (htons(eh->ether_type)) {
4904 case ETHERTYPE_IP:
4905 case ETHERTYPE_IPV6:
4906 offset = ETHER_HDR_LEN;
4907 break;
4908
4909 case ETHERTYPE_VLAN:
4910 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4911 break;
4912
4913 default:
4914 /*
4915 * Don't support this protocol or encapsulation.
4916 */
4917 *fieldsp = 0;
4918 *cmdp = 0;
4919 return 0;
4920 }
4921
4922 if ((m0->m_pkthdr.csum_flags &
4923 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4924 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4925 } else {
4926 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4927 }
4928 ipcse = offset + iphl - 1;
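	/*
	 * e.g. an untagged IPv4 packet with a 20-byte header: offset = 14,
	 * iphl = 20, so ipcse = 14 + 20 - 1 = 33, the offset of the last
	 * byte of the IP header.
	 */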
4929
4930 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4931 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4932 seg = 0;
4933 fields = 0;
4934
4935 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4936 int hlen = offset + iphl;
4937 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4938
4939 if (__predict_false(m0->m_len <
4940 (hlen + sizeof(struct tcphdr)))) {
4941 /*
4942 * TCP/IP headers are not in the first mbuf; we need
4943 * to do this the slow and painful way. Let's just
4944 * hope this doesn't happen very often.
4945 */
4946 struct tcphdr th;
4947
4948 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4949
4950 m_copydata(m0, hlen, sizeof(th), &th);
4951 if (v4) {
4952 struct ip ip;
4953
4954 m_copydata(m0, offset, sizeof(ip), &ip);
4955 ip.ip_len = 0;
4956 m_copyback(m0,
4957 offset + offsetof(struct ip, ip_len),
4958 sizeof(ip.ip_len), &ip.ip_len);
4959 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4960 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4961 } else {
4962 struct ip6_hdr ip6;
4963
4964 m_copydata(m0, offset, sizeof(ip6), &ip6);
4965 ip6.ip6_plen = 0;
4966 m_copyback(m0,
4967 offset + offsetof(struct ip6_hdr, ip6_plen),
4968 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4969 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4970 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4971 }
4972 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4973 sizeof(th.th_sum), &th.th_sum);
4974
4975 hlen += th.th_off << 2;
4976 } else {
4977 /*
4978 * TCP/IP headers are in the first mbuf; we can do
4979 * this the easy way.
4980 */
4981 struct tcphdr *th;
4982
4983 if (v4) {
4984 struct ip *ip =
4985 (void *)(mtod(m0, char *) + offset);
4986 th = (void *)(mtod(m0, char *) + hlen);
4987
4988 ip->ip_len = 0;
4989 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4990 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4991 } else {
4992 struct ip6_hdr *ip6 =
4993 (void *)(mtod(m0, char *) + offset);
4994 th = (void *)(mtod(m0, char *) + hlen);
4995
4996 ip6->ip6_plen = 0;
4997 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4998 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4999 }
5000 hlen += th->th_off << 2;
5001 }
5002
5003 if (v4) {
5004 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5005 cmdlen |= WTX_TCPIP_CMD_IP;
5006 } else {
5007 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5008 ipcse = 0;
5009 }
5010 cmd |= WTX_TCPIP_CMD_TSE;
5011 cmdlen |= WTX_TCPIP_CMD_TSE |
5012 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
5013 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
5014 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
5015 }
5016
5017 /*
5018 * NOTE: Even if we're not using the IP or TCP/UDP checksum
5019 * offload feature, if we load the context descriptor, we
5020 * MUST provide valid values for IPCSS and TUCSS fields.
5021 */
5022
5023 ipcs = WTX_TCPIP_IPCSS(offset) |
5024 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
5025 WTX_TCPIP_IPCSE(ipcse);
5026 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
5027 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
5028 fields |= WTX_IXSM;
5029 }
5030
5031 offset += iphl;
5032
5033 if (m0->m_pkthdr.csum_flags &
5034 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
5035 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5036 fields |= WTX_TXSM;
5037 tucs = WTX_TCPIP_TUCSS(offset) |
5038 WTX_TCPIP_TUCSO(offset +
5039 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
5040 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5041 } else if ((m0->m_pkthdr.csum_flags &
5042 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
5043 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5044 fields |= WTX_TXSM;
5045 tucs = WTX_TCPIP_TUCSS(offset) |
5046 WTX_TCPIP_TUCSO(offset +
5047 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
5048 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5049 } else {
5050 /* Just initialize it to a valid TCP context. */
5051 tucs = WTX_TCPIP_TUCSS(offset) |
5052 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
5053 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5054 }
5055
5056 /* Fill in the context descriptor. */
5057 t = (struct livengood_tcpip_ctxdesc *)
5058 &sc->sc_txdescs[sc->sc_txnext];
5059 t->tcpip_ipcs = htole32(ipcs);
5060 t->tcpip_tucs = htole32(tucs);
5061 t->tcpip_cmdlen = htole32(cmdlen);
5062 t->tcpip_seg = htole32(seg);
5063 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5064
5065 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5066 txs->txs_ndesc++;
5067
5068 *cmdp = cmd;
5069 *fieldsp = fields;
5070
5071 return 0;
5072 }
5073
5074 static void
5075 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5076 {
5077 struct mbuf *m;
5078 int i;
5079
5080 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5081 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5082 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5083 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5084 m->m_data, m->m_len, m->m_flags);
5085 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5086 i, i == 1 ? "" : "s");
5087 }
5088
5089 /*
5090 * wm_82547_txfifo_stall:
5091 *
5092 * Callout used to wait for the 82547 Tx FIFO to drain,
5093 * reset the FIFO pointers, and restart packet transmission.
5094 */
5095 static void
5096 wm_82547_txfifo_stall(void *arg)
5097 {
5098 struct wm_softc *sc = arg;
5099 #ifndef WM_MPSAFE
5100 int s;
5101
5102 s = splnet();
5103 #endif
5104 WM_TX_LOCK(sc);
5105
5106 if (sc->sc_stopping)
5107 goto out;
5108
5109 if (sc->sc_txfifo_stall) {
5110 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
5111 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5112 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5113 /*
5114 * Packets have drained. Stop transmitter, reset
5115 * FIFO pointers, restart transmitter, and kick
5116 * the packet queue.
5117 */
5118 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5119 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5120 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
5121 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
5122 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
5123 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
5124 CSR_WRITE(sc, WMREG_TCTL, tctl);
5125 CSR_WRITE_FLUSH(sc);
5126
5127 sc->sc_txfifo_head = 0;
5128 sc->sc_txfifo_stall = 0;
5129 wm_start_locked(&sc->sc_ethercom.ec_if);
5130 } else {
5131 /*
5132 * Still waiting for packets to drain; try again in
5133 * another tick.
5134 */
5135 callout_schedule(&sc->sc_txfifo_ch, 1);
5136 }
5137 }
5138
5139 out:
5140 WM_TX_UNLOCK(sc);
5141 #ifndef WM_MPSAFE
5142 splx(s);
5143 #endif
5144 }
5145
5146 /*
5147 * wm_82547_txfifo_bugchk:
5148 *
5149 * Check for bug condition in the 82547 Tx FIFO. We need to
5150 * prevent enqueueing a packet that would wrap around the end
5151  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
5152 *
5153 * We do this by checking the amount of space before the end
5154 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5155 * the Tx FIFO, wait for all remaining packets to drain, reset
5156 * the internal FIFO pointers to the beginning, and restart
5157 * transmission on the interface.
5158 */
5159 #define WM_FIFO_HDR 0x10
5160 #define WM_82547_PAD_LEN 0x3e0
5161 static int
5162 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5163 {
5164 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
5165 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5166
5167 /* Just return if already stalled. */
5168 if (sc->sc_txfifo_stall)
5169 return 1;
5170
5171 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5172 /* Stall only occurs in half-duplex mode. */
5173 goto send_packet;
5174 }
5175
5176 if (len >= WM_82547_PAD_LEN + space) {
5177 sc->sc_txfifo_stall = 1;
5178 callout_schedule(&sc->sc_txfifo_ch, 1);
5179 return 1;
5180 }
5181
5182 send_packet:
5183 sc->sc_txfifo_head += len;
5184 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
5185 sc->sc_txfifo_head -= sc->sc_txfifo_size;
5186
5187 return 0;
5188 }
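/*
 * For illustration: a 1514-byte frame needs
 * len = roundup(1514 + 0x10, 0x10) = 1536 bytes of FIFO space. With
 * space = 0x400 bytes left before the end of the FIFO, 1536 is less
 * than 0x3e0 + 0x400 (= 2016), so the packet is sent and
 * sc_txfifo_head advances by 1536. We stall only when
 * space <= len - 0x3e0.
 */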
5189
5190 /*
5191 * wm_start: [ifnet interface function]
5192 *
5193 * Start packet transmission on the interface.
5194 */
5195 static void
5196 wm_start(struct ifnet *ifp)
5197 {
5198 struct wm_softc *sc = ifp->if_softc;
5199
5200 WM_TX_LOCK(sc);
5201 if (!sc->sc_stopping)
5202 wm_start_locked(ifp);
5203 WM_TX_UNLOCK(sc);
5204 }
5205
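/*
 * wm_start_locked:
 *
 * Guts of wm_start(). Runs with the TX lock held.
 */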
5206 static void
5207 wm_start_locked(struct ifnet *ifp)
5208 {
5209 struct wm_softc *sc = ifp->if_softc;
5210 struct mbuf *m0;
5211 struct m_tag *mtag;
5212 struct wm_txsoft *txs;
5213 bus_dmamap_t dmamap;
5214 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
5215 bus_addr_t curaddr;
5216 bus_size_t seglen, curlen;
5217 uint32_t cksumcmd;
5218 uint8_t cksumfields;
5219
5220 KASSERT(WM_TX_LOCKED(sc));
5221
5222 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5223 return;
5224
5225 /* Remember the previous number of free descriptors. */
5226 ofree = sc->sc_txfree;
5227
5228 /*
5229 * Loop through the send queue, setting up transmit descriptors
5230 * until we drain the queue, or use up all available transmit
5231 * descriptors.
5232 */
5233 for (;;) {
5234 m0 = NULL;
5235
5236 /* Get a work queue entry. */
5237 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5238 wm_txeof(sc);
5239 if (sc->sc_txsfree == 0) {
5240 DPRINTF(WM_DEBUG_TX,
5241 ("%s: TX: no free job descriptors\n",
5242 device_xname(sc->sc_dev)));
5243 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5244 break;
5245 }
5246 }
5247
5248 /* Grab a packet off the queue. */
5249 IFQ_DEQUEUE(&ifp->if_snd, m0);
5250 if (m0 == NULL)
5251 break;
5252
5253 DPRINTF(WM_DEBUG_TX,
5254 ("%s: TX: have packet to transmit: %p\n",
5255 device_xname(sc->sc_dev), m0));
5256
5257 txs = &sc->sc_txsoft[sc->sc_txsnext];
5258 dmamap = txs->txs_dmamap;
5259
5260 use_tso = (m0->m_pkthdr.csum_flags &
5261 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
5262
5263 /*
5264 * So says the Linux driver:
5265 * The controller does a simple calculation to make sure
5266 * there is enough room in the FIFO before initiating the
5267 * DMA for each buffer. The calc is:
5268 * 4 = ceil(buffer len / MSS)
5269 * To make sure we don't overrun the FIFO, adjust the max
5270 * buffer len if the MSS drops.
5271 */
5272 dmamap->dm_maxsegsz =
5273 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
5274 ? m0->m_pkthdr.segsz << 2
5275 : WTX_MAX_LEN;
5276
5277 /*
5278 * Load the DMA map. If this fails, the packet either
5279 * didn't fit in the allotted number of segments, or we
5280 * were short on resources. For the too-many-segments
5281 * case, we simply report an error and drop the packet,
5282 * since we can't sanely copy a jumbo packet to a single
5283 * buffer.
5284 */
5285 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5286 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5287 if (error) {
5288 if (error == EFBIG) {
5289 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5290 log(LOG_ERR, "%s: Tx packet consumes too many "
5291 "DMA segments, dropping...\n",
5292 device_xname(sc->sc_dev));
5293 wm_dump_mbuf_chain(sc, m0);
5294 m_freem(m0);
5295 continue;
5296 }
5297 /* Short on resources, just stop for now. */
5298 DPRINTF(WM_DEBUG_TX,
5299 ("%s: TX: dmamap load failed: %d\n",
5300 device_xname(sc->sc_dev), error));
5301 break;
5302 }
5303
5304 segs_needed = dmamap->dm_nsegs;
5305 if (use_tso) {
5306 /* For sentinel descriptor; see below. */
5307 segs_needed++;
5308 }
5309
5310 /*
5311 * Ensure we have enough descriptors free to describe
5312 * the packet. Note, we always reserve one descriptor
5313 * at the end of the ring due to the semantics of the
5314 * TDT register, plus one more in the event we need
5315 * to load offload context.
5316 */
5317 if (segs_needed > sc->sc_txfree - 2) {
5318 /*
5319 * Not enough free descriptors to transmit this
5320 * packet. We haven't committed anything yet,
5321 			 * so just unload the DMA map, drop the packet
5322 			 * (it is freed after the loop), and punt. Notify
5323 			 * the upper layer that there are no more slots left.
5324 */
5325 DPRINTF(WM_DEBUG_TX,
5326 ("%s: TX: need %d (%d) descriptors, have %d\n",
5327 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5328 segs_needed, sc->sc_txfree - 1));
5329 ifp->if_flags |= IFF_OACTIVE;
5330 bus_dmamap_unload(sc->sc_dmat, dmamap);
5331 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5332 break;
5333 }
5334
5335 /*
5336 * Check for 82547 Tx FIFO bug. We need to do this
5337 * once we know we can transmit the packet, since we
5338 * do some internal FIFO space accounting here.
5339 */
5340 if (sc->sc_type == WM_T_82547 &&
5341 wm_82547_txfifo_bugchk(sc, m0)) {
5342 DPRINTF(WM_DEBUG_TX,
5343 ("%s: TX: 82547 Tx FIFO bug detected\n",
5344 device_xname(sc->sc_dev)));
5345 ifp->if_flags |= IFF_OACTIVE;
5346 bus_dmamap_unload(sc->sc_dmat, dmamap);
5347 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
5348 break;
5349 }
5350
5351 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5352
5353 DPRINTF(WM_DEBUG_TX,
5354 ("%s: TX: packet has %d (%d) DMA segments\n",
5355 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5356
5357 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5358
5359 /*
5360 * Store a pointer to the packet so that we can free it
5361 * later.
5362 *
5363 * Initially, we consider the number of descriptors the
5364 		 * packet uses to be the number of DMA segments. This may be
5365 * incremented by 1 if we do checksum offload (a descriptor
5366 * is used to set the checksum context).
5367 */
5368 txs->txs_mbuf = m0;
5369 txs->txs_firstdesc = sc->sc_txnext;
5370 txs->txs_ndesc = segs_needed;
5371
5372 /* Set up offload parameters for this packet. */
5373 if (m0->m_pkthdr.csum_flags &
5374 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5375 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5376 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5377 if (wm_tx_offload(sc, txs, &cksumcmd,
5378 &cksumfields) != 0) {
5379 /* Error message already displayed. */
5380 bus_dmamap_unload(sc->sc_dmat, dmamap);
5381 continue;
5382 }
5383 } else {
5384 cksumcmd = 0;
5385 cksumfields = 0;
5386 }
5387
5388 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
5389
5390 /* Sync the DMA map. */
5391 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5392 BUS_DMASYNC_PREWRITE);
5393
5394 /* Initialize the transmit descriptor. */
5395 for (nexttx = sc->sc_txnext, seg = 0;
5396 seg < dmamap->dm_nsegs; seg++) {
5397 for (seglen = dmamap->dm_segs[seg].ds_len,
5398 curaddr = dmamap->dm_segs[seg].ds_addr;
5399 seglen != 0;
5400 curaddr += curlen, seglen -= curlen,
5401 nexttx = WM_NEXTTX(sc, nexttx)) {
5402 curlen = seglen;
5403
5404 /*
5405 * So says the Linux driver:
5406 * Work around for premature descriptor
5407 * write-backs in TSO mode. Append a
5408 * 4-byte sentinel descriptor.
5409 */
5410 if (use_tso &&
5411 seg == dmamap->dm_nsegs - 1 &&
5412 curlen > 8)
5413 curlen -= 4;
5414
5415 wm_set_dma_addr(
5416 &sc->sc_txdescs[nexttx].wtx_addr,
5417 curaddr);
5418 sc->sc_txdescs[nexttx].wtx_cmdlen =
5419 htole32(cksumcmd | curlen);
5420 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
5421 0;
5422 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
5423 cksumfields;
5424 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5425 lasttx = nexttx;
5426
5427 DPRINTF(WM_DEBUG_TX,
5428 ("%s: TX: desc %d: low %#" PRIx64 ", "
5429 "len %#04zx\n",
5430 device_xname(sc->sc_dev), nexttx,
5431 (uint64_t)curaddr, curlen));
5432 }
5433 }
5434
5435 KASSERT(lasttx != -1);
5436
5437 /*
5438 * Set up the command byte on the last descriptor of
5439 * the packet. If we're in the interrupt delay window,
5440 * delay the interrupt.
5441 */
5442 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5443 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5444
5445 /*
5446 * If VLANs are enabled and the packet has a VLAN tag, set
5447 * up the descriptor to encapsulate the packet for us.
5448 *
5449 * This is only valid on the last descriptor of the packet.
5450 */
5451 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5452 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5453 htole32(WTX_CMD_VLE);
5454 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
5455 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5456 }
5457
5458 txs->txs_lastdesc = lasttx;
5459
5460 DPRINTF(WM_DEBUG_TX,
5461 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5462 device_xname(sc->sc_dev),
5463 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5464
5465 /* Sync the descriptors we're using. */
5466 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5467 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5468
5469 /* Give the packet to the chip. */
5470 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5471
5472 DPRINTF(WM_DEBUG_TX,
5473 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5474
5475 DPRINTF(WM_DEBUG_TX,
5476 ("%s: TX: finished transmitting packet, job %d\n",
5477 device_xname(sc->sc_dev), sc->sc_txsnext));
5478
5479 /* Advance the tx pointer. */
5480 sc->sc_txfree -= txs->txs_ndesc;
5481 sc->sc_txnext = nexttx;
5482
5483 sc->sc_txsfree--;
5484 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5485
5486 /* Pass the packet to any BPF listeners. */
5487 bpf_mtap(ifp, m0);
5488 }
5489
5490 if (m0 != NULL) {
5491 ifp->if_flags |= IFF_OACTIVE;
5492 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5493 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5494 m_freem(m0);
5495 }
5496
5497 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5498 /* No more slots; notify upper layer. */
5499 ifp->if_flags |= IFF_OACTIVE;
5500 }
5501
5502 if (sc->sc_txfree != ofree) {
5503 /* Set a watchdog timer in case the chip flakes out. */
5504 ifp->if_timer = 5;
5505 }
5506 }
5507
5508 /*
5509 * wm_nq_tx_offload:
5510 *
5511 * Set up TCP/IP checksumming parameters for the
5512  *	specified packet, for NEWQUEUE devices.
5513 */
5514 static int
5515 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
5516 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
5517 {
5518 struct mbuf *m0 = txs->txs_mbuf;
5519 struct m_tag *mtag;
5520 uint32_t vl_len, mssidx, cmdc;
5521 struct ether_header *eh;
5522 int offset, iphl;
5523
5524 /*
5525 * XXX It would be nice if the mbuf pkthdr had offset
5526 * fields for the protocol headers.
5527 */
5528 *cmdlenp = 0;
5529 *fieldsp = 0;
5530
5531 eh = mtod(m0, struct ether_header *);
5532 switch (htons(eh->ether_type)) {
5533 case ETHERTYPE_IP:
5534 case ETHERTYPE_IPV6:
5535 offset = ETHER_HDR_LEN;
5536 break;
5537
5538 case ETHERTYPE_VLAN:
5539 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5540 break;
5541
5542 default:
5543 /* Don't support this protocol or encapsulation. */
5544 *do_csum = false;
5545 return 0;
5546 }
5547 *do_csum = true;
5548 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
5549 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
5550
5551 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
5552 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
5553
5554 if ((m0->m_pkthdr.csum_flags &
5555 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
5556 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5557 } else {
5558 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5559 }
5560 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
5561 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
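	/*
	 * Example of the packing above (a minimal sketch): for an
	 * untagged IPv4/TCP frame with a standard 14-byte Ethernet
	 * header and a 20-byte IP header,
	 *
	 *	vl_len == (14 << NQTXC_VLLEN_MACLEN_SHIFT)
	 *	    | (20 << NQTXC_VLLEN_IPLEN_SHIFT)
	 *
	 * i.e. MACLEN and IPLEN occupy disjoint bit fields of the
	 * context descriptor's VLAN/length word.
	 */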
5562
5563 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5564 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
5565 << NQTXC_VLLEN_VLAN_SHIFT);
5566 *cmdlenp |= NQTX_CMD_VLE;
5567 }
5568
5569 mssidx = 0;
5570
5571 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5572 int hlen = offset + iphl;
5573 int tcp_hlen;
5574 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5575
5576 if (__predict_false(m0->m_len <
5577 (hlen + sizeof(struct tcphdr)))) {
5578 /*
5579 * TCP/IP headers are not in the first mbuf; we need
5580 * to do this the slow and painful way. Let's just
5581 * hope this doesn't happen very often.
5582 */
5583 struct tcphdr th;
5584
5585 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5586
5587 m_copydata(m0, hlen, sizeof(th), &th);
5588 if (v4) {
5589 struct ip ip;
5590
5591 m_copydata(m0, offset, sizeof(ip), &ip);
5592 ip.ip_len = 0;
5593 m_copyback(m0,
5594 offset + offsetof(struct ip, ip_len),
5595 sizeof(ip.ip_len), &ip.ip_len);
5596 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5597 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5598 } else {
5599 struct ip6_hdr ip6;
5600
5601 m_copydata(m0, offset, sizeof(ip6), &ip6);
5602 ip6.ip6_plen = 0;
5603 m_copyback(m0,
5604 offset + offsetof(struct ip6_hdr, ip6_plen),
5605 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5606 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5607 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5608 }
5609 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5610 sizeof(th.th_sum), &th.th_sum);
5611
5612 tcp_hlen = th.th_off << 2;
5613 } else {
5614 /*
5615 * TCP/IP headers are in the first mbuf; we can do
5616 * this the easy way.
5617 */
5618 struct tcphdr *th;
5619
5620 if (v4) {
5621 struct ip *ip =
5622 (void *)(mtod(m0, char *) + offset);
5623 th = (void *)(mtod(m0, char *) + hlen);
5624
5625 ip->ip_len = 0;
5626 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5627 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5628 } else {
5629 struct ip6_hdr *ip6 =
5630 (void *)(mtod(m0, char *) + offset);
5631 th = (void *)(mtod(m0, char *) + hlen);
5632
5633 ip6->ip6_plen = 0;
5634 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5635 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5636 }
5637 tcp_hlen = th->th_off << 2;
5638 }
5639 hlen += tcp_hlen;
5640 *cmdlenp |= NQTX_CMD_TSE;
5641
5642 if (v4) {
5643 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5644 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5645 } else {
5646 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5647 *fieldsp |= NQTXD_FIELDS_TUXSM;
5648 }
5649 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5650 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5651 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5652 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5653 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5654 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
5655 } else {
5656 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5657 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5658 }
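	/*
	 * Example of the PAYLEN arithmetic: for a 10054-byte TSOv4
	 * packet with 14 + 20 + 20 = 54 bytes of Ethernet, IP and TCP
	 * headers, hlen ends up as 54 and PAYLEN is set to 10000, the
	 * TCP payload only; in the non-TSO case PAYLEN is simply the
	 * whole packet length.
	 */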
5659
5660 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5661 *fieldsp |= NQTXD_FIELDS_IXSM;
5662 cmdc |= NQTXC_CMD_IP4;
5663 }
5664
5665 if (m0->m_pkthdr.csum_flags &
5666 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5667 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5668 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5669 cmdc |= NQTXC_CMD_TCP;
5670 } else {
5671 cmdc |= NQTXC_CMD_UDP;
5672 }
5673 cmdc |= NQTXC_CMD_IP4;
5674 *fieldsp |= NQTXD_FIELDS_TUXSM;
5675 }
5676 if (m0->m_pkthdr.csum_flags &
5677 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5678 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5679 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5680 cmdc |= NQTXC_CMD_TCP;
5681 } else {
5682 cmdc |= NQTXC_CMD_UDP;
5683 }
5684 cmdc |= NQTXC_CMD_IP6;
5685 *fieldsp |= NQTXD_FIELDS_TUXSM;
5686 }
5687
5688 /* Fill in the context descriptor. */
5689 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5690 htole32(vl_len);
5691 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5692 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5693 htole32(cmdc);
5694 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5695 htole32(mssidx);
5696 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5697 DPRINTF(WM_DEBUG_TX,
5698 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5699 sc->sc_txnext, 0, vl_len));
5700 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5701 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5702 txs->txs_ndesc++;
5703 return 0;
5704 }
5705
5706 /*
5707 * wm_nq_start: [ifnet interface function]
5708 *
5709 * Start packet transmission on the interface for NEWQUEUE devices
5710 */
5711 static void
5712 wm_nq_start(struct ifnet *ifp)
5713 {
5714 struct wm_softc *sc = ifp->if_softc;
5715
5716 WM_TX_LOCK(sc);
5717 if (!sc->sc_stopping)
5718 wm_nq_start_locked(ifp);
5719 WM_TX_UNLOCK(sc);
5720 }
5721
5722 static void
5723 wm_nq_start_locked(struct ifnet *ifp)
5724 {
5725 struct wm_softc *sc = ifp->if_softc;
5726 struct mbuf *m0;
5727 struct m_tag *mtag;
5728 struct wm_txsoft *txs;
5729 bus_dmamap_t dmamap;
5730 int error, nexttx, lasttx = -1, seg, segs_needed;
5731 bool do_csum, sent;
5732
5733 KASSERT(WM_TX_LOCKED(sc));
5734
5735 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5736 return;
5737
5738 sent = false;
5739
5740 /*
5741 * Loop through the send queue, setting up transmit descriptors
5742 * until we drain the queue, or use up all available transmit
5743 * descriptors.
5744 */
5745 for (;;) {
5746 m0 = NULL;
5747
5748 /* Get a work queue entry. */
5749 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5750 wm_txeof(sc);
5751 if (sc->sc_txsfree == 0) {
5752 DPRINTF(WM_DEBUG_TX,
5753 ("%s: TX: no free job descriptors\n",
5754 device_xname(sc->sc_dev)));
5755 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5756 break;
5757 }
5758 }
5759
5760 /* Grab a packet off the queue. */
5761 IFQ_DEQUEUE(&ifp->if_snd, m0);
5762 if (m0 == NULL)
5763 break;
5764
5765 DPRINTF(WM_DEBUG_TX,
5766 ("%s: TX: have packet to transmit: %p\n",
5767 device_xname(sc->sc_dev), m0));
5768
5769 txs = &sc->sc_txsoft[sc->sc_txsnext];
5770 dmamap = txs->txs_dmamap;
5771
5772 /*
5773 * Load the DMA map. If this fails, the packet either
5774 * didn't fit in the allotted number of segments, or we
5775 * were short on resources. For the too-many-segments
5776 * case, we simply report an error and drop the packet,
5777 * since we can't sanely copy a jumbo packet to a single
5778 * buffer.
5779 */
5780 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5781 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5782 if (error) {
5783 if (error == EFBIG) {
5784 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5785 log(LOG_ERR, "%s: Tx packet consumes too many "
5786 "DMA segments, dropping...\n",
5787 device_xname(sc->sc_dev));
5788 wm_dump_mbuf_chain(sc, m0);
5789 m_freem(m0);
5790 continue;
5791 }
5792 /* Short on resources, just stop for now. */
5793 DPRINTF(WM_DEBUG_TX,
5794 ("%s: TX: dmamap load failed: %d\n",
5795 device_xname(sc->sc_dev), error));
5796 break;
5797 }
5798
5799 segs_needed = dmamap->dm_nsegs;
5800
5801 /*
5802 * Ensure we have enough descriptors free to describe
5803 * the packet. Note, we always reserve one descriptor
5804 * at the end of the ring due to the semantics of the
5805 * TDT register, plus one more in the event we need
5806 		 * to load an offload context.
5807 */
5808 if (segs_needed > sc->sc_txfree - 2) {
5809 /*
5810 * Not enough free descriptors to transmit this
5811 * packet. We haven't committed anything yet,
5812 * so just unload the DMA map, put the packet
5813 			 * back on the queue, and punt. Notify the upper
5814 * layer that there are no more slots left.
5815 */
5816 DPRINTF(WM_DEBUG_TX,
5817 ("%s: TX: need %d (%d) descriptors, have %d\n",
5818 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5819 segs_needed, sc->sc_txfree - 1));
5820 ifp->if_flags |= IFF_OACTIVE;
5821 bus_dmamap_unload(sc->sc_dmat, dmamap);
5822 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5823 break;
5824 }
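		/*
		 * Concretely: with sc_txfree == 10, a packet needing
		 * 9 segments is refused (9 > 10 - 2). One descriptor
		 * is held back so that TDT can never catch up to TDH,
		 * and one more is reserved in case wm_nq_tx_offload()
		 * has to emit a context descriptor below.
		 */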
5825
5826 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5827
5828 DPRINTF(WM_DEBUG_TX,
5829 ("%s: TX: packet has %d (%d) DMA segments\n",
5830 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5831
5832 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5833
5834 /*
5835 * Store a pointer to the packet so that we can free it
5836 * later.
5837 *
5838 * Initially, we consider the number of descriptors the
5839 		 * packet uses to be the number of DMA segments. This may be
5840 * incremented by 1 if we do checksum offload (a descriptor
5841 * is used to set the checksum context).
5842 */
5843 txs->txs_mbuf = m0;
5844 txs->txs_firstdesc = sc->sc_txnext;
5845 txs->txs_ndesc = segs_needed;
5846
5847 /* Set up offload parameters for this packet. */
5848 uint32_t cmdlen, fields, dcmdlen;
5849 if (m0->m_pkthdr.csum_flags &
5850 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5851 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5852 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5853 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5854 &do_csum) != 0) {
5855 /* Error message already displayed. */
5856 bus_dmamap_unload(sc->sc_dmat, dmamap);
5857 continue;
5858 }
5859 } else {
5860 do_csum = false;
5861 cmdlen = 0;
5862 fields = 0;
5863 }
5864
5865 /* Sync the DMA map. */
5866 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5867 BUS_DMASYNC_PREWRITE);
5868
5869 /* Initialize the first transmit descriptor. */
5870 nexttx = sc->sc_txnext;
5871 if (!do_csum) {
5872 /* setup a legacy descriptor */
5873 wm_set_dma_addr(
5874 &sc->sc_txdescs[nexttx].wtx_addr,
5875 dmamap->dm_segs[0].ds_addr);
5876 sc->sc_txdescs[nexttx].wtx_cmdlen =
5877 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5878 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5879 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5880 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5881 NULL) {
5882 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5883 htole32(WTX_CMD_VLE);
5884 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5885 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5886 } else {
5887 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5888 }
5889 dcmdlen = 0;
5890 } else {
5891 /* setup an advanced data descriptor */
5892 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5893 htole64(dmamap->dm_segs[0].ds_addr);
5894 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5895 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5896 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
5897 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5898 htole32(fields);
5899 DPRINTF(WM_DEBUG_TX,
5900 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5901 device_xname(sc->sc_dev), nexttx,
5902 (uint64_t)dmamap->dm_segs[0].ds_addr));
5903 DPRINTF(WM_DEBUG_TX,
5904 ("\t 0x%08x%08x\n", fields,
5905 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5906 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5907 }
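		/*
		 * Note on dcmdlen: in the advanced case it carries
		 * NQTX_DTYP_D | NQTX_CMD_DEXT so that every follow-on
		 * descriptor below is also marked as extended data,
		 * while in the legacy case it stays 0 and the
		 * follow-on descriptors degenerate to plain
		 * address/length pairs. The KASSERTs depend on the
		 * length bits and the command bits occupying disjoint
		 * parts of the cmdlen word.
		 */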
5908
5909 lasttx = nexttx;
5910 nexttx = WM_NEXTTX(sc, nexttx);
5911 /*
5912 		 * Fill in the next descriptors. The legacy and advanced
5913 		 * formats are the same from here on.
5914 */
5915 for (seg = 1; seg < dmamap->dm_nsegs;
5916 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5917 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5918 htole64(dmamap->dm_segs[seg].ds_addr);
5919 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5920 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5921 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5922 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5923 lasttx = nexttx;
5924
5925 DPRINTF(WM_DEBUG_TX,
5926 ("%s: TX: desc %d: %#" PRIx64 ", "
5927 "len %#04zx\n",
5928 device_xname(sc->sc_dev), nexttx,
5929 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5930 dmamap->dm_segs[seg].ds_len));
5931 }
5932
5933 KASSERT(lasttx != -1);
5934
5935 /*
5936 * Set up the command byte on the last descriptor of
5937 * the packet. If we're in the interrupt delay window,
5938 * delay the interrupt.
5939 */
5940 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5941 (NQTX_CMD_EOP | NQTX_CMD_RS));
5942 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5943 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5944
5945 txs->txs_lastdesc = lasttx;
5946
5947 DPRINTF(WM_DEBUG_TX,
5948 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5949 device_xname(sc->sc_dev),
5950 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5951
5952 /* Sync the descriptors we're using. */
5953 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5954 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5955
5956 /* Give the packet to the chip. */
5957 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5958 sent = true;
5959
5960 DPRINTF(WM_DEBUG_TX,
5961 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5962
5963 DPRINTF(WM_DEBUG_TX,
5964 ("%s: TX: finished transmitting packet, job %d\n",
5965 device_xname(sc->sc_dev), sc->sc_txsnext));
5966
5967 /* Advance the tx pointer. */
5968 sc->sc_txfree -= txs->txs_ndesc;
5969 sc->sc_txnext = nexttx;
5970
5971 sc->sc_txsfree--;
5972 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5973
5974 /* Pass the packet to any BPF listeners. */
5975 bpf_mtap(ifp, m0);
5976 }
5977
5978 if (m0 != NULL) {
5979 ifp->if_flags |= IFF_OACTIVE;
5980 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5981 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5982 m_freem(m0);
5983 }
5984
5985 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5986 /* No more slots; notify upper layer. */
5987 ifp->if_flags |= IFF_OACTIVE;
5988 }
5989
5990 if (sent) {
5991 /* Set a watchdog timer in case the chip flakes out. */
5992 ifp->if_timer = 5;
5993 }
5994 }
5995
5996 /* Interrupt */
5997
5998 /*
5999 * wm_txeof:
6000 *
6001 * Helper; handle transmit interrupts.
6002 */
6003 static int
6004 wm_txeof(struct wm_softc *sc)
6005 {
6006 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6007 struct wm_txsoft *txs;
6008 bool processed = false;
6009 int count = 0;
6010 int i;
6011 uint8_t status;
6012
6013 if (sc->sc_stopping)
6014 return 0;
6015
6016 ifp->if_flags &= ~IFF_OACTIVE;
6017
6018 /*
6019 * Go through the Tx list and free mbufs for those
6020 * frames which have been transmitted.
6021 */
6022 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
6023 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
6024 txs = &sc->sc_txsoft[i];
6025
6026 DPRINTF(WM_DEBUG_TX,
6027 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
6028
6029 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
6030 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6031
6032 status =
6033 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
6034 if ((status & WTX_ST_DD) == 0) {
6035 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
6036 BUS_DMASYNC_PREREAD);
6037 break;
6038 }
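		/*
		 * The DD (descriptor done) bit is written back by the
		 * hardware once it has finished with a descriptor, so
		 * a clear DD on the last descriptor of a job means
		 * that job, and every job after it, is still in
		 * flight: re-arm the sync and stop scanning.
		 */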
6039
6040 processed = true;
6041 count++;
6042 DPRINTF(WM_DEBUG_TX,
6043 ("%s: TX: job %d done: descs %d..%d\n",
6044 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
6045 txs->txs_lastdesc));
6046
6047 /*
6048 * XXX We should probably be using the statistics
6049 * XXX registers, but I don't know if they exist
6050 * XXX on chips before the i82544.
6051 */
6052
6053 #ifdef WM_EVENT_COUNTERS
6054 if (status & WTX_ST_TU)
6055 WM_EVCNT_INCR(&sc->sc_ev_tu);
6056 #endif /* WM_EVENT_COUNTERS */
6057
6058 if (status & (WTX_ST_EC|WTX_ST_LC)) {
6059 ifp->if_oerrors++;
6060 if (status & WTX_ST_LC)
6061 log(LOG_WARNING, "%s: late collision\n",
6062 device_xname(sc->sc_dev));
6063 else if (status & WTX_ST_EC) {
6064 ifp->if_collisions += 16;
6065 log(LOG_WARNING, "%s: excessive collisions\n",
6066 device_xname(sc->sc_dev));
6067 }
6068 } else
6069 ifp->if_opackets++;
6070
6071 sc->sc_txfree += txs->txs_ndesc;
6072 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
6073 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6074 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
6075 m_freem(txs->txs_mbuf);
6076 txs->txs_mbuf = NULL;
6077 }
6078
6079 /* Update the dirty transmit buffer pointer. */
6080 sc->sc_txsdirty = i;
6081 DPRINTF(WM_DEBUG_TX,
6082 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
6083
6084 if (count != 0)
6085 rnd_add_uint32(&sc->rnd_source, count);
6086
6087 /*
6088 * If there are no more pending transmissions, cancel the watchdog
6089 * timer.
6090 */
6091 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
6092 ifp->if_timer = 0;
6093
6094 return processed;
6095 }
6096
6097 /*
6098 * wm_rxeof:
6099 *
6100 * Helper; handle receive interrupts.
6101 */
6102 static void
6103 wm_rxeof(struct wm_softc *sc)
6104 {
6105 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6106 struct wm_rxsoft *rxs;
6107 struct mbuf *m;
6108 int i, len;
6109 int count = 0;
6110 uint8_t status, errors;
6111 uint16_t vlantag;
6112
6113 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
6114 rxs = &sc->sc_rxsoft[i];
6115
6116 DPRINTF(WM_DEBUG_RX,
6117 ("%s: RX: checking descriptor %d\n",
6118 device_xname(sc->sc_dev), i));
6119
6120 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6121
6122 status = sc->sc_rxdescs[i].wrx_status;
6123 errors = sc->sc_rxdescs[i].wrx_errors;
6124 len = le16toh(sc->sc_rxdescs[i].wrx_len);
6125 vlantag = sc->sc_rxdescs[i].wrx_special;
6126
6127 if ((status & WRX_ST_DD) == 0) {
6128 /* We have processed all of the receive descriptors. */
6129 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
6130 break;
6131 }
6132
6133 count++;
6134 if (__predict_false(sc->sc_rxdiscard)) {
6135 DPRINTF(WM_DEBUG_RX,
6136 ("%s: RX: discarding contents of descriptor %d\n",
6137 device_xname(sc->sc_dev), i));
6138 WM_INIT_RXDESC(sc, i);
6139 if (status & WRX_ST_EOP) {
6140 /* Reset our state. */
6141 DPRINTF(WM_DEBUG_RX,
6142 ("%s: RX: resetting rxdiscard -> 0\n",
6143 device_xname(sc->sc_dev)));
6144 sc->sc_rxdiscard = 0;
6145 }
6146 continue;
6147 }
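		/*
		 * sc_rxdiscard acts as a one-bit state machine: once a
		 * buffer of a multi-descriptor packet has been lost,
		 * every subsequent descriptor is recycled as above
		 * until the one carrying EOP (end of packet) finally
		 * clears the discard state.
		 */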
6148
6149 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6150 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
6151
6152 m = rxs->rxs_mbuf;
6153
6154 /*
6155 * Add a new receive buffer to the ring, unless of
6156 * course the length is zero. Treat the latter as a
6157 * failed mapping.
6158 */
6159 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
6160 /*
6161 * Failed, throw away what we've done so
6162 * far, and discard the rest of the packet.
6163 */
6164 ifp->if_ierrors++;
6165 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6166 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
6167 WM_INIT_RXDESC(sc, i);
6168 if ((status & WRX_ST_EOP) == 0)
6169 sc->sc_rxdiscard = 1;
6170 if (sc->sc_rxhead != NULL)
6171 m_freem(sc->sc_rxhead);
6172 WM_RXCHAIN_RESET(sc);
6173 DPRINTF(WM_DEBUG_RX,
6174 ("%s: RX: Rx buffer allocation failed, "
6175 "dropping packet%s\n", device_xname(sc->sc_dev),
6176 sc->sc_rxdiscard ? " (discard)" : ""));
6177 continue;
6178 }
6179
6180 m->m_len = len;
6181 sc->sc_rxlen += len;
6182 DPRINTF(WM_DEBUG_RX,
6183 ("%s: RX: buffer at %p len %d\n",
6184 device_xname(sc->sc_dev), m->m_data, len));
6185
6186 /* If this is not the end of the packet, keep looking. */
6187 if ((status & WRX_ST_EOP) == 0) {
6188 WM_RXCHAIN_LINK(sc, m);
6189 DPRINTF(WM_DEBUG_RX,
6190 ("%s: RX: not yet EOP, rxlen -> %d\n",
6191 device_xname(sc->sc_dev), sc->sc_rxlen));
6192 continue;
6193 }
6194
6195 /*
6196 * Okay, we have the entire packet now. The chip is
6197 		 * configured to include the FCS except on I350, I354
6198 		 * and I21[01] (not all chips can be configured to
6199 		 * strip it), so we need to trim it.
6200 		 * We may need to adjust the length of the previous
6201 		 * mbuf in the chain if the current mbuf is too short.
6202 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
6203 		 * register is always set on I350, so we don't trim it.
6204 */
6205 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
6206 && (sc->sc_type != WM_T_I210)
6207 && (sc->sc_type != WM_T_I211)) {
6208 if (m->m_len < ETHER_CRC_LEN) {
6209 sc->sc_rxtail->m_len
6210 -= (ETHER_CRC_LEN - m->m_len);
6211 m->m_len = 0;
6212 } else
6213 m->m_len -= ETHER_CRC_LEN;
6214 len = sc->sc_rxlen - ETHER_CRC_LEN;
6215 } else
6216 len = sc->sc_rxlen;
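		/*
		 * Trim example: ETHER_CRC_LEN is 4, so if the final
		 * mbuf holds only 2 bytes, all of it plus the last 2
		 * bytes of the previous mbuf are FCS; the previous
		 * mbuf is shortened by 2, the final one zeroed, and
		 * sc_rxlen - 4 bytes of real frame remain.
		 */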
6217
6218 WM_RXCHAIN_LINK(sc, m);
6219
6220 *sc->sc_rxtailp = NULL;
6221 m = sc->sc_rxhead;
6222
6223 WM_RXCHAIN_RESET(sc);
6224
6225 DPRINTF(WM_DEBUG_RX,
6226 ("%s: RX: have entire packet, len -> %d\n",
6227 device_xname(sc->sc_dev), len));
6228
6229 /* If an error occurred, update stats and drop the packet. */
6230 if (errors &
6231 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
6232 if (errors & WRX_ER_SE)
6233 log(LOG_WARNING, "%s: symbol error\n",
6234 device_xname(sc->sc_dev));
6235 else if (errors & WRX_ER_SEQ)
6236 log(LOG_WARNING, "%s: receive sequence error\n",
6237 device_xname(sc->sc_dev));
6238 else if (errors & WRX_ER_CE)
6239 log(LOG_WARNING, "%s: CRC error\n",
6240 device_xname(sc->sc_dev));
6241 m_freem(m);
6242 continue;
6243 }
6244
6245 /* No errors. Receive the packet. */
6246 m->m_pkthdr.rcvif = ifp;
6247 m->m_pkthdr.len = len;
6248
6249 /*
6250 * If VLANs are enabled, VLAN packets have been unwrapped
6251 * for us. Associate the tag with the packet.
6252 */
6253 		/* XXX should check for i350 and i354 */
6254 if ((status & WRX_ST_VP) != 0) {
6255 VLAN_INPUT_TAG(ifp, m,
6256 le16toh(vlantag),
6257 continue);
6258 }
6259
6260 /* Set up checksum info for this packet. */
6261 if ((status & WRX_ST_IXSM) == 0) {
6262 if (status & WRX_ST_IPCS) {
6263 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
6264 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
6265 if (errors & WRX_ER_IPE)
6266 m->m_pkthdr.csum_flags |=
6267 M_CSUM_IPv4_BAD;
6268 }
6269 if (status & WRX_ST_TCPCS) {
6270 /*
6271 * Note: we don't know if this was TCP or UDP,
6272 * so we just set both bits, and expect the
6273 * upper layers to deal.
6274 */
6275 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
6276 m->m_pkthdr.csum_flags |=
6277 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6278 M_CSUM_TCPv6 | M_CSUM_UDPv6;
6279 if (errors & WRX_ER_TCPE)
6280 m->m_pkthdr.csum_flags |=
6281 M_CSUM_TCP_UDP_BAD;
6282 }
6283 }
6284
6285 ifp->if_ipackets++;
6286
6287 WM_RX_UNLOCK(sc);
6288
6289 /* Pass this up to any BPF listeners. */
6290 bpf_mtap(ifp, m);
6291
6292 /* Pass it on. */
6293 (*ifp->if_input)(ifp, m);
6294
6295 WM_RX_LOCK(sc);
6296
6297 if (sc->sc_stopping)
6298 break;
6299 }
6300
6301 /* Update the receive pointer. */
6302 sc->sc_rxptr = i;
6303 if (count != 0)
6304 rnd_add_uint32(&sc->rnd_source, count);
6305
6306 DPRINTF(WM_DEBUG_RX,
6307 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
6308 }
6309
6310 /*
6311 * wm_linkintr_gmii:
6312 *
6313 * Helper; handle link interrupts for GMII.
6314 */
6315 static void
6316 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
6317 {
6318
6319 KASSERT(WM_TX_LOCKED(sc));
6320
6321 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6322 __func__));
6323
6324 if (icr & ICR_LSC) {
6325 DPRINTF(WM_DEBUG_LINK,
6326 ("%s: LINK: LSC -> mii_pollstat\n",
6327 device_xname(sc->sc_dev)));
6328 mii_pollstat(&sc->sc_mii);
6329 if (sc->sc_type == WM_T_82543) {
6330 int miistatus, active;
6331
6332 /*
6333 * With 82543, we need to force speed and
6334 * duplex on the MAC equal to what the PHY
6335 * speed and duplex configuration is.
6336 */
6337 miistatus = sc->sc_mii.mii_media_status;
6338
6339 if (miistatus & IFM_ACTIVE) {
6340 active = sc->sc_mii.mii_media_active;
6341 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6342 switch (IFM_SUBTYPE(active)) {
6343 case IFM_10_T:
6344 sc->sc_ctrl |= CTRL_SPEED_10;
6345 break;
6346 case IFM_100_TX:
6347 sc->sc_ctrl |= CTRL_SPEED_100;
6348 break;
6349 case IFM_1000_T:
6350 sc->sc_ctrl |= CTRL_SPEED_1000;
6351 break;
6352 default:
6353 /*
6354 					 * Fiber?
6355 					 * Should not enter here.
6356 */
6357 printf("unknown media (%x)\n",
6358 active);
6359 break;
6360 }
6361 if (active & IFM_FDX)
6362 sc->sc_ctrl |= CTRL_FD;
6363 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6364 }
6365 } else if ((sc->sc_type == WM_T_ICH8)
6366 && (sc->sc_phytype == WMPHY_IGP_3)) {
6367 wm_kmrn_lock_loss_workaround_ich8lan(sc);
6368 } else if (sc->sc_type == WM_T_PCH) {
6369 wm_k1_gig_workaround_hv(sc,
6370 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
6371 }
6372
6373 if ((sc->sc_phytype == WMPHY_82578)
6374 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
6375 == IFM_1000_T)) {
6376
6377 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
6378 delay(200*1000); /* XXX too big */
6379
6380 /* Link stall fix for link up */
6381 wm_gmii_hv_writereg(sc->sc_dev, 1,
6382 HV_MUX_DATA_CTRL,
6383 HV_MUX_DATA_CTRL_GEN_TO_MAC
6384 | HV_MUX_DATA_CTRL_FORCE_SPEED);
6385 wm_gmii_hv_writereg(sc->sc_dev, 1,
6386 HV_MUX_DATA_CTRL,
6387 HV_MUX_DATA_CTRL_GEN_TO_MAC);
6388 }
6389 }
6390 } else if (icr & ICR_RXSEQ) {
6391 DPRINTF(WM_DEBUG_LINK,
6392 ("%s: LINK Receive sequence error\n",
6393 device_xname(sc->sc_dev)));
6394 }
6395 }
6396
6397 /*
6398 * wm_linkintr_tbi:
6399 *
6400 * Helper; handle link interrupts for TBI mode.
6401 */
6402 static void
6403 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
6404 {
6405 uint32_t status;
6406
6407 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6408 __func__));
6409
6410 status = CSR_READ(sc, WMREG_STATUS);
6411 if (icr & ICR_LSC) {
6412 if (status & STATUS_LU) {
6413 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
6414 device_xname(sc->sc_dev),
6415 (status & STATUS_FD) ? "FDX" : "HDX"));
6416 /*
6417 * NOTE: CTRL will update TFCE and RFCE automatically,
6418 * so we should update sc->sc_ctrl
6419 */
6420
6421 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6422 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6423 sc->sc_fcrtl &= ~FCRTL_XONE;
6424 if (status & STATUS_FD)
6425 sc->sc_tctl |=
6426 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6427 else
6428 sc->sc_tctl |=
6429 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6430 if (sc->sc_ctrl & CTRL_TFCE)
6431 sc->sc_fcrtl |= FCRTL_XONE;
6432 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6433 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6434 WMREG_OLD_FCRTL : WMREG_FCRTL,
6435 sc->sc_fcrtl);
6436 sc->sc_tbi_linkup = 1;
6437 } else {
6438 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
6439 device_xname(sc->sc_dev)));
6440 sc->sc_tbi_linkup = 0;
6441 }
6442 /* Update LED */
6443 wm_tbi_serdes_set_linkled(sc);
6444 } else if (icr & ICR_RXSEQ) {
6445 DPRINTF(WM_DEBUG_LINK,
6446 ("%s: LINK: Receive sequence error\n",
6447 device_xname(sc->sc_dev)));
6448 }
6449 }
6450
6451 /*
6452 * wm_linkintr_serdes:
6453 *
6454  *	Helper; handle link interrupts for SERDES mode.
6455 */
6456 static void
6457 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
6458 {
6459 struct mii_data *mii = &sc->sc_mii;
6460 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6461 uint32_t pcs_adv, pcs_lpab, reg;
6462
6463 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6464 __func__));
6465
6466 if (icr & ICR_LSC) {
6467 /* Check PCS */
6468 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6469 if ((reg & PCS_LSTS_LINKOK) != 0) {
6470 mii->mii_media_status |= IFM_ACTIVE;
6471 sc->sc_tbi_linkup = 1;
6472 } else {
6473 mii->mii_media_status |= IFM_NONE;
6474 sc->sc_tbi_linkup = 0;
6475 wm_tbi_serdes_set_linkled(sc);
6476 return;
6477 }
6478 mii->mii_media_active |= IFM_1000_SX;
6479 if ((reg & PCS_LSTS_FDX) != 0)
6480 mii->mii_media_active |= IFM_FDX;
6481 else
6482 mii->mii_media_active |= IFM_HDX;
6483 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6484 /* Check flow */
6485 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6486 if ((reg & PCS_LSTS_AN_COMP) == 0) {
6487 DPRINTF(WM_DEBUG_LINK,
6488 ("XXX LINKOK but not ACOMP\n"));
6489 return;
6490 }
6491 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
6492 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
6493 DPRINTF(WM_DEBUG_LINK,
6494 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
6495 if ((pcs_adv & TXCW_SYM_PAUSE)
6496 && (pcs_lpab & TXCW_SYM_PAUSE)) {
6497 mii->mii_media_active |= IFM_FLOW
6498 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
6499 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
6500 && (pcs_adv & TXCW_ASYM_PAUSE)
6501 && (pcs_lpab & TXCW_SYM_PAUSE)
6502 && (pcs_lpab & TXCW_ASYM_PAUSE))
6503 mii->mii_media_active |= IFM_FLOW
6504 | IFM_ETH_TXPAUSE;
6505 else if ((pcs_adv & TXCW_SYM_PAUSE)
6506 && (pcs_adv & TXCW_ASYM_PAUSE)
6507 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
6508 && (pcs_lpab & TXCW_ASYM_PAUSE))
6509 mii->mii_media_active |= IFM_FLOW
6510 | IFM_ETH_RXPAUSE;
6511 }
6512 /* Update LED */
6513 wm_tbi_serdes_set_linkled(sc);
6514 } else {
6515 DPRINTF(WM_DEBUG_LINK,
6516 ("%s: LINK: Receive sequence error\n",
6517 device_xname(sc->sc_dev)));
6518 }
6519 }
6520
6521 /*
6522 * wm_linkintr:
6523 *
6524 * Helper; handle link interrupts.
6525 */
6526 static void
6527 wm_linkintr(struct wm_softc *sc, uint32_t icr)
6528 {
6529
6530 if (sc->sc_flags & WM_F_HAS_MII)
6531 wm_linkintr_gmii(sc, icr);
6532 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
6533 && (sc->sc_type >= WM_T_82575))
6534 wm_linkintr_serdes(sc, icr);
6535 else
6536 wm_linkintr_tbi(sc, icr);
6537 }
6538
6539 /*
6540 * wm_intr_legacy:
6541 *
6542 * Interrupt service routine for INTx and MSI.
6543 */
6544 static int
6545 wm_intr_legacy(void *arg)
6546 {
6547 struct wm_softc *sc = arg;
6548 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6549 uint32_t icr, rndval = 0;
6550 int handled = 0;
6551
6552 DPRINTF(WM_DEBUG_TX,
6553 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
6554 while (1 /* CONSTCOND */) {
6555 icr = CSR_READ(sc, WMREG_ICR);
6556 if ((icr & sc->sc_icr) == 0)
6557 break;
6558 if (rndval == 0)
6559 rndval = icr;
6560
6561 WM_RX_LOCK(sc);
6562
6563 if (sc->sc_stopping) {
6564 WM_RX_UNLOCK(sc);
6565 break;
6566 }
6567
6568 handled = 1;
6569
6570 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6571 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
6572 DPRINTF(WM_DEBUG_RX,
6573 ("%s: RX: got Rx intr 0x%08x\n",
6574 device_xname(sc->sc_dev),
6575 icr & (ICR_RXDMT0|ICR_RXT0)));
6576 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6577 }
6578 #endif
6579 wm_rxeof(sc);
6580
6581 WM_RX_UNLOCK(sc);
6582 WM_TX_LOCK(sc);
6583
6584 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6585 if (icr & ICR_TXDW) {
6586 DPRINTF(WM_DEBUG_TX,
6587 ("%s: TX: got TXDW interrupt\n",
6588 device_xname(sc->sc_dev)));
6589 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6590 }
6591 #endif
6592 wm_txeof(sc);
6593
6594 if (icr & (ICR_LSC|ICR_RXSEQ)) {
6595 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6596 wm_linkintr(sc, icr);
6597 }
6598
6599 WM_TX_UNLOCK(sc);
6600
6601 if (icr & ICR_RXO) {
6602 #if defined(WM_DEBUG)
6603 log(LOG_WARNING, "%s: Receive overrun\n",
6604 device_xname(sc->sc_dev));
6605 #endif /* defined(WM_DEBUG) */
6606 }
6607 }
6608
6609 rnd_add_uint32(&sc->rnd_source, rndval);
6610
6611 if (handled) {
6612 /* Try to get more packets going. */
6613 ifp->if_start(ifp);
6614 }
6615
6616 return handled;
6617 }
6618
6619 #ifdef WM_MSI_MSIX
6620 /*
6621 * wm_txintr_msix:
6622 *
6623 * Interrupt service routine for TX complete interrupt for MSI-X.
6624 */
6625 static int
6626 wm_txintr_msix(void *arg)
6627 {
6628 struct wm_softc *sc = arg;
6629 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6630 int handled = 0;
6631
6632 DPRINTF(WM_DEBUG_TX,
6633 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
6634
6635 if (sc->sc_type == WM_T_82574)
6636 CSR_WRITE(sc, WMREG_IMC, ICR_TXQ0); /* 82574 only */
6637 else if (sc->sc_type == WM_T_82575)
6638 CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE0);
6639 else
6640 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_TXINTR_IDX);
6641
6642 WM_TX_LOCK(sc);
6643
6644 if (sc->sc_stopping)
6645 goto out;
6646
6647 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6648 handled = wm_txeof(sc);
6649
6650 out:
6651 WM_TX_UNLOCK(sc);
6652
6653 if (sc->sc_type == WM_T_82574)
6654 CSR_WRITE(sc, WMREG_IMS, ICR_TXQ0); /* 82574 only */
6655 else if (sc->sc_type == WM_T_82575)
6656 CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE0);
6657 else
6658 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_TXINTR_IDX);
6659
6660 if (handled) {
6661 /* Try to get more packets going. */
6662 ifp->if_start(ifp);
6663 }
6664
6665 return handled;
6666 }
6667
6668 /*
6669 * wm_rxintr_msix:
6670 *
6671 * Interrupt service routine for RX interrupt for MSI-X.
6672 */
6673 static int
6674 wm_rxintr_msix(void *arg)
6675 {
6676 struct wm_softc *sc = arg;
6677
6678 	DPRINTF(WM_DEBUG_RX,
6679 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
6680
6681 if (sc->sc_type == WM_T_82574)
6682 CSR_WRITE(sc, WMREG_IMC, ICR_RXQ0); /* 82574 only */
6683 else if (sc->sc_type == WM_T_82575)
6684 CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE0);
6685 else
6686 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_RXINTR_IDX);
6687
6688 WM_RX_LOCK(sc);
6689
6690 if (sc->sc_stopping)
6691 goto out;
6692
6693 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6694 wm_rxeof(sc);
6695
6696 out:
6697 WM_RX_UNLOCK(sc);
6698
6699 if (sc->sc_type == WM_T_82574)
6700 CSR_WRITE(sc, WMREG_IMS, ICR_RXQ0);
6701 else if (sc->sc_type == WM_T_82575)
6702 CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE0);
6703 else
6704 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_RXINTR_IDX);
6705
6706 return 1;
6707 }
6708
6709 /*
6710 * wm_linkintr_msix:
6711 *
6712 * Interrupt service routine for link status change for MSI-X.
6713 */
6714 static int
6715 wm_linkintr_msix(void *arg)
6716 {
6717 struct wm_softc *sc = arg;
6718
6719 	DPRINTF(WM_DEBUG_LINK,
6720 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
6721
6722 if (sc->sc_type == WM_T_82574)
6723 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER); /* 82574 only */
6724 else if (sc->sc_type == WM_T_82575)
6725 CSR_WRITE(sc, WMREG_EIMC, EITR_OTHER);
6726 else
6727 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_LINKINTR_IDX);
6728 WM_TX_LOCK(sc);
6729 if (sc->sc_stopping)
6730 goto out;
6731
6732 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6733 wm_linkintr(sc, ICR_LSC);
6734
6735 out:
6736 WM_TX_UNLOCK(sc);
6737
6738 if (sc->sc_type == WM_T_82574)
6739 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
6740 else if (sc->sc_type == WM_T_82575)
6741 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
6742 else
6743 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_LINKINTR_IDX);
6744
6745 return 1;
6746 }
6747 #endif /* WM_MSI_MSIX */
6748
6749 /*
6750 * Media related.
6751 * GMII, SGMII, TBI (and SERDES)
6752 */
6753
6754 /* Common */
6755
6756 /*
6757 * wm_tbi_serdes_set_linkled:
6758 *
6759 * Update the link LED on TBI and SERDES devices.
6760 */
6761 static void
6762 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
6763 {
6764
6765 if (sc->sc_tbi_linkup)
6766 sc->sc_ctrl |= CTRL_SWDPIN(0);
6767 else
6768 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6769
6770 /* 82540 or newer devices are active low */
6771 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
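	/*
	 * The XOR above implements the polarity flip: on 82540 and
	 * newer the LED pin is active low, so "link up" must drive
	 * SWDPIN(0) to 0, and XORing the bit we just set (or cleared)
	 * inverts it on exactly those chips.
	 */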
6772
6773 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6774 }
6775
6776 /* GMII related */
6777
6778 /*
6779 * wm_gmii_reset:
6780 *
6781 * Reset the PHY.
6782 */
6783 static void
6784 wm_gmii_reset(struct wm_softc *sc)
6785 {
6786 uint32_t reg;
6787 int rv;
6788
6789 /* get phy semaphore */
6790 switch (sc->sc_type) {
6791 case WM_T_82571:
6792 case WM_T_82572:
6793 case WM_T_82573:
6794 case WM_T_82574:
6795 case WM_T_82583:
6796 /* XXX should get sw semaphore, too */
6797 rv = wm_get_swsm_semaphore(sc);
6798 break;
6799 case WM_T_82575:
6800 case WM_T_82576:
6801 case WM_T_82580:
6802 case WM_T_I350:
6803 case WM_T_I354:
6804 case WM_T_I210:
6805 case WM_T_I211:
6806 case WM_T_80003:
6807 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6808 break;
6809 case WM_T_ICH8:
6810 case WM_T_ICH9:
6811 case WM_T_ICH10:
6812 case WM_T_PCH:
6813 case WM_T_PCH2:
6814 case WM_T_PCH_LPT:
6815 rv = wm_get_swfwhw_semaphore(sc);
6816 break;
6817 default:
6818 		/* nothing to do */
6819 rv = 0;
6820 break;
6821 }
6822 if (rv != 0) {
6823 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6824 __func__);
6825 return;
6826 }
6827
6828 switch (sc->sc_type) {
6829 case WM_T_82542_2_0:
6830 case WM_T_82542_2_1:
6831 /* null */
6832 break;
6833 case WM_T_82543:
6834 /*
6835 * With 82543, we need to force speed and duplex on the MAC
6836 * equal to what the PHY speed and duplex configuration is.
6837 * In addition, we need to perform a hardware reset on the PHY
6838 * to take it out of reset.
6839 */
6840 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6841 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6842
6843 /* The PHY reset pin is active-low. */
6844 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6845 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6846 CTRL_EXT_SWDPIN(4));
6847 reg |= CTRL_EXT_SWDPIO(4);
6848
6849 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6850 CSR_WRITE_FLUSH(sc);
6851 delay(10*1000);
6852
6853 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6854 CSR_WRITE_FLUSH(sc);
6855 delay(150);
6856 #if 0
6857 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6858 #endif
6859 delay(20*1000); /* XXX extra delay to get PHY ID? */
6860 break;
6861 case WM_T_82544: /* reset 10000us */
6862 case WM_T_82540:
6863 case WM_T_82545:
6864 case WM_T_82545_3:
6865 case WM_T_82546:
6866 case WM_T_82546_3:
6867 case WM_T_82541:
6868 case WM_T_82541_2:
6869 case WM_T_82547:
6870 case WM_T_82547_2:
6871 case WM_T_82571: /* reset 100us */
6872 case WM_T_82572:
6873 case WM_T_82573:
6874 case WM_T_82574:
6875 case WM_T_82575:
6876 case WM_T_82576:
6877 case WM_T_82580:
6878 case WM_T_I350:
6879 case WM_T_I354:
6880 case WM_T_I210:
6881 case WM_T_I211:
6882 case WM_T_82583:
6883 case WM_T_80003:
6884 /* generic reset */
6885 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6886 CSR_WRITE_FLUSH(sc);
6887 delay(20000);
6888 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6889 CSR_WRITE_FLUSH(sc);
6890 delay(20000);
6891
6892 if ((sc->sc_type == WM_T_82541)
6893 || (sc->sc_type == WM_T_82541_2)
6894 || (sc->sc_type == WM_T_82547)
6895 || (sc->sc_type == WM_T_82547_2)) {
6896 			/* workarounds for igp are done in igp_reset() */
6897 /* XXX add code to set LED after phy reset */
6898 }
6899 break;
6900 case WM_T_ICH8:
6901 case WM_T_ICH9:
6902 case WM_T_ICH10:
6903 case WM_T_PCH:
6904 case WM_T_PCH2:
6905 case WM_T_PCH_LPT:
6906 /* generic reset */
6907 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6908 CSR_WRITE_FLUSH(sc);
6909 delay(100);
6910 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6911 CSR_WRITE_FLUSH(sc);
6912 delay(150);
6913 break;
6914 default:
6915 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6916 __func__);
6917 break;
6918 }
6919
6920 /* release PHY semaphore */
6921 switch (sc->sc_type) {
6922 case WM_T_82571:
6923 case WM_T_82572:
6924 case WM_T_82573:
6925 case WM_T_82574:
6926 case WM_T_82583:
6927 /* XXX should put sw semaphore, too */
6928 wm_put_swsm_semaphore(sc);
6929 break;
6930 case WM_T_82575:
6931 case WM_T_82576:
6932 case WM_T_82580:
6933 case WM_T_I350:
6934 case WM_T_I354:
6935 case WM_T_I210:
6936 case WM_T_I211:
6937 case WM_T_80003:
6938 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6939 break;
6940 case WM_T_ICH8:
6941 case WM_T_ICH9:
6942 case WM_T_ICH10:
6943 case WM_T_PCH:
6944 case WM_T_PCH2:
6945 case WM_T_PCH_LPT:
6946 wm_put_swfwhw_semaphore(sc);
6947 break;
6948 default:
6949 		/* nothing to do */
6950 rv = 0;
6951 break;
6952 }
6953
6954 /* get_cfg_done */
6955 wm_get_cfg_done(sc);
6956
6957 /* extra setup */
6958 switch (sc->sc_type) {
6959 case WM_T_82542_2_0:
6960 case WM_T_82542_2_1:
6961 case WM_T_82543:
6962 case WM_T_82544:
6963 case WM_T_82540:
6964 case WM_T_82545:
6965 case WM_T_82545_3:
6966 case WM_T_82546:
6967 case WM_T_82546_3:
6968 case WM_T_82541_2:
6969 case WM_T_82547_2:
6970 case WM_T_82571:
6971 case WM_T_82572:
6972 case WM_T_82573:
6973 case WM_T_82574:
6974 case WM_T_82575:
6975 case WM_T_82576:
6976 case WM_T_82580:
6977 case WM_T_I350:
6978 case WM_T_I354:
6979 case WM_T_I210:
6980 case WM_T_I211:
6981 case WM_T_82583:
6982 case WM_T_80003:
6983 /* null */
6984 break;
6985 case WM_T_82541:
6986 case WM_T_82547:
6987 		/* XXX Actively configure the LED after PHY reset */
6988 break;
6989 case WM_T_ICH8:
6990 case WM_T_ICH9:
6991 case WM_T_ICH10:
6992 case WM_T_PCH:
6993 case WM_T_PCH2:
6994 case WM_T_PCH_LPT:
6995 		/* Allow time for h/w to get to a quiescent state after reset */
6996 delay(10*1000);
6997
6998 if (sc->sc_type == WM_T_PCH)
6999 wm_hv_phy_workaround_ich8lan(sc);
7000
7001 if (sc->sc_type == WM_T_PCH2)
7002 wm_lv_phy_workaround_ich8lan(sc);
7003
7004 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
7005 /*
7006 * dummy read to clear the phy wakeup bit after lcd
7007 * reset
7008 */
7009 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
7010 }
7011
7012 /*
7013 		 * XXX Configure the LCD with the extended configuration region
7014 * in NVM
7015 */
7016
7017 /* Configure the LCD with the OEM bits in NVM */
7018 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
7019 || (sc->sc_type == WM_T_PCH_LPT)) {
7020 /*
7021 * Disable LPLU.
7022 * XXX It seems that 82567 has LPLU, too.
7023 */
7024 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
7025 reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
7026 reg |= HV_OEM_BITS_ANEGNOW;
7027 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
7028 }
7029 break;
7030 default:
7031 panic("%s: unknown type\n", __func__);
7032 break;
7033 }
7034 }
7035
7036 /*
7037 * wm_get_phy_id_82575:
7038 *
7039 * Return PHY ID. Return -1 if it failed.
7040 */
7041 static int
7042 wm_get_phy_id_82575(struct wm_softc *sc)
7043 {
7044 uint32_t reg;
7045 int phyid = -1;
7046
7047 /* XXX */
7048 if ((sc->sc_flags & WM_F_SGMII) == 0)
7049 return -1;
7050
7051 if (wm_sgmii_uses_mdio(sc)) {
7052 switch (sc->sc_type) {
7053 case WM_T_82575:
7054 case WM_T_82576:
7055 reg = CSR_READ(sc, WMREG_MDIC);
7056 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
7057 break;
7058 case WM_T_82580:
7059 case WM_T_I350:
7060 case WM_T_I354:
7061 case WM_T_I210:
7062 case WM_T_I211:
7063 reg = CSR_READ(sc, WMREG_MDICNFG);
7064 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
7065 break;
7066 default:
7067 return -1;
7068 }
7069 }
7070
7071 return phyid;
7072 }
7073
7074
7075 /*
7076 * wm_gmii_mediainit:
7077 *
7078 * Initialize media for use on 1000BASE-T devices.
7079 */
7080 static void
7081 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
7082 {
7083 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7084 struct mii_data *mii = &sc->sc_mii;
7085 uint32_t reg;
7086
7087 /* We have GMII. */
7088 sc->sc_flags |= WM_F_HAS_MII;
7089
7090 if (sc->sc_type == WM_T_80003)
7091 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7092 else
7093 sc->sc_tipg = TIPG_1000T_DFLT;
7094
7095 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
7096 if ((sc->sc_type == WM_T_82580)
7097 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
7098 || (sc->sc_type == WM_T_I211)) {
7099 reg = CSR_READ(sc, WMREG_PHPM);
7100 reg &= ~PHPM_GO_LINK_D;
7101 CSR_WRITE(sc, WMREG_PHPM, reg);
7102 }
7103
7104 /*
7105 * Let the chip set speed/duplex on its own based on
7106 * signals from the PHY.
7107 * XXXbouyer - I'm not sure this is right for the 80003,
7108 * the em driver only sets CTRL_SLU here - but it seems to work.
7109 */
7110 sc->sc_ctrl |= CTRL_SLU;
7111 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7112
7113 /* Initialize our media structures and probe the GMII. */
7114 mii->mii_ifp = ifp;
7115
7116 /*
7117 * Determine the PHY access method.
7118 *
7119 * For SGMII, use SGMII specific method.
7120 *
7121 * For some devices, we can determine the PHY access method
7122 * from sc_type.
7123 *
7124 * For ICH and PCH variants, it's difficult to determine the PHY
7125 * access method by sc_type, so use the PCI product ID for some
7126 * devices.
7127 * For other ICH8 variants, try to use igp's method. If the PHY
7128  * can't be detected, then use bm's method.
7129 */
7130 switch (prodid) {
7131 case PCI_PRODUCT_INTEL_PCH_M_LM:
7132 case PCI_PRODUCT_INTEL_PCH_M_LC:
7133 /* 82577 */
7134 sc->sc_phytype = WMPHY_82577;
7135 break;
7136 case PCI_PRODUCT_INTEL_PCH_D_DM:
7137 case PCI_PRODUCT_INTEL_PCH_D_DC:
7138 /* 82578 */
7139 sc->sc_phytype = WMPHY_82578;
7140 break;
7141 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
7142 case PCI_PRODUCT_INTEL_PCH2_LV_V:
7143 /* 82579 */
7144 sc->sc_phytype = WMPHY_82579;
7145 break;
7146 case PCI_PRODUCT_INTEL_82801I_BM:
7147 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
7148 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
7149 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
7150 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
7151 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
7152 /* 82567 */
7153 sc->sc_phytype = WMPHY_BM;
7154 mii->mii_readreg = wm_gmii_bm_readreg;
7155 mii->mii_writereg = wm_gmii_bm_writereg;
7156 break;
7157 default:
7158 if (((sc->sc_flags & WM_F_SGMII) != 0)
7159 		    && !wm_sgmii_uses_mdio(sc)) {
7160 /* SGMII */
7161 mii->mii_readreg = wm_sgmii_readreg;
7162 mii->mii_writereg = wm_sgmii_writereg;
7163 } else if (sc->sc_type >= WM_T_80003) {
7164 /* 80003 */
7165 mii->mii_readreg = wm_gmii_i80003_readreg;
7166 mii->mii_writereg = wm_gmii_i80003_writereg;
7167 } else if (sc->sc_type >= WM_T_I210) {
7168 /* I210 and I211 */
7169 mii->mii_readreg = wm_gmii_gs40g_readreg;
7170 mii->mii_writereg = wm_gmii_gs40g_writereg;
7171 } else if (sc->sc_type >= WM_T_82580) {
7172 /* 82580, I350 and I354 */
7173 sc->sc_phytype = WMPHY_82580;
7174 mii->mii_readreg = wm_gmii_82580_readreg;
7175 mii->mii_writereg = wm_gmii_82580_writereg;
7176 } else if (sc->sc_type >= WM_T_82544) {
7177 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
7178 mii->mii_readreg = wm_gmii_i82544_readreg;
7179 mii->mii_writereg = wm_gmii_i82544_writereg;
7180 } else {
7181 mii->mii_readreg = wm_gmii_i82543_readreg;
7182 mii->mii_writereg = wm_gmii_i82543_writereg;
7183 }
7184 break;
7185 }
7186 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
7187 /* All PCH* use _hv_ */
7188 mii->mii_readreg = wm_gmii_hv_readreg;
7189 mii->mii_writereg = wm_gmii_hv_writereg;
7190 }
7191 mii->mii_statchg = wm_gmii_statchg;
7192
7193 wm_gmii_reset(sc);
7194
7195 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7196 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
7197 wm_gmii_mediastatus);
7198
7199 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
7200 || (sc->sc_type == WM_T_82580)
7201 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
7202 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
7203 if ((sc->sc_flags & WM_F_SGMII) == 0) {
7204 /* Attach only one port */
7205 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
7206 MII_OFFSET_ANY, MIIF_DOPAUSE);
7207 } else {
7208 int i, id;
7209 uint32_t ctrl_ext;
7210
7211 id = wm_get_phy_id_82575(sc);
7212 if (id != -1) {
7213 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
7214 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
7215 }
7216 if ((id == -1)
7217 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
7218 /* Power on sgmii phy if it is disabled */
7219 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7220 CSR_WRITE(sc, WMREG_CTRL_EXT,
7221 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
7222 CSR_WRITE_FLUSH(sc);
7223 delay(300*1000); /* XXX too long */
7224
7225 				/* try PHY addresses 1 through 7 */
7226 for (i = 1; i < 8; i++)
7227 mii_attach(sc->sc_dev, &sc->sc_mii,
7228 0xffffffff, i, MII_OFFSET_ANY,
7229 MIIF_DOPAUSE);
7230
7231 /* restore previous sfp cage power state */
7232 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7233 }
7234 }
7235 } else {
7236 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7237 MII_OFFSET_ANY, MIIF_DOPAUSE);
7238 }
7239
7240 /*
7241 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
7242 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
7243 */
7244 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
7245 (LIST_FIRST(&mii->mii_phys) == NULL)) {
7246 wm_set_mdio_slow_mode_hv(sc);
7247 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7248 MII_OFFSET_ANY, MIIF_DOPAUSE);
7249 }
7250
7251 /*
7252 * (For ICH8 variants)
7253 * If PHY detection failed, use BM's r/w function and retry.
7254 */
7255 if (LIST_FIRST(&mii->mii_phys) == NULL) {
7256 /* if failed, retry with *_bm_* */
7257 mii->mii_readreg = wm_gmii_bm_readreg;
7258 mii->mii_writereg = wm_gmii_bm_writereg;
7259
7260 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7261 MII_OFFSET_ANY, MIIF_DOPAUSE);
7262 }
7263
7264 if (LIST_FIRST(&mii->mii_phys) == NULL) {
7265 		/* No PHY was found */
7266 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
7267 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
7268 sc->sc_phytype = WMPHY_NONE;
7269 } else {
7270 /*
7271 * PHY Found!
7272 * Check PHY type.
7273 */
7274 uint32_t model;
7275 struct mii_softc *child;
7276
7277 child = LIST_FIRST(&mii->mii_phys);
7278 if (device_is_a(child->mii_dev, "igphy")) {
7279 struct igphy_softc *isc = (struct igphy_softc *)child;
7280
7281 model = isc->sc_mii.mii_mpd_model;
7282 if (model == MII_MODEL_yyINTEL_I82566)
7283 sc->sc_phytype = WMPHY_IGP_3;
7284 }
7285
7286 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
7287 }
7288 }
7289
7290 /*
7291 * wm_gmii_mediachange: [ifmedia interface function]
7292 *
7293 * Set hardware to newly-selected media on a 1000BASE-T device.
7294 */
7295 static int
7296 wm_gmii_mediachange(struct ifnet *ifp)
7297 {
7298 struct wm_softc *sc = ifp->if_softc;
7299 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7300 int rc;
7301
7302 if ((ifp->if_flags & IFF_UP) == 0)
7303 return 0;
7304
7305 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7306 sc->sc_ctrl |= CTRL_SLU;
7307 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7308 || (sc->sc_type > WM_T_82543)) {
7309 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
7310 } else {
7311 sc->sc_ctrl &= ~CTRL_ASDE;
7312 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7313 if (ife->ifm_media & IFM_FDX)
7314 sc->sc_ctrl |= CTRL_FD;
7315 switch (IFM_SUBTYPE(ife->ifm_media)) {
7316 case IFM_10_T:
7317 sc->sc_ctrl |= CTRL_SPEED_10;
7318 break;
7319 case IFM_100_TX:
7320 sc->sc_ctrl |= CTRL_SPEED_100;
7321 break;
7322 case IFM_1000_T:
7323 sc->sc_ctrl |= CTRL_SPEED_1000;
7324 break;
7325 default:
7326 panic("wm_gmii_mediachange: bad media 0x%x",
7327 ife->ifm_media);
7328 }
7329 }
7330 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7331 if (sc->sc_type <= WM_T_82543)
7332 wm_gmii_reset(sc);
7333
7334 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
7335 return 0;
7336 return rc;
7337 }
7338
7339 /*
7340 * wm_gmii_mediastatus: [ifmedia interface function]
7341 *
7342 * Get the current interface media status on a 1000BASE-T device.
7343 */
7344 static void
7345 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7346 {
7347 struct wm_softc *sc = ifp->if_softc;
7348
7349 ether_mediastatus(ifp, ifmr);
7350 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
7351 | sc->sc_flowflags;
7352 }
7353
7354 #define MDI_IO CTRL_SWDPIN(2)
7355 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
7356 #define MDI_CLK CTRL_SWDPIN(3)
7357
7358 static void
7359 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
7360 {
7361 uint32_t i, v;
7362
7363 v = CSR_READ(sc, WMREG_CTRL);
7364 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
7365 v |= MDI_DIR | CTRL_SWDPIO(3);
7366
7367 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
7368 if (data & i)
7369 v |= MDI_IO;
7370 else
7371 v &= ~MDI_IO;
7372 CSR_WRITE(sc, WMREG_CTRL, v);
7373 CSR_WRITE_FLUSH(sc);
7374 delay(10);
7375 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7376 CSR_WRITE_FLUSH(sc);
7377 delay(10);
7378 CSR_WRITE(sc, WMREG_CTRL, v);
7379 CSR_WRITE_FLUSH(sc);
7380 delay(10);
7381 }
7382 }
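/*
 * A note on the waveform produced above: each data bit is presented on
 * MDI_IO (most significant bit first) and then latched by a 10us-high,
 * 10us-low pulse on MDI_CLK, which is the bit-banged MII
 * management-frame scheme the i82543 requires.
 */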
7383
7384 static uint32_t
7385 wm_i82543_mii_recvbits(struct wm_softc *sc)
7386 {
7387 uint32_t v, i, data = 0;
7388
7389 v = CSR_READ(sc, WMREG_CTRL);
7390 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
7391 v |= CTRL_SWDPIO(3);
7392
7393 CSR_WRITE(sc, WMREG_CTRL, v);
7394 CSR_WRITE_FLUSH(sc);
7395 delay(10);
7396 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7397 CSR_WRITE_FLUSH(sc);
7398 delay(10);
7399 CSR_WRITE(sc, WMREG_CTRL, v);
7400 CSR_WRITE_FLUSH(sc);
7401 delay(10);
7402
7403 for (i = 0; i < 16; i++) {
7404 data <<= 1;
7405 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7406 CSR_WRITE_FLUSH(sc);
7407 delay(10);
7408 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
7409 data |= 1;
7410 CSR_WRITE(sc, WMREG_CTRL, v);
7411 CSR_WRITE_FLUSH(sc);
7412 delay(10);
7413 }
7414
7415 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7416 CSR_WRITE_FLUSH(sc);
7417 delay(10);
7418 CSR_WRITE(sc, WMREG_CTRL, v);
7419 CSR_WRITE_FLUSH(sc);
7420 delay(10);
7421
7422 return data;
7423 }
7424
7425 #undef MDI_IO
7426 #undef MDI_DIR
7427 #undef MDI_CLK
7428
7429 /*
7430 * wm_gmii_i82543_readreg: [mii interface function]
7431 *
7432 * Read a PHY register on the GMII (i82543 version).
7433 */
7434 static int
7435 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
7436 {
7437 struct wm_softc *sc = device_private(self);
7438 int rv;
7439
7440 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
7441 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
7442 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
7443 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
7444
7445 DPRINTF(WM_DEBUG_GMII,
7446 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
7447 device_xname(sc->sc_dev), phy, reg, rv));
7448
7449 return rv;
7450 }
7451
7452 /*
7453 * wm_gmii_i82543_writereg: [mii interface function]
7454 *
7455 * Write a PHY register on the GMII (i82543 version).
7456 */
7457 static void
7458 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
7459 {
7460 struct wm_softc *sc = device_private(self);
7461
7462 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
7463 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
7464 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
7465 (MII_COMMAND_START << 30), 32);
7466 }
7467
7468 /*
7469 * wm_gmii_i82544_readreg: [mii interface function]
7470 *
7471 * Read a PHY register on the GMII.
7472 */
7473 static int
7474 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
7475 {
7476 struct wm_softc *sc = device_private(self);
7477 uint32_t mdic = 0;
7478 int i, rv;
7479
7480 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
7481 MDIC_REGADD(reg));
7482
7483 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7484 mdic = CSR_READ(sc, WMREG_MDIC);
7485 if (mdic & MDIC_READY)
7486 break;
7487 delay(50);
7488 }
7489
7490 if ((mdic & MDIC_READY) == 0) {
7491 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
7492 device_xname(sc->sc_dev), phy, reg);
7493 rv = 0;
7494 } else if (mdic & MDIC_E) {
7495 #if 0 /* This is normal if no PHY is present. */
7496 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
7497 device_xname(sc->sc_dev), phy, reg);
7498 #endif
7499 rv = 0;
7500 } else {
7501 rv = MDIC_DATA(mdic);
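		/* All ones usually means no PHY responded; normalize to 0. */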
7502 if (rv == 0xffff)
7503 rv = 0;
7504 }
7505
7506 return rv;
7507 }
7508
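#if 0
/*
 * Illustrative sketch (not compiled): a paged PHY register access built
 * from the two MDIC primitives above.  The page-select register and the
 * shift (MII_IGPHY_PAGE_SELECT / BME1000_PAGE_SHIFT here) depend on the
 * PHY; see the bm/hv/gs40g wrappers below for the real variants.
 */
static int
wm_gmii_example_paged_read(device_t self, int phy, int page, int reg)
{

	wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
	    page << BME1000_PAGE_SHIFT);
	return wm_gmii_i82544_readreg(self, phy, reg);
}
#endif
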
7509 /*
7510 * wm_gmii_i82544_writereg: [mii interface function]
7511 *
7512 * Write a PHY register on the GMII.
7513 */
7514 static void
7515 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
7516 {
7517 struct wm_softc *sc = device_private(self);
7518 uint32_t mdic = 0;
7519 int i;
7520
7521 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
7522 MDIC_REGADD(reg) | MDIC_DATA(val));
7523
7524 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7525 mdic = CSR_READ(sc, WMREG_MDIC);
7526 if (mdic & MDIC_READY)
7527 break;
7528 delay(50);
7529 }
7530
7531 if ((mdic & MDIC_READY) == 0)
7532 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
7533 device_xname(sc->sc_dev), phy, reg);
7534 else if (mdic & MDIC_E)
7535 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
7536 device_xname(sc->sc_dev), phy, reg);
7537 }
7538
7539 /*
7540 * wm_gmii_i80003_readreg: [mii interface function]
7541 *
7542  *	Read a PHY register on the kumeran bus.
7543  *	This could be handled by the PHY layer if we didn't have to lock
7544  *	the resource ...
7545 */
7546 static int
7547 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
7548 {
7549 struct wm_softc *sc = device_private(self);
7550 int sem;
7551 int rv;
7552
7553 if (phy != 1) /* only one PHY on kumeran bus */
7554 return 0;
7555
7556 sem = swfwphysem[sc->sc_funcid];
7557 if (wm_get_swfw_semaphore(sc, sem)) {
7558 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7559 __func__);
7560 return 0;
7561 }
7562
7563 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7564 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7565 reg >> GG82563_PAGE_SHIFT);
7566 } else {
7567 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7568 reg >> GG82563_PAGE_SHIFT);
7569 }
7570 	/* Wait another 200us to work around a bug in the MDIC ready bit */
7571 delay(200);
7572 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7573 delay(200);
7574
7575 wm_put_swfw_semaphore(sc, sem);
7576 return rv;
7577 }
7578
7579 /*
7580 * wm_gmii_i80003_writereg: [mii interface function]
7581 *
7582  *	Write a PHY register on the kumeran bus.
7583  *	This could be handled by the PHY layer if we didn't have to lock
7584  *	the resource ...
7585 */
7586 static void
7587 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
7588 {
7589 struct wm_softc *sc = device_private(self);
7590 int sem;
7591
7592 if (phy != 1) /* only one PHY on kumeran bus */
7593 return;
7594
7595 sem = swfwphysem[sc->sc_funcid];
7596 if (wm_get_swfw_semaphore(sc, sem)) {
7597 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7598 __func__);
7599 return;
7600 }
7601
7602 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7603 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7604 reg >> GG82563_PAGE_SHIFT);
7605 } else {
7606 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7607 reg >> GG82563_PAGE_SHIFT);
7608 }
7609 	/* Wait another 200us to work around a bug in the MDIC ready bit */
7610 delay(200);
7611 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7612 delay(200);
7613
7614 wm_put_swfw_semaphore(sc, sem);
7615 }
7616
7617 /*
7618 * wm_gmii_bm_readreg: [mii interface function]
7619 *
7620  *	Read a PHY register on the BM PHY.
7621  *	This could be handled by the PHY layer if we didn't have to lock
7622  *	the resource ...
7623 */
7624 static int
7625 wm_gmii_bm_readreg(device_t self, int phy, int reg)
7626 {
7627 struct wm_softc *sc = device_private(self);
7628 int sem;
7629 int rv;
7630
7631 sem = swfwphysem[sc->sc_funcid];
7632 if (wm_get_swfw_semaphore(sc, sem)) {
7633 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7634 __func__);
7635 return 0;
7636 }
7637
7638 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7639 if (phy == 1)
7640 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7641 reg);
7642 else
7643 wm_gmii_i82544_writereg(self, phy,
7644 GG82563_PHY_PAGE_SELECT,
7645 reg >> GG82563_PAGE_SHIFT);
7646 }
7647
7648 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7649 wm_put_swfw_semaphore(sc, sem);
7650 return rv;
7651 }
7652
7653 /*
7654 * wm_gmii_bm_writereg: [mii interface function]
7655 *
7656  *	Write a PHY register on the BM PHY.
7657  *	This could be handled by the PHY layer if we didn't have to lock
7658  *	the resource ...
7659 */
7660 static void
7661 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7662 {
7663 struct wm_softc *sc = device_private(self);
7664 int sem;
7665
7666 sem = swfwphysem[sc->sc_funcid];
7667 if (wm_get_swfw_semaphore(sc, sem)) {
7668 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7669 __func__);
7670 return;
7671 }
7672
7673 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7674 if (phy == 1)
7675 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7676 reg);
7677 else
7678 wm_gmii_i82544_writereg(self, phy,
7679 GG82563_PHY_PAGE_SELECT,
7680 reg >> GG82563_PAGE_SHIFT);
7681 }
7682
7683 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7684 wm_put_swfw_semaphore(sc, sem);
7685 }
7686
7687 static void
7688 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7689 {
7690 struct wm_softc *sc = device_private(self);
7691 uint16_t regnum = BM_PHY_REG_NUM(offset);
7692 uint16_t wuce;
7693
7694 /* XXX Gig must be disabled for MDIO accesses to page 800 */
7695 if (sc->sc_type == WM_T_PCH) {
7696 		/* XXX the e1000 driver does nothing here... why? */
7697 }
7698
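	/*
	 * Access sequence: select page 769, enable host wakeup register
	 * access, select page 800, write the target register number,
	 * perform the data access, then restore the saved WUC enable value.
	 */
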
7699 /* Set page 769 */
7700 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7701 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7702
7703 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7704
7705 wuce &= ~BM_WUC_HOST_WU_BIT;
7706 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7707 wuce | BM_WUC_ENABLE_BIT);
7708
7709 /* Select page 800 */
7710 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7711 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7712
7713 /* Write page 800 */
7714 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7715
7716 if (rd)
7717 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7718 else
7719 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7720
7721 /* Set page 769 */
7722 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7723 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7724
7725 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7726 }
7727
7728 /*
7729 * wm_gmii_hv_readreg: [mii interface function]
7730 *
7731  *	Read a PHY register on the HV (PCH) PHY.
7732  *	This could be handled by the PHY layer if we didn't have to lock
7733  *	the resource ...
7734 */
7735 static int
7736 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7737 {
7738 struct wm_softc *sc = device_private(self);
7739 uint16_t page = BM_PHY_REG_PAGE(reg);
7740 uint16_t regnum = BM_PHY_REG_NUM(reg);
7741 uint16_t val;
7742 int rv;
7743
7744 if (wm_get_swfwhw_semaphore(sc)) {
7745 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7746 __func__);
7747 return 0;
7748 }
7749
7750 /* XXX Workaround failure in MDIO access while cable is disconnected */
7751 if (sc->sc_phytype == WMPHY_82577) {
7752 /* XXX must write */
7753 }
7754
7755 /* Page 800 works differently than the rest so it has its own func */
7756 if (page == BM_WUC_PAGE) {
7757 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7758 		return val;
7759 }
7760
7761 /*
7762 	 * Pages above 0 but below 768 work differently than the rest and
7763 	 * are not handled here yet.
7764 	 */
7765 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7766 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7767 		return 0;
7768 }
7769
7770 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7771 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7772 page << BME1000_PAGE_SHIFT);
7773 }
7774
7775 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7776 wm_put_swfwhw_semaphore(sc);
7777 return rv;
7778 }
7779
7780 /*
7781 * wm_gmii_hv_writereg: [mii interface function]
7782 *
7783  *	Write a PHY register on the HV (PCH) PHY.
7784  *	This could be handled by the PHY layer if we didn't have to lock
7785  *	the resource ...
7786 */
7787 static void
7788 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7789 {
7790 struct wm_softc *sc = device_private(self);
7791 uint16_t page = BM_PHY_REG_PAGE(reg);
7792 uint16_t regnum = BM_PHY_REG_NUM(reg);
7793
7794 if (wm_get_swfwhw_semaphore(sc)) {
7795 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7796 __func__);
7797 return;
7798 }
7799
7800 /* XXX Workaround failure in MDIO access while cable is disconnected */
7801
7802 /* Page 800 works differently than the rest so it has its own func */
7803 if (page == BM_WUC_PAGE) {
7804 uint16_t tmp;
7805
7806 tmp = val;
7807 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7808 		return;
7809 }
7810
7811 /*
7812 	 * Pages above 0 but below 768 work differently than the rest and
7813 	 * are not handled here yet.
7814 	 */
7815 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7816 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7817 		return;
7818 }
7819
7820 /*
7821 * XXX Workaround MDIO accesses being disabled after entering IEEE
7822 * Power Down (whenever bit 11 of the PHY control register is set)
7823 */
7824
7825 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7826 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7827 page << BME1000_PAGE_SHIFT);
7828 }
7829
7830 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7831 wm_put_swfwhw_semaphore(sc);
7832 }
7833
7834 /*
7835 * wm_gmii_82580_readreg: [mii interface function]
7836 *
7837 * Read a PHY register on the 82580 and I350.
7838  *	This could be handled by the PHY layer if we didn't have to lock
7839  *	the resource ...
7840 */
7841 static int
7842 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7843 {
7844 struct wm_softc *sc = device_private(self);
7845 int sem;
7846 int rv;
7847
7848 sem = swfwphysem[sc->sc_funcid];
7849 if (wm_get_swfw_semaphore(sc, sem)) {
7850 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7851 __func__);
7852 return 0;
7853 }
7854
7855 rv = wm_gmii_i82544_readreg(self, phy, reg);
7856
7857 wm_put_swfw_semaphore(sc, sem);
7858 return rv;
7859 }
7860
7861 /*
7862 * wm_gmii_82580_writereg: [mii interface function]
7863 *
7864 * Write a PHY register on the 82580 and I350.
7865  *	This could be handled by the PHY layer if we didn't have to lock
7866  *	the resource ...
7867 */
7868 static void
7869 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7870 {
7871 struct wm_softc *sc = device_private(self);
7872 int sem;
7873
7874 sem = swfwphysem[sc->sc_funcid];
7875 if (wm_get_swfw_semaphore(sc, sem)) {
7876 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7877 __func__);
7878 return;
7879 }
7880
7881 wm_gmii_i82544_writereg(self, phy, reg, val);
7882
7883 wm_put_swfw_semaphore(sc, sem);
7884 }
7885
7886 /*
7887 * wm_gmii_gs40g_readreg: [mii interface function]
7888 *
7889  *	Read a PHY register on the I210 and I211.
7890  *	This could be handled by the PHY layer if we didn't have to lock
7891  *	the resource ...
7892 */
7893 static int
7894 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
7895 {
7896 struct wm_softc *sc = device_private(self);
7897 int sem;
7898 int page, offset;
7899 int rv;
7900
7901 /* Acquire semaphore */
7902 sem = swfwphysem[sc->sc_funcid];
7903 if (wm_get_swfw_semaphore(sc, sem)) {
7904 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7905 __func__);
7906 return 0;
7907 }
7908
7909 /* Page select */
7910 page = reg >> GS40G_PAGE_SHIFT;
7911 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
7912
7913 /* Read reg */
7914 offset = reg & GS40G_OFFSET_MASK;
7915 rv = wm_gmii_i82544_readreg(self, phy, offset);
7916
7917 wm_put_swfw_semaphore(sc, sem);
7918 return rv;
7919 }
7920
7921 /*
7922 * wm_gmii_gs40g_writereg: [mii interface function]
7923 *
7924 * Write a PHY register on the I210 and I211.
7925  *	This could be handled by the PHY layer if we didn't have to lock
7926  *	the resource ...
7927 */
7928 static void
7929 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
7930 {
7931 struct wm_softc *sc = device_private(self);
7932 int sem;
7933 int page, offset;
7934
7935 /* Acquire semaphore */
7936 sem = swfwphysem[sc->sc_funcid];
7937 if (wm_get_swfw_semaphore(sc, sem)) {
7938 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7939 __func__);
7940 return;
7941 }
7942
7943 /* Page select */
7944 page = reg >> GS40G_PAGE_SHIFT;
7945 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
7946
7947 /* Write reg */
7948 offset = reg & GS40G_OFFSET_MASK;
7949 wm_gmii_i82544_writereg(self, phy, offset, val);
7950
7951 /* Release semaphore */
7952 wm_put_swfw_semaphore(sc, sem);
7953 }
7954
7955 /*
7956 * wm_gmii_statchg: [mii interface function]
7957 *
7958 * Callback from MII layer when media changes.
7959 */
7960 static void
7961 wm_gmii_statchg(struct ifnet *ifp)
7962 {
7963 struct wm_softc *sc = ifp->if_softc;
7964 struct mii_data *mii = &sc->sc_mii;
7965
7966 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7967 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7968 sc->sc_fcrtl &= ~FCRTL_XONE;
7969
7970 /*
7971 * Get flow control negotiation result.
7972 */
7973 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7974 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7975 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7976 mii->mii_media_active &= ~IFM_ETH_FMASK;
7977 }
7978
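	/* Apply the negotiated (or configured) flow control settings. */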
7979 if (sc->sc_flowflags & IFM_FLOW) {
7980 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7981 sc->sc_ctrl |= CTRL_TFCE;
7982 sc->sc_fcrtl |= FCRTL_XONE;
7983 }
7984 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7985 sc->sc_ctrl |= CTRL_RFCE;
7986 }
7987
7988 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7989 DPRINTF(WM_DEBUG_LINK,
7990 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7991 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7992 } else {
7993 DPRINTF(WM_DEBUG_LINK,
7994 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7995 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7996 }
7997
7998 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7999 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8000 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
8001 : WMREG_FCRTL, sc->sc_fcrtl);
8002 if (sc->sc_type == WM_T_80003) {
8003 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
8004 case IFM_1000_T:
8005 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8006 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
8007 sc->sc_tipg = TIPG_1000T_80003_DFLT;
8008 break;
8009 default:
8010 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8011 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
8012 sc->sc_tipg = TIPG_10_100_80003_DFLT;
8013 break;
8014 }
8015 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
8016 }
8017 }
8018
8019 /*
8020 * wm_kmrn_readreg:
8021 *
8022 * Read a kumeran register
8023 */
8024 static int
8025 wm_kmrn_readreg(struct wm_softc *sc, int reg)
8026 {
8027 int rv;
8028
8029 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8030 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8031 aprint_error_dev(sc->sc_dev,
8032 "%s: failed to get semaphore\n", __func__);
8033 return 0;
8034 }
8035 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8036 if (wm_get_swfwhw_semaphore(sc)) {
8037 aprint_error_dev(sc->sc_dev,
8038 "%s: failed to get semaphore\n", __func__);
8039 return 0;
8040 }
8041 }
8042
8043 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8044 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8045 KUMCTRLSTA_REN);
8046 CSR_WRITE_FLUSH(sc);
8047 delay(2);
8048
8049 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
8050
8051 if (sc->sc_flags & WM_F_LOCK_SWFW)
8052 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8053 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8054 wm_put_swfwhw_semaphore(sc);
8055
8056 return rv;
8057 }
8058
8059 /*
8060 * wm_kmrn_writereg:
8061 *
8062 * Write a kumeran register
8063 */
8064 static void
8065 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
8066 {
8067
8068 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8069 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8070 aprint_error_dev(sc->sc_dev,
8071 "%s: failed to get semaphore\n", __func__);
8072 return;
8073 }
8074 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8075 if (wm_get_swfwhw_semaphore(sc)) {
8076 aprint_error_dev(sc->sc_dev,
8077 "%s: failed to get semaphore\n", __func__);
8078 return;
8079 }
8080 }
8081
8082 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8083 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8084 (val & KUMCTRLSTA_MASK));
8085
8086 if (sc->sc_flags & WM_F_LOCK_SWFW)
8087 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8088 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8089 wm_put_swfwhw_semaphore(sc);
8090 }
8091
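#if 0
/*
 * Illustrative sketch (not compiled): a read-modify-write of a Kumeran
 * register using the two helpers above.  Note that each helper takes
 * and drops the semaphore itself, so the sequence is not atomic.
 */
static void
wm_kmrn_rmw_example(struct wm_softc *sc, int reg, int set)
{
	int val;

	val = wm_kmrn_readreg(sc, reg);
	wm_kmrn_writereg(sc, reg, val | set);
}
#endif
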
8092 /* SGMII related */
8093
8094 /*
8095 * wm_sgmii_uses_mdio
8096 *
8097 * Check whether the transaction is to the internal PHY or the external
8098 * MDIO interface. Return true if it's MDIO.
8099 */
8100 static bool
8101 wm_sgmii_uses_mdio(struct wm_softc *sc)
8102 {
8103 uint32_t reg;
8104 bool ismdio = false;
8105
8106 switch (sc->sc_type) {
8107 case WM_T_82575:
8108 case WM_T_82576:
8109 reg = CSR_READ(sc, WMREG_MDIC);
8110 ismdio = ((reg & MDIC_DEST) != 0);
8111 break;
8112 case WM_T_82580:
8113 case WM_T_I350:
8114 case WM_T_I354:
8115 case WM_T_I210:
8116 case WM_T_I211:
8117 reg = CSR_READ(sc, WMREG_MDICNFG);
8118 ismdio = ((reg & MDICNFG_DEST) != 0);
8119 break;
8120 default:
8121 break;
8122 }
8123
8124 return ismdio;
8125 }
8126
8127 /*
8128 * wm_sgmii_readreg: [mii interface function]
8129 *
8130 * Read a PHY register on the SGMII
8131  *	This could be handled by the PHY layer if we didn't have to lock
8132  *	the resource ...
8133 */
8134 static int
8135 wm_sgmii_readreg(device_t self, int phy, int reg)
8136 {
8137 struct wm_softc *sc = device_private(self);
8138 uint32_t i2ccmd;
8139 int i, rv;
8140
8141 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8142 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8143 __func__);
8144 return 0;
8145 }
8146
8147 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8148 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8149 | I2CCMD_OPCODE_READ;
8150 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8151
8152 /* Poll the ready bit */
8153 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8154 delay(50);
8155 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8156 if (i2ccmd & I2CCMD_READY)
8157 break;
8158 }
8159 if ((i2ccmd & I2CCMD_READY) == 0)
8160 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
8161 if ((i2ccmd & I2CCMD_ERROR) != 0)
8162 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8163
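	/* The I2C interface returns the two data bytes swapped; undo it. */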
8164 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
8165
8166 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8167 return rv;
8168 }
8169
8170 /*
8171 * wm_sgmii_writereg: [mii interface function]
8172 *
8173 * Write a PHY register on the SGMII.
8174  *	This could be handled by the PHY layer if we didn't have to lock
8175  *	the resource ...
8176 */
8177 static void
8178 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
8179 {
8180 struct wm_softc *sc = device_private(self);
8181 uint32_t i2ccmd;
8182 int i;
8183 int val_swapped;
8184
8185 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8186 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8187 __func__);
8188 return;
8189 }
8190 /* Swap the data bytes for the I2C interface */
8191 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
8192 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8193 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8194 | I2CCMD_OPCODE_WRITE | val_swapped;
8195 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8196
8197 /* Poll the ready bit */
8198 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8199 delay(50);
8200 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8201 if (i2ccmd & I2CCMD_READY)
8202 break;
8203 }
8204 if ((i2ccmd & I2CCMD_READY) == 0)
8205 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
8206 if ((i2ccmd & I2CCMD_ERROR) != 0)
8207 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8208
8209 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8210 }
8211
8212 /* TBI related */
8213
8214 /*
8215 * wm_tbi_mediainit:
8216 *
8217 * Initialize media for use on 1000BASE-X devices.
8218 */
8219 static void
8220 wm_tbi_mediainit(struct wm_softc *sc)
8221 {
8222 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8223 const char *sep = "";
8224
8225 if (sc->sc_type < WM_T_82543)
8226 sc->sc_tipg = TIPG_WM_DFLT;
8227 else
8228 sc->sc_tipg = TIPG_LG_DFLT;
8229
8230 sc->sc_tbi_serdes_anegticks = 5;
8231
8232 /* Initialize our media structures */
8233 sc->sc_mii.mii_ifp = ifp;
8234 sc->sc_ethercom.ec_mii = &sc->sc_mii;
8235
8236 if ((sc->sc_type >= WM_T_82575)
8237 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
8238 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
8239 wm_serdes_mediachange, wm_serdes_mediastatus);
8240 else
8241 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
8242 wm_tbi_mediachange, wm_tbi_mediastatus);
8243
8244 /*
8245 * SWD Pins:
8246 *
8247 * 0 = Link LED (output)
8248 * 1 = Loss Of Signal (input)
8249 */
8250 sc->sc_ctrl |= CTRL_SWDPIO(0);
8251
8252 /* XXX Perhaps this is only for TBI */
8253 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
8254 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
8255
8256 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
8257 sc->sc_ctrl &= ~CTRL_LRST;
8258
8259 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8260
8261 #define ADD(ss, mm, dd) \
8262 do { \
8263 aprint_normal("%s%s", sep, ss); \
8264 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
8265 sep = ", "; \
8266 } while (/*CONSTCOND*/0)
8267
8268 aprint_normal_dev(sc->sc_dev, "");
8269
8270 /* Only 82545 is LX */
8271 if (sc->sc_type == WM_T_82545) {
8272 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
8273 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
8274 } else {
8275 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
8276 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
8277 }
8278 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
8279 aprint_normal("\n");
8280
8281 #undef ADD
8282
8283 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
8284 }
8285
8286 /*
8287 * wm_tbi_mediachange: [ifmedia interface function]
8288 *
8289 * Set hardware to newly-selected media on a 1000BASE-X device.
8290 */
8291 static int
8292 wm_tbi_mediachange(struct ifnet *ifp)
8293 {
8294 struct wm_softc *sc = ifp->if_softc;
8295 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8296 uint32_t status;
8297 int i;
8298
8299 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
8300 /* XXX need some work for >= 82571 and < 82575 */
8301 if (sc->sc_type < WM_T_82575)
8302 return 0;
8303 }
8304
8305 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8306 || (sc->sc_type >= WM_T_82575))
8307 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8308
8309 sc->sc_ctrl &= ~CTRL_LRST;
8310 sc->sc_txcw = TXCW_ANE;
8311 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8312 sc->sc_txcw |= TXCW_FD | TXCW_HD;
8313 else if (ife->ifm_media & IFM_FDX)
8314 sc->sc_txcw |= TXCW_FD;
8315 else
8316 sc->sc_txcw |= TXCW_HD;
8317
8318 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
8319 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
8320
8321 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
8322 device_xname(sc->sc_dev), sc->sc_txcw));
8323 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8324 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8325 CSR_WRITE_FLUSH(sc);
8326 delay(1000);
8327
8328 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
8329 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
8330
8331 /*
8332 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
8333 	 * if the optics detect a signal, and clear if they don't.
8334 */
8335 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
8336 /* Have signal; wait for the link to come up. */
8337 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
8338 delay(10000);
8339 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
8340 break;
8341 }
8342
8343 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
8344 device_xname(sc->sc_dev),i));
8345
8346 status = CSR_READ(sc, WMREG_STATUS);
8347 DPRINTF(WM_DEBUG_LINK,
8348 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
8349 device_xname(sc->sc_dev),status, STATUS_LU));
8350 if (status & STATUS_LU) {
8351 /* Link is up. */
8352 DPRINTF(WM_DEBUG_LINK,
8353 ("%s: LINK: set media -> link up %s\n",
8354 device_xname(sc->sc_dev),
8355 (status & STATUS_FD) ? "FDX" : "HDX"));
8356
8357 /*
8358 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
8359 			 * automatically, so re-read CTRL into sc->sc_ctrl.
8360 */
8361 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
8362 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8363 sc->sc_fcrtl &= ~FCRTL_XONE;
8364 if (status & STATUS_FD)
8365 sc->sc_tctl |=
8366 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8367 else
8368 sc->sc_tctl |=
8369 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8370 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
8371 sc->sc_fcrtl |= FCRTL_XONE;
8372 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8373 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
8374 WMREG_OLD_FCRTL : WMREG_FCRTL,
8375 sc->sc_fcrtl);
8376 sc->sc_tbi_linkup = 1;
8377 } else {
8378 if (i == WM_LINKUP_TIMEOUT)
8379 wm_check_for_link(sc);
8380 /* Link is down. */
8381 DPRINTF(WM_DEBUG_LINK,
8382 ("%s: LINK: set media -> link down\n",
8383 device_xname(sc->sc_dev)));
8384 sc->sc_tbi_linkup = 0;
8385 }
8386 } else {
8387 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
8388 device_xname(sc->sc_dev)));
8389 sc->sc_tbi_linkup = 0;
8390 }
8391
8392 wm_tbi_serdes_set_linkled(sc);
8393
8394 return 0;
8395 }
8396
8397 /*
8398 * wm_tbi_mediastatus: [ifmedia interface function]
8399 *
8400 * Get the current interface media status on a 1000BASE-X device.
8401 */
8402 static void
8403 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8404 {
8405 struct wm_softc *sc = ifp->if_softc;
8406 uint32_t ctrl, status;
8407
8408 ifmr->ifm_status = IFM_AVALID;
8409 ifmr->ifm_active = IFM_ETHER;
8410
8411 status = CSR_READ(sc, WMREG_STATUS);
8412 if ((status & STATUS_LU) == 0) {
8413 ifmr->ifm_active |= IFM_NONE;
8414 return;
8415 }
8416
8417 ifmr->ifm_status |= IFM_ACTIVE;
8418 /* Only 82545 is LX */
8419 if (sc->sc_type == WM_T_82545)
8420 ifmr->ifm_active |= IFM_1000_LX;
8421 else
8422 ifmr->ifm_active |= IFM_1000_SX;
8423 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
8424 ifmr->ifm_active |= IFM_FDX;
8425 else
8426 ifmr->ifm_active |= IFM_HDX;
8427 ctrl = CSR_READ(sc, WMREG_CTRL);
8428 if (ctrl & CTRL_RFCE)
8429 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
8430 if (ctrl & CTRL_TFCE)
8431 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
8432 }
8433
8434 /* XXX TBI only */
8435 static int
8436 wm_check_for_link(struct wm_softc *sc)
8437 {
8438 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8439 uint32_t rxcw;
8440 uint32_t ctrl;
8441 uint32_t status;
8442 uint32_t sig;
8443
8444 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
8445 /* XXX need some work for >= 82571 */
8446 if (sc->sc_type >= WM_T_82571) {
8447 sc->sc_tbi_linkup = 1;
8448 return 0;
8449 }
8450 }
8451
8452 rxcw = CSR_READ(sc, WMREG_RXCW);
8453 ctrl = CSR_READ(sc, WMREG_CTRL);
8454 status = CSR_READ(sc, WMREG_STATUS);
8455
8456 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8457
8458 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8459 device_xname(sc->sc_dev), __func__,
8460 ((ctrl & CTRL_SWDPIN(1)) == sig),
8461 ((status & STATUS_LU) != 0),
8462 ((rxcw & RXCW_C) != 0)
8463 ));
8464
8465 /*
8466 * SWDPIN LU RXCW
8467 * 0 0 0
8468 * 0 0 1 (should not happen)
8469 * 0 1 0 (should not happen)
8470 * 0 1 1 (should not happen)
8471 * 1 0 0 Disable autonego and force linkup
8472 * 1 0 1 got /C/ but not linkup yet
8473 * 1 1 0 (linkup)
8474 * 1 1 1 If IFM_AUTO, back to autonego
8475 *
8476 */
8477 if (((ctrl & CTRL_SWDPIN(1)) == sig)
8478 && ((status & STATUS_LU) == 0)
8479 && ((rxcw & RXCW_C) == 0)) {
8480 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8481 __func__));
8482 sc->sc_tbi_linkup = 0;
8483 /* Disable auto-negotiation in the TXCW register */
8484 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8485
8486 /*
8487 * Force link-up and also force full-duplex.
8488 *
8489 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
8490 		 * automatically, so re-read CTRL into sc->sc_ctrl.
8491 */
8492 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8493 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8494 } else if (((status & STATUS_LU) != 0)
8495 && ((rxcw & RXCW_C) != 0)
8496 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8497 sc->sc_tbi_linkup = 1;
8498 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8499 __func__));
8500 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8501 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8502 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8503 && ((rxcw & RXCW_C) != 0)) {
8504 DPRINTF(WM_DEBUG_LINK, ("/C/"));
8505 } else {
8506 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8507 status));
8508 }
8509
8510 return 0;
8511 }
8512
8513 /*
8514 * wm_tbi_tick:
8515 *
8516 * Check the link on TBI devices.
8517 * This function acts as mii_tick().
8518 */
8519 static void
8520 wm_tbi_tick(struct wm_softc *sc)
8521 {
8522 struct mii_data *mii = &sc->sc_mii;
8523 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8524 uint32_t status;
8525
8526 KASSERT(WM_TX_LOCKED(sc));
8527
8528 status = CSR_READ(sc, WMREG_STATUS);
8529
8530 /* XXX is this needed? */
8531 (void)CSR_READ(sc, WMREG_RXCW);
8532 (void)CSR_READ(sc, WMREG_CTRL);
8533
8534 /* set link status */
8535 if ((status & STATUS_LU) == 0) {
8536 DPRINTF(WM_DEBUG_LINK,
8537 ("%s: LINK: checklink -> down\n",
8538 device_xname(sc->sc_dev)));
8539 sc->sc_tbi_linkup = 0;
8540 } else if (sc->sc_tbi_linkup == 0) {
8541 DPRINTF(WM_DEBUG_LINK,
8542 ("%s: LINK: checklink -> up %s\n",
8543 device_xname(sc->sc_dev),
8544 (status & STATUS_FD) ? "FDX" : "HDX"));
8545 sc->sc_tbi_linkup = 1;
8546 sc->sc_tbi_serdes_ticks = 0;
8547 }
8548
8549 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
8550 goto setled;
8551
8552 if ((status & STATUS_LU) == 0) {
8553 sc->sc_tbi_linkup = 0;
8554 /* If the timer expired, retry autonegotiation */
8555 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8556 && (++sc->sc_tbi_serdes_ticks
8557 >= sc->sc_tbi_serdes_anegticks)) {
8558 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8559 sc->sc_tbi_serdes_ticks = 0;
8560 /*
8561 * Reset the link, and let autonegotiation do
8562 * its thing
8563 */
8564 sc->sc_ctrl |= CTRL_LRST;
8565 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8566 CSR_WRITE_FLUSH(sc);
8567 delay(1000);
8568 sc->sc_ctrl &= ~CTRL_LRST;
8569 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8570 CSR_WRITE_FLUSH(sc);
8571 delay(1000);
8572 CSR_WRITE(sc, WMREG_TXCW,
8573 sc->sc_txcw & ~TXCW_ANE);
8574 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8575 }
8576 }
8577
8578 setled:
8579 wm_tbi_serdes_set_linkled(sc);
8580 }
8581
8582 /* SERDES related */
8583 static void
8584 wm_serdes_power_up_link_82575(struct wm_softc *sc)
8585 {
8586 uint32_t reg;
8587
8588 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
8589 && ((sc->sc_flags & WM_F_SGMII) == 0))
8590 return;
8591
8592 reg = CSR_READ(sc, WMREG_PCS_CFG);
8593 reg |= PCS_CFG_PCS_EN;
8594 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
8595
8596 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8597 reg &= ~CTRL_EXT_SWDPIN(3);
8598 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8599 CSR_WRITE_FLUSH(sc);
8600 }
8601
8602 static int
8603 wm_serdes_mediachange(struct ifnet *ifp)
8604 {
8605 struct wm_softc *sc = ifp->if_softc;
8606 bool pcs_autoneg = true; /* XXX */
8607 uint32_t ctrl_ext, pcs_lctl, reg;
8608
8609 /* XXX Currently, this function is not called on 8257[12] */
8610 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8611 || (sc->sc_type >= WM_T_82575))
8612 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8613
8614 wm_serdes_power_up_link_82575(sc);
8615
8616 sc->sc_ctrl |= CTRL_SLU;
8617
8618 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
8619 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
8620
8621 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8622 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
8623 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
8624 case CTRL_EXT_LINK_MODE_SGMII:
8625 pcs_autoneg = true;
8626 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
8627 break;
8628 case CTRL_EXT_LINK_MODE_1000KX:
8629 pcs_autoneg = false;
8630 /* FALLTHROUGH */
8631 default:
8632 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
8633 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
8634 pcs_autoneg = false;
8635 }
8636 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
8637 | CTRL_FRCFDX;
8638 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
8639 }
8640 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8641
8642 if (pcs_autoneg) {
8643 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
8644 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
8645
8646 reg = CSR_READ(sc, WMREG_PCS_ANADV);
8647 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
8648 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
8649 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
8650 } else
8651 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
8652
8653 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
8654
8656 return 0;
8657 }
8658
8659 static void
8660 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8661 {
8662 struct wm_softc *sc = ifp->if_softc;
8663 struct mii_data *mii = &sc->sc_mii;
8664 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8665 uint32_t pcs_adv, pcs_lpab, reg;
8666
8667 ifmr->ifm_status = IFM_AVALID;
8668 ifmr->ifm_active = IFM_ETHER;
8669
8670 /* Check PCS */
8671 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8672 if ((reg & PCS_LSTS_LINKOK) == 0) {
8673 ifmr->ifm_active |= IFM_NONE;
8674 sc->sc_tbi_linkup = 0;
8675 goto setled;
8676 }
8677
8678 sc->sc_tbi_linkup = 1;
8679 ifmr->ifm_status |= IFM_ACTIVE;
8680 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
8681 if ((reg & PCS_LSTS_FDX) != 0)
8682 ifmr->ifm_active |= IFM_FDX;
8683 else
8684 ifmr->ifm_active |= IFM_HDX;
8685 mii->mii_media_active &= ~IFM_ETH_FMASK;
8686 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
8687 /* Check flow */
8688 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8689 if ((reg & PCS_LSTS_AN_COMP) == 0) {
8690 printf("XXX LINKOK but not ACOMP\n");
8691 goto setled;
8692 }
8693 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
8694 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
8695 printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
8696 if ((pcs_adv & TXCW_SYM_PAUSE)
8697 && (pcs_lpab & TXCW_SYM_PAUSE)) {
8698 mii->mii_media_active |= IFM_FLOW
8699 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
8700 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
8701 && (pcs_adv & TXCW_ASYM_PAUSE)
8702 && (pcs_lpab & TXCW_SYM_PAUSE)
8703 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8704 mii->mii_media_active |= IFM_FLOW
8705 | IFM_ETH_TXPAUSE;
8706 } else if ((pcs_adv & TXCW_SYM_PAUSE)
8707 && (pcs_adv & TXCW_ASYM_PAUSE)
8708 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
8709 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8710 mii->mii_media_active |= IFM_FLOW
8711 | IFM_ETH_RXPAUSE;
8712 		} else {
			/* No flow control negotiated */
8713 		}
8714 }
8715 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8716 | (mii->mii_media_active & IFM_ETH_FMASK);
8717 setled:
8718 wm_tbi_serdes_set_linkled(sc);
8719 }
8720
8721 /*
8722 * wm_serdes_tick:
8723 *
8724 * Check the link on serdes devices.
8725 */
8726 static void
8727 wm_serdes_tick(struct wm_softc *sc)
8728 {
8729 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8730 struct mii_data *mii = &sc->sc_mii;
8731 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8732 uint32_t reg;
8733
8734 KASSERT(WM_TX_LOCKED(sc));
8735
8736 mii->mii_media_status = IFM_AVALID;
8737 mii->mii_media_active = IFM_ETHER;
8738
8739 /* Check PCS */
8740 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8741 if ((reg & PCS_LSTS_LINKOK) != 0) {
8742 mii->mii_media_status |= IFM_ACTIVE;
8743 sc->sc_tbi_linkup = 1;
8744 sc->sc_tbi_serdes_ticks = 0;
8745 mii->mii_media_active |= IFM_1000_SX; /* XXX */
8746 if ((reg & PCS_LSTS_FDX) != 0)
8747 mii->mii_media_active |= IFM_FDX;
8748 else
8749 mii->mii_media_active |= IFM_HDX;
8750 } else {
8751 mii->mii_media_status |= IFM_NONE;
8752 sc->sc_tbi_linkup = 0;
8753 /* If the timer expired, retry autonegotiation */
8754 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8755 && (++sc->sc_tbi_serdes_ticks
8756 >= sc->sc_tbi_serdes_anegticks)) {
8757 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8758 sc->sc_tbi_serdes_ticks = 0;
8759 /* XXX */
8760 wm_serdes_mediachange(ifp);
8761 }
8762 }
8763
8764 wm_tbi_serdes_set_linkled(sc);
8765 }
8766
8767 /* SFP related */
8768
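/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte of SFP module data through the I2CCMD register.
 *	Returns 0 on success, -1 on timeout or error.
 */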
8769 static int
8770 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
8771 {
8772 uint32_t i2ccmd;
8773 int i;
8774
8775 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
8776 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8777
8778 /* Poll the ready bit */
8779 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8780 delay(50);
8781 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8782 if (i2ccmd & I2CCMD_READY)
8783 break;
8784 }
8785 if ((i2ccmd & I2CCMD_READY) == 0)
8786 return -1;
8787 if ((i2ccmd & I2CCMD_ERROR) != 0)
8788 return -1;
8789
8790 *data = i2ccmd & 0x00ff;
8791
8792 return 0;
8793 }
8794
8795 static uint32_t
8796 wm_sfp_get_media_type(struct wm_softc *sc)
8797 {
8798 uint32_t ctrl_ext;
8799 uint8_t val = 0;
8800 int timeout = 3;
8801 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
8802 int rv = -1;
8803
8804 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8805 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
8806 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
8807 CSR_WRITE_FLUSH(sc);
8808
8809 /* Read SFP module data */
8810 while (timeout) {
8811 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
8812 if (rv == 0)
8813 break;
8814 delay(100*1000); /* XXX too big */
8815 timeout--;
8816 }
8817 if (rv != 0)
8818 goto out;
8819 switch (val) {
8820 case SFF_SFP_ID_SFF:
8821 aprint_normal_dev(sc->sc_dev,
8822 "Module/Connector soldered to board\n");
8823 break;
8824 case SFF_SFP_ID_SFP:
8825 aprint_normal_dev(sc->sc_dev, "SFP\n");
8826 break;
8827 case SFF_SFP_ID_UNKNOWN:
8828 goto out;
8829 default:
8830 break;
8831 }
8832
8833 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
8834 if (rv != 0) {
8835 goto out;
8836 }
8837
8838 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
8839 mediatype = WM_MEDIATYPE_SERDES;
8840 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
8841 sc->sc_flags |= WM_F_SGMII;
8842 mediatype = WM_MEDIATYPE_COPPER;
8843 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
8844 sc->sc_flags |= WM_F_SGMII;
8845 mediatype = WM_MEDIATYPE_SERDES;
8846 }
8847
8848 out:
8849 /* Restore I2C interface setting */
8850 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8851
8852 return mediatype;
8853 }

8854 /*
8855 * NVM related.
8856 * Microwire, SPI (w/wo EERD) and Flash.
8857 */
8858
8859 /* Both spi and uwire */
8860
8861 /*
8862 * wm_eeprom_sendbits:
8863 *
8864 * Send a series of bits to the EEPROM.
8865 */
8866 static void
8867 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
8868 {
8869 uint32_t reg;
8870 int x;
8871
8872 reg = CSR_READ(sc, WMREG_EECD);
8873
8874 for (x = nbits; x > 0; x--) {
8875 if (bits & (1U << (x - 1)))
8876 reg |= EECD_DI;
8877 else
8878 reg &= ~EECD_DI;
8879 CSR_WRITE(sc, WMREG_EECD, reg);
8880 CSR_WRITE_FLUSH(sc);
8881 delay(2);
8882 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8883 CSR_WRITE_FLUSH(sc);
8884 delay(2);
8885 CSR_WRITE(sc, WMREG_EECD, reg);
8886 CSR_WRITE_FLUSH(sc);
8887 delay(2);
8888 }
8889 }
8890
8891 /*
8892 * wm_eeprom_recvbits:
8893 *
8894 * Receive a series of bits from the EEPROM.
8895 */
8896 static void
8897 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
8898 {
8899 uint32_t reg, val;
8900 int x;
8901
8902 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
8903
8904 val = 0;
8905 for (x = nbits; x > 0; x--) {
8906 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8907 CSR_WRITE_FLUSH(sc);
8908 delay(2);
8909 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
8910 val |= (1U << (x - 1));
8911 CSR_WRITE(sc, WMREG_EECD, reg);
8912 CSR_WRITE_FLUSH(sc);
8913 delay(2);
8914 }
8915 *valp = val;
8916 }
8917
8918 /* Microwire */
8919
8920 /*
8921 * wm_nvm_read_uwire:
8922 *
8923 * Read a word from the EEPROM using the MicroWire protocol.
8924 */
8925 static int
8926 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8927 {
8928 uint32_t reg, val;
8929 int i;
8930
8931 for (i = 0; i < wordcnt; i++) {
8932 /* Clear SK and DI. */
8933 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
8934 CSR_WRITE(sc, WMREG_EECD, reg);
8935
8936 /*
8937 * XXX: workaround for a bug in qemu-0.12.x and prior
8938 * and Xen.
8939 *
8940 		 * We use this workaround only for the 82540 because qemu's
8941 		 * e1000 acts as an 82540.
8942 */
8943 if (sc->sc_type == WM_T_82540) {
8944 reg |= EECD_SK;
8945 CSR_WRITE(sc, WMREG_EECD, reg);
8946 reg &= ~EECD_SK;
8947 CSR_WRITE(sc, WMREG_EECD, reg);
8948 CSR_WRITE_FLUSH(sc);
8949 delay(2);
8950 }
8951 /* XXX: end of workaround */
8952
8953 /* Set CHIP SELECT. */
8954 reg |= EECD_CS;
8955 CSR_WRITE(sc, WMREG_EECD, reg);
8956 CSR_WRITE_FLUSH(sc);
8957 delay(2);
8958
8959 /* Shift in the READ command. */
8960 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
8961
8962 /* Shift in address. */
8963 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
8964
8965 /* Shift out the data. */
8966 wm_eeprom_recvbits(sc, &val, 16);
8967 data[i] = val & 0xffff;
8968
8969 /* Clear CHIP SELECT. */
8970 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
8971 CSR_WRITE(sc, WMREG_EECD, reg);
8972 CSR_WRITE_FLUSH(sc);
8973 delay(2);
8974 }
8975
8976 return 0;
8977 }
8978
8979 /* SPI */
8980
8981 /*
8982 * Set SPI and FLASH related information from the EECD register.
8983 * For 82541 and 82547, the word size is taken from EEPROM.
8984 */
8985 static int
8986 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
8987 {
8988 int size;
8989 uint32_t reg;
8990 uint16_t data;
8991
8992 reg = CSR_READ(sc, WMREG_EECD);
8993 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
8994
8995 /* Read the size of NVM from EECD by default */
8996 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8997 switch (sc->sc_type) {
8998 case WM_T_82541:
8999 case WM_T_82541_2:
9000 case WM_T_82547:
9001 case WM_T_82547_2:
9002 		/* Set a dummy word size so we can read the size from NVM */
9003 sc->sc_nvm_wordsize = 64;
9004 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
9005 reg = data;
9006 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9007 if (size == 0)
9008 			size = 6;	/* 64 words */
9009 else
9010 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
9011 break;
9012 case WM_T_80003:
9013 case WM_T_82571:
9014 case WM_T_82572:
9015 case WM_T_82573: /* SPI case */
9016 case WM_T_82574: /* SPI case */
9017 case WM_T_82583: /* SPI case */
9018 size += NVM_WORD_SIZE_BASE_SHIFT;
9019 if (size > 14)
9020 size = 14;
9021 break;
9022 case WM_T_82575:
9023 case WM_T_82576:
9024 case WM_T_82580:
9025 case WM_T_I350:
9026 case WM_T_I354:
9027 case WM_T_I210:
9028 case WM_T_I211:
9029 size += NVM_WORD_SIZE_BASE_SHIFT;
9030 if (size > 15)
9031 size = 15;
9032 break;
9033 default:
9034 aprint_error_dev(sc->sc_dev,
9035 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
9036 		return -1;
9038 }
9039
9040 sc->sc_nvm_wordsize = 1 << size;
9041
9042 return 0;
9043 }
9044
9045 /*
9046 * wm_nvm_ready_spi:
9047 *
9048 * Wait for a SPI EEPROM to be ready for commands.
9049 */
9050 static int
9051 wm_nvm_ready_spi(struct wm_softc *sc)
9052 {
9053 uint32_t val;
9054 int usec;
9055
9056 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
9057 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
9058 wm_eeprom_recvbits(sc, &val, 8);
9059 if ((val & SPI_SR_RDY) == 0)
9060 break;
9061 }
9062 if (usec >= SPI_MAX_RETRIES) {
9063 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
9064 return 1;
9065 }
9066 return 0;
9067 }
9068
9069 /*
9070 * wm_nvm_read_spi:
9071 *
9072  *	Read a word from the EEPROM using the SPI protocol.
9073 */
9074 static int
9075 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9076 {
9077 uint32_t reg, val;
9078 int i;
9079 uint8_t opc;
9080
9081 /* Clear SK and CS. */
9082 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
9083 CSR_WRITE(sc, WMREG_EECD, reg);
9084 CSR_WRITE_FLUSH(sc);
9085 delay(2);
9086
9087 if (wm_nvm_ready_spi(sc))
9088 return 1;
9089
9090 /* Toggle CS to flush commands. */
9091 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
9092 CSR_WRITE_FLUSH(sc);
9093 delay(2);
9094 CSR_WRITE(sc, WMREG_EECD, reg);
9095 CSR_WRITE_FLUSH(sc);
9096 delay(2);
9097
9098 opc = SPI_OPC_READ;
9099 if (sc->sc_nvm_addrbits == 8 && word >= 128)
9100 opc |= SPI_OPC_A8;
9101
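	/* The SPI EEPROM takes a byte address, hence the word << 1. */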
9102 wm_eeprom_sendbits(sc, opc, 8);
9103 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
9104
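	/* Each word arrives high byte first; swap it into word order. */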
9105 for (i = 0; i < wordcnt; i++) {
9106 wm_eeprom_recvbits(sc, &val, 16);
9107 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
9108 }
9109
9110 /* Raise CS and clear SK. */
9111 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
9112 CSR_WRITE(sc, WMREG_EECD, reg);
9113 CSR_WRITE_FLUSH(sc);
9114 delay(2);
9115
9116 return 0;
9117 }
9118
9119 /* Reading via the EERD register */
9120
9121 static int
9122 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
9123 {
9124 uint32_t attempts = 100000;
9125 uint32_t i, reg = 0;
9126 int32_t done = -1;
9127
9128 for (i = 0; i < attempts; i++) {
9129 reg = CSR_READ(sc, rw);
9130
9131 if (reg & EERD_DONE) {
9132 done = 0;
9133 break;
9134 }
9135 delay(5);
9136 }
9137
9138 return done;
9139 }
9140
9141 static int
9142 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
9143 uint16_t *data)
9144 {
9145 int i, eerd = 0;
9146 int error = 0;
9147
9148 for (i = 0; i < wordcnt; i++) {
9149 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
9150
9151 CSR_WRITE(sc, WMREG_EERD, eerd);
9152 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
9153 if (error != 0)
9154 break;
9155
9156 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
9157 }
9158
9159 return error;
9160 }
9161
9162 /* Flash */
9163
9164 static int
9165 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
9166 {
9167 uint32_t eecd;
9168 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
9169 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
9170 uint8_t sig_byte = 0;
9171
9172 switch (sc->sc_type) {
9173 case WM_T_ICH8:
9174 case WM_T_ICH9:
9175 eecd = CSR_READ(sc, WMREG_EECD);
9176 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
9177 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
9178 return 0;
9179 }
9180 /* FALLTHROUGH */
9181 default:
9182 /* Default to 0 */
9183 *bank = 0;
9184
9185 /* Check bank 0 */
9186 wm_read_ich8_byte(sc, act_offset, &sig_byte);
9187 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9188 *bank = 0;
9189 return 0;
9190 }
9191
9192 /* Check bank 1 */
9193 wm_read_ich8_byte(sc, act_offset + bank1_offset,
9194 &sig_byte);
9195 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9196 *bank = 1;
9197 return 0;
9198 }
9199 }
9200
9201 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
9202 device_xname(sc->sc_dev)));
9203 return -1;
9204 }
9205
9206 /******************************************************************************
9207 * This function does initial flash setup so that a new read/write/erase cycle
9208 * can be started.
9209 *
9210 * sc - The pointer to the hw structure
9211 ****************************************************************************/
9212 static int32_t
9213 wm_ich8_cycle_init(struct wm_softc *sc)
9214 {
9215 uint16_t hsfsts;
9216 int32_t error = 1;
9217 int32_t i = 0;
9218
9219 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9220
9221 	/* Maybe check the Flash Descriptor Valid bit in the HW status */
9222 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
9223 return error;
9224 }
9225
9226 	/* Clear FCERR and DAEL in the HW status by writing 1s */
9228 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
9229
9230 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9231
9232 /*
9233 	 * Either we should have a hardware SPI cycle-in-progress bit to
9234 	 * check against in order to start a new cycle, or the FDONE bit
9235 	 * should be changed in the hardware so that it is 1 after hardware
9236 	 * reset, which can then be used to tell whether a cycle is in
9237 	 * progress or has been completed.  We should also have some
9238 	 * software semaphore mechanism to guard FDONE or the cycle-in-
9239 	 * progress bit so that two threads' accesses to those bits are
9240 	 * serialized, and so that two threads don't start a cycle at once.
9241 */
9242
9243 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
9244 /*
9245 * There is no cycle running at present, so we can start a
9246 * cycle
9247 */
9248
9249 /* Begin by setting Flash Cycle Done. */
9250 hsfsts |= HSFSTS_DONE;
9251 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9252 error = 0;
9253 } else {
9254 /*
9255 		 * Otherwise, poll for a while so the current cycle has a
9256 * chance to end before giving up.
9257 */
9258 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
9259 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9260 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
9261 error = 0;
9262 break;
9263 }
9264 delay(1);
9265 }
9266 if (error == 0) {
9267 /*
9268 			 * The previous cycle completed in time; now set
9269 			 * the Flash Cycle Done bit.
9270 */
9271 hsfsts |= HSFSTS_DONE;
9272 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9273 }
9274 }
9275 return error;
9276 }
9277
9278 /******************************************************************************
9279 * This function starts a flash cycle and waits for its completion
9280 *
9281 * sc - The pointer to the hw structure
9282 ****************************************************************************/
9283 static int32_t
9284 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
9285 {
9286 uint16_t hsflctl;
9287 uint16_t hsfsts;
9288 int32_t error = 1;
9289 uint32_t i = 0;
9290
9291 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
9292 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
9293 hsflctl |= HSFCTL_GO;
9294 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
9295
9296 /* Wait till FDONE bit is set to 1 */
9297 do {
9298 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9299 if (hsfsts & HSFSTS_DONE)
9300 break;
9301 delay(1);
9302 i++;
9303 } while (i < timeout);
9304 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
9305 error = 0;
9306
9307 return error;
9308 }
9309
9310 /******************************************************************************
9311 * Reads a byte or word from the NVM using the ICH8 flash access registers.
9312 *
9313 * sc - The pointer to the hw structure
9314 * index - The index of the byte or word to read.
9315 * size - Size of data to read, 1=byte 2=word
9316 * data - Pointer to the word to store the value read.
9317 *****************************************************************************/
9318 static int32_t
9319 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
9320 uint32_t size, uint16_t *data)
9321 {
9322 uint16_t hsfsts;
9323 uint16_t hsflctl;
9324 uint32_t flash_linear_address;
9325 uint32_t flash_data = 0;
9326 int32_t error = 1;
9327 int32_t count = 0;
9328
9329 	if (size < 1 || size > 2 || data == NULL ||
9330 index > ICH_FLASH_LINEAR_ADDR_MASK)
9331 return error;
9332
9333 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
9334 sc->sc_ich8_flash_base;
9335
9336 do {
9337 delay(1);
9338 /* Steps */
9339 error = wm_ich8_cycle_init(sc);
9340 if (error)
9341 break;
9342
9343 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
9344 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
9345 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
9346 & HSFCTL_BCOUNT_MASK;
9347 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
9348 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
9349
9350 /*
9351 * Write the last 24 bits of index into Flash Linear address
9352 * field in Flash Address
9353 */
9354 		/* TODO: maybe check the index against the size of the flash */
9355
9356 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
9357
9358 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
9359
9360 /*
9361 		 * If FCERR is set, clear it and try the whole sequence a
9362 		 * few more times; otherwise read (shift in) the data from
9363 		 * the Flash Data0 register, least significant byte first.
9365 */
9366 if (error == 0) {
9367 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
9368 if (size == 1)
9369 *data = (uint8_t)(flash_data & 0x000000FF);
9370 else if (size == 2)
9371 *data = (uint16_t)(flash_data & 0x0000FFFF);
9372 break;
9373 } else {
9374 /*
9375 * If we've gotten here, then things are probably
9376 * completely hosed, but if the error condition is
9377 * detected, it won't hurt to give it another try...
9378 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
9379 */
9380 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9381 if (hsfsts & HSFSTS_ERR) {
9382 /* Repeat for some time before giving up. */
9383 continue;
9384 } else if ((hsfsts & HSFSTS_DONE) == 0)
9385 break;
9386 }
9387 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
9388
9389 return error;
9390 }
9391
9392 /******************************************************************************
9393 * Reads a single byte from the NVM using the ICH8 flash access registers.
9394 *
9395 * sc - pointer to wm_hw structure
9396 * index - The index of the byte to read.
9397 * data - Pointer to a byte to store the value read.
9398 *****************************************************************************/
9399 static int32_t
9400 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
9401 {
9402 int32_t status;
9403 uint16_t word = 0;
9404
9405 status = wm_read_ich8_data(sc, index, 1, &word);
9406 if (status == 0)
9407 *data = (uint8_t)word;
9408 else
9409 *data = 0;
9410
9411 return status;
9412 }
9413
9414 /******************************************************************************
9415 * Reads a word from the NVM using the ICH8 flash access registers.
9416 *
9417 * sc - pointer to wm_hw structure
9418 * index - The starting byte index of the word to read.
9419 * data - Pointer to a word to store the value read.
9420 *****************************************************************************/
9421 static int32_t
9422 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
9423 {
9424 int32_t status;
9425
9426 status = wm_read_ich8_data(sc, index, 2, data);
9427 return status;
9428 }
9429
9430 /******************************************************************************
9431 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
9432 * register.
9433 *
9434 * sc - Struct containing variables accessed by shared code
9435 * offset - offset of word in the EEPROM to read
9436 * data - word read from the EEPROM
9437 * words - number of words to read
9438 *****************************************************************************/
9439 static int
9440 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
9441 {
9442 int32_t error = 0;
9443 uint32_t flash_bank = 0;
9444 uint32_t act_offset = 0;
9445 uint32_t bank_offset = 0;
9446 uint16_t word = 0;
9447 uint16_t i = 0;
9448
9449 /*
9450 * We need to know which is the valid flash bank. In the event
9451 * that we didn't allocate eeprom_shadow_ram, we may not be
9452 * managing flash_bank. So it cannot be trusted and needs
9453 * to be updated with each read.
9454 */
9455 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
9456 if (error) {
9457 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
9458 device_xname(sc->sc_dev)));
9459 flash_bank = 0;
9460 }
9461
9462 /*
9463 	 * Adjust the offset appropriately if we're on bank 1, accounting
9464 	 * for the word size.
9465 */
9466 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
9467
9468 error = wm_get_swfwhw_semaphore(sc);
9469 if (error) {
9470 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9471 __func__);
9472 return error;
9473 }
9474
9475 for (i = 0; i < words; i++) {
9476 /* The NVM part needs a byte offset, hence * 2 */
9477 act_offset = bank_offset + ((offset + i) * 2);
9478 error = wm_read_ich8_word(sc, act_offset, &word);
9479 if (error) {
9480 aprint_error_dev(sc->sc_dev,
9481 "%s: failed to read NVM\n", __func__);
9482 break;
9483 }
9484 data[i] = word;
9485 }
9486
9487 wm_put_swfwhw_semaphore(sc);
9488 return error;
9489 }
9490
9491 /* iNVM */
9492
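/*
 * wm_nvm_read_word_invm:
 *
 *	Scan the iNVM dwords for a word-autoload record matching the
 *	given word address, skipping CSR-autoload and RSA-key records.
 */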
9493 static int
9494 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
9495 {
9496 	int32_t rv = -1;	/* -1 means "word not found" */
9497 uint32_t invm_dword;
9498 uint16_t i;
9499 uint8_t record_type, word_address;
9500
9501 for (i = 0; i < INVM_SIZE; i++) {
9502 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
9503 /* Get record type */
9504 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
9505 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
9506 break;
9507 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
9508 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
9509 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
9510 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
9511 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
9512 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
9513 if (word_address == address) {
9514 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
9515 rv = 0;
9516 break;
9517 }
9518 }
9519 }
9520
9521 return rv;
9522 }
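
/*
 * Illustrative sketch (not driver code): how a word-autoload iNVM dword
 * decodes. The field positions below (record type in bits 2:0, word
 * address in bits 15:9, data in bits 31:16) are assumptions mirroring
 * the usual i210/i211 layout behind the INVM_DWORD_TO_* macros.
 */
#if 0
static uint16_t
wm_invm_decode_example(uint32_t invm_dword, uint8_t *record_type,
    uint8_t *word_address)
{

	*record_type = invm_dword & 0x07;		/* bits 2:0 */
	*word_address = (invm_dword & 0x0000fe00) >> 9;	/* bits 15:9 */
	return (invm_dword & 0xffff0000) >> 16;		/* bits 31:16 */
}
#endif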
9523
9524 static int
9525 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
9526 {
9527 int rv = 0;
9528 int i;
9529
9530 for (i = 0; i < words; i++) {
9531 switch (offset + i) {
9532 case NVM_OFF_MACADDR:
9533 case NVM_OFF_MACADDR1:
9534 case NVM_OFF_MACADDR2:
9535 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
9536 if (rv != 0) {
9537 data[i] = 0xffff;
9538 rv = -1;
9539 }
9540 break;
9541 case NVM_OFF_CFG2:
9542 rv = wm_nvm_read_word_invm(sc, offset, data);
9543 if (rv != 0) {
9544 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
9545 rv = 0;
9546 }
9547 break;
9548 case NVM_OFF_CFG4:
9549 rv = wm_nvm_read_word_invm(sc, offset, data);
9550 if (rv != 0) {
9551 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
9552 rv = 0;
9553 }
9554 break;
9555 case NVM_OFF_LED_1_CFG:
9556 rv = wm_nvm_read_word_invm(sc, offset, data);
9557 if (rv != 0) {
9558 *data = NVM_LED_1_CFG_DEFAULT_I211;
9559 rv = 0;
9560 }
9561 break;
9562 case NVM_OFF_LED_0_2_CFG:
9563 rv = wm_nvm_read_word_invm(sc, offset, data);
9564 if (rv != 0) {
9565 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
9566 rv = 0;
9567 }
9568 break;
9569 case NVM_OFF_ID_LED_SETTINGS:
9570 rv = wm_nvm_read_word_invm(sc, offset, data);
9571 if (rv != 0) {
9572 *data = ID_LED_RESERVED_FFFF;
9573 rv = 0;
9574 }
9575 break;
9576 default:
9577 			DPRINTF(WM_DEBUG_NVM,
9578 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
9579 *data = NVM_RESERVED_WORD;
9580 break;
9581 }
9582 }
9583
9584 return rv;
9585 }
9586
9587 /* Lock, detect NVM type, validate checksum, get version and read */
9588
9589 /*
9590 * wm_nvm_acquire:
9591 *
9592 * Perform the EEPROM handshake required on some chips.
9593 */
9594 static int
9595 wm_nvm_acquire(struct wm_softc *sc)
9596 {
9597 uint32_t reg;
9598 int x;
9599 int ret = 0;
9600
9601 	/* Flash-type NVM needs no handshake; always succeeds */
9602 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9603 return 0;
9604
9605 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
9606 ret = wm_get_swfwhw_semaphore(sc);
9607 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
9608 /* This will also do wm_get_swsm_semaphore() if needed */
9609 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
9610 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
9611 ret = wm_get_swsm_semaphore(sc);
9612 }
9613
9614 if (ret) {
9615 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9616 __func__);
9617 return 1;
9618 }
9619
9620 if (sc->sc_flags & WM_F_LOCK_EECD) {
9621 reg = CSR_READ(sc, WMREG_EECD);
9622
9623 /* Request EEPROM access. */
9624 reg |= EECD_EE_REQ;
9625 CSR_WRITE(sc, WMREG_EECD, reg);
9626
9627 /* ..and wait for it to be granted. */
9628 for (x = 0; x < 1000; x++) {
9629 reg = CSR_READ(sc, WMREG_EECD);
9630 if (reg & EECD_EE_GNT)
9631 break;
9632 delay(5);
9633 }
9634 if ((reg & EECD_EE_GNT) == 0) {
9635 aprint_error_dev(sc->sc_dev,
9636 "could not acquire EEPROM GNT\n");
9637 reg &= ~EECD_EE_REQ;
9638 CSR_WRITE(sc, WMREG_EECD, reg);
9639 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9640 wm_put_swfwhw_semaphore(sc);
9641 if (sc->sc_flags & WM_F_LOCK_SWFW)
9642 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9643 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9644 wm_put_swsm_semaphore(sc);
9645 return 1;
9646 }
9647 }
9648
9649 return 0;
9650 }
9651
9652 /*
9653 * wm_nvm_release:
9654 *
9655 * Release the EEPROM mutex.
9656 */
9657 static void
9658 wm_nvm_release(struct wm_softc *sc)
9659 {
9660 uint32_t reg;
9661
9662 	/* Flash-type NVM needs no handshake; nothing to release */
9663 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9664 return;
9665
9666 if (sc->sc_flags & WM_F_LOCK_EECD) {
9667 reg = CSR_READ(sc, WMREG_EECD);
9668 reg &= ~EECD_EE_REQ;
9669 CSR_WRITE(sc, WMREG_EECD, reg);
9670 }
9671
9672 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9673 wm_put_swfwhw_semaphore(sc);
9674 if (sc->sc_flags & WM_F_LOCK_SWFW)
9675 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9676 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9677 wm_put_swsm_semaphore(sc);
9678 }
9679
9680 static int
9681 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
9682 {
9683 uint32_t eecd = 0;
9684
9685 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
9686 || sc->sc_type == WM_T_82583) {
9687 eecd = CSR_READ(sc, WMREG_EECD);
9688
9689 /* Isolate bits 15 & 16 */
9690 eecd = ((eecd >> 15) & 0x03);
9691
9692 /* If both bits are set, device is Flash type */
9693 if (eecd == 0x03)
9694 return 0;
9695 }
9696 return 1;
9697 }
9698
9699 static int
9700 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
9701 {
9702 uint32_t eec;
9703
9704 eec = CSR_READ(sc, WMREG_EEC);
9705 if ((eec & EEC_FLASH_DETECTED) != 0)
9706 return 1;
9707
9708 return 0;
9709 }
9710
9711 /*
9712 * wm_nvm_validate_checksum
9713 *
9714 * The checksum is defined as the sum of the first 64 (16 bit) words.
9715 */
9716 static int
9717 wm_nvm_validate_checksum(struct wm_softc *sc)
9718 {
9719 uint16_t checksum;
9720 uint16_t eeprom_data;
9721 #ifdef WM_DEBUG
9722 uint16_t csum_wordaddr, valid_checksum;
9723 #endif
9724 int i;
9725
9726 checksum = 0;
9727
9728 /* Don't check for I211 */
9729 if (sc->sc_type == WM_T_I211)
9730 return 0;
9731
9732 #ifdef WM_DEBUG
9733 if (sc->sc_type == WM_T_PCH_LPT) {
9734 csum_wordaddr = NVM_OFF_COMPAT;
9735 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
9736 } else {
9737 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
9738 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
9739 }
9740
9741 /* Dump EEPROM image for debug */
9742 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9743 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9744 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
9745 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
9746 if ((eeprom_data & valid_checksum) == 0) {
9747 DPRINTF(WM_DEBUG_NVM,
9748 ("%s: NVM need to be updated (%04x != %04x)\n",
9749 device_xname(sc->sc_dev), eeprom_data,
9750 valid_checksum));
9751 }
9752 }
9753
9754 if ((wm_debug & WM_DEBUG_NVM) != 0) {
9755 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
9756 for (i = 0; i < NVM_SIZE; i++) {
9757 if (wm_nvm_read(sc, i, 1, &eeprom_data))
9758 printf("XXXX ");
9759 else
9760 printf("%04hx ", eeprom_data);
9761 if (i % 8 == 7)
9762 printf("\n");
9763 }
9764 }
9765
9766 #endif /* WM_DEBUG */
9767
9768 for (i = 0; i < NVM_SIZE; i++) {
9769 if (wm_nvm_read(sc, i, 1, &eeprom_data))
9770 return 1;
9771 checksum += eeprom_data;
9772 }
9773
9774 if (checksum != (uint16_t) NVM_CHECKSUM) {
9775 #ifdef WM_DEBUG
9776 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
9777 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
9778 #endif
9779 }
9780
9781 return 0;
9782 }
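
/*
 * Illustrative sketch (not driver code): the checksum rule above in
 * isolation. NVM_CHECKSUM is 0xBABA on this hardware family; the image
 * stores, in one of its words, whatever value makes the 16-bit sum of
 * all NVM_SIZE words equal that constant.
 */
#if 0
static int
wm_nvm_checksum_example(const uint16_t *image, int nwords)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < nwords; i++)
		sum += image[i];	/* 16-bit wraparound is intended */

	return (sum == 0xBABA) ? 0 : 1;	/* 0xBABA == NVM_CHECKSUM */
}
#endif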
9783
9784 static void
9785 wm_nvm_version_invm(struct wm_softc *sc)
9786 {
9787 uint32_t dword;
9788
9789 /*
9790 	 * Linux's code to decode the version is very strange, so we
9791 	 * don't follow that algorithm and just use word 61 as the
9792 	 * document describes. It may not be perfect, though...
9793 *
9794 * Example:
9795 *
9796 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
9797 */
9798 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
9799 dword = __SHIFTOUT(dword, INVM_VER_1);
9800 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
9801 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
9802 }
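
/*
 * Worked example for the decode above, assuming the usual i211 field
 * layout (INVM_VER_1 = bits 12:3 of the dword, INVM_MAJOR = bits 9:4
 * of that field, INVM_MINOR = bits 3:0):
 *
 *	dword = 0x00800030
 *	ver1  = (0x00800030 & 0x1ff8) >> 3 = 0x6
 *	major = (0x6 & 0x3f0) >> 4         = 0
 *	minor =  0x6 & 0x00f               = 6	-> version 0.6
 */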
9803
9804 static void
9805 wm_nvm_version(struct wm_softc *sc)
9806 {
9807 uint16_t major, minor, build, patch;
9808 uint16_t uid0, uid1;
9809 uint16_t nvm_data;
9810 uint16_t off;
9811 bool check_version = false;
9812 bool check_optionrom = false;
9813 bool have_build = false;
9814
9815 /*
9816 * Version format:
9817 *
9818 * XYYZ
9819 * X0YZ
9820 * X0YY
9821 *
9822 * Example:
9823 *
9824 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
9825 * 82571 0x50a6 5.10.6?
9826 * 82572 0x506a 5.6.10?
9827 * 82572EI 0x5069 5.6.9?
9828 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
9829 * 0x2013 2.1.3?
9830 	 * 82583	0x10a0	1.10.0? (document says it's the default value)
9831 */
9832 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
9833 switch (sc->sc_type) {
9834 case WM_T_82571:
9835 case WM_T_82572:
9836 case WM_T_82574:
9837 case WM_T_82583:
9838 check_version = true;
9839 check_optionrom = true;
9840 have_build = true;
9841 break;
9842 case WM_T_82575:
9843 case WM_T_82576:
9844 case WM_T_82580:
9845 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
9846 check_version = true;
9847 break;
9848 case WM_T_I211:
9849 wm_nvm_version_invm(sc);
9850 goto printver;
9851 case WM_T_I210:
9852 if (!wm_nvm_get_flash_presence_i210(sc)) {
9853 wm_nvm_version_invm(sc);
9854 goto printver;
9855 }
9856 /* FALLTHROUGH */
9857 case WM_T_I350:
9858 case WM_T_I354:
9859 check_version = true;
9860 check_optionrom = true;
9861 break;
9862 default:
9863 return;
9864 }
9865 if (check_version) {
9866 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
9867 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
9868 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
9869 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
9870 build = nvm_data & NVM_BUILD_MASK;
9871 have_build = true;
9872 } else
9873 minor = nvm_data & 0x00ff;
9874
9875 /* Decimal */
9876 minor = (minor / 16) * 10 + (minor % 16);
9877 sc->sc_nvm_ver_major = major;
9878 sc->sc_nvm_ver_minor = minor;
9879
9880 printver:
9881 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
9882 sc->sc_nvm_ver_minor);
9883 if (have_build) {
9884 sc->sc_nvm_ver_build = build;
9885 aprint_verbose(".%d", build);
9886 }
9887 }
9888 if (check_optionrom) {
9889 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
9890 /* Option ROM Version */
9891 if ((off != 0x0000) && (off != 0xffff)) {
9892 off += NVM_COMBO_VER_OFF;
9893 wm_nvm_read(sc, off + 1, 1, &uid1);
9894 wm_nvm_read(sc, off, 1, &uid0);
9895 if ((uid0 != 0) && (uid0 != 0xffff)
9896 && (uid1 != 0) && (uid1 != 0xffff)) {
9897 /* 16bits */
9898 major = uid0 >> 8;
9899 build = (uid0 << 8) | (uid1 >> 8);
9900 patch = uid1 & 0x00ff;
9901 aprint_verbose(", option ROM Version %d.%d.%d",
9902 major, build, patch);
9903 }
9904 }
9905 }
9906
9907 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
9908 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
9909 }
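
/*
 * Illustrative sketch (not driver code): decoding a raw XYYZ version
 * word such as 0x50a2 from the table above. The mask/shift constants
 * here are assumptions standing in for NVM_MAJOR_MASK and friends.
 */
#if 0
static void
wm_nvm_version_example(void)
{
	uint16_t nvm_data = 0x50a2;			/* 82571 example */
	uint16_t major = (nvm_data & 0xf000) >> 12;	/* 0x5 */
	uint16_t minor = (nvm_data & 0x0ff0) >> 4;	/* 0x0a */
	uint16_t build = nvm_data & 0x000f;		/* 0x2 */

	/* The hex minor digits are reinterpreted as decimal: 0x0a -> 10. */
	minor = (minor / 16) * 10 + (minor % 16);

	/* Result: version 5.10.2, matching the table above. */
}
#endif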
9910
9911 /*
9912 * wm_nvm_read:
9913 *
9914 * Read data from the serial EEPROM.
9915 */
9916 static int
9917 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9918 {
9919 int rv;
9920
9921 if (sc->sc_flags & WM_F_EEPROM_INVALID)
9922 return 1;
9923
9924 if (wm_nvm_acquire(sc))
9925 return 1;
9926
9927 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9928 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9929 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
9930 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
9931 else if (sc->sc_flags & WM_F_EEPROM_INVM)
9932 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
9933 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
9934 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
9935 else if (sc->sc_flags & WM_F_EEPROM_SPI)
9936 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
9937 else
9938 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
9939
9940 wm_nvm_release(sc);
9941 return rv;
9942 }
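
/*
 * Illustrative usage sketch (not driver code): wm_nvm_read() is the
 * single entry point; the NVM flavour (ICH8 flash, iNVM, EERD, SPI or
 * Microwire) is dispatched internally. E.g. fetching the three station
 * address words:
 */
#if 0
	uint16_t myea[3];

	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) != 0)
		aprint_error_dev(sc->sc_dev,
		    "unable to read Ethernet address\n");
#endif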
9943
9944 /*
9945 * Hardware semaphores.
9946  * Very complex...
9947 */
9948
9949 static int
9950 wm_get_swsm_semaphore(struct wm_softc *sc)
9951 {
9952 int32_t timeout;
9953 uint32_t swsm;
9954
9955 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9956 /* Get the SW semaphore. */
9957 timeout = sc->sc_nvm_wordsize + 1;
9958 while (timeout) {
9959 swsm = CSR_READ(sc, WMREG_SWSM);
9960
9961 if ((swsm & SWSM_SMBI) == 0)
9962 break;
9963
9964 delay(50);
9965 timeout--;
9966 }
9967
9968 if (timeout == 0) {
9969 aprint_error_dev(sc->sc_dev,
9970 "could not acquire SWSM SMBI\n");
9971 return 1;
9972 }
9973 }
9974
9975 /* Get the FW semaphore. */
9976 timeout = sc->sc_nvm_wordsize + 1;
9977 while (timeout) {
9978 swsm = CSR_READ(sc, WMREG_SWSM);
9979 swsm |= SWSM_SWESMBI;
9980 CSR_WRITE(sc, WMREG_SWSM, swsm);
9981 /* If we managed to set the bit we got the semaphore. */
9982 swsm = CSR_READ(sc, WMREG_SWSM);
9983 if (swsm & SWSM_SWESMBI)
9984 break;
9985
9986 delay(50);
9987 timeout--;
9988 }
9989
9990 if (timeout == 0) {
9991 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
9992 /* Release semaphores */
9993 wm_put_swsm_semaphore(sc);
9994 return 1;
9995 }
9996 return 0;
9997 }
9998
9999 static void
10000 wm_put_swsm_semaphore(struct wm_softc *sc)
10001 {
10002 uint32_t swsm;
10003
10004 swsm = CSR_READ(sc, WMREG_SWSM);
10005 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
10006 CSR_WRITE(sc, WMREG_SWSM, swsm);
10007 }
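
/*
 * Illustrative usage sketch (not driver code): SWSM is a two-stage
 * lock -- SMBI arbitrates software against software, SWESMBI software
 * against firmware -- so callers simply bracket their access:
 */
#if 0
	if (wm_get_swsm_semaphore(sc) != 0)
		return 1;	/* timed out; another agent holds it */
	/* ... access the shared NVM/PHY resource here ... */
	wm_put_swsm_semaphore(sc);
#endif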
10008
10009 static int
10010 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10011 {
10012 uint32_t swfw_sync;
10013 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
10014 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
10015 	int timeout;
10016
10017 for (timeout = 0; timeout < 200; timeout++) {
10018 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10019 if (wm_get_swsm_semaphore(sc)) {
10020 aprint_error_dev(sc->sc_dev,
10021 "%s: failed to get semaphore\n",
10022 __func__);
10023 return 1;
10024 }
10025 }
10026 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10027 if ((swfw_sync & (swmask | fwmask)) == 0) {
10028 swfw_sync |= swmask;
10029 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10030 if (sc->sc_flags & WM_F_LOCK_SWSM)
10031 wm_put_swsm_semaphore(sc);
10032 return 0;
10033 }
10034 if (sc->sc_flags & WM_F_LOCK_SWSM)
10035 wm_put_swsm_semaphore(sc);
10036 delay(5000);
10037 }
10038 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
10039 device_xname(sc->sc_dev), mask, swfw_sync);
10040 return 1;
10041 }
10042
10043 static void
10044 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10045 {
10046 uint32_t swfw_sync;
10047
10048 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10049 while (wm_get_swsm_semaphore(sc) != 0)
10050 continue;
10051 }
10052 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10053 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
10054 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10055 if (sc->sc_flags & WM_F_LOCK_SWSM)
10056 wm_put_swsm_semaphore(sc);
10057 }
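
/*
 * Illustrative sketch (not driver code): SW_FW_SYNC keeps the software
 * ownership bits in the low half of the register and the mirrored
 * firmware bits in the high half, so one resource mask serves both
 * sides. Assuming SWFW_EEP_SM == 0x1, SWFW_SOFT_SHIFT == 0 and
 * SWFW_FIRM_SHIFT == 16:
 *
 *	swmask = 0x1 << 0	-> bit 0:  software owns the EEPROM
 *	fwmask = 0x1 << 16	-> bit 16: firmware owns the EEPROM
 *
 * The resource is free only while both bits are clear, which is
 * exactly the (swfw_sync & (swmask | fwmask)) == 0 test above.
 */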
10058
10059 static int
10060 wm_get_swfwhw_semaphore(struct wm_softc *sc)
10061 {
10062 uint32_t ext_ctrl;
10063 	int timeout;
10064
10065 for (timeout = 0; timeout < 200; timeout++) {
10066 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10067 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
10068 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10069
10070 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10071 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
10072 return 0;
10073 delay(5000);
10074 }
10075 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
10076 device_xname(sc->sc_dev), ext_ctrl);
10077 return 1;
10078 }
10079
10080 static void
10081 wm_put_swfwhw_semaphore(struct wm_softc *sc)
10082 {
10083 uint32_t ext_ctrl;
10084 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10085 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10086 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10087 }
10088
10089 static int
10090 wm_get_hw_semaphore_82573(struct wm_softc *sc)
10091 {
10092 int i = 0;
10093 uint32_t reg;
10094
10095 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10096 do {
10097 CSR_WRITE(sc, WMREG_EXTCNFCTR,
10098 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
10099 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10100 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
10101 break;
10102 delay(2*1000);
10103 i++;
10104 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
10105
10106 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
10107 wm_put_hw_semaphore_82573(sc);
10108 log(LOG_ERR, "%s: Driver can't access the PHY\n",
10109 device_xname(sc->sc_dev));
10110 return -1;
10111 }
10112
10113 return 0;
10114 }
10115
10116 static void
10117 wm_put_hw_semaphore_82573(struct wm_softc *sc)
10118 {
10119 uint32_t reg;
10120
10121 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10122 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10123 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10124 }
10125
10126 /*
10127 * Management mode and power management related subroutines.
10128 * BMC, AMT, suspend/resume and EEE.
10129 */
10130
10131 static int
10132 wm_check_mng_mode(struct wm_softc *sc)
10133 {
10134 int rv;
10135
10136 switch (sc->sc_type) {
10137 case WM_T_ICH8:
10138 case WM_T_ICH9:
10139 case WM_T_ICH10:
10140 case WM_T_PCH:
10141 case WM_T_PCH2:
10142 case WM_T_PCH_LPT:
10143 rv = wm_check_mng_mode_ich8lan(sc);
10144 break;
10145 case WM_T_82574:
10146 case WM_T_82583:
10147 rv = wm_check_mng_mode_82574(sc);
10148 break;
10149 case WM_T_82571:
10150 case WM_T_82572:
10151 case WM_T_82573:
10152 case WM_T_80003:
10153 rv = wm_check_mng_mode_generic(sc);
10154 break;
10155 default:
10156 		/* nothing to do */
10157 rv = 0;
10158 break;
10159 }
10160
10161 return rv;
10162 }
10163
10164 static int
10165 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
10166 {
10167 uint32_t fwsm;
10168
10169 fwsm = CSR_READ(sc, WMREG_FWSM);
10170
10171 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
10172 return 1;
10173
10174 return 0;
10175 }
10176
10177 static int
10178 wm_check_mng_mode_82574(struct wm_softc *sc)
10179 {
10180 uint16_t data;
10181
10182 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
10183
10184 if ((data & NVM_CFG2_MNGM_MASK) != 0)
10185 return 1;
10186
10187 return 0;
10188 }
10189
10190 static int
10191 wm_check_mng_mode_generic(struct wm_softc *sc)
10192 {
10193 uint32_t fwsm;
10194
10195 fwsm = CSR_READ(sc, WMREG_FWSM);
10196
10197 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
10198 return 1;
10199
10200 return 0;
10201 }
10202
10203 static int
10204 wm_enable_mng_pass_thru(struct wm_softc *sc)
10205 {
10206 uint32_t manc, fwsm, factps;
10207
10208 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
10209 return 0;
10210
10211 manc = CSR_READ(sc, WMREG_MANC);
10212
10213 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
10214 device_xname(sc->sc_dev), manc));
10215 if ((manc & MANC_RECV_TCO_EN) == 0)
10216 return 0;
10217
10218 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
10219 fwsm = CSR_READ(sc, WMREG_FWSM);
10220 factps = CSR_READ(sc, WMREG_FACTPS);
10221 if (((factps & FACTPS_MNGCG) == 0)
10222 && ((fwsm & FWSM_MODE_MASK)
10223 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
10224 return 1;
10225 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
10226 uint16_t data;
10227
10228 factps = CSR_READ(sc, WMREG_FACTPS);
10229 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
10230 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
10231 device_xname(sc->sc_dev), factps, data));
10232 if (((factps & FACTPS_MNGCG) == 0)
10233 && ((data & NVM_CFG2_MNGM_MASK)
10234 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
10235 return 1;
10236 } else if (((manc & MANC_SMBUS_EN) != 0)
10237 && ((manc & MANC_ASF_EN) == 0))
10238 return 1;
10239
10240 return 0;
10241 }
10242
10243 static int
10244 wm_check_reset_block(struct wm_softc *sc)
10245 {
10246 uint32_t reg;
10247
10248 switch (sc->sc_type) {
10249 case WM_T_ICH8:
10250 case WM_T_ICH9:
10251 case WM_T_ICH10:
10252 case WM_T_PCH:
10253 case WM_T_PCH2:
10254 case WM_T_PCH_LPT:
10255 reg = CSR_READ(sc, WMREG_FWSM);
10256 if ((reg & FWSM_RSPCIPHY) != 0)
10257 return 0;
10258 else
10259 return -1;
10260 break;
10261 case WM_T_82571:
10262 case WM_T_82572:
10263 case WM_T_82573:
10264 case WM_T_82574:
10265 case WM_T_82583:
10266 case WM_T_80003:
10267 reg = CSR_READ(sc, WMREG_MANC);
10268 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
10269 return -1;
10270 else
10271 return 0;
10272 break;
10273 default:
10274 /* no problem */
10275 break;
10276 }
10277
10278 return 0;
10279 }
10280
10281 static void
10282 wm_get_hw_control(struct wm_softc *sc)
10283 {
10284 uint32_t reg;
10285
10286 switch (sc->sc_type) {
10287 case WM_T_82573:
10288 reg = CSR_READ(sc, WMREG_SWSM);
10289 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
10290 break;
10291 case WM_T_82571:
10292 case WM_T_82572:
10293 case WM_T_82574:
10294 case WM_T_82583:
10295 case WM_T_80003:
10296 case WM_T_ICH8:
10297 case WM_T_ICH9:
10298 case WM_T_ICH10:
10299 case WM_T_PCH:
10300 case WM_T_PCH2:
10301 case WM_T_PCH_LPT:
10302 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10303 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
10304 break;
10305 default:
10306 break;
10307 }
10308 }
10309
10310 static void
10311 wm_release_hw_control(struct wm_softc *sc)
10312 {
10313 uint32_t reg;
10314
10315 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
10316 return;
10317
10318 if (sc->sc_type == WM_T_82573) {
10319 reg = CSR_READ(sc, WMREG_SWSM);
10320 reg &= ~SWSM_DRV_LOAD;
10321 		CSR_WRITE(sc, WMREG_SWSM, reg);
10322 } else {
10323 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10324 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
10325 }
10326 }
10327
10328 static void
10329 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
10330 {
10331 uint32_t reg;
10332
10333 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10334
10335 if (on != 0)
10336 reg |= EXTCNFCTR_GATE_PHY_CFG;
10337 else
10338 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
10339
10340 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10341 }
10342
10343 static void
10344 wm_smbustopci(struct wm_softc *sc)
10345 {
10346 uint32_t fwsm;
10347
10348 fwsm = CSR_READ(sc, WMREG_FWSM);
10349 if (((fwsm & FWSM_FW_VALID) == 0)
10350 && ((wm_check_reset_block(sc) == 0))) {
10351 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
10352 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
10353 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10354 CSR_WRITE_FLUSH(sc);
10355 delay(10);
10356 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
10357 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10358 CSR_WRITE_FLUSH(sc);
10359 delay(50*1000);
10360
10361 /*
10362 * Gate automatic PHY configuration by hardware on non-managed
10363 * 82579
10364 */
10365 if (sc->sc_type == WM_T_PCH2)
10366 wm_gate_hw_phy_config_ich8lan(sc, 1);
10367 }
10368 }
10369
10370 static void
10371 wm_init_manageability(struct wm_softc *sc)
10372 {
10373
10374 if (sc->sc_flags & WM_F_HAS_MANAGE) {
10375 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
10376 uint32_t manc = CSR_READ(sc, WMREG_MANC);
10377
10378 /* Disable hardware interception of ARP */
10379 manc &= ~MANC_ARP_EN;
10380
10381 /* Enable receiving management packets to the host */
10382 if (sc->sc_type >= WM_T_82571) {
10383 manc |= MANC_EN_MNG2HOST;
10384 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
10385 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
10386 }
10387
10388 CSR_WRITE(sc, WMREG_MANC, manc);
10389 }
10390 }
10391
10392 static void
10393 wm_release_manageability(struct wm_softc *sc)
10394 {
10395
10396 if (sc->sc_flags & WM_F_HAS_MANAGE) {
10397 uint32_t manc = CSR_READ(sc, WMREG_MANC);
10398
10399 manc |= MANC_ARP_EN;
10400 if (sc->sc_type >= WM_T_82571)
10401 manc &= ~MANC_EN_MNG2HOST;
10402
10403 CSR_WRITE(sc, WMREG_MANC, manc);
10404 }
10405 }
10406
10407 static void
10408 wm_get_wakeup(struct wm_softc *sc)
10409 {
10410
10411 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
10412 switch (sc->sc_type) {
10413 case WM_T_82573:
10414 case WM_T_82583:
10415 sc->sc_flags |= WM_F_HAS_AMT;
10416 /* FALLTHROUGH */
10417 case WM_T_80003:
10418 case WM_T_82541:
10419 case WM_T_82547:
10420 case WM_T_82571:
10421 case WM_T_82572:
10422 case WM_T_82574:
10423 case WM_T_82575:
10424 case WM_T_82576:
10425 case WM_T_82580:
10426 case WM_T_I350:
10427 case WM_T_I354:
10428 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
10429 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
10430 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
10431 break;
10432 case WM_T_ICH8:
10433 case WM_T_ICH9:
10434 case WM_T_ICH10:
10435 case WM_T_PCH:
10436 case WM_T_PCH2:
10437 case WM_T_PCH_LPT:
10438 sc->sc_flags |= WM_F_HAS_AMT;
10439 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
10440 break;
10441 default:
10442 break;
10443 }
10444
10445 /* 1: HAS_MANAGE */
10446 if (wm_enable_mng_pass_thru(sc) != 0)
10447 sc->sc_flags |= WM_F_HAS_MANAGE;
10448
10449 #ifdef WM_DEBUG
10450 printf("\n");
10451 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
10452 printf("HAS_AMT,");
10453 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
10454 printf("ARC_SUBSYS_VALID,");
10455 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
10456 printf("ASF_FIRMWARE_PRES,");
10457 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
10458 printf("HAS_MANAGE,");
10459 printf("\n");
10460 #endif
10461 /*
10462 	 * Note that the WOL flags are set after the EEPROM stuff has
10463 	 * been reset.
10464 */
10465 }
10466
10467 #ifdef WM_WOL
10468 /* WOL in the newer chipset interfaces (pchlan) */
10469 static void
10470 wm_enable_phy_wakeup(struct wm_softc *sc)
10471 {
10472 #if 0
10473 uint16_t preg;
10474
10475 /* Copy MAC RARs to PHY RARs */
10476
10477 /* Copy MAC MTA to PHY MTA */
10478
10479 /* Configure PHY Rx Control register */
10480
10481 /* Enable PHY wakeup in MAC register */
10482
10483 /* Configure and enable PHY wakeup in PHY registers */
10484
10485 /* Activate PHY wakeup */
10486
10487 /* XXX */
10488 #endif
10489 }
10490
10491 /* Power down workaround on D3 */
10492 static void
10493 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
10494 {
10495 uint32_t reg;
10496 int i;
10497
10498 for (i = 0; i < 2; i++) {
10499 /* Disable link */
10500 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10501 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
10502 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10503
10504 /*
10505 * Call gig speed drop workaround on Gig disable before
10506 * accessing any PHY registers
10507 */
10508 if (sc->sc_type == WM_T_ICH8)
10509 wm_gig_downshift_workaround_ich8lan(sc);
10510
10511 /* Write VR power-down enable */
10512 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
10513 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
10514 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
10515 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
10516
10517 /* Read it back and test */
10518 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
10519 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
10520 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
10521 break;
10522
10523 /* Issue PHY reset and repeat at most one more time */
10524 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10525 }
10526 }
10527
10528 static void
10529 wm_enable_wakeup(struct wm_softc *sc)
10530 {
10531 uint32_t reg, pmreg;
10532 pcireg_t pmode;
10533
10534 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
10535 &pmreg, NULL) == 0)
10536 return;
10537
10538 /* Advertise the wakeup capability */
10539 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
10540 | CTRL_SWDPIN(3));
10541 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
10542
10543 /* ICH workaround */
10544 switch (sc->sc_type) {
10545 case WM_T_ICH8:
10546 case WM_T_ICH9:
10547 case WM_T_ICH10:
10548 case WM_T_PCH:
10549 case WM_T_PCH2:
10550 case WM_T_PCH_LPT:
10551 /* Disable gig during WOL */
10552 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10553 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
10554 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10555 if (sc->sc_type == WM_T_PCH)
10556 wm_gmii_reset(sc);
10557
10558 /* Power down workaround */
10559 if (sc->sc_phytype == WMPHY_82577) {
10560 struct mii_softc *child;
10561
10562 /* Assume that the PHY is copper */
10563 child = LIST_FIRST(&sc->sc_mii.mii_phys);
10564 if (child->mii_mpd_rev <= 2)
10565 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
10566 (768 << 5) | 25, 0x0444); /* magic num */
10567 }
10568 break;
10569 default:
10570 break;
10571 }
10572
10573 /* Keep the laser running on fiber adapters */
10574 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
10575 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
10576 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10577 reg |= CTRL_EXT_SWDPIN(3);
10578 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10579 }
10580
10581 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
10582 #if 0 /* for the multicast packet */
10583 reg |= WUFC_MC;
10584 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
10585 #endif
10586
10587 if (sc->sc_type == WM_T_PCH) {
10588 wm_enable_phy_wakeup(sc);
10589 } else {
10590 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
10591 CSR_WRITE(sc, WMREG_WUFC, reg);
10592 }
10593
10594 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10595 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10596 || (sc->sc_type == WM_T_PCH2))
10597 && (sc->sc_phytype == WMPHY_IGP_3))
10598 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
10599
10600 /* Request PME */
10601 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
10602 #if 0
10603 /* Disable WOL */
10604 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
10605 #else
10606 /* For WOL */
10607 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
10608 #endif
10609 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
10610 }
10611 #endif /* WM_WOL */
10612
10613 /* EEE */
10614
10615 static void
10616 wm_set_eee_i350(struct wm_softc *sc)
10617 {
10618 uint32_t ipcnfg, eeer;
10619
10620 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
10621 eeer = CSR_READ(sc, WMREG_EEER);
10622
10623 if ((sc->sc_flags & WM_F_EEE) != 0) {
10624 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
10625 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
10626 | EEER_LPI_FC);
10627 } else {
10628 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
10629 ipcnfg &= ~IPCNFG_10BASE_TE;
10630 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
10631 | EEER_LPI_FC);
10632 }
10633
10634 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
10635 CSR_WRITE(sc, WMREG_EEER, eeer);
10636 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
10637 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
10638 }
10639
10640 /*
10641 * Workarounds (mainly PHY related).
10642  * Basically, PHY workarounds are implemented in the PHY drivers.
10643 */
10644
10645 /* Work-around for 82566 Kumeran PCS lock loss */
10646 static void
10647 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
10648 {
10649 int miistatus, active, i;
10650 int reg;
10651
10652 miistatus = sc->sc_mii.mii_media_status;
10653
10654 /* If the link is not up, do nothing */
10655 	if ((miistatus & IFM_ACTIVE) == 0)
10656 return;
10657
10658 active = sc->sc_mii.mii_media_active;
10659
10660 /* Nothing to do if the link is other than 1Gbps */
10661 if (IFM_SUBTYPE(active) != IFM_1000_T)
10662 return;
10663
10664 for (i = 0; i < 10; i++) {
10665 /* read twice */
10666 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
10667 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
10668 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
10669 goto out; /* GOOD! */
10670
10671 /* Reset the PHY */
10672 wm_gmii_reset(sc);
10673 delay(5*1000);
10674 }
10675
10676 /* Disable GigE link negotiation */
10677 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10678 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
10679 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10680
10681 /*
10682 * Call gig speed drop workaround on Gig disable before accessing
10683 * any PHY registers.
10684 */
10685 wm_gig_downshift_workaround_ich8lan(sc);
10686
10687 out:
10688 return;
10689 }
10690
10691 /* WOL from S5 stops working */
10692 static void
10693 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
10694 {
10695 uint16_t kmrn_reg;
10696
10697 /* Only for igp3 */
10698 if (sc->sc_phytype == WMPHY_IGP_3) {
10699 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
10700 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
10701 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
10702 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
10703 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
10704 }
10705 }
10706
10707 /*
10708 * Workaround for pch's PHYs
10709 * XXX should be moved to new PHY driver?
10710 */
10711 static void
10712 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
10713 {
10714 if (sc->sc_phytype == WMPHY_82577)
10715 wm_set_mdio_slow_mode_hv(sc);
10716
10717 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
10718
10719 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
10720
10721 /* 82578 */
10722 if (sc->sc_phytype == WMPHY_82578) {
10723 /* PCH rev. < 3 */
10724 if (sc->sc_rev < 3) {
10725 /* XXX 6 bit shift? Why? Is it page2? */
10726 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
10727 0x66c0);
10728 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
10729 0xffff);
10730 }
10731
10732 /* XXX phy rev. < 2 */
10733 }
10734
10735 /* Select page 0 */
10736
10737 /* XXX acquire semaphore */
10738 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
10739 /* XXX release semaphore */
10740
10741 /*
10742 * Configure the K1 Si workaround during phy reset assuming there is
10743 * link so that it disables K1 if link is in 1Gbps.
10744 */
10745 wm_k1_gig_workaround_hv(sc, 1);
10746 }
10747
10748 static void
10749 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
10750 {
10751
10752 wm_set_mdio_slow_mode_hv(sc);
10753 }
10754
10755 static void
10756 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
10757 {
10758 int k1_enable = sc->sc_nvm_k1_enabled;
10759
10760 /* XXX acquire semaphore */
10761
10762 if (link) {
10763 k1_enable = 0;
10764
10765 /* Link stall fix for link up */
10766 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
10767 } else {
10768 /* Link stall fix for link down */
10769 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
10770 }
10771
10772 wm_configure_k1_ich8lan(sc, k1_enable);
10773
10774 /* XXX release semaphore */
10775 }
10776
10777 static void
10778 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
10779 {
10780 uint32_t reg;
10781
10782 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
10783 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
10784 reg | HV_KMRN_MDIO_SLOW);
10785 }
10786
10787 static void
10788 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
10789 {
10790 uint32_t ctrl, ctrl_ext, tmp;
10791 uint16_t kmrn_reg;
10792
10793 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
10794
10795 if (k1_enable)
10796 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
10797 else
10798 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
10799
10800 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
10801
10802 delay(20);
10803
10804 ctrl = CSR_READ(sc, WMREG_CTRL);
10805 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10806
10807 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
10808 tmp |= CTRL_FRCSPD;
10809
10810 CSR_WRITE(sc, WMREG_CTRL, tmp);
10811 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
10812 CSR_WRITE_FLUSH(sc);
10813 delay(20);
10814
10815 CSR_WRITE(sc, WMREG_CTRL, ctrl);
10816 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10817 CSR_WRITE_FLUSH(sc);
10818 delay(20);
10819 }
10820
10821 /* special case - for 82575 - need to do manual init ... */
10822 static void
10823 wm_reset_init_script_82575(struct wm_softc *sc)
10824 {
10825 /*
10826 	 * Remark: this is untested code - we have no board without EEPROM.
10827 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
10828 */
10829
10830 /* SerDes configuration via SERDESCTRL */
10831 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
10832 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
10833 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
10834 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
10835
10836 /* CCM configuration via CCMCTL register */
10837 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
10838 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
10839
10840 /* PCIe lanes configuration */
10841 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
10842 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
10843 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
10844 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
10845
10846 /* PCIe PLL Configuration */
10847 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
10848 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
10849 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
10850 }
10851
10852 static void
10853 wm_reset_mdicnfg_82580(struct wm_softc *sc)
10854 {
10855 uint32_t reg;
10856 uint16_t nvmword;
10857 int rv;
10858
10859 if ((sc->sc_flags & WM_F_SGMII) == 0)
10860 return;
10861
10862 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
10863 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
10864 if (rv != 0) {
10865 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
10866 __func__);
10867 return;
10868 }
10869
10870 reg = CSR_READ(sc, WMREG_MDICNFG);
10871 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
10872 reg |= MDICNFG_DEST;
10873 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
10874 reg |= MDICNFG_COM_MDIO;
10875 CSR_WRITE(sc, WMREG_MDICNFG, reg);
10876 }
10877
10878 /*
10879 * I210 Errata 25 and I211 Errata 10
10880 * Slow System Clock.
10881 */
10882 static void
10883 wm_pll_workaround_i210(struct wm_softc *sc)
10884 {
10885 uint32_t mdicnfg, wuc;
10886 uint32_t reg;
10887 pcireg_t pcireg;
10888 uint32_t pmreg;
10889 uint16_t nvmword, tmp_nvmword;
10890 int phyval;
10891 bool wa_done = false;
10892 int i;
10893
10894 /* Save WUC and MDICNFG registers */
10895 wuc = CSR_READ(sc, WMREG_WUC);
10896 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
10897
10898 reg = mdicnfg & ~MDICNFG_DEST;
10899 CSR_WRITE(sc, WMREG_MDICNFG, reg);
10900
10901 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
10902 nvmword = INVM_DEFAULT_AL;
10903 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
10904
10905 /* Get Power Management cap offset */
10906 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
10907 &pmreg, NULL) == 0)
10908 return;
10909 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
10910 phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
10911 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
10912
10913 if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
10914 break; /* OK */
10915 }
10916
10917 wa_done = true;
10918 /* Directly reset the internal PHY */
10919 reg = CSR_READ(sc, WMREG_CTRL);
10920 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
10921
10922 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10923 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
10924 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10925
10926 CSR_WRITE(sc, WMREG_WUC, 0);
10927 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
10928 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
10929
10930 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
10931 pmreg + PCI_PMCSR);
10932 pcireg |= PCI_PMCSR_STATE_D3;
10933 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
10934 pmreg + PCI_PMCSR, pcireg);
10935 delay(1000);
10936 pcireg &= ~PCI_PMCSR_STATE_D3;
10937 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
10938 pmreg + PCI_PMCSR, pcireg);
10939
10940 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
10941 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
10942
10943 /* Restore WUC register */
10944 CSR_WRITE(sc, WMREG_WUC, wuc);
10945 }
10946
10947 /* Restore MDICNFG setting */
10948 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
10949 if (wa_done)
10950 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
10951 }
10952