/* $NetBSD: if_wm.c,v 1.347 2015/09/07 15:19:05 msaitoh Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- Multi queue
 *	- Image Unique ID
 *	- LPLU other than PCH*
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.347 2015/09/07 15:19:05 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
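
/*
 * Illustrative note (not in the original source): the second argument of
 * DPRINTF() is a fully parenthesized printf() argument list, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 *
 * which expands to a printf() guarded by the corresponding bit in the
 * wm_debug flag word, and to nothing at all when WM_DEBUG is not defined.
 */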

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

#ifdef __HAVE_PCI_MSI_MSIX
#define	WM_MSI_MSIX	1 /* Enable by default */
#endif

/*
 * This device driver divides interrupts into TX, RX and link state.
 * The MSI-X vector index of each interrupt is given below.
 */
#define	WM_MSIX_NINTR		3
#define	WM_MSIX_TXINTR_IDX	0
#define	WM_MSIX_RXINTR_IDX	1
#define	WM_MSIX_LINKINTR_IDX	2
#define	WM_MAX_NINTR		WM_MSIX_NINTR

/*
 * This device driver sets the affinity of each interrupt as below
 * (round-robin).  If the number of CPUs is less than the number of
 * interrupts, this driver uses the same CPU for multiple interrupts.
 */
#define	WM_MSIX_TXINTR_CPUID	0
#define	WM_MSIX_RXINTR_CPUID	1
#define	WM_MSIX_LINKINTR_CPUID	2

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
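
/*
 * Illustrative example (not in the original source): with
 * WM_NTXDESC(sc) == 4096, WM_NTXDESC_MASK(sc) is 0xfff, so
 * WM_NEXTTX(sc, 4095) wraps back to 0 with a single AND instead of a
 * modulo operation.  This is why the descriptor and job counts above
 * must be powers of two.
 */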

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
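
/*
 * For example (illustrative): WM_CDRXOFF(0) is 0 because the Rx
 * descriptors sit at the start of struct wm_control_data_82544, while
 * WM_CDTXOFF(0) equals sizeof(wcd_rxdescs).  These byte offsets into
 * the single control-data DMA segment are what let the sync macros
 * below flush individual descriptors rather than the whole clump.
 */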

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t	swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define	WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define	WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define	WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define	WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define	WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define	WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define	WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define	WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define	WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
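
/*
 * Note (illustrative, not in the original source): the two macros above
 * implement the usual tail-pointer idiom for appending to an mbuf chain
 * in O(1).  sc_rxtailp always points at the m_next field (or at
 * sc_rxhead itself when the chain is empty) through which the next
 * fragment should be stored, so WM_RXCHAIN_LINK() never has to walk
 * the chain.
 */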

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
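
/*
 * Illustrative example (not in the original source): on a platform with
 * a 64-bit bus_addr_t, a control-data address such as 0x000000012345f000
 * is split by the macros above into a low word of 0x2345f000 and a high
 * word of 0x1.  On 32-bit platforms the sizeof() test is a compile-time
 * constant, so the high word is always 0 and the shift is compiled away.
 */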

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
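
/*
 * Worked example (illustrative, not in the original source): syncing 4
 * descriptors starting at index 4094 in a 4096-entry ring first syncs
 * entries 4094-4095 (the tail of the ring), then wraps and syncs
 * entries 0-1.  A job that spans the end of the ring is therefore
 * always covered by at most two bus_dmamap_sync() calls.
 */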

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *);
static void	wm_rxeof(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
#ifdef WM_MSI_MSIX
static int	wm_txintr_msix(void *);
static int	wm_rxintr_msix(void *);
static int	wm_linkintr_msix(void *);
#endif

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

#ifdef WM_MSI_MSIX
struct _msix_matrix {
	const char *intrname;
	int (*func)(void *);
	int intridx;
	int cpuid;
} msix_matrix[WM_MSIX_NINTR] = {
	{ "TX", wm_txintr_msix, WM_MSIX_TXINTR_IDX, WM_MSIX_TXINTR_CPUID },
	{ "RX", wm_rxintr_msix, WM_MSIX_RXINTR_IDX, WM_MSIX_RXINTR_CPUID },
	{ "LINK", wm_linkintr_msix, WM_MSIX_LINKINTR_IDX,
	    WM_MSIX_LINKINTR_CPUID },
};
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
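
/*
 * Note (illustrative, not in the original source): wm_set_dma_addr() is
 * the byte-order-aware counterpart of the WM_CD*ADDR_{LO,HI} macros
 * above.  It stores a bus address into the little-endian two-word
 * address field used by the hardware descriptors, setting the high
 * word to zero on platforms with a 32-bit bus_addr_t.
 */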

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
#ifndef WM_MSI_MSIX
	pci_intr_handle_t ih;
#else
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
#endif
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, nvmword;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Disable MSI for Errata:
	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
	 *
	 *  82544: Errata 25
	 *  82540: Errata  6 (easy to reproduce device timeout)
	 *  82545: Errata  4 (easy to reproduce device timeout)
	 *  82546: Errata 26 (easy to reproduce device timeout)
	 *  82541: Errata  7 (easy to reproduce device timeout)
	 *
	 * "Byte Enables 2 and 3 are not set on MSI writes"
	 *
	 *  82571 & 82572: Errata 63
	 */
	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
	    || (sc->sc_type == WM_T_82572))
		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
1517 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1518 switch (memtype) {
1519 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1520 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1521 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1522 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1523 break;
1524 default:
1525 memh_valid = 0;
1526 break;
1527 }
1528
1529 if (memh_valid) {
1530 sc->sc_st = memt;
1531 sc->sc_sh = memh;
1532 sc->sc_ss = memsize;
1533 } else {
1534 aprint_error_dev(sc->sc_dev,
1535 "unable to map device registers\n");
1536 return;
1537 }
1538
1539 /*
1540 * In addition, i82544 and later support I/O mapped indirect
1541 * register access. It is not desirable (nor supported in
1542 * this driver) to use it for normal operation, though it is
1543 * required to work around bugs in some chip versions.
1544 */
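	/*
	 * A minimal sketch of how the indirect access works (cf. the
	 * driver's wm_io_write()): the I/O BAR exposes an address
	 * register at offset 0 and a data register at offset 4, so an
	 * indirect register write is roughly:
	 *
	 *	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	 *	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
	 */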
1545 if (sc->sc_type >= WM_T_82544) {
1546 /* First we have to find the I/O BAR. */
1547 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1548 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1549 if (memtype == PCI_MAPREG_TYPE_IO)
1550 break;
1551 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1552 PCI_MAPREG_MEM_TYPE_64BIT)
1553 i += 4; /* skip high bits, too */
1554 }
1555 if (i < PCI_MAPREG_END) {
1556 /*
1557 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1558 * (and newer?) chips have no PCI_MAPREG_TYPE_IO BAR;
1559 * that's no problem, because those newer chips don't
1560 * have this bug.
1561 *
1562 * The i8254x apparently doesn't respond when the
1563 * I/O BAR is 0, which looks somewhat like it hasn't
1564 * been configured.
1565 */
1566 preg = pci_conf_read(pc, pa->pa_tag, i);
1567 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1568 aprint_error_dev(sc->sc_dev,
1569 "WARNING: I/O BAR at zero.\n");
1570 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1571 0, &sc->sc_iot, &sc->sc_ioh,
1572 NULL, &sc->sc_ios) == 0) {
1573 sc->sc_flags |= WM_F_IOH_VALID;
1574 } else {
1575 aprint_error_dev(sc->sc_dev,
1576 "WARNING: unable to map I/O space\n");
1577 }
1578 }
1579
1580 }
1581
1582 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1583 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1584 preg |= PCI_COMMAND_MASTER_ENABLE;
1585 if (sc->sc_type < WM_T_82542_2_1)
1586 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1587 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1588
1589 /* power up chip */
1590 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1591 NULL)) && error != EOPNOTSUPP) {
1592 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1593 return;
1594 }
1595
1596 #ifndef WM_MSI_MSIX
1597 /*
1598 * Map and establish our interrupt.
1599 */
1600 if (pci_intr_map(pa, &ih)) {
1601 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1602 return;
1603 }
1604 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
1605 #ifdef WM_MPSAFE
1606 pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
1607 #endif
1608 sc->sc_ihs[0] = pci_intr_establish_xname(pc, ih, IPL_NET,
1609 wm_intr_legacy, sc, device_xname(sc->sc_dev));
1610 if (sc->sc_ihs[0] == NULL) {
1611 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1612 if (intrstr != NULL)
1613 aprint_error(" at %s", intrstr);
1614 aprint_error("\n");
1615 return;
1616 }
1617 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1618 sc->sc_nintrs = 1;
1619 #else /* WM_MSI_MSIX */
1620 /* Allocation settings */
1621 max_type = PCI_INTR_TYPE_MSIX;
1622 counts[PCI_INTR_TYPE_MSIX] = WM_MAX_NINTR;
1623 counts[PCI_INTR_TYPE_MSI] = 1;
1624 counts[PCI_INTR_TYPE_INTX] = 1;
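	/*
	 * The retry scheme below degrades gracefully: if establishing
	 * an MSI-X vector fails, the code releases what was allocated,
	 * shrinks counts[], and jumps back to alloc_retry to try MSI
	 * and finally INTx.
	 */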
1625
1626 alloc_retry:
1627 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1628 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1629 return;
1630 }
1631
1632 if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1633 void *vih;
1634 kcpuset_t *affinity;
1635 char intr_xname[INTRDEVNAMEBUF];
1636
1637 kcpuset_create(&affinity, false);
1638
1639 for (i = 0; i < WM_MSIX_NINTR; i++) {
1640 intrstr = pci_intr_string(pc,
1641 sc->sc_intrs[msix_matrix[i].intridx], intrbuf,
1642 sizeof(intrbuf));
1643 #ifdef WM_MPSAFE
1644 pci_intr_setattr(pc,
1645 &sc->sc_intrs[msix_matrix[i].intridx],
1646 PCI_INTR_MPSAFE, true);
1647 #endif
1648 memset(intr_xname, 0, sizeof(intr_xname));
1649 strlcat(intr_xname, device_xname(sc->sc_dev),
1650 sizeof(intr_xname));
1651 strlcat(intr_xname, msix_matrix[i].intrname,
1652 sizeof(intr_xname));
1653 vih = pci_intr_establish_xname(pc,
1654 sc->sc_intrs[msix_matrix[i].intridx], IPL_NET,
1655 msix_matrix[i].func, sc, intr_xname);
1656 if (vih == NULL) {
1657 aprint_error_dev(sc->sc_dev,
1658 "unable to establish MSI-X(for %s)%s%s\n",
1659 msix_matrix[i].intrname,
1660 intrstr ? " at " : "",
1661 intrstr ? intrstr : "");
1662 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1663 WM_MSIX_NINTR);
1664 kcpuset_destroy(affinity);
1665
1666 /* Setup for MSI: Disable MSI-X */
1667 max_type = PCI_INTR_TYPE_MSI;
1668 counts[PCI_INTR_TYPE_MSI] = 1;
1669 counts[PCI_INTR_TYPE_INTX] = 1;
1670 goto alloc_retry;
1671 }
1672 kcpuset_zero(affinity);
1673 /* Round-robin affinity */
1674 kcpuset_set(affinity, msix_matrix[i].cpuid % ncpu);
1675 error = interrupt_distribute(vih, affinity, NULL);
1676 if (error == 0) {
1677 aprint_normal_dev(sc->sc_dev,
1678 "for %s interrupting at %s affinity to %u\n",
1679 msix_matrix[i].intrname, intrstr,
1680 msix_matrix[i].cpuid % ncpu);
1681 } else {
1682 aprint_normal_dev(sc->sc_dev,
1683 "for %s interrupting at %s\n",
1684 msix_matrix[i].intrname, intrstr);
1685 }
1686 sc->sc_ihs[msix_matrix[i].intridx] = vih;
1687 }
1688
1689 sc->sc_nintrs = WM_MSIX_NINTR;
1690 kcpuset_destroy(affinity);
1691 } else {
1692 /* MSI or INTx */
1693 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
1694 sizeof(intrbuf));
1695 #ifdef WM_MPSAFE
1696 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
1697 #endif
1698 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
1699 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
1700 if (sc->sc_ihs[0] == NULL) {
1701 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
1702 (pci_intr_type(sc->sc_intrs[0])
1703 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
1704 pci_intr_release(sc->sc_pc, sc->sc_intrs, 1);
1705 switch (pci_intr_type(sc->sc_intrs[0])) {
1706 case PCI_INTR_TYPE_MSI:
1707 /* The next try is for INTx: Disable MSI */
1708 max_type = PCI_INTR_TYPE_INTX;
1709 counts[PCI_INTR_TYPE_INTX] = 1;
1710 goto alloc_retry;
1711 case PCI_INTR_TYPE_INTX:
1712 default:
1713 return;
1714 }
1715 }
1716 aprint_normal_dev(sc->sc_dev, "%s at %s\n",
1717 (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI)
1718 ? "MSI" : "interrupting", intrstr);
1719
1720 sc->sc_nintrs = 1;
1721 }
1722 #endif /* WM_MSI_MSIX */
1723
1724 /*
1725 * Check the function ID (unit number of the chip).
1726 */
1727 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1728 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1729 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1730 || (sc->sc_type == WM_T_82580)
1731 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1732 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1733 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1734 else
1735 sc->sc_funcid = 0;
1736
1737 /*
1738 * Determine a few things about the bus we're connected to.
1739 */
1740 if (sc->sc_type < WM_T_82543) {
1741 /* We don't really know the bus characteristics here. */
1742 sc->sc_bus_speed = 33;
1743 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1744 /*
1745 * CSA (Communication Streaming Architecture) is about as fast
1746 * as a 32-bit 66MHz PCI bus.
1747 */
1748 sc->sc_flags |= WM_F_CSA;
1749 sc->sc_bus_speed = 66;
1750 aprint_verbose_dev(sc->sc_dev,
1751 "Communication Streaming Architecture\n");
1752 if (sc->sc_type == WM_T_82547) {
1753 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1754 callout_setfunc(&sc->sc_txfifo_ch,
1755 wm_82547_txfifo_stall, sc);
1756 aprint_verbose_dev(sc->sc_dev,
1757 "using 82547 Tx FIFO stall work-around\n");
1758 }
1759 } else if (sc->sc_type >= WM_T_82571) {
1760 sc->sc_flags |= WM_F_PCIE;
1761 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1762 && (sc->sc_type != WM_T_ICH10)
1763 && (sc->sc_type != WM_T_PCH)
1764 && (sc->sc_type != WM_T_PCH2)
1765 && (sc->sc_type != WM_T_PCH_LPT)) {
1766 /* ICH* and PCH* have no PCIe capability registers */
1767 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1768 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1769 NULL) == 0)
1770 aprint_error_dev(sc->sc_dev,
1771 "unable to find PCIe capability\n");
1772 }
1773 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1774 } else {
1775 reg = CSR_READ(sc, WMREG_STATUS);
1776 if (reg & STATUS_BUS64)
1777 sc->sc_flags |= WM_F_BUS64;
1778 if ((reg & STATUS_PCIX_MODE) != 0) {
1779 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1780
1781 sc->sc_flags |= WM_F_PCIX;
1782 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1783 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1784 aprint_error_dev(sc->sc_dev,
1785 "unable to find PCIX capability\n");
1786 else if (sc->sc_type != WM_T_82545_3 &&
1787 sc->sc_type != WM_T_82546_3) {
1788 /*
1789 * Work around a problem caused by the BIOS
1790 * setting the max memory read byte count
1791 * incorrectly.
1792 */
1793 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1794 sc->sc_pcixe_capoff + PCIX_CMD);
1795 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1796 sc->sc_pcixe_capoff + PCIX_STATUS);
1797
1798 bytecnt =
1799 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1800 PCIX_CMD_BYTECNT_SHIFT;
1801 maxb =
1802 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1803 PCIX_STATUS_MAXB_SHIFT;
1804 if (bytecnt > maxb) {
1805 aprint_verbose_dev(sc->sc_dev,
1806 "resetting PCI-X MMRBC: %d -> %d\n",
1807 512 << bytecnt, 512 << maxb);
1808 pcix_cmd = (pcix_cmd &
1809 ~PCIX_CMD_BYTECNT_MASK) |
1810 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1811 pci_conf_write(pa->pa_pc, pa->pa_tag,
1812 sc->sc_pcixe_capoff + PCIX_CMD,
1813 pcix_cmd);
1814 }
1815 }
1816 }
1817 /*
1818 * The quad port adapter is special; it has a PCIX-PCIX
1819 * bridge on the board, and can run the secondary bus at
1820 * a higher speed.
1821 */
1822 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1823 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1824 : 66;
1825 } else if (sc->sc_flags & WM_F_PCIX) {
1826 switch (reg & STATUS_PCIXSPD_MASK) {
1827 case STATUS_PCIXSPD_50_66:
1828 sc->sc_bus_speed = 66;
1829 break;
1830 case STATUS_PCIXSPD_66_100:
1831 sc->sc_bus_speed = 100;
1832 break;
1833 case STATUS_PCIXSPD_100_133:
1834 sc->sc_bus_speed = 133;
1835 break;
1836 default:
1837 aprint_error_dev(sc->sc_dev,
1838 "unknown PCIXSPD %d; assuming 66MHz\n",
1839 reg & STATUS_PCIXSPD_MASK);
1840 sc->sc_bus_speed = 66;
1841 break;
1842 }
1843 } else
1844 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1845 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1846 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1847 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1848 }
1849
1850 /*
1851 * Allocate the control data structures, and create and load the
1852 * DMA map for it.
1853 *
1854 * NOTE: All Tx descriptors must be in the same 4G segment of
1855 * memory. So must Rx descriptors. We simplify by allocating
1856 * both sets within the same 4G segment.
1857 */
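	/*
	 * The 4G constraint is enforced through the boundary argument
	 * of bus_dmamem_alloc() below: a boundary of 0x100000000
	 * guarantees the allocated segment never crosses a 4GB line.
	 */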
1858 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1859 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1860 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1861 sizeof(struct wm_control_data_82542) :
1862 sizeof(struct wm_control_data_82544);
1863 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1864 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1865 &sc->sc_cd_rseg, 0)) != 0) {
1866 aprint_error_dev(sc->sc_dev,
1867 "unable to allocate control data, error = %d\n",
1868 error);
1869 goto fail_0;
1870 }
1871
1872 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1873 sc->sc_cd_rseg, sc->sc_cd_size,
1874 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1875 aprint_error_dev(sc->sc_dev,
1876 "unable to map control data, error = %d\n", error);
1877 goto fail_1;
1878 }
1879
1880 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1881 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1882 aprint_error_dev(sc->sc_dev,
1883 "unable to create control data DMA map, error = %d\n",
1884 error);
1885 goto fail_2;
1886 }
1887
1888 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1889 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1890 aprint_error_dev(sc->sc_dev,
1891 "unable to load control data DMA map, error = %d\n",
1892 error);
1893 goto fail_3;
1894 }
1895
1896 /* Create the transmit buffer DMA maps. */
1897 WM_TXQUEUELEN(sc) =
1898 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1899 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1900 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1901 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1902 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1903 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1904 aprint_error_dev(sc->sc_dev,
1905 "unable to create Tx DMA map %d, error = %d\n",
1906 i, error);
1907 goto fail_4;
1908 }
1909 }
1910
1911 /* Create the receive buffer DMA maps. */
1912 for (i = 0; i < WM_NRXDESC; i++) {
1913 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1914 MCLBYTES, 0, 0,
1915 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1916 aprint_error_dev(sc->sc_dev,
1917 "unable to create Rx DMA map %d error = %d\n",
1918 i, error);
1919 goto fail_5;
1920 }
1921 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1922 }
1923
1924 /* clear interesting stat counters */
1925 CSR_READ(sc, WMREG_COLC);
1926 CSR_READ(sc, WMREG_RXERRC);
1927
1928 /* get PHY control from SMBus to PCIe */
1929 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1930 || (sc->sc_type == WM_T_PCH_LPT))
1931 wm_smbustopci(sc);
1932
1933 /* Reset the chip to a known state. */
1934 wm_reset(sc);
1935
1936 /* Get some information about the EEPROM. */
1937 switch (sc->sc_type) {
1938 case WM_T_82542_2_0:
1939 case WM_T_82542_2_1:
1940 case WM_T_82543:
1941 case WM_T_82544:
1942 /* Microwire */
1943 sc->sc_nvm_wordsize = 64;
1944 sc->sc_nvm_addrbits = 6;
1945 break;
1946 case WM_T_82540:
1947 case WM_T_82545:
1948 case WM_T_82545_3:
1949 case WM_T_82546:
1950 case WM_T_82546_3:
1951 /* Microwire */
1952 reg = CSR_READ(sc, WMREG_EECD);
1953 if (reg & EECD_EE_SIZE) {
1954 sc->sc_nvm_wordsize = 256;
1955 sc->sc_nvm_addrbits = 8;
1956 } else {
1957 sc->sc_nvm_wordsize = 64;
1958 sc->sc_nvm_addrbits = 6;
1959 }
1960 sc->sc_flags |= WM_F_LOCK_EECD;
1961 break;
1962 case WM_T_82541:
1963 case WM_T_82541_2:
1964 case WM_T_82547:
1965 case WM_T_82547_2:
1966 sc->sc_flags |= WM_F_LOCK_EECD;
1967 reg = CSR_READ(sc, WMREG_EECD);
1968 if (reg & EECD_EE_TYPE) {
1969 /* SPI */
1970 sc->sc_flags |= WM_F_EEPROM_SPI;
1971 wm_nvm_set_addrbits_size_eecd(sc);
1972 } else {
1973 /* Microwire */
1974 if ((reg & EECD_EE_ABITS) != 0) {
1975 sc->sc_nvm_wordsize = 256;
1976 sc->sc_nvm_addrbits = 8;
1977 } else {
1978 sc->sc_nvm_wordsize = 64;
1979 sc->sc_nvm_addrbits = 6;
1980 }
1981 }
1982 break;
1983 case WM_T_82571:
1984 case WM_T_82572:
1985 /* SPI */
1986 sc->sc_flags |= WM_F_EEPROM_SPI;
1987 wm_nvm_set_addrbits_size_eecd(sc);
1988 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1989 break;
1990 case WM_T_82573:
1991 sc->sc_flags |= WM_F_LOCK_SWSM;
1992 /* FALLTHROUGH */
1993 case WM_T_82574:
1994 case WM_T_82583:
1995 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1996 sc->sc_flags |= WM_F_EEPROM_FLASH;
1997 sc->sc_nvm_wordsize = 2048;
1998 } else {
1999 /* SPI */
2000 sc->sc_flags |= WM_F_EEPROM_SPI;
2001 wm_nvm_set_addrbits_size_eecd(sc);
2002 }
2003 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2004 break;
2005 case WM_T_82575:
2006 case WM_T_82576:
2007 case WM_T_82580:
2008 case WM_T_I350:
2009 case WM_T_I354:
2010 case WM_T_80003:
2011 /* SPI */
2012 sc->sc_flags |= WM_F_EEPROM_SPI;
2013 wm_nvm_set_addrbits_size_eecd(sc);
2014 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
2015 | WM_F_LOCK_SWSM;
2016 break;
2017 case WM_T_ICH8:
2018 case WM_T_ICH9:
2019 case WM_T_ICH10:
2020 case WM_T_PCH:
2021 case WM_T_PCH2:
2022 case WM_T_PCH_LPT:
2023 /* FLASH */
2024 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2025 sc->sc_nvm_wordsize = 2048;
2026 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
2027 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2028 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2029 aprint_error_dev(sc->sc_dev,
2030 "can't map FLASH registers\n");
2031 goto fail_5;
2032 }
2033 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2034 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2035 ICH_FLASH_SECTOR_SIZE;
2036 sc->sc_ich8_flash_bank_size =
2037 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2038 sc->sc_ich8_flash_bank_size -=
2039 (reg & ICH_GFPREG_BASE_MASK);
2040 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2041 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2042 break;
2043 case WM_T_I210:
2044 case WM_T_I211:
2045 if (wm_nvm_get_flash_presence_i210(sc)) {
2046 wm_nvm_set_addrbits_size_eecd(sc);
2047 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2048 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
2049 } else {
2050 sc->sc_nvm_wordsize = INVM_SIZE;
2051 sc->sc_flags |= WM_F_EEPROM_INVM;
2052 sc->sc_flags |= WM_F_LOCK_SWFW;
2053 }
2054 break;
2055 default:
2056 break;
2057 }
2058
2059 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2060 switch (sc->sc_type) {
2061 case WM_T_82571:
2062 case WM_T_82572:
2063 reg = CSR_READ(sc, WMREG_SWSM2);
2064 if ((reg & SWSM2_LOCK) == 0) {
2065 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2066 force_clear_smbi = true;
2067 } else
2068 force_clear_smbi = false;
2069 break;
2070 case WM_T_82573:
2071 case WM_T_82574:
2072 case WM_T_82583:
2073 force_clear_smbi = true;
2074 break;
2075 default:
2076 force_clear_smbi = false;
2077 break;
2078 }
2079 if (force_clear_smbi) {
2080 reg = CSR_READ(sc, WMREG_SWSM);
2081 if ((reg & SWSM_SMBI) != 0)
2082 aprint_error_dev(sc->sc_dev,
2083 "Please update the Bootagent\n");
2084 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2085 }
2086
2087 /*
2088 * Defer printing the EEPROM type until after verifying the checksum.
2089 * This allows the EEPROM type to be printed correctly in the case
2090 * that no EEPROM is attached.
2091 */
2092 /*
2093 * Validate the EEPROM checksum. If the checksum fails, flag
2094 * this for later, so we can fail future reads from the EEPROM.
2095 */
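	/*
	 * A rough sketch of the rule wm_nvm_validate_checksum() applies:
	 * the first NVM_SIZE 16-bit words must sum, modulo 2^16, to the
	 * magic value NVM_CHECKSUM (0xbaba), roughly:
	 *
	 *	for (csum = 0, i = 0; i < NVM_SIZE; i++) {
	 *		if (wm_nvm_read(sc, i, 1, &word))
	 *			return 1;
	 *		csum += word;
	 *	}
	 *	return (csum != NVM_CHECKSUM);
	 */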
2096 if (wm_nvm_validate_checksum(sc)) {
2097 /*
2098 * Validate a second time, because some PCI-e parts fail
2099 * the first check due to the link being in a sleep state.
2100 */
2101 if (wm_nvm_validate_checksum(sc))
2102 sc->sc_flags |= WM_F_EEPROM_INVALID;
2103 }
2104
2105 /* Set device properties (macflags) */
2106 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2107
2108 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2109 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2110 else {
2111 aprint_verbose_dev(sc->sc_dev, "%u words ",
2112 sc->sc_nvm_wordsize);
2113 if (sc->sc_flags & WM_F_EEPROM_INVM)
2114 aprint_verbose("iNVM");
2115 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2116 aprint_verbose("FLASH(HW)");
2117 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2118 aprint_verbose("FLASH");
2119 else {
2120 if (sc->sc_flags & WM_F_EEPROM_SPI)
2121 eetype = "SPI";
2122 else
2123 eetype = "MicroWire";
2124 aprint_verbose("(%d address bits) %s EEPROM",
2125 sc->sc_nvm_addrbits, eetype);
2126 }
2127 }
2128 wm_nvm_version(sc);
2129 aprint_verbose("\n");
2130
2131 /* Check for I21[01] PLL workaround */
2132 if (sc->sc_type == WM_T_I210)
2133 sc->sc_flags |= WM_F_PLL_WA_I210;
2134 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2135 /* NVM images 3.25 and newer contain a workaround */
2136 if ((sc->sc_nvm_ver_major < 3)
2137 || ((sc->sc_nvm_ver_major == 3)
2138 && (sc->sc_nvm_ver_minor < 25))) {
2139 aprint_verbose_dev(sc->sc_dev,
2140 "ROM image version %d.%d is older than 3.25\n",
2141 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2142 sc->sc_flags |= WM_F_PLL_WA_I210;
2143 }
2144 }
2145 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2146 wm_pll_workaround_i210(sc);
2147
2148 switch (sc->sc_type) {
2149 case WM_T_82571:
2150 case WM_T_82572:
2151 case WM_T_82573:
2152 case WM_T_82574:
2153 case WM_T_82583:
2154 case WM_T_80003:
2155 case WM_T_ICH8:
2156 case WM_T_ICH9:
2157 case WM_T_ICH10:
2158 case WM_T_PCH:
2159 case WM_T_PCH2:
2160 case WM_T_PCH_LPT:
2161 if (wm_check_mng_mode(sc) != 0)
2162 wm_get_hw_control(sc);
2163 break;
2164 default:
2165 break;
2166 }
2167 wm_get_wakeup(sc);
2168 /*
2169 * Read the Ethernet address from the EEPROM, unless it was
2170 * first found in the device properties.
2171 */
2172 ea = prop_dictionary_get(dict, "mac-address");
2173 if (ea != NULL) {
2174 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2175 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2176 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2177 } else {
2178 if (wm_read_mac_addr(sc, enaddr) != 0) {
2179 aprint_error_dev(sc->sc_dev,
2180 "unable to read Ethernet address\n");
2181 goto fail_5;
2182 }
2183 }
2184
2185 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2186 ether_sprintf(enaddr));
2187
2188 /*
2189 * Read the config info from the EEPROM, and set up various
2190 * bits in the control registers based on their contents.
2191 */
2192 pn = prop_dictionary_get(dict, "i82543-cfg1");
2193 if (pn != NULL) {
2194 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2195 cfg1 = (uint16_t) prop_number_integer_value(pn);
2196 } else {
2197 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2198 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2199 goto fail_5;
2200 }
2201 }
2202
2203 pn = prop_dictionary_get(dict, "i82543-cfg2");
2204 if (pn != NULL) {
2205 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2206 cfg2 = (uint16_t) prop_number_integer_value(pn);
2207 } else {
2208 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2209 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2210 goto fail_5;
2211 }
2212 }
2213
2214 /* check for WM_F_WOL */
2215 switch (sc->sc_type) {
2216 case WM_T_82542_2_0:
2217 case WM_T_82542_2_1:
2218 case WM_T_82543:
2219 /* dummy? */
2220 eeprom_data = 0;
2221 apme_mask = NVM_CFG3_APME;
2222 break;
2223 case WM_T_82544:
2224 apme_mask = NVM_CFG2_82544_APM_EN;
2225 eeprom_data = cfg2;
2226 break;
2227 case WM_T_82546:
2228 case WM_T_82546_3:
2229 case WM_T_82571:
2230 case WM_T_82572:
2231 case WM_T_82573:
2232 case WM_T_82574:
2233 case WM_T_82583:
2234 case WM_T_80003:
2235 default:
2236 apme_mask = NVM_CFG3_APME;
2237 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2238 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2239 break;
2240 case WM_T_82575:
2241 case WM_T_82576:
2242 case WM_T_82580:
2243 case WM_T_I350:
2244 case WM_T_I354: /* XXX ok? */
2245 case WM_T_ICH8:
2246 case WM_T_ICH9:
2247 case WM_T_ICH10:
2248 case WM_T_PCH:
2249 case WM_T_PCH2:
2250 case WM_T_PCH_LPT:
2251 /* XXX The funcid should be checked on some devices */
2252 apme_mask = WUC_APME;
2253 eeprom_data = CSR_READ(sc, WMREG_WUC);
2254 break;
2255 }
2256
2257 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2258 if ((eeprom_data & apme_mask) != 0)
2259 sc->sc_flags |= WM_F_WOL;
2260 #ifdef WM_DEBUG
2261 if ((sc->sc_flags & WM_F_WOL) != 0)
2262 printf("WOL\n");
2263 #endif
2264
2265 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2266 /* Check NVM for autonegotiation */
2267 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2268 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2269 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2270 }
2271 }
2272
2273 /*
2274 * XXX need special handling for some multi-port cards
2275 * to disable a particular port.
2276 */
2277
2278 if (sc->sc_type >= WM_T_82544) {
2279 pn = prop_dictionary_get(dict, "i82543-swdpin");
2280 if (pn != NULL) {
2281 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2282 swdpin = (uint16_t) prop_number_integer_value(pn);
2283 } else {
2284 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2285 aprint_error_dev(sc->sc_dev,
2286 "unable to read SWDPIN\n");
2287 goto fail_5;
2288 }
2289 }
2290 }
2291
2292 if (cfg1 & NVM_CFG1_ILOS)
2293 sc->sc_ctrl |= CTRL_ILOS;
2294
2295 /*
2296 * XXX
2297 * This code isn't correct, because pins 2 and 3 are located
2298 * at different positions on newer chips. Check all the datasheets.
2299 *
2300 * Until this problem is resolved, apply it only to chips up to the 82580.
2301 */
2302 if (sc->sc_type <= WM_T_82580) {
2303 if (sc->sc_type >= WM_T_82544) {
2304 sc->sc_ctrl |=
2305 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2306 CTRL_SWDPIO_SHIFT;
2307 sc->sc_ctrl |=
2308 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2309 CTRL_SWDPINS_SHIFT;
2310 } else {
2311 sc->sc_ctrl |=
2312 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2313 CTRL_SWDPIO_SHIFT;
2314 }
2315 }
2316
2317 /* XXX For other than 82580? */
2318 if (sc->sc_type == WM_T_82580) {
2319 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2320 printf("CFG3 = %08x\n", (uint32_t)nvmword);
2321 if (nvmword & __BIT(13)) {
2322 printf("SET ILOS\n");
2323 sc->sc_ctrl |= CTRL_ILOS;
2324 }
2325 }
2326
2327 #if 0
2328 if (sc->sc_type >= WM_T_82544) {
2329 if (cfg1 & NVM_CFG1_IPS0)
2330 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2331 if (cfg1 & NVM_CFG1_IPS1)
2332 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2333 sc->sc_ctrl_ext |=
2334 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2335 CTRL_EXT_SWDPIO_SHIFT;
2336 sc->sc_ctrl_ext |=
2337 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2338 CTRL_EXT_SWDPINS_SHIFT;
2339 } else {
2340 sc->sc_ctrl_ext |=
2341 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2342 CTRL_EXT_SWDPIO_SHIFT;
2343 }
2344 #endif
2345
2346 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2347 #if 0
2348 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2349 #endif
2350
2351 /*
2352 * Set up some register offsets that are different between
2353 * the i82542 and the i82543 and later chips.
2354 */
2355 if (sc->sc_type < WM_T_82543) {
2356 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2357 sc->sc_tdt_reg = WMREG_OLD_TDT;
2358 } else {
2359 sc->sc_rdt_reg = WMREG_RDT;
2360 sc->sc_tdt_reg = WMREG_TDT;
2361 }
2362
2363 if (sc->sc_type == WM_T_PCH) {
2364 uint16_t val;
2365
2366 /* Save the NVM K1 bit setting */
2367 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2368
2369 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2370 sc->sc_nvm_k1_enabled = 1;
2371 else
2372 sc->sc_nvm_k1_enabled = 0;
2373 }
2374
2375 /*
2376 * Determine whether we're in TBI, GMII or SGMII mode, and initialize
2377 * the media structures accordingly.
2378 */
2379 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2380 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2381 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2382 || sc->sc_type == WM_T_82573
2383 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2384 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2385 wm_gmii_mediainit(sc, wmp->wmp_product);
2386 } else if (sc->sc_type < WM_T_82543 ||
2387 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2388 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2389 aprint_error_dev(sc->sc_dev,
2390 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2391 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2392 }
2393 wm_tbi_mediainit(sc);
2394 } else {
2395 switch (sc->sc_type) {
2396 case WM_T_82575:
2397 case WM_T_82576:
2398 case WM_T_82580:
2399 case WM_T_I350:
2400 case WM_T_I354:
2401 case WM_T_I210:
2402 case WM_T_I211:
2403 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2404 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2405 switch (link_mode) {
2406 case CTRL_EXT_LINK_MODE_1000KX:
2407 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2408 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2409 break;
2410 case CTRL_EXT_LINK_MODE_SGMII:
2411 if (wm_sgmii_uses_mdio(sc)) {
2412 aprint_verbose_dev(sc->sc_dev,
2413 "SGMII(MDIO)\n");
2414 sc->sc_flags |= WM_F_SGMII;
2415 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2416 break;
2417 }
2418 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2419 /*FALLTHROUGH*/
2420 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2421 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2422 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2423 if (link_mode
2424 == CTRL_EXT_LINK_MODE_SGMII) {
2425 sc->sc_mediatype
2426 = WM_MEDIATYPE_COPPER;
2427 sc->sc_flags |= WM_F_SGMII;
2428 } else {
2429 sc->sc_mediatype
2430 = WM_MEDIATYPE_SERDES;
2431 aprint_verbose_dev(sc->sc_dev,
2432 "SERDES\n");
2433 }
2434 break;
2435 }
2436 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2437 aprint_verbose_dev(sc->sc_dev,
2438 "SERDES\n");
2439
2440 /* Change current link mode setting */
2441 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2442 switch (sc->sc_mediatype) {
2443 case WM_MEDIATYPE_COPPER:
2444 reg |= CTRL_EXT_LINK_MODE_SGMII;
2445 break;
2446 case WM_MEDIATYPE_SERDES:
2447 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2448 break;
2449 default:
2450 break;
2451 }
2452 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2453 break;
2454 case CTRL_EXT_LINK_MODE_GMII:
2455 default:
2456 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2457 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2458 break;
2459 }
2460
2461 reg &= ~CTRL_EXT_I2C_ENA;
2462 if ((sc->sc_flags & WM_F_SGMII) != 0)
2463 reg |= CTRL_EXT_I2C_ENA;
2464 else
2465 reg &= ~CTRL_EXT_I2C_ENA;
2466 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2467
2468 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2469 wm_gmii_mediainit(sc, wmp->wmp_product);
2470 else
2471 wm_tbi_mediainit(sc);
2472 break;
2473 default:
2474 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2475 aprint_error_dev(sc->sc_dev,
2476 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2477 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2478 wm_gmii_mediainit(sc, wmp->wmp_product);
2479 }
2480 }
2481
2482 ifp = &sc->sc_ethercom.ec_if;
2483 xname = device_xname(sc->sc_dev);
2484 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2485 ifp->if_softc = sc;
2486 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2487 ifp->if_ioctl = wm_ioctl;
2488 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2489 ifp->if_start = wm_nq_start;
2490 else
2491 ifp->if_start = wm_start;
2492 ifp->if_watchdog = wm_watchdog;
2493 ifp->if_init = wm_init;
2494 ifp->if_stop = wm_stop;
2495 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2496 IFQ_SET_READY(&ifp->if_snd);
2497
2498 /* Check for jumbo frame */
2499 switch (sc->sc_type) {
2500 case WM_T_82573:
2501 /* XXX limited to 9234 if ASPM is disabled */
2502 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2503 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2504 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2505 break;
2506 case WM_T_82571:
2507 case WM_T_82572:
2508 case WM_T_82574:
2509 case WM_T_82575:
2510 case WM_T_82576:
2511 case WM_T_82580:
2512 case WM_T_I350:
2513 case WM_T_I354: /* XXXX ok? */
2514 case WM_T_I210:
2515 case WM_T_I211:
2516 case WM_T_80003:
2517 case WM_T_ICH9:
2518 case WM_T_ICH10:
2519 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2520 case WM_T_PCH_LPT:
2521 /* XXX limited to 9234 */
2522 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2523 break;
2524 case WM_T_PCH:
2525 /* XXX limited to 4096 */
2526 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2527 break;
2528 case WM_T_82542_2_0:
2529 case WM_T_82542_2_1:
2530 case WM_T_82583:
2531 case WM_T_ICH8:
2532 /* No support for jumbo frame */
2533 break;
2534 default:
2535 /* ETHER_MAX_LEN_JUMBO */
2536 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2537 break;
2538 }
2539
2540 /* If we're an i82543 or greater, we can support VLANs. */
2541 if (sc->sc_type >= WM_T_82543)
2542 sc->sc_ethercom.ec_capabilities |=
2543 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2544
2545 /*
2546 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2547 * on the i82543 and later.
2548 */
2549 if (sc->sc_type >= WM_T_82543) {
2550 ifp->if_capabilities |=
2551 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2552 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2553 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2554 IFCAP_CSUM_TCPv6_Tx |
2555 IFCAP_CSUM_UDPv6_Tx;
2556 }
2557
2558 /*
2559 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2560 *
2561 * 82541GI (8086:1076) ... no
2562 * 82572EI (8086:10b9) ... yes
2563 */
2564 if (sc->sc_type >= WM_T_82571) {
2565 ifp->if_capabilities |=
2566 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2567 }
2568
2569 /*
2570 * If we're an i82544 or greater (except the i82547), we can do
2571 * TCP segmentation offload.
2572 */
2573 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2574 ifp->if_capabilities |= IFCAP_TSOv4;
2575 }
2576
2577 if (sc->sc_type >= WM_T_82571) {
2578 ifp->if_capabilities |= IFCAP_TSOv6;
2579 }
2580
2581 #ifdef WM_MPSAFE
2582 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2583 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2584 #else
2585 sc->sc_tx_lock = NULL;
2586 sc->sc_rx_lock = NULL;
2587 #endif
2588
2589 /* Attach the interface. */
2590 if_attach(ifp);
2591 ether_ifattach(ifp, enaddr);
2592 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2593 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2594 RND_FLAG_DEFAULT);
2595
2596 #ifdef WM_EVENT_COUNTERS
2597 /* Attach event counters. */
2598 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2599 NULL, xname, "txsstall");
2600 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2601 NULL, xname, "txdstall");
2602 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2603 NULL, xname, "txfifo_stall");
2604 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2605 NULL, xname, "txdw");
2606 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2607 NULL, xname, "txqe");
2608 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2609 NULL, xname, "rxintr");
2610 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2611 NULL, xname, "linkintr");
2612
2613 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2614 NULL, xname, "rxipsum");
2615 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2616 NULL, xname, "rxtusum");
2617 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2618 NULL, xname, "txipsum");
2619 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2620 NULL, xname, "txtusum");
2621 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2622 NULL, xname, "txtusum6");
2623
2624 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2625 NULL, xname, "txtso");
2626 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2627 NULL, xname, "txtso6");
2628 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2629 NULL, xname, "txtsopain");
2630
2631 for (i = 0; i < WM_NTXSEGS; i++) {
2632 snprintf(wm_txseg_evcnt_names[i],
2633 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2634 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2635 NULL, xname, wm_txseg_evcnt_names[i]);
2636 }
2637
2638 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2639 NULL, xname, "txdrop");
2640
2641 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2642 NULL, xname, "tu");
2643
2644 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2645 NULL, xname, "tx_xoff");
2646 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2647 NULL, xname, "tx_xon");
2648 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2649 NULL, xname, "rx_xoff");
2650 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2651 NULL, xname, "rx_xon");
2652 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2653 NULL, xname, "rx_macctl");
2654 #endif /* WM_EVENT_COUNTERS */
2655
2656 if (pmf_device_register(self, wm_suspend, wm_resume))
2657 pmf_class_network_register(self, ifp);
2658 else
2659 aprint_error_dev(self, "couldn't establish power handler\n");
2660
2661 sc->sc_flags |= WM_F_ATTACHED;
2662 return;
2663
2664 /*
2665 * Free any resources we've allocated during the failed attach
2666 * attempt. Do this in reverse order and fall through.
2667 */
2668 fail_5:
2669 for (i = 0; i < WM_NRXDESC; i++) {
2670 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2671 bus_dmamap_destroy(sc->sc_dmat,
2672 sc->sc_rxsoft[i].rxs_dmamap);
2673 }
2674 fail_4:
2675 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2676 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2677 bus_dmamap_destroy(sc->sc_dmat,
2678 sc->sc_txsoft[i].txs_dmamap);
2679 }
2680 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2681 fail_3:
2682 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2683 fail_2:
2684 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2685 sc->sc_cd_size);
2686 fail_1:
2687 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2688 fail_0:
2689 return;
2690 }
2691
2692 /* The detach function (ca_detach) */
2693 static int
2694 wm_detach(device_t self, int flags __unused)
2695 {
2696 struct wm_softc *sc = device_private(self);
2697 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2698 int i;
2699 #ifndef WM_MPSAFE
2700 int s;
2701 #endif
2702
2703 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2704 return 0;
2705
2706 #ifndef WM_MPSAFE
2707 s = splnet();
2708 #endif
2709 /* Stop the interface. Callouts are stopped in it. */
2710 wm_stop(ifp, 1);
2711
2712 #ifndef WM_MPSAFE
2713 splx(s);
2714 #endif
2715
2716 pmf_device_deregister(self);
2717
2718 /* Tell the firmware about the release */
2719 WM_BOTH_LOCK(sc);
2720 wm_release_manageability(sc);
2721 wm_release_hw_control(sc);
2722 WM_BOTH_UNLOCK(sc);
2723
2724 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2725
2726 /* Delete all remaining media. */
2727 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2728
2729 ether_ifdetach(ifp);
2730 if_detach(ifp);
2731
2733 /* Unload RX dmamaps and free mbufs */
2734 WM_RX_LOCK(sc);
2735 wm_rxdrain(sc);
2736 WM_RX_UNLOCK(sc);
2737 /* Must unlock here */
2738
2739 /* Free the DMA maps; this mirrors the failure path at the end of wm_attach(). */
2740 for (i = 0; i < WM_NRXDESC; i++) {
2741 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2742 bus_dmamap_destroy(sc->sc_dmat,
2743 sc->sc_rxsoft[i].rxs_dmamap);
2744 }
2745 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2746 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2747 bus_dmamap_destroy(sc->sc_dmat,
2748 sc->sc_txsoft[i].txs_dmamap);
2749 }
2750 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2751 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2752 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2753 sc->sc_cd_size);
2754 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2755
2756 /* Disestablish the interrupt handler */
2757 for (i = 0; i < sc->sc_nintrs; i++) {
2758 if (sc->sc_ihs[i] != NULL) {
2759 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2760 sc->sc_ihs[i] = NULL;
2761 }
2762 }
2763 #ifdef WM_MSI_MSIX
2764 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2765 #endif /* WM_MSI_MSIX */
2766
2767 /* Unmap the registers */
2768 if (sc->sc_ss) {
2769 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2770 sc->sc_ss = 0;
2771 }
2772 if (sc->sc_ios) {
2773 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2774 sc->sc_ios = 0;
2775 }
2776 if (sc->sc_flashs) {
2777 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2778 sc->sc_flashs = 0;
2779 }
2780
2781 if (sc->sc_tx_lock)
2782 mutex_obj_free(sc->sc_tx_lock);
2783 if (sc->sc_rx_lock)
2784 mutex_obj_free(sc->sc_rx_lock);
2785
2786 return 0;
2787 }
2788
2789 static bool
2790 wm_suspend(device_t self, const pmf_qual_t *qual)
2791 {
2792 struct wm_softc *sc = device_private(self);
2793
2794 wm_release_manageability(sc);
2795 wm_release_hw_control(sc);
2796 #ifdef WM_WOL
2797 wm_enable_wakeup(sc);
2798 #endif
2799
2800 return true;
2801 }
2802
2803 static bool
2804 wm_resume(device_t self, const pmf_qual_t *qual)
2805 {
2806 struct wm_softc *sc = device_private(self);
2807
2808 wm_init_manageability(sc);
2809
2810 return true;
2811 }
2812
2813 /*
2814 * wm_watchdog: [ifnet interface function]
2815 *
2816 * Watchdog timer handler.
2817 */
2818 static void
2819 wm_watchdog(struct ifnet *ifp)
2820 {
2821 struct wm_softc *sc = ifp->if_softc;
2822
2823 /*
2824 * Since we're using delayed interrupts, sweep up
2825 * before we report an error.
2826 */
2827 WM_TX_LOCK(sc);
2828 wm_txeof(sc);
2829 WM_TX_UNLOCK(sc);
2830
2831 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2832 #ifdef WM_DEBUG
2833 int i, j;
2834 struct wm_txsoft *txs;
2835 #endif
2836 log(LOG_ERR,
2837 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2838 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2839 sc->sc_txnext);
2840 ifp->if_oerrors++;
2841 #ifdef WM_DEBUG
2842 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2843 i = WM_NEXTTXS(sc, i)) {
2844 txs = &sc->sc_txsoft[i];
2845 printf("txs %d tx %d -> %d\n",
2846 i, txs->txs_firstdesc, txs->txs_lastdesc);
2847 for (j = txs->txs_firstdesc; ;
2848 j = WM_NEXTTX(sc, j)) {
2849 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2850 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2851 printf("\t %#08x%08x\n",
2852 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2853 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2854 if (j == txs->txs_lastdesc)
2855 break;
2856 }
2857 }
2858 #endif
2859 /* Reset the interface. */
2860 (void) wm_init(ifp);
2861 }
2862
2863 /* Try to get more packets going. */
2864 ifp->if_start(ifp);
2865 }
2866
2867 /*
2868 * wm_tick:
2869 *
2870 * One second timer, used to check link status, sweep up
2871 * completed transmit jobs, etc.
2872 */
2873 static void
2874 wm_tick(void *arg)
2875 {
2876 struct wm_softc *sc = arg;
2877 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2878 #ifndef WM_MPSAFE
2879 int s;
2880
2881 s = splnet();
2882 #endif
2883
2884 WM_TX_LOCK(sc);
2885
2886 if (sc->sc_stopping)
2887 goto out;
2888
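	/*
	 * The statistics registers read below are clear-on-read, so
	 * each tick accumulates only the events counted since the
	 * previous tick.
	 */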
2889 if (sc->sc_type >= WM_T_82542_2_1) {
2890 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2891 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2892 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2893 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2894 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2895 }
2896
2897 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2898 ifp->if_ierrors += 0ULL /* ensure quad_t */
2899 + CSR_READ(sc, WMREG_CRCERRS)
2900 + CSR_READ(sc, WMREG_ALGNERRC)
2901 + CSR_READ(sc, WMREG_SYMERRC)
2902 + CSR_READ(sc, WMREG_RXERRC)
2903 + CSR_READ(sc, WMREG_SEC)
2904 + CSR_READ(sc, WMREG_CEXTERR)
2905 + CSR_READ(sc, WMREG_RLEC);
2906 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2907
2908 if (sc->sc_flags & WM_F_HAS_MII)
2909 mii_tick(&sc->sc_mii);
2910 else if ((sc->sc_type >= WM_T_82575)
2911 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2912 wm_serdes_tick(sc);
2913 else
2914 wm_tbi_tick(sc);
2915
2916 out:
2917 WM_TX_UNLOCK(sc);
2918 #ifndef WM_MPSAFE
2919 splx(s);
2920 #endif
2921
2922 if (!sc->sc_stopping)
2923 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2924 }
2925
2926 static int
2927 wm_ifflags_cb(struct ethercom *ec)
2928 {
2929 struct ifnet *ifp = &ec->ec_if;
2930 struct wm_softc *sc = ifp->if_softc;
2931 int change = ifp->if_flags ^ sc->sc_if_flags;
2932 int rc = 0;
2933
2934 WM_BOTH_LOCK(sc);
2935
2936 if (change != 0)
2937 sc->sc_if_flags = ifp->if_flags;
2938
2939 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2940 rc = ENETRESET;
2941 goto out;
2942 }
2943
2944 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2945 wm_set_filter(sc);
2946
2947 wm_set_vlan(sc);
2948
2949 out:
2950 WM_BOTH_UNLOCK(sc);
2951
2952 return rc;
2953 }
2954
2955 /*
2956 * wm_ioctl: [ifnet interface function]
2957 *
2958 * Handle control requests from the operator.
2959 */
2960 static int
2961 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2962 {
2963 struct wm_softc *sc = ifp->if_softc;
2964 struct ifreq *ifr = (struct ifreq *) data;
2965 struct ifaddr *ifa = (struct ifaddr *)data;
2966 struct sockaddr_dl *sdl;
2967 int s, error;
2968
2969 #ifndef WM_MPSAFE
2970 s = splnet();
2971 #endif
2972 switch (cmd) {
2973 case SIOCSIFMEDIA:
2974 case SIOCGIFMEDIA:
2975 WM_BOTH_LOCK(sc);
2976 /* Flow control requires full-duplex mode. */
2977 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2978 (ifr->ifr_media & IFM_FDX) == 0)
2979 ifr->ifr_media &= ~IFM_ETH_FMASK;
2980 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2981 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2982 /* We can do both TXPAUSE and RXPAUSE. */
2983 ifr->ifr_media |=
2984 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2985 }
2986 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2987 }
2988 WM_BOTH_UNLOCK(sc);
2989 #ifdef WM_MPSAFE
2990 s = splnet();
2991 #endif
2992 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2993 #ifdef WM_MPSAFE
2994 splx(s);
2995 #endif
2996 break;
2997 case SIOCINITIFADDR:
2998 WM_BOTH_LOCK(sc);
2999 if (ifa->ifa_addr->sa_family == AF_LINK) {
3000 sdl = satosdl(ifp->if_dl->ifa_addr);
3001 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3002 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3003 /* unicast address is first multicast entry */
3004 wm_set_filter(sc);
3005 error = 0;
3006 WM_BOTH_UNLOCK(sc);
3007 break;
3008 }
3009 WM_BOTH_UNLOCK(sc);
3010 /*FALLTHROUGH*/
3011 default:
3012 #ifdef WM_MPSAFE
3013 s = splnet();
3014 #endif
3015 /* It may call wm_start, so unlock here */
3016 error = ether_ioctl(ifp, cmd, data);
3017 #ifdef WM_MPSAFE
3018 splx(s);
3019 #endif
3020 if (error != ENETRESET)
3021 break;
3022
3023 error = 0;
3024
3025 if (cmd == SIOCSIFCAP) {
3026 error = (*ifp->if_init)(ifp);
3027 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3028 ;
3029 else if (ifp->if_flags & IFF_RUNNING) {
3030 /*
3031 * Multicast list has changed; set the hardware filter
3032 * accordingly.
3033 */
3034 WM_BOTH_LOCK(sc);
3035 wm_set_filter(sc);
3036 WM_BOTH_UNLOCK(sc);
3037 }
3038 break;
3039 }
3040
3041 #ifndef WM_MPSAFE
3042 splx(s);
3043 #endif
3044 return error;
3045 }
3046
3047 /* MAC address related */
3048
3049 /*
3050 * Get the offset of the MAC address and return it.
3051 * If an error occurs, offset 0 is used.
3052 */
3053 static uint16_t
3054 wm_check_alt_mac_addr(struct wm_softc *sc)
3055 {
3056 uint16_t myea[ETHER_ADDR_LEN / 2];
3057 uint16_t offset = NVM_OFF_MACADDR;
3058
3059 /* Try to read alternative MAC address pointer */
3060 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3061 return 0;
3062
3063 /* Check whether the pointer is valid. */
3064 if ((offset == 0x0000) || (offset == 0xffff))
3065 return 0;
3066
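	/*
	 * The pointer selects a block of per-function MAC addresses;
	 * each function's address is assumed to occupy three 16-bit
	 * words, which is what NVM_OFF_MACADDR_82571(funcid) accounts
	 * for here.
	 */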
3067 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3068 /*
3069 * Check whether the alternative MAC address is valid.
3070 * Some cards have a non-0xffff pointer but don't actually
3071 * use an alternative MAC address.
3072 *
3073 * A valid unicast address must have the multicast bit clear.
3074 */
3075 if (wm_nvm_read(sc, offset, 1, myea) == 0)
3076 if (((myea[0] & 0xff) & 0x01) == 0)
3077 return offset; /* Found */
3078
3079 /* Not found */
3080 return 0;
3081 }
3082
3083 static int
3084 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3085 {
3086 uint16_t myea[ETHER_ADDR_LEN / 2];
3087 uint16_t offset = NVM_OFF_MACADDR;
3088 int do_invert = 0;
3089
3090 switch (sc->sc_type) {
3091 case WM_T_82580:
3092 case WM_T_I350:
3093 case WM_T_I354:
3094 /* EEPROM Top Level Partitioning */
3095 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3096 break;
3097 case WM_T_82571:
3098 case WM_T_82575:
3099 case WM_T_82576:
3100 case WM_T_80003:
3101 case WM_T_I210:
3102 case WM_T_I211:
3103 offset = wm_check_alt_mac_addr(sc);
3104 if (offset == 0)
3105 if ((sc->sc_funcid & 0x01) == 1)
3106 do_invert = 1;
3107 break;
3108 default:
3109 if ((sc->sc_funcid & 0x01) == 1)
3110 do_invert = 1;
3111 break;
3112 }
3113
3114 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
3115 myea) != 0)
3116 goto bad;
3117
3118 enaddr[0] = myea[0] & 0xff;
3119 enaddr[1] = myea[0] >> 8;
3120 enaddr[2] = myea[1] & 0xff;
3121 enaddr[3] = myea[1] >> 8;
3122 enaddr[4] = myea[2] & 0xff;
3123 enaddr[5] = myea[2] >> 8;
3124
3125 /*
3126 * Toggle the LSB of the MAC address on the second port
3127 * of some dual port cards.
3128 */
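	/*
	 * For example, if the NVM holds 00:a0:c9:11:22:20, port 1
	 * would derive 00:a0:c9:11:22:21.
	 */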
3129 if (do_invert != 0)
3130 enaddr[5] ^= 1;
3131
3132 return 0;
3133
3134 bad:
3135 return -1;
3136 }
3137
3138 /*
3139 * wm_set_ral:
3140 *
3141 * Set an entry in the receive address list.
3142 */
3143 static void
3144 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3145 {
3146 uint32_t ral_lo, ral_hi;
3147
3148 if (enaddr != NULL) {
3149 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3150 (enaddr[3] << 24);
3151 ral_hi = enaddr[4] | (enaddr[5] << 8);
3152 ral_hi |= RAL_AV;
3153 } else {
3154 ral_lo = 0;
3155 ral_hi = 0;
3156 }
3157
3158 if (sc->sc_type >= WM_T_82544) {
3159 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3160 ral_lo);
3161 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3162 ral_hi);
3163 } else {
3164 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3165 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3166 }
3167 }
3168
3169 /*
3170 * wm_mchash:
3171 *
3172 * Compute the hash of the multicast address for the 4096-bit
3173 * multicast filter.
3174 */
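/*
 * The caller (wm_set_filter()) splits the returned hash into a
 * register index (hash >> 5) and a bit position (hash & 0x1f)
 * within the multicast table array.
 */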
3175 static uint32_t
3176 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3177 {
3178 static const int lo_shift[4] = { 4, 3, 2, 0 };
3179 static const int hi_shift[4] = { 4, 5, 6, 8 };
3180 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3181 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3182 uint32_t hash;
3183
3184 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3185 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3186 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3187 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3188 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3189 return (hash & 0x3ff);
3190 }
3191 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3192 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3193
3194 return (hash & 0xfff);
3195 }
3196
3197 /*
3198 * wm_set_filter:
3199 *
3200 * Set up the receive filter.
3201 */
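/*
 * The filter has three layers: broadcast acceptance (RCTL_BAM),
 * perfect matches in the receive address list, and the hashed
 * multicast table; promiscuous and allmulti modes fall back to
 * RCTL_UPE and RCTL_MPE respectively.
 */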
3202 static void
3203 wm_set_filter(struct wm_softc *sc)
3204 {
3205 struct ethercom *ec = &sc->sc_ethercom;
3206 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3207 struct ether_multi *enm;
3208 struct ether_multistep step;
3209 bus_addr_t mta_reg;
3210 uint32_t hash, reg, bit;
3211 int i, size;
3212
3213 if (sc->sc_type >= WM_T_82544)
3214 mta_reg = WMREG_CORDOVA_MTA;
3215 else
3216 mta_reg = WMREG_MTA;
3217
3218 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3219
3220 if (ifp->if_flags & IFF_BROADCAST)
3221 sc->sc_rctl |= RCTL_BAM;
3222 if (ifp->if_flags & IFF_PROMISC) {
3223 sc->sc_rctl |= RCTL_UPE;
3224 goto allmulti;
3225 }
3226
3227 /*
3228 * Set the station address in the first RAL slot, and
3229 * clear the remaining slots.
3230 */
3231 if (sc->sc_type == WM_T_ICH8)
3232 size = WM_RAL_TABSIZE_ICH8 -1;
3233 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3234 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3235 || (sc->sc_type == WM_T_PCH_LPT))
3236 size = WM_RAL_TABSIZE_ICH8;
3237 else if (sc->sc_type == WM_T_82575)
3238 size = WM_RAL_TABSIZE_82575;
3239 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3240 size = WM_RAL_TABSIZE_82576;
3241 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3242 size = WM_RAL_TABSIZE_I350;
3243 else
3244 size = WM_RAL_TABSIZE;
3245 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3246 for (i = 1; i < size; i++)
3247 wm_set_ral(sc, NULL, i);
3248
3249 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3250 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3251 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3252 size = WM_ICH8_MC_TABSIZE;
3253 else
3254 size = WM_MC_TABSIZE;
3255 /* Clear out the multicast table. */
3256 for (i = 0; i < size; i++)
3257 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3258
3259 ETHER_FIRST_MULTI(step, ec, enm);
3260 while (enm != NULL) {
3261 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3262 /*
3263 * We must listen to a range of multicast addresses.
3264 * For now, just accept all multicasts, rather than
3265 * trying to set only those filter bits needed to match
3266 * the range. (At this time, the only use of address
3267 * ranges is for IP multicast routing, for which the
3268 * range is big enough to require all bits set.)
3269 */
3270 goto allmulti;
3271 }
3272
3273 hash = wm_mchash(sc, enm->enm_addrlo);
3274
3275 reg = (hash >> 5);
3276 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3277 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3278 || (sc->sc_type == WM_T_PCH2)
3279 || (sc->sc_type == WM_T_PCH_LPT))
3280 reg &= 0x1f;
3281 else
3282 reg &= 0x7f;
3283 bit = hash & 0x1f;
3284
3285 hash = CSR_READ(sc, mta_reg + (reg << 2));
3286 hash |= 1U << bit;
3287
3288 /* XXX Hardware bug?? Writes to some MTA words on the 82544 apparently don't stick unless the previous word is rewritten afterwards. */
3289 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3290 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3291 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3292 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3293 } else
3294 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3295
3296 ETHER_NEXT_MULTI(step, enm);
3297 }
3298
3299 ifp->if_flags &= ~IFF_ALLMULTI;
3300 goto setit;
3301
3302 allmulti:
3303 ifp->if_flags |= IFF_ALLMULTI;
3304 sc->sc_rctl |= RCTL_MPE;
3305
3306 setit:
3307 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3308 }
3309
3310 /* Reset and init related */
3311
3312 static void
3313 wm_set_vlan(struct wm_softc *sc)
3314 {
3315 /* Deal with VLAN enables. */
3316 if (VLAN_ATTACHED(&sc->sc_ethercom))
3317 sc->sc_ctrl |= CTRL_VME;
3318 else
3319 sc->sc_ctrl &= ~CTRL_VME;
3320
3321 /* Write the control registers. */
3322 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3323 }
3324
3325 static void
3326 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3327 {
3328 uint32_t gcr;
3329 pcireg_t ctrl2;
3330
3331 gcr = CSR_READ(sc, WMREG_GCR);
3332
3333 /* Only take action if timeout value is defaulted to 0 */
3334 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3335 goto out;
3336
3337 if ((gcr & GCR_CAP_VER2) == 0) {
3338 gcr |= GCR_CMPL_TMOUT_10MS;
3339 goto out;
3340 }
3341
3342 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3343 sc->sc_pcixe_capoff + PCIE_DCSR2);
3344 ctrl2 |= WM_PCIE_DCSR2_16MS;
3345 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3346 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3347
3348 out:
3349 /* Disable completion timeout resend */
3350 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3351
3352 CSR_WRITE(sc, WMREG_GCR, gcr);
3353 }
3354
3355 void
3356 wm_get_auto_rd_done(struct wm_softc *sc)
3357 {
3358 int i;
3359
3360 /* wait for eeprom to reload */
3361 switch (sc->sc_type) {
3362 case WM_T_82571:
3363 case WM_T_82572:
3364 case WM_T_82573:
3365 case WM_T_82574:
3366 case WM_T_82583:
3367 case WM_T_82575:
3368 case WM_T_82576:
3369 case WM_T_82580:
3370 case WM_T_I350:
3371 case WM_T_I354:
3372 case WM_T_I210:
3373 case WM_T_I211:
3374 case WM_T_80003:
3375 case WM_T_ICH8:
3376 case WM_T_ICH9:
3377 for (i = 0; i < 10; i++) {
3378 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3379 break;
3380 delay(1000);
3381 }
3382 if (i == 10) {
3383 log(LOG_ERR, "%s: auto read from eeprom failed to "
3384 "complete\n", device_xname(sc->sc_dev));
3385 }
3386 break;
3387 default:
3388 break;
3389 }
3390 }
3391
3392 void
3393 wm_lan_init_done(struct wm_softc *sc)
3394 {
3395 uint32_t reg = 0;
3396 int i;
3397
3398 /* wait for eeprom to reload */
3399 switch (sc->sc_type) {
3400 case WM_T_ICH10:
3401 case WM_T_PCH:
3402 case WM_T_PCH2:
3403 case WM_T_PCH_LPT:
3404 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3405 reg = CSR_READ(sc, WMREG_STATUS);
3406 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3407 break;
3408 delay(100);
3409 }
3410 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3411 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3412 "complete\n", device_xname(sc->sc_dev), __func__);
3413 }
3414 break;
3415 default:
3416 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3417 __func__);
3418 break;
3419 }
3420
3421 reg &= ~STATUS_LAN_INIT_DONE;
3422 CSR_WRITE(sc, WMREG_STATUS, reg);
3423 }
3424
3425 void
3426 wm_get_cfg_done(struct wm_softc *sc)
3427 {
3428 int mask;
3429 uint32_t reg;
3430 int i;
3431
3432 /* wait for eeprom to reload */
3433 switch (sc->sc_type) {
3434 case WM_T_82542_2_0:
3435 case WM_T_82542_2_1:
3436 /* null */
3437 break;
3438 case WM_T_82543:
3439 case WM_T_82544:
3440 case WM_T_82540:
3441 case WM_T_82545:
3442 case WM_T_82545_3:
3443 case WM_T_82546:
3444 case WM_T_82546_3:
3445 case WM_T_82541:
3446 case WM_T_82541_2:
3447 case WM_T_82547:
3448 case WM_T_82547_2:
3449 case WM_T_82573:
3450 case WM_T_82574:
3451 case WM_T_82583:
3452 /* generic */
3453 delay(10*1000);
3454 break;
3455 case WM_T_80003:
3456 case WM_T_82571:
3457 case WM_T_82572:
3458 case WM_T_82575:
3459 case WM_T_82576:
3460 case WM_T_82580:
3461 case WM_T_I350:
3462 case WM_T_I354:
3463 case WM_T_I210:
3464 case WM_T_I211:
3465 if (sc->sc_type == WM_T_82571) {
3466 /* Only 82571 shares port 0 */
3467 mask = EEMNGCTL_CFGDONE_0;
3468 } else
3469 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3470 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3471 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3472 break;
3473 delay(1000);
3474 }
3475 if (i >= WM_PHY_CFG_TIMEOUT) {
3476 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3477 device_xname(sc->sc_dev), __func__));
3478 }
3479 break;
3480 case WM_T_ICH8:
3481 case WM_T_ICH9:
3482 case WM_T_ICH10:
3483 case WM_T_PCH:
3484 case WM_T_PCH2:
3485 case WM_T_PCH_LPT:
3486 delay(10*1000);
3487 if (sc->sc_type >= WM_T_ICH10)
3488 wm_lan_init_done(sc);
3489 else
3490 wm_get_auto_rd_done(sc);
3491
3492 reg = CSR_READ(sc, WMREG_STATUS);
3493 if ((reg & STATUS_PHYRA) != 0)
3494 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3495 break;
3496 default:
3497 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3498 __func__);
3499 break;
3500 }
3501 }
3502
3503 /* Init hardware bits */
3504 void
3505 wm_initialize_hardware_bits(struct wm_softc *sc)
3506 {
3507 uint32_t tarc0, tarc1, reg;
3508
3509 /* For 82571 variant, 80003 and ICHs */
3510 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3511 || (sc->sc_type >= WM_T_80003)) {
3512
3513 /* Transmit Descriptor Control 0 */
3514 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3515 reg |= TXDCTL_COUNT_DESC;
3516 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3517
3518 /* Transmit Descriptor Control 1 */
3519 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3520 reg |= TXDCTL_COUNT_DESC;
3521 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3522
3523 /* TARC0 */
3524 tarc0 = CSR_READ(sc, WMREG_TARC0);
3525 switch (sc->sc_type) {
3526 case WM_T_82571:
3527 case WM_T_82572:
3528 case WM_T_82573:
3529 case WM_T_82574:
3530 case WM_T_82583:
3531 case WM_T_80003:
3532 /* Clear bits 30..27 */
3533 tarc0 &= ~__BITS(30, 27);
3534 break;
3535 default:
3536 break;
3537 }
3538
3539 switch (sc->sc_type) {
3540 case WM_T_82571:
3541 case WM_T_82572:
3542 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3543
3544 tarc1 = CSR_READ(sc, WMREG_TARC1);
3545 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3546 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3547 /* 8257[12] Errata No.7 */
3548 tarc1 |= __BIT(22); /* TARC1 bit 22 */
3549
3550 /* TARC1 bit 28 */
3551 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3552 tarc1 &= ~__BIT(28);
3553 else
3554 tarc1 |= __BIT(28);
3555 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3556
3557 /*
3558 * 8257[12] Errata No.13
3559 * Disable Dynamic Clock Gating.
3560 */
3561 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3562 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3563 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3564 break;
3565 case WM_T_82573:
3566 case WM_T_82574:
3567 case WM_T_82583:
3568 if ((sc->sc_type == WM_T_82574)
3569 || (sc->sc_type == WM_T_82583))
3570 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3571
3572 /* Extended Device Control */
3573 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3574 reg &= ~__BIT(23); /* Clear bit 23 */
3575 reg |= __BIT(22); /* Set bit 22 */
3576 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3577
3578 /* Device Control */
3579 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3580 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3581
3582 /* PCIe Control Register */
3583 if ((sc->sc_type == WM_T_82574)
3584 || (sc->sc_type == WM_T_82583)) {
3585 /*
3586 * Document says this bit must be set for
3587 * proper operation.
3588 */
3589 reg = CSR_READ(sc, WMREG_GCR);
3590 reg |= __BIT(22);
3591 CSR_WRITE(sc, WMREG_GCR, reg);
3592
3593 /*
3594 * Apply a workaround for a hardware erratum
3595 * documented in the errata sheets. It fixes an
3596 * issue where error-prone or unreliable PCIe
3597 * completions can occur, particularly with
3598 * ASPM enabled. Without the fix, the issue can
3599 * cause Tx timeouts.
3600 */
3601 reg = CSR_READ(sc, WMREG_GCR2);
3602 reg |= __BIT(0);
3603 CSR_WRITE(sc, WMREG_GCR2, reg);
3604 }
3605 break;
3606 case WM_T_80003:
3607 /* TARC0 */
3608 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3609 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3610 tarc0 &= ~__BIT(20); /* Clear bit 20 */
3611
3612 /* TARC1 bit 28 */
3613 tarc1 = CSR_READ(sc, WMREG_TARC1);
3614 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3615 tarc1 &= ~__BIT(28);
3616 else
3617 tarc1 |= __BIT(28);
3618 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3619 break;
3620 case WM_T_ICH8:
3621 case WM_T_ICH9:
3622 case WM_T_ICH10:
3623 case WM_T_PCH:
3624 case WM_T_PCH2:
3625 case WM_T_PCH_LPT:
3626 /* TARC 0 */
3627 if (sc->sc_type == WM_T_ICH8) {
3628 /* Set TARC0 bits 29 and 28 */
3629 tarc0 |= __BITS(29, 28);
3630 }
3631 /* Set TARC0 bits 23,24,26,27 */
3632 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3633
3634 /* CTRL_EXT */
3635 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3636 reg |= __BIT(22); /* Set bit 22 */
3637 /*
3638 * Enable PHY low-power state when MAC is at D3
3639 * w/o WoL
3640 */
3641 if (sc->sc_type >= WM_T_PCH)
3642 reg |= CTRL_EXT_PHYPDEN;
3643 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3644
3645 /* TARC1 */
3646 tarc1 = CSR_READ(sc, WMREG_TARC1);
3647 /* bit 28 */
3648 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3649 tarc1 &= ~__BIT(28);
3650 else
3651 tarc1 |= __BIT(28);
3652 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3653 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3654
3655 /* Device Status */
3656 if (sc->sc_type == WM_T_ICH8) {
3657 reg = CSR_READ(sc, WMREG_STATUS);
3658 reg &= ~__BIT(31);
3659 CSR_WRITE(sc, WMREG_STATUS, reg);
3660
3661 }
3662
3663 /*
3664 * Work-around descriptor data corruption issue during
3665 * NFS v2 UDP traffic, just disable the NFS filtering
3666 * capability.
3667 */
3668 reg = CSR_READ(sc, WMREG_RFCTL);
3669 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3670 CSR_WRITE(sc, WMREG_RFCTL, reg);
3671 break;
3672 default:
3673 break;
3674 }
3675 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3676
3677 /*
3678 * 8257[12] Errata No.52 and some others.
3679 * Avoid RSS Hash Value bug.
3680 */
3681 switch (sc->sc_type) {
3682 case WM_T_82571:
3683 case WM_T_82572:
3684 case WM_T_82573:
3685 case WM_T_80003:
3686 case WM_T_ICH8:
3687 reg = CSR_READ(sc, WMREG_RFCTL);
3688 reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3689 CSR_WRITE(sc, WMREG_RFCTL, reg);
3690 break;
3691 default:
3692 break;
3693 }
3694 }
3695 }
3696
3697 static uint32_t
3698 wm_rxpbs_adjust_82580(uint32_t val)
3699 {
3700 uint32_t rv = 0;
3701
3702 if (val < __arraycount(wm_82580_rxpbs_table))
3703 rv = wm_82580_rxpbs_table[val];
3704
3705 return rv;
3706 }
3707
3708 /*
3709 * wm_reset:
3710 *
3711 * Reset the i82542 chip.
3712 */
3713 static void
3714 wm_reset(struct wm_softc *sc)
3715 {
3716 int phy_reset = 0;
3717 int error = 0;
3718 uint32_t reg, mask;
3719
3720 /*
3721 * Allocate on-chip memory according to the MTU size.
3722 * The Packet Buffer Allocation register must be written
3723 * before the chip is reset.
3724 */
3725 switch (sc->sc_type) {
3726 case WM_T_82547:
3727 case WM_T_82547_2:
3728 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3729 PBA_22K : PBA_30K;
3730 sc->sc_txfifo_head = 0;
3731 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3732 sc->sc_txfifo_size =
3733 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
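/*
 * E.g., with an MTU above 8192 sc_pba is PBA_22K, so the Tx FIFO
 * occupies the remaining (PBA_40K - PBA_22K) allocation units of
 * the 40K packet buffer; the shifts convert those units into a
 * byte address and size.
 */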
3734 sc->sc_txfifo_stall = 0;
3735 break;
3736 case WM_T_82571:
3737 case WM_T_82572:
3738 case WM_T_82575: /* XXX need special handing for jumbo frames */
3739 case WM_T_80003:
3740 sc->sc_pba = PBA_32K;
3741 break;
3742 case WM_T_82573:
3743 sc->sc_pba = PBA_12K;
3744 break;
3745 case WM_T_82574:
3746 case WM_T_82583:
3747 sc->sc_pba = PBA_20K;
3748 break;
3749 case WM_T_82576:
3750 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3751 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3752 break;
3753 case WM_T_82580:
3754 case WM_T_I350:
3755 case WM_T_I354:
3756 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3757 break;
3758 case WM_T_I210:
3759 case WM_T_I211:
3760 sc->sc_pba = PBA_34K;
3761 break;
3762 case WM_T_ICH8:
3763 /* Workaround for a bit corruption issue in FIFO memory */
3764 sc->sc_pba = PBA_8K;
3765 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3766 break;
3767 case WM_T_ICH9:
3768 case WM_T_ICH10:
3769 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3770 PBA_14K : PBA_10K;
3771 break;
3772 case WM_T_PCH:
3773 case WM_T_PCH2:
3774 case WM_T_PCH_LPT:
3775 sc->sc_pba = PBA_26K;
3776 break;
3777 default:
3778 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3779 PBA_40K : PBA_48K;
3780 break;
3781 }
3782 /*
3783 * Only old or non-multiqueue devices have the PBA register
3784 * XXX Need special handling for 82575.
3785 */
3786 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3787 || (sc->sc_type == WM_T_82575))
3788 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3789
3790 /* Prevent the PCI-E bus from sticking */
3791 if (sc->sc_flags & WM_F_PCIE) {
3792 int timeout = 800;
3793
3794 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3795 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3796
3797 while (timeout--) {
3798 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3799 == 0)
3800 break;
3801 delay(100);
3802 }
3803 }
3804
3805 /* Set the completion timeout for interface */
3806 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3807 || (sc->sc_type == WM_T_82580)
3808 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3809 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3810 wm_set_pcie_completion_timeout(sc);
3811
3812 /* Clear interrupt */
3813 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3814 if (sc->sc_nintrs > 1) {
3815 if (sc->sc_type != WM_T_82574) {
3816 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3817 CSR_WRITE(sc, WMREG_EIAC, 0);
3818 } else {
3819 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3820 }
3821 }
3822
3823 /* Stop the transmit and receive processes. */
3824 CSR_WRITE(sc, WMREG_RCTL, 0);
3825 sc->sc_rctl &= ~RCTL_EN;
3826 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3827 CSR_WRITE_FLUSH(sc);
3828
3829 /* XXX set_tbi_sbp_82543() */
3830
3831 delay(10*1000);
3832
3833 /* Must acquire the MDIO ownership before MAC reset */
3834 switch (sc->sc_type) {
3835 case WM_T_82573:
3836 case WM_T_82574:
3837 case WM_T_82583:
3838 error = wm_get_hw_semaphore_82573(sc);
3839 break;
3840 default:
3841 break;
3842 }
3843
3844 /*
3845 * 82541 Errata 29? & 82547 Errata 28?
3846 * See also the description about PHY_RST bit in CTRL register
3847 * in 8254x_GBe_SDM.pdf.
3848 */
3849 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3850 CSR_WRITE(sc, WMREG_CTRL,
3851 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3852 CSR_WRITE_FLUSH(sc);
3853 delay(5000);
3854 }
3855
3856 switch (sc->sc_type) {
3857 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3858 case WM_T_82541:
3859 case WM_T_82541_2:
3860 case WM_T_82547:
3861 case WM_T_82547_2:
3862 /*
3863 * On some chipsets, a reset through a memory-mapped write
3864 * cycle can cause the chip to reset before completing the
3865 * write cycle. This causes a major headache that can be
3866 * avoided by issuing the reset via indirect register writes
3867 * through I/O space.
3868 *
3869 * So, if we successfully mapped the I/O BAR at attach time,
3870 * use that. Otherwise, try our luck with a memory-mapped
3871 * reset.
3872 */
3873 if (sc->sc_flags & WM_F_IOH_VALID)
3874 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3875 else
3876 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3877 break;
3878 case WM_T_82545_3:
3879 case WM_T_82546_3:
3880 /* Use the shadow control register on these chips. */
3881 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3882 break;
3883 case WM_T_80003:
3884 mask = swfwphysem[sc->sc_funcid];
3885 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3886 wm_get_swfw_semaphore(sc, mask);
3887 CSR_WRITE(sc, WMREG_CTRL, reg);
3888 wm_put_swfw_semaphore(sc, mask);
3889 break;
3890 case WM_T_ICH8:
3891 case WM_T_ICH9:
3892 case WM_T_ICH10:
3893 case WM_T_PCH:
3894 case WM_T_PCH2:
3895 case WM_T_PCH_LPT:
3896 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3897 if (wm_check_reset_block(sc) == 0) {
3898 /*
3899 * Gate automatic PHY configuration by hardware on
3900 * non-managed 82579
3901 */
3902 if ((sc->sc_type == WM_T_PCH2)
3903 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3904 != 0))
3905 wm_gate_hw_phy_config_ich8lan(sc, 1);
3906
3907
3908 reg |= CTRL_PHY_RESET;
3909 phy_reset = 1;
3910 }
3911 wm_get_swfwhw_semaphore(sc);
3912 CSR_WRITE(sc, WMREG_CTRL, reg);
3913 /* Don't insert a completion barrier when reset */
3914 delay(20*1000);
3915 wm_put_swfwhw_semaphore(sc);
3916 break;
3917 case WM_T_82580:
3918 case WM_T_I350:
3919 case WM_T_I354:
3920 case WM_T_I210:
3921 case WM_T_I211:
3922 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3923 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3924 CSR_WRITE_FLUSH(sc);
3925 delay(5000);
3926 break;
3927 case WM_T_82542_2_0:
3928 case WM_T_82542_2_1:
3929 case WM_T_82543:
3930 case WM_T_82540:
3931 case WM_T_82545:
3932 case WM_T_82546:
3933 case WM_T_82571:
3934 case WM_T_82572:
3935 case WM_T_82573:
3936 case WM_T_82574:
3937 case WM_T_82575:
3938 case WM_T_82576:
3939 case WM_T_82583:
3940 default:
3941 /* Everything else can safely use the documented method. */
3942 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3943 break;
3944 }
3945
3946 /* Must release the MDIO ownership after MAC reset */
3947 switch (sc->sc_type) {
3948 case WM_T_82573:
3949 case WM_T_82574:
3950 case WM_T_82583:
3951 if (error == 0)
3952 wm_put_hw_semaphore_82573(sc);
3953 break;
3954 default:
3955 break;
3956 }
3957
3958 if (phy_reset != 0)
3959 wm_get_cfg_done(sc);
3960
3961 /* reload EEPROM */
3962 switch (sc->sc_type) {
3963 case WM_T_82542_2_0:
3964 case WM_T_82542_2_1:
3965 case WM_T_82543:
3966 case WM_T_82544:
3967 delay(10);
3968 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3969 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3970 CSR_WRITE_FLUSH(sc);
3971 delay(2000);
3972 break;
3973 case WM_T_82540:
3974 case WM_T_82545:
3975 case WM_T_82545_3:
3976 case WM_T_82546:
3977 case WM_T_82546_3:
3978 delay(5*1000);
3979 /* XXX Disable HW ARPs on ASF enabled adapters */
3980 break;
3981 case WM_T_82541:
3982 case WM_T_82541_2:
3983 case WM_T_82547:
3984 case WM_T_82547_2:
3985 delay(20000);
3986 /* XXX Disable HW ARPs on ASF enabled adapters */
3987 break;
3988 case WM_T_82571:
3989 case WM_T_82572:
3990 case WM_T_82573:
3991 case WM_T_82574:
3992 case WM_T_82583:
3993 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3994 delay(10);
3995 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3996 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3997 CSR_WRITE_FLUSH(sc);
3998 }
3999 /* check EECD_EE_AUTORD */
4000 wm_get_auto_rd_done(sc);
4001 /*
4002 * PHY configuration from NVM starts only after EECD_AUTO_RD
4003 * is set.
4004 */
4005 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4006 || (sc->sc_type == WM_T_82583))
4007 delay(25*1000);
4008 break;
4009 case WM_T_82575:
4010 case WM_T_82576:
4011 case WM_T_82580:
4012 case WM_T_I350:
4013 case WM_T_I354:
4014 case WM_T_I210:
4015 case WM_T_I211:
4016 case WM_T_80003:
4017 /* check EECD_EE_AUTORD */
4018 wm_get_auto_rd_done(sc);
4019 break;
4020 case WM_T_ICH8:
4021 case WM_T_ICH9:
4022 case WM_T_ICH10:
4023 case WM_T_PCH:
4024 case WM_T_PCH2:
4025 case WM_T_PCH_LPT:
4026 break;
4027 default:
4028 panic("%s: unknown type\n", __func__);
4029 }
4030
4031 /* Check whether EEPROM is present or not */
4032 switch (sc->sc_type) {
4033 case WM_T_82575:
4034 case WM_T_82576:
4035 case WM_T_82580:
4036 case WM_T_I350:
4037 case WM_T_I354:
4038 case WM_T_ICH8:
4039 case WM_T_ICH9:
4040 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4041 /* Not found */
4042 sc->sc_flags |= WM_F_EEPROM_INVALID;
4043 if (sc->sc_type == WM_T_82575)
4044 wm_reset_init_script_82575(sc);
4045 }
4046 break;
4047 default:
4048 break;
4049 }
4050
4051 if ((sc->sc_type == WM_T_82580)
4052 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4053 /* clear global device reset status bit */
4054 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4055 }
4056
4057 /* Clear any pending interrupt events. */
4058 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4059 reg = CSR_READ(sc, WMREG_ICR);
4060 if (sc->sc_nintrs > 1) {
4061 if (sc->sc_type != WM_T_82574) {
4062 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4063 CSR_WRITE(sc, WMREG_EIAC, 0);
4064 } else
4065 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4066 }
4067
4068 /* reload sc_ctrl */
4069 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4070
4071 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4072 wm_set_eee_i350(sc);
4073
4074 /* dummy read from WUC */
4075 if (sc->sc_type == WM_T_PCH)
4076 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4077 /*
4078 * For PCH, this write will make sure that any noise will be detected
4079 * as a CRC error and be dropped rather than show up as a bad packet
4080 * to the DMA engine.
4081 */
4082 if (sc->sc_type == WM_T_PCH)
4083 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4084
4085 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4086 CSR_WRITE(sc, WMREG_WUC, 0);
4087
4088 wm_reset_mdicnfg_82580(sc);
4089
4090 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4091 wm_pll_workaround_i210(sc);
4092 }
4093
4094 /*
4095 * wm_add_rxbuf:
4096 *
4097 * Add a receive buffer to the indicated descriptor.
4098 */
4099 static int
4100 wm_add_rxbuf(struct wm_softc *sc, int idx)
4101 {
4102 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4103 struct mbuf *m;
4104 int error;
4105
4106 KASSERT(WM_RX_LOCKED(sc));
4107
4108 MGETHDR(m, M_DONTWAIT, MT_DATA);
4109 if (m == NULL)
4110 return ENOBUFS;
4111
4112 MCLGET(m, M_DONTWAIT);
4113 if ((m->m_flags & M_EXT) == 0) {
4114 m_freem(m);
4115 return ENOBUFS;
4116 }
4117
4118 if (rxs->rxs_mbuf != NULL)
4119 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4120
4121 rxs->rxs_mbuf = m;
4122
4123 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4124 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4125 BUS_DMA_READ|BUS_DMA_NOWAIT);
4126 if (error) {
4127 /* XXX XXX XXX */
4128 aprint_error_dev(sc->sc_dev,
4129 "unable to load rx DMA map %d, error = %d\n",
4130 idx, error);
4131 panic("wm_add_rxbuf");
4132 }
4133
4134 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4135 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4136
4137 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4138 if ((sc->sc_rctl & RCTL_EN) != 0)
4139 WM_INIT_RXDESC(sc, idx);
4140 } else
4141 WM_INIT_RXDESC(sc, idx);
4142
4143 return 0;
4144 }
4145
4146 /*
4147 * wm_rxdrain:
4148 *
4149 * Drain the receive queue.
4150 */
4151 static void
4152 wm_rxdrain(struct wm_softc *sc)
4153 {
4154 struct wm_rxsoft *rxs;
4155 int i;
4156
4157 KASSERT(WM_RX_LOCKED(sc));
4158
4159 for (i = 0; i < WM_NRXDESC; i++) {
4160 rxs = &sc->sc_rxsoft[i];
4161 if (rxs->rxs_mbuf != NULL) {
4162 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4163 m_freem(rxs->rxs_mbuf);
4164 rxs->rxs_mbuf = NULL;
4165 }
4166 }
4167 }
4168
4169 /*
4170 * wm_init: [ifnet interface function]
4171 *
4172 * Initialize the interface.
4173 */
4174 static int
4175 wm_init(struct ifnet *ifp)
4176 {
4177 struct wm_softc *sc = ifp->if_softc;
4178 int ret;
4179
4180 WM_BOTH_LOCK(sc);
4181 ret = wm_init_locked(ifp);
4182 WM_BOTH_UNLOCK(sc);
4183
4184 return ret;
4185 }
4186
4187 static int
4188 wm_init_locked(struct ifnet *ifp)
4189 {
4190 struct wm_softc *sc = ifp->if_softc;
4191 struct wm_rxsoft *rxs;
4192 int i, j, trynum, error = 0;
4193 uint32_t reg;
4194
4195 KASSERT(WM_BOTH_LOCKED(sc));
4196 /*
4197 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4198 * There is a small but measurable benefit to avoiding the adjustment
4199 * of the descriptor so that the headers are aligned, for normal mtu,
4200 * on such platforms. One possibility is that the DMA itself is
4201 * slightly more efficient if the front of the entire packet (instead
4202 * of the front of the headers) is aligned.
4203 *
4204 * Note we must always set align_tweak to 0 if we are using
4205 * jumbo frames.
4206 */
4207 #ifdef __NO_STRICT_ALIGNMENT
4208 sc->sc_align_tweak = 0;
4209 #else
4210 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4211 sc->sc_align_tweak = 0;
4212 else
4213 sc->sc_align_tweak = 2;
4214 #endif /* __NO_STRICT_ALIGNMENT */
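/*
 * E.g., with sc_align_tweak = 2, the 14-byte Ethernet header ends at
 * buffer offset 16, so the IP header that follows is 4-byte aligned
 * on strict-alignment platforms.
 */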
4215
4216 /* Cancel any pending I/O. */
4217 wm_stop_locked(ifp, 0);
4218
4219 /* update statistics before reset */
4220 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4221 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4222
4223 /* Reset the chip to a known state. */
4224 wm_reset(sc);
4225
4226 switch (sc->sc_type) {
4227 case WM_T_82571:
4228 case WM_T_82572:
4229 case WM_T_82573:
4230 case WM_T_82574:
4231 case WM_T_82583:
4232 case WM_T_80003:
4233 case WM_T_ICH8:
4234 case WM_T_ICH9:
4235 case WM_T_ICH10:
4236 case WM_T_PCH:
4237 case WM_T_PCH2:
4238 case WM_T_PCH_LPT:
4239 if (wm_check_mng_mode(sc) != 0)
4240 wm_get_hw_control(sc);
4241 break;
4242 default:
4243 break;
4244 }
4245
4246 /* Init hardware bits */
4247 wm_initialize_hardware_bits(sc);
4248
4249 /* Reset the PHY. */
4250 if (sc->sc_flags & WM_F_HAS_MII)
4251 wm_gmii_reset(sc);
4252
4253 /* Calculate (E)ITR value */
4254 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4255 sc->sc_itr = 450; /* For EITR */
4256 } else if (sc->sc_type >= WM_T_82543) {
4257 /*
4258 * Set up the interrupt throttling register (units of 256ns)
4259 * Note that a footnote in Intel's documentation says this
4260 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4261 * or 10Mbit mode. Empirically, it appears to be the case
4262 * that that is also true for the 1024ns units of the other
4263 * interrupt-related timer registers -- so, really, we ought
4264 * to divide this value by 4 when the link speed is low.
4265 *
4266 * XXX implement this division at link speed change!
4267 */
4268
4269 /*
4270 * For N interrupts/sec, set this value to:
4271 * 1000000000 / (N * 256). Note that we set the
4272 * absolute and packet timer values to this value
4273 * divided by 4 to get "simple timer" behavior.
4274 */
4275
4276 sc->sc_itr = 1500; /* 2604 ints/sec */
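/*
 * Worked example: 1000000000 / (1500 * 256) ~= 2604
 * interrupts/sec, which is where the figure in the
 * comment above comes from.
 */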
4277 }
4278
4279 /* Initialize the transmit descriptor ring. */
4280 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4281 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4282 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4283 sc->sc_txfree = WM_NTXDESC(sc);
4284 sc->sc_txnext = 0;
4285
4286 if (sc->sc_type < WM_T_82543) {
4287 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4288 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4289 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4290 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4291 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4292 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4293 } else {
4294 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4295 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4296 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4297 CSR_WRITE(sc, WMREG_TDH, 0);
4298
4299 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4300 /*
4301 * Don't write TDT before TCTL.EN is set.
4302 * See the document.
4303 */
4304 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
4305 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4306 | TXDCTL_WTHRESH(0));
4307 else {
4308 /* ITR / 4 */
4309 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
4310 if (sc->sc_type >= WM_T_82540) {
4311 /* should be same */
4312 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
4313 }
4314
4315 CSR_WRITE(sc, WMREG_TDT, 0);
4316 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
4317 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4318 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4319 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4320 }
4321 }
4322
4323 /* Initialize the transmit job descriptors. */
4324 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4325 sc->sc_txsoft[i].txs_mbuf = NULL;
4326 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4327 sc->sc_txsnext = 0;
4328 sc->sc_txsdirty = 0;
4329
4330 /*
4331 * Initialize the receive descriptor and receive job
4332 * descriptor rings.
4333 */
4334 if (sc->sc_type < WM_T_82543) {
4335 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4336 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4337 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4338 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4339 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4340 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4341
4342 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4343 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4344 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4345 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4346 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4347 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4348 } else {
4349 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4350 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4351 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4352
4353 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4354 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4355 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4356 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4357 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
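/*
 * SRRCTL's BSIZEPKT field counts in units of
 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, hence the panic above
 * when MCLBYTES isn't a multiple of that unit. E.g., assuming
 * a shift of 10 (1KB units), 2048-byte clusters program a
 * buffer size value of 2.
 */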
4358 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4359 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4360 | RXDCTL_WTHRESH(1));
4361 } else {
4362 CSR_WRITE(sc, WMREG_RDH, 0);
4363 CSR_WRITE(sc, WMREG_RDT, 0);
4364 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4365 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
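/*
 * 375 == sc_itr (1500) / 4. These registers tick in 1024ns
 * units, 4x the ITR's 256ns unit (see the note above), so both
 * describe the same ~384us interval.
 */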
4366 }
4367 }
4368 for (i = 0; i < WM_NRXDESC; i++) {
4369 rxs = &sc->sc_rxsoft[i];
4370 if (rxs->rxs_mbuf == NULL) {
4371 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4372 log(LOG_ERR, "%s: unable to allocate or map "
4373 "rx buffer %d, error = %d\n",
4374 device_xname(sc->sc_dev), i, error);
4375 /*
4376 * XXX Should attempt to run with fewer receive
4377 * XXX buffers instead of just failing.
4378 */
4379 wm_rxdrain(sc);
4380 goto out;
4381 }
4382 } else {
4383 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4384 WM_INIT_RXDESC(sc, i);
4385 /*
4386 * For 82575 and newer devices, the RX descriptors
4387 * must be initialized after the setting of RCTL.EN in
4388 * wm_set_filter()
4389 */
4390 }
4391 }
4392 sc->sc_rxptr = 0;
4393 sc->sc_rxdiscard = 0;
4394 WM_RXCHAIN_RESET(sc);
4395
4396 /*
4397 * Clear out the VLAN table -- we don't use it (yet).
4398 */
4399 CSR_WRITE(sc, WMREG_VET, 0);
4400 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4401 trynum = 10; /* Due to hw errata */
4402 else
4403 trynum = 1;
4404 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4405 for (j = 0; j < trynum; j++)
4406 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4407
4408 /*
4409 * Set up flow-control parameters.
4410 *
4411 * XXX Values could probably stand some tuning.
4412 */
4413 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4414 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4415 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4416 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4417 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4418 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4419 }
4420
4421 sc->sc_fcrtl = FCRTL_DFLT;
4422 if (sc->sc_type < WM_T_82543) {
4423 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4424 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4425 } else {
4426 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4427 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4428 }
4429
4430 if (sc->sc_type == WM_T_80003)
4431 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4432 else
4433 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4434
4435 /* wm_set_vlan() writes the control register. */
4436 wm_set_vlan(sc);
4437
4438 if (sc->sc_flags & WM_F_HAS_MII) {
4439 int val;
4440
4441 switch (sc->sc_type) {
4442 case WM_T_80003:
4443 case WM_T_ICH8:
4444 case WM_T_ICH9:
4445 case WM_T_ICH10:
4446 case WM_T_PCH:
4447 case WM_T_PCH2:
4448 case WM_T_PCH_LPT:
4449 /*
4450 * Set the mac to wait the maximum time between each
4451 * iteration and increase the max iterations when
4452 * polling the phy; this fixes erroneous timeouts at
4453 * 10Mbps.
4454 */
4455 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4456 0xFFFF);
4457 val = wm_kmrn_readreg(sc,
4458 KUMCTRLSTA_OFFSET_INB_PARAM);
4459 val |= 0x3F;
4460 wm_kmrn_writereg(sc,
4461 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4462 break;
4463 default:
4464 break;
4465 }
4466
4467 if (sc->sc_type == WM_T_80003) {
4468 val = CSR_READ(sc, WMREG_CTRL_EXT);
4469 val &= ~CTRL_EXT_LINK_MODE_MASK;
4470 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4471
4472 /* Bypass RX and TX FIFO's */
4473 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4474 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4475 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4476 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4477 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4478 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4479 }
4480 }
4481 #if 0
4482 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4483 #endif
4484
4485 /* Set up checksum offload parameters. */
4486 reg = CSR_READ(sc, WMREG_RXCSUM);
4487 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4488 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4489 reg |= RXCSUM_IPOFL;
4490 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4491 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4492 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4493 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4494 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4495
4496 /* Set up MSI-X */
4497 if (sc->sc_nintrs > 1) {
4498 uint32_t ivar;
4499
4500 if (sc->sc_type == WM_T_82575) {
4501 /* Interrupt control */
4502 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4503 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4504 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4505
4506 /* TX */
4507 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_TXINTR_IDX),
4508 EITR_TX_QUEUE0);
4509 /* RX */
4510 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_RXINTR_IDX),
4511 EITR_RX_QUEUE0);
4512 /* Link status */
4513 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_LINKINTR_IDX),
4514 EITR_OTHER);
4515 } else if (sc->sc_type == WM_T_82574) {
4516 /* Interrupt control */
4517 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4518 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4519 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4520
4521 /* TX, RX and Link status */
4522 ivar = __SHIFTIN((IVAR_VALID_82574|WM_MSIX_TXINTR_IDX),
4523 IVAR_TX_MASK_Q_82574(0));
4524 ivar |= __SHIFTIN((IVAR_VALID_82574
4525 | WM_MSIX_RXINTR_IDX),
4526 IVAR_RX_MASK_Q_82574(0));
4527 ivar |= __SHIFTIN((IVAR_VALID_82574|WM_MSIX_LINKINTR_IDX),
4528 IVAR_OTHER_MASK);
4529 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4530 } else {
4531 /* Interrupt control */
4532 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
4533 | GPIE_MULTI_MSIX | GPIE_EIAME
4534 | GPIE_PBA);
4535
4536 switch (sc->sc_type) {
4537 case WM_T_82580:
4538 case WM_T_I350:
4539 case WM_T_I354:
4540 case WM_T_I210:
4541 case WM_T_I211:
4542 /* TX */
4543 ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
4544 ivar &= ~IVAR_TX_MASK_Q(0);
4545 ivar |= __SHIFTIN(
4546 (WM_MSIX_TXINTR_IDX | IVAR_VALID),
4547 IVAR_TX_MASK_Q(0));
4548 CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
4549
4550 /* RX */
4551 ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
4552 ivar &= ~IVAR_RX_MASK_Q(0);
4553 ivar |= __SHIFTIN(
4554 (WM_MSIX_RXINTR_IDX | IVAR_VALID),
4555 IVAR_RX_MASK_Q(0));
4556 CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
4557 break;
4558 case WM_T_82576:
4559 /* TX */
4560 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
4561 ivar &= ~IVAR_TX_MASK_Q_82576(0);
4562 ivar |= __SHIFTIN(
4563 (WM_MSIX_TXINTR_IDX | IVAR_VALID),
4564 IVAR_TX_MASK_Q_82576(0));
4565 CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
4566
4567 /* RX */
4568 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
4569 ivar &= ~IVAR_RX_MASK_Q_82576(0);
4570 ivar |= __SHIFTIN(
4571 (WM_MSIX_RXINTR_IDX | IVAR_VALID),
4572 IVAR_RX_MASK_Q_82576(0));
4573 CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
4574 break;
4575 default:
4576 break;
4577 }
4578
4579 /* Link status */
4580 ivar = __SHIFTIN((WM_MSIX_LINKINTR_IDX | IVAR_VALID),
4581 IVAR_MISC_OTHER);
4582 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4583 }
4584 }
4585
4586 /* Set up the interrupt registers. */
4587 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4588 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4589 ICR_RXO | ICR_RXT0;
4590 if (sc->sc_nintrs > 1) {
4591 uint32_t mask;
4592 switch (sc->sc_type) {
4593 case WM_T_82574:
4594 CSR_WRITE(sc, WMREG_EIAC_82574,
4595 WMREG_EIAC_82574_MSIX_MASK);
4596 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4597 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4598 break;
4599 default:
4600 if (sc->sc_type == WM_T_82575)
4601 mask = EITR_RX_QUEUE0 | EITR_TX_QUEUE0
4602 | EITR_OTHER;
4603 else
4604 mask = (1 << WM_MSIX_RXINTR_IDX)
4605 | (1 << WM_MSIX_TXINTR_IDX)
4606 | (1 << WM_MSIX_LINKINTR_IDX);
4607 CSR_WRITE(sc, WMREG_EIAC, mask);
4608 CSR_WRITE(sc, WMREG_EIAM, mask);
4609 CSR_WRITE(sc, WMREG_EIMS, mask);
4610 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4611 break;
4612 }
4613 } else
4614 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4615
4616 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4617 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4618 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4619 reg = CSR_READ(sc, WMREG_KABGTXD);
4620 reg |= KABGTXD_BGSQLBIAS;
4621 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4622 }
4623
4624 /* Set up the inter-packet gap. */
4625 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4626
4627 if (sc->sc_type >= WM_T_82543) {
4628 /*
4629 * XXX 82574 has both ITR and EITR. Set EITR when we use
4630 * the multi queue function with MSI-X.
4631 */
4632 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4633 CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
4634 else
4635 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4636 }
4637
4638 /* Set the VLAN ethernetype. */
4639 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4640
4641 /*
4642 * Set up the transmit control register; we start out with
4643 * a collision distance suitable for FDX, but update it when
4644 * we resolve the media type.
4645 */
4646 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4647 | TCTL_CT(TX_COLLISION_THRESHOLD)
4648 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4649 if (sc->sc_type >= WM_T_82571)
4650 sc->sc_tctl |= TCTL_MULR;
4651 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4652
4653 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4654 /* Write TDT after TCTL.EN is set. See the document. */
4655 CSR_WRITE(sc, WMREG_TDT, 0);
4656 }
4657
4658 if (sc->sc_type == WM_T_80003) {
4659 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4660 reg &= ~TCTL_EXT_GCEX_MASK;
4661 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4662 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4663 }
4664
4665 /* Set the media. */
4666 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4667 goto out;
4668
4669 /* Configure for OS presence */
4670 wm_init_manageability(sc);
4671
4672 /*
4673 * Set up the receive control register; we actually program
4674 * the register when we set the receive filter. Use multicast
4675 * address offset type 0.
4676 *
4677 * Only the i82544 has the ability to strip the incoming
4678 * CRC, so we don't enable that feature.
4679 */
4680 sc->sc_mchash_type = 0;
4681 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4682 | RCTL_MO(sc->sc_mchash_type);
4683
4684 /*
4685 * The I350 has a bug where it always strips the CRC whether
4686 * asked to or not. So ask for stripped CRC here and cope in rxeof.
4687 */
4688 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4689 || (sc->sc_type == WM_T_I210))
4690 sc->sc_rctl |= RCTL_SECRC;
4691
4692 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4693 && (ifp->if_mtu > ETHERMTU)) {
4694 sc->sc_rctl |= RCTL_LPE;
4695 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4696 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4697 }
4698
4699 if (MCLBYTES == 2048) {
4700 sc->sc_rctl |= RCTL_2k;
4701 } else {
4702 if (sc->sc_type >= WM_T_82543) {
4703 switch (MCLBYTES) {
4704 case 4096:
4705 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4706 break;
4707 case 8192:
4708 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4709 break;
4710 case 16384:
4711 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4712 break;
4713 default:
4714 panic("wm_init: MCLBYTES %d unsupported",
4715 MCLBYTES);
4716 break;
4717 }
4718 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4719 }
4720
4721 /* Set the receive filter. */
4722 wm_set_filter(sc);
4723
4724 /* Enable ECC */
4725 switch (sc->sc_type) {
4726 case WM_T_82571:
4727 reg = CSR_READ(sc, WMREG_PBA_ECC);
4728 reg |= PBA_ECC_CORR_EN;
4729 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4730 break;
4731 case WM_T_PCH_LPT:
4732 reg = CSR_READ(sc, WMREG_PBECCSTS);
4733 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4734 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4735
4736 reg = CSR_READ(sc, WMREG_CTRL);
4737 reg |= CTRL_MEHE;
4738 CSR_WRITE(sc, WMREG_CTRL, reg);
4739 break;
4740 default:
4741 break;
4742 }
4743
4744 /* On 82575 and later, set RDT only if RX is enabled */
4745 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4746 for (i = 0; i < WM_NRXDESC; i++)
4747 WM_INIT_RXDESC(sc, i);
4748
4749 sc->sc_stopping = false;
4750
4751 /* Start the one second link check clock. */
4752 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4753
4754 /* ...all done! */
4755 ifp->if_flags |= IFF_RUNNING;
4756 ifp->if_flags &= ~IFF_OACTIVE;
4757
4758 out:
4759 sc->sc_if_flags = ifp->if_flags;
4760 if (error)
4761 log(LOG_ERR, "%s: interface not running\n",
4762 device_xname(sc->sc_dev));
4763 return error;
4764 }
4765
4766 /*
4767 * wm_stop: [ifnet interface function]
4768 *
4769 * Stop transmission on the interface.
4770 */
4771 static void
4772 wm_stop(struct ifnet *ifp, int disable)
4773 {
4774 struct wm_softc *sc = ifp->if_softc;
4775
4776 WM_BOTH_LOCK(sc);
4777 wm_stop_locked(ifp, disable);
4778 WM_BOTH_UNLOCK(sc);
4779 }
4780
4781 static void
4782 wm_stop_locked(struct ifnet *ifp, int disable)
4783 {
4784 struct wm_softc *sc = ifp->if_softc;
4785 struct wm_txsoft *txs;
4786 int i;
4787
4788 KASSERT(WM_BOTH_LOCKED(sc));
4789
4790 sc->sc_stopping = true;
4791
4792 /* Stop the one second clock. */
4793 callout_stop(&sc->sc_tick_ch);
4794
4795 /* Stop the 82547 Tx FIFO stall check timer. */
4796 if (sc->sc_type == WM_T_82547)
4797 callout_stop(&sc->sc_txfifo_ch);
4798
4799 if (sc->sc_flags & WM_F_HAS_MII) {
4800 /* Down the MII. */
4801 mii_down(&sc->sc_mii);
4802 } else {
4803 #if 0
4804 /* Should we clear PHY's status properly? */
4805 wm_reset(sc);
4806 #endif
4807 }
4808
4809 /* Stop the transmit and receive processes. */
4810 CSR_WRITE(sc, WMREG_TCTL, 0);
4811 CSR_WRITE(sc, WMREG_RCTL, 0);
4812 sc->sc_rctl &= ~RCTL_EN;
4813
4814 /*
4815 * Clear the interrupt mask to ensure the device cannot assert its
4816 * interrupt line.
4817 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
4818 * service any currently pending or shared interrupt.
4819 */
4820 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4821 sc->sc_icr = 0;
4822 if (sc->sc_nintrs > 1) {
4823 if (sc->sc_type != WM_T_82574) {
4824 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4825 CSR_WRITE(sc, WMREG_EIAC, 0);
4826 } else
4827 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4828 }
4829
4830 /* Release any queued transmit buffers. */
4831 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4832 txs = &sc->sc_txsoft[i];
4833 if (txs->txs_mbuf != NULL) {
4834 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4835 m_freem(txs->txs_mbuf);
4836 txs->txs_mbuf = NULL;
4837 }
4838 }
4839
4840 /* Mark the interface as down and cancel the watchdog timer. */
4841 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4842 ifp->if_timer = 0;
4843
4844 if (disable)
4845 wm_rxdrain(sc);
4846
4847 #if 0 /* notyet */
4848 if (sc->sc_type >= WM_T_82544)
4849 CSR_WRITE(sc, WMREG_WUC, 0);
4850 #endif
4851 }
4852
4853 /*
4854 * wm_tx_offload:
4855 *
4856 * Set up TCP/IP checksumming parameters for the
4857 * specified packet.
4858 */
4859 static int
4860 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4861 uint8_t *fieldsp)
4862 {
4863 struct mbuf *m0 = txs->txs_mbuf;
4864 struct livengood_tcpip_ctxdesc *t;
4865 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4866 uint32_t ipcse;
4867 struct ether_header *eh;
4868 int offset, iphl;
4869 uint8_t fields;
4870
4871 /*
4872 * XXX It would be nice if the mbuf pkthdr had offset
4873 * fields for the protocol headers.
4874 */
4875
4876 eh = mtod(m0, struct ether_header *);
4877 switch (htons(eh->ether_type)) {
4878 case ETHERTYPE_IP:
4879 case ETHERTYPE_IPV6:
4880 offset = ETHER_HDR_LEN;
4881 break;
4882
4883 case ETHERTYPE_VLAN:
4884 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4885 break;
4886
4887 default:
4888 /*
4889 * Don't support this protocol or encapsulation.
4890 */
4891 *fieldsp = 0;
4892 *cmdp = 0;
4893 return 0;
4894 }
4895
4896 if ((m0->m_pkthdr.csum_flags &
4897 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4898 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4899 } else {
4900 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4901 }
4902 ipcse = offset + iphl - 1;
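/*
 * E.g., for an untagged IPv4 packet with a 20-byte header:
 * offset = 14, iphl = 20, so ipcse = 33, the offset of the last
 * byte covered by the IP checksum.
 */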
4903
4904 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4905 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4906 seg = 0;
4907 fields = 0;
4908
4909 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4910 int hlen = offset + iphl;
4911 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4912
4913 if (__predict_false(m0->m_len <
4914 (hlen + sizeof(struct tcphdr)))) {
4915 /*
4916 * TCP/IP headers are not in the first mbuf; we need
4917 * to do this the slow and painful way. Let's just
4918 * hope this doesn't happen very often.
4919 */
4920 struct tcphdr th;
4921
4922 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4923
4924 m_copydata(m0, hlen, sizeof(th), &th);
4925 if (v4) {
4926 struct ip ip;
4927
4928 m_copydata(m0, offset, sizeof(ip), &ip);
4929 ip.ip_len = 0;
4930 m_copyback(m0,
4931 offset + offsetof(struct ip, ip_len),
4932 sizeof(ip.ip_len), &ip.ip_len);
4933 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4934 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4935 } else {
4936 struct ip6_hdr ip6;
4937
4938 m_copydata(m0, offset, sizeof(ip6), &ip6);
4939 ip6.ip6_plen = 0;
4940 m_copyback(m0,
4941 offset + offsetof(struct ip6_hdr, ip6_plen),
4942 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4943 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4944 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4945 }
4946 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4947 sizeof(th.th_sum), &th.th_sum);
4948
4949 hlen += th.th_off << 2;
4950 } else {
4951 /*
4952 * TCP/IP headers are in the first mbuf; we can do
4953 * this the easy way.
4954 */
4955 struct tcphdr *th;
4956
4957 if (v4) {
4958 struct ip *ip =
4959 (void *)(mtod(m0, char *) + offset);
4960 th = (void *)(mtod(m0, char *) + hlen);
4961
4962 ip->ip_len = 0;
4963 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4964 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4965 } else {
4966 struct ip6_hdr *ip6 =
4967 (void *)(mtod(m0, char *) + offset);
4968 th = (void *)(mtod(m0, char *) + hlen);
4969
4970 ip6->ip6_plen = 0;
4971 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4972 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4973 }
4974 hlen += th->th_off << 2;
4975 }
4976
4977 if (v4) {
4978 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4979 cmdlen |= WTX_TCPIP_CMD_IP;
4980 } else {
4981 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4982 ipcse = 0;
4983 }
4984 cmd |= WTX_TCPIP_CMD_TSE;
4985 cmdlen |= WTX_TCPIP_CMD_TSE |
4986 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4987 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4988 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4989 }
4990
4991 /*
4992 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4993 * offload feature, if we load the context descriptor, we
4994 * MUST provide valid values for IPCSS and TUCSS fields.
4995 */
4996
4997 ipcs = WTX_TCPIP_IPCSS(offset) |
4998 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4999 WTX_TCPIP_IPCSE(ipcse);
5000 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
5001 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
5002 fields |= WTX_IXSM;
5003 }
5004
5005 offset += iphl;
5006
5007 if (m0->m_pkthdr.csum_flags &
5008 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
5009 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5010 fields |= WTX_TXSM;
5011 tucs = WTX_TCPIP_TUCSS(offset) |
5012 WTX_TCPIP_TUCSO(offset +
5013 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
5014 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5015 } else if ((m0->m_pkthdr.csum_flags &
5016 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
5017 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5018 fields |= WTX_TXSM;
5019 tucs = WTX_TCPIP_TUCSS(offset) |
5020 WTX_TCPIP_TUCSO(offset +
5021 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
5022 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5023 } else {
5024 /* Just initialize it to a valid TCP context. */
5025 tucs = WTX_TCPIP_TUCSS(offset) |
5026 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
5027 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5028 }
5029
5030 /* Fill in the context descriptor. */
5031 t = (struct livengood_tcpip_ctxdesc *)
5032 &sc->sc_txdescs[sc->sc_txnext];
5033 t->tcpip_ipcs = htole32(ipcs);
5034 t->tcpip_tucs = htole32(tucs);
5035 t->tcpip_cmdlen = htole32(cmdlen);
5036 t->tcpip_seg = htole32(seg);
5037 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5038
5039 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5040 txs->txs_ndesc++;
5041
5042 *cmdp = cmd;
5043 *fieldsp = fields;
5044
5045 return 0;
5046 }
5047
5048 static void
5049 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5050 {
5051 struct mbuf *m;
5052 int i;
5053
5054 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5055 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5056 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5057 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5058 m->m_data, m->m_len, m->m_flags);
5059 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5060 i, i == 1 ? "" : "s");
5061 }
5062
5063 /*
5064 * wm_82547_txfifo_stall:
5065 *
5066 * Callout used to wait for the 82547 Tx FIFO to drain,
5067 * reset the FIFO pointers, and restart packet transmission.
5068 */
5069 static void
5070 wm_82547_txfifo_stall(void *arg)
5071 {
5072 struct wm_softc *sc = arg;
5073 #ifndef WM_MPSAFE
5074 int s;
5075
5076 s = splnet();
5077 #endif
5078 WM_TX_LOCK(sc);
5079
5080 if (sc->sc_stopping)
5081 goto out;
5082
5083 if (sc->sc_txfifo_stall) {
5084 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
5085 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5086 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5087 /*
5088 * Packets have drained. Stop transmitter, reset
5089 * FIFO pointers, restart transmitter, and kick
5090 * the packet queue.
5091 */
5092 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5093 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5094 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
5095 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
5096 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
5097 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
5098 CSR_WRITE(sc, WMREG_TCTL, tctl);
5099 CSR_WRITE_FLUSH(sc);
5100
5101 sc->sc_txfifo_head = 0;
5102 sc->sc_txfifo_stall = 0;
5103 wm_start_locked(&sc->sc_ethercom.ec_if);
5104 } else {
5105 /*
5106 * Still waiting for packets to drain; try again in
5107 * another tick.
5108 */
5109 callout_schedule(&sc->sc_txfifo_ch, 1);
5110 }
5111 }
5112
5113 out:
5114 WM_TX_UNLOCK(sc);
5115 #ifndef WM_MPSAFE
5116 splx(s);
5117 #endif
5118 }
5119
5120 /*
5121 * wm_82547_txfifo_bugchk:
5122 *
5123 * Check for bug condition in the 82547 Tx FIFO. We need to
5124 * prevent enqueueing a packet that would wrap around the end
5125 * of the Tx FIFO ring buffer; otherwise the chip will croak.
5126 *
5127 * We do this by checking the amount of space before the end
5128 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5129 * the Tx FIFO, wait for all remaining packets to drain, reset
5130 * the internal FIFO pointers to the beginning, and restart
5131 * transmission on the interface.
5132 */
5133 #define WM_FIFO_HDR 0x10
5134 #define WM_82547_PAD_LEN 0x3e0
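/*
 * E.g., a 1514-byte frame occupies roundup(1514 + WM_FIFO_HDR,
 * WM_FIFO_HDR) = 1536 bytes of FIFO space once the 16-byte header
 * and padding are accounted for.
 */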
5135 static int
5136 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5137 {
5138 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
5139 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5140
5141 /* Just return if already stalled. */
5142 if (sc->sc_txfifo_stall)
5143 return 1;
5144
5145 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5146 /* Stall only occurs in half-duplex mode. */
5147 goto send_packet;
5148 }
5149
5150 if (len >= WM_82547_PAD_LEN + space) {
5151 sc->sc_txfifo_stall = 1;
5152 callout_schedule(&sc->sc_txfifo_ch, 1);
5153 return 1;
5154 }
5155
5156 send_packet:
5157 sc->sc_txfifo_head += len;
5158 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
5159 sc->sc_txfifo_head -= sc->sc_txfifo_size;
5160
5161 return 0;
5162 }
5163
5164 /*
5165 * wm_start: [ifnet interface function]
5166 *
5167 * Start packet transmission on the interface.
5168 */
5169 static void
5170 wm_start(struct ifnet *ifp)
5171 {
5172 struct wm_softc *sc = ifp->if_softc;
5173
5174 WM_TX_LOCK(sc);
5175 if (!sc->sc_stopping)
5176 wm_start_locked(ifp);
5177 WM_TX_UNLOCK(sc);
5178 }
5179
5180 static void
5181 wm_start_locked(struct ifnet *ifp)
5182 {
5183 struct wm_softc *sc = ifp->if_softc;
5184 struct mbuf *m0;
5185 struct m_tag *mtag;
5186 struct wm_txsoft *txs;
5187 bus_dmamap_t dmamap;
5188 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
5189 bus_addr_t curaddr;
5190 bus_size_t seglen, curlen;
5191 uint32_t cksumcmd;
5192 uint8_t cksumfields;
5193
5194 KASSERT(WM_TX_LOCKED(sc));
5195
5196 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5197 return;
5198
5199 /* Remember the previous number of free descriptors. */
5200 ofree = sc->sc_txfree;
5201
5202 /*
5203 * Loop through the send queue, setting up transmit descriptors
5204 * until we drain the queue, or use up all available transmit
5205 * descriptors.
5206 */
5207 for (;;) {
5208 m0 = NULL;
5209
5210 /* Get a work queue entry. */
5211 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5212 wm_txeof(sc);
5213 if (sc->sc_txsfree == 0) {
5214 DPRINTF(WM_DEBUG_TX,
5215 ("%s: TX: no free job descriptors\n",
5216 device_xname(sc->sc_dev)));
5217 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5218 break;
5219 }
5220 }
5221
5222 /* Grab a packet off the queue. */
5223 IFQ_DEQUEUE(&ifp->if_snd, m0);
5224 if (m0 == NULL)
5225 break;
5226
5227 DPRINTF(WM_DEBUG_TX,
5228 ("%s: TX: have packet to transmit: %p\n",
5229 device_xname(sc->sc_dev), m0));
5230
5231 txs = &sc->sc_txsoft[sc->sc_txsnext];
5232 dmamap = txs->txs_dmamap;
5233
5234 use_tso = (m0->m_pkthdr.csum_flags &
5235 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
5236
5237 /*
5238 * So says the Linux driver:
5239 * The controller does a simple calculation to make sure
5240 * there is enough room in the FIFO before initiating the
5241 * DMA for each buffer. The calc is:
5242 * 4 = ceil(buffer len / MSS)
5243 * To make sure we don't overrun the FIFO, adjust the max
5244 * buffer len if the MSS drops.
5245 */
5246 dmamap->dm_maxsegsz =
5247 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
5248 ? m0->m_pkthdr.segsz << 2
5249 : WTX_MAX_LEN;
5250
5251 /*
5252 * Load the DMA map. If this fails, the packet either
5253 * didn't fit in the allotted number of segments, or we
5254 * were short on resources. For the too-many-segments
5255 * case, we simply report an error and drop the packet,
5256 * since we can't sanely copy a jumbo packet to a single
5257 * buffer.
5258 */
5259 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5260 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5261 if (error) {
5262 if (error == EFBIG) {
5263 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5264 log(LOG_ERR, "%s: Tx packet consumes too many "
5265 "DMA segments, dropping...\n",
5266 device_xname(sc->sc_dev));
5267 wm_dump_mbuf_chain(sc, m0);
5268 m_freem(m0);
5269 continue;
5270 }
5271 /* Short on resources, just stop for now. */
5272 DPRINTF(WM_DEBUG_TX,
5273 ("%s: TX: dmamap load failed: %d\n",
5274 device_xname(sc->sc_dev), error));
5275 break;
5276 }
5277
5278 segs_needed = dmamap->dm_nsegs;
5279 if (use_tso) {
5280 /* For sentinel descriptor; see below. */
5281 segs_needed++;
5282 }
5283
5284 /*
5285 * Ensure we have enough descriptors free to describe
5286 * the packet. Note, we always reserve one descriptor
5287 * at the end of the ring due to the semantics of the
5288 * TDT register, plus one more in the event we need
5289 * to load offload context.
5290 */
5291 if (segs_needed > sc->sc_txfree - 2) {
5292 /*
5293 * Not enough free descriptors to transmit this
5294 * packet. We haven't committed anything yet,
5295 * so just unload the DMA map, put the packet
5296 * back on the queue, and punt. Notify the upper
5297 * layer that there are no more slots left.
5298 */
5299 DPRINTF(WM_DEBUG_TX,
5300 ("%s: TX: need %d (%d) descriptors, have %d\n",
5301 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5302 segs_needed, sc->sc_txfree - 1));
5303 ifp->if_flags |= IFF_OACTIVE;
5304 bus_dmamap_unload(sc->sc_dmat, dmamap);
5305 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5306 break;
5307 }
5308
5309 /*
5310 * Check for 82547 Tx FIFO bug. We need to do this
5311 * once we know we can transmit the packet, since we
5312 * do some internal FIFO space accounting here.
5313 */
5314 if (sc->sc_type == WM_T_82547 &&
5315 wm_82547_txfifo_bugchk(sc, m0)) {
5316 DPRINTF(WM_DEBUG_TX,
5317 ("%s: TX: 82547 Tx FIFO bug detected\n",
5318 device_xname(sc->sc_dev)));
5319 ifp->if_flags |= IFF_OACTIVE;
5320 bus_dmamap_unload(sc->sc_dmat, dmamap);
5321 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
5322 break;
5323 }
5324
5325 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5326
5327 DPRINTF(WM_DEBUG_TX,
5328 ("%s: TX: packet has %d (%d) DMA segments\n",
5329 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5330
5331 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5332
5333 /*
5334 * Store a pointer to the packet so that we can free it
5335 * later.
5336 *
5337 * Initially, we consider the number of descriptors the
5338 * packet uses to be the number of DMA segments. This may be
5339 * incremented by 1 if we do checksum offload (a descriptor
5340 * is used to set the checksum context).
5341 */
5342 txs->txs_mbuf = m0;
5343 txs->txs_firstdesc = sc->sc_txnext;
5344 txs->txs_ndesc = segs_needed;
5345
5346 /* Set up offload parameters for this packet. */
5347 if (m0->m_pkthdr.csum_flags &
5348 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5349 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5350 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5351 if (wm_tx_offload(sc, txs, &cksumcmd,
5352 &cksumfields) != 0) {
5353 /* Error message already displayed. */
5354 bus_dmamap_unload(sc->sc_dmat, dmamap);
5355 continue;
5356 }
5357 } else {
5358 cksumcmd = 0;
5359 cksumfields = 0;
5360 }
5361
5362 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
5363
5364 /* Sync the DMA map. */
5365 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5366 BUS_DMASYNC_PREWRITE);
5367
5368 /* Initialize the transmit descriptor. */
5369 for (nexttx = sc->sc_txnext, seg = 0;
5370 seg < dmamap->dm_nsegs; seg++) {
5371 for (seglen = dmamap->dm_segs[seg].ds_len,
5372 curaddr = dmamap->dm_segs[seg].ds_addr;
5373 seglen != 0;
5374 curaddr += curlen, seglen -= curlen,
5375 nexttx = WM_NEXTTX(sc, nexttx)) {
5376 curlen = seglen;
5377
5378 /*
5379 * So says the Linux driver:
5380 * Work around for premature descriptor
5381 * write-backs in TSO mode. Append a
5382 * 4-byte sentinel descriptor.
5383 */
5384 if (use_tso &&
5385 seg == dmamap->dm_nsegs - 1 &&
5386 curlen > 8)
5387 curlen -= 4;
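/*
 * Trimming curlen by 4 leaves a 4-byte remainder in
 * seglen, so the inner loop runs once more and emits
 * the 4-byte sentinel descriptor that segs_needed
 * accounted for above.
 */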
5388
5389 wm_set_dma_addr(
5390 &sc->sc_txdescs[nexttx].wtx_addr,
5391 curaddr);
5392 sc->sc_txdescs[nexttx].wtx_cmdlen =
5393 htole32(cksumcmd | curlen);
5394 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
5395 0;
5396 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
5397 cksumfields;
5398 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5399 lasttx = nexttx;
5400
5401 DPRINTF(WM_DEBUG_TX,
5402 ("%s: TX: desc %d: low %#" PRIx64 ", "
5403 "len %#04zx\n",
5404 device_xname(sc->sc_dev), nexttx,
5405 (uint64_t)curaddr, curlen));
5406 }
5407 }
5408
5409 KASSERT(lasttx != -1);
5410
5411 /*
5412 * Set up the command byte on the last descriptor of
5413 * the packet. If we're in the interrupt delay window,
5414 * delay the interrupt.
5415 */
5416 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5417 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5418
5419 /*
5420 * If VLANs are enabled and the packet has a VLAN tag, set
5421 * up the descriptor to encapsulate the packet for us.
5422 *
5423 * This is only valid on the last descriptor of the packet.
5424 */
5425 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5426 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5427 htole32(WTX_CMD_VLE);
5428 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
5429 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5430 }
5431
5432 txs->txs_lastdesc = lasttx;
5433
5434 DPRINTF(WM_DEBUG_TX,
5435 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5436 device_xname(sc->sc_dev),
5437 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5438
5439 /* Sync the descriptors we're using. */
5440 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5441 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5442
5443 /* Give the packet to the chip. */
5444 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5445
5446 DPRINTF(WM_DEBUG_TX,
5447 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5448
5449 DPRINTF(WM_DEBUG_TX,
5450 ("%s: TX: finished transmitting packet, job %d\n",
5451 device_xname(sc->sc_dev), sc->sc_txsnext));
5452
5453 /* Advance the tx pointer. */
5454 sc->sc_txfree -= txs->txs_ndesc;
5455 sc->sc_txnext = nexttx;
5456
5457 sc->sc_txsfree--;
5458 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5459
5460 /* Pass the packet to any BPF listeners. */
5461 bpf_mtap(ifp, m0);
5462 }
5463
5464 if (m0 != NULL) {
5465 ifp->if_flags |= IFF_OACTIVE;
5466 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5467 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5468 m_freem(m0);
5469 }
5470
5471 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5472 /* No more slots; notify upper layer. */
5473 ifp->if_flags |= IFF_OACTIVE;
5474 }
5475
5476 if (sc->sc_txfree != ofree) {
5477 /* Set a watchdog timer in case the chip flakes out. */
5478 ifp->if_timer = 5;
5479 }
5480 }
5481
5482 /*
5483 * wm_nq_tx_offload:
5484 *
5485 * Set up TCP/IP checksumming parameters for the
5486 * specified packet, for NEWQUEUE devices
5487 */
5488 static int
5489 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
5490 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
5491 {
5492 struct mbuf *m0 = txs->txs_mbuf;
5493 struct m_tag *mtag;
5494 uint32_t vl_len, mssidx, cmdc;
5495 struct ether_header *eh;
5496 int offset, iphl;
5497
5498 /*
5499 * XXX It would be nice if the mbuf pkthdr had offset
5500 * fields for the protocol headers.
5501 */
5502 *cmdlenp = 0;
5503 *fieldsp = 0;
5504
5505 eh = mtod(m0, struct ether_header *);
5506 	switch (ntohs(eh->ether_type)) {
5507 case ETHERTYPE_IP:
5508 case ETHERTYPE_IPV6:
5509 offset = ETHER_HDR_LEN;
5510 break;
5511
5512 case ETHERTYPE_VLAN:
5513 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5514 break;
5515
5516 default:
5517 /* Don't support this protocol or encapsulation. */
5518 *do_csum = false;
5519 return 0;
5520 }
5521 *do_csum = true;
5522 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
5523 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
5524
5525 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
5526 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
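5526a 	/*
5526b 	 * vl_len packs the VLAN tag, MAC header length and IP header
5526c 	 * length into a single 32-bit word of the context descriptor;
5526d 	 * the KASSERTs check that each value fits its field.
5526e 	 */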
5527
5528 if ((m0->m_pkthdr.csum_flags &
5529 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
5530 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5531 } else {
5532 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5533 }
5534 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
5535 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
5536
5537 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5538 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
5539 << NQTXC_VLLEN_VLAN_SHIFT);
5540 *cmdlenp |= NQTX_CMD_VLE;
5541 }
5542
5543 mssidx = 0;
5544
5545 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5546 int hlen = offset + iphl;
5547 int tcp_hlen;
5548 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
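5548a 		/*
5548b 		 * Both paths below zero the IP length field and seed
5548c 		 * th_sum with a pseudo-header checksum (addresses and
5548d 		 * protocol, length 0); the hardware then computes the
5548e 		 * real checksum for each TSO segment.
5548f 		 */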
5549
5550 if (__predict_false(m0->m_len <
5551 (hlen + sizeof(struct tcphdr)))) {
5552 /*
5553 * TCP/IP headers are not in the first mbuf; we need
5554 * to do this the slow and painful way. Let's just
5555 * hope this doesn't happen very often.
5556 */
5557 struct tcphdr th;
5558
5559 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5560
5561 m_copydata(m0, hlen, sizeof(th), &th);
5562 if (v4) {
5563 struct ip ip;
5564
5565 m_copydata(m0, offset, sizeof(ip), &ip);
5566 ip.ip_len = 0;
5567 m_copyback(m0,
5568 offset + offsetof(struct ip, ip_len),
5569 sizeof(ip.ip_len), &ip.ip_len);
5570 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5571 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5572 } else {
5573 struct ip6_hdr ip6;
5574
5575 m_copydata(m0, offset, sizeof(ip6), &ip6);
5576 ip6.ip6_plen = 0;
5577 m_copyback(m0,
5578 offset + offsetof(struct ip6_hdr, ip6_plen),
5579 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5580 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5581 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5582 }
5583 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5584 sizeof(th.th_sum), &th.th_sum);
5585
5586 tcp_hlen = th.th_off << 2;
5587 } else {
5588 /*
5589 * TCP/IP headers are in the first mbuf; we can do
5590 * this the easy way.
5591 */
5592 struct tcphdr *th;
5593
5594 if (v4) {
5595 struct ip *ip =
5596 (void *)(mtod(m0, char *) + offset);
5597 th = (void *)(mtod(m0, char *) + hlen);
5598
5599 ip->ip_len = 0;
5600 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5601 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5602 } else {
5603 struct ip6_hdr *ip6 =
5604 (void *)(mtod(m0, char *) + offset);
5605 th = (void *)(mtod(m0, char *) + hlen);
5606
5607 ip6->ip6_plen = 0;
5608 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5609 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5610 }
5611 tcp_hlen = th->th_off << 2;
5612 }
5613 hlen += tcp_hlen;
5614 *cmdlenp |= NQTX_CMD_TSE;
5615
5616 if (v4) {
5617 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5618 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5619 } else {
5620 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5621 *fieldsp |= NQTXD_FIELDS_TUXSM;
5622 }
5623 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5624 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5625 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5626 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5627 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5628 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
5629 } else {
5630 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5631 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5632 }
5633
5634 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5635 *fieldsp |= NQTXD_FIELDS_IXSM;
5636 cmdc |= NQTXC_CMD_IP4;
5637 }
5638
5639 if (m0->m_pkthdr.csum_flags &
5640 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5641 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5642 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5643 cmdc |= NQTXC_CMD_TCP;
5644 } else {
5645 cmdc |= NQTXC_CMD_UDP;
5646 }
5647 cmdc |= NQTXC_CMD_IP4;
5648 *fieldsp |= NQTXD_FIELDS_TUXSM;
5649 }
5650 if (m0->m_pkthdr.csum_flags &
5651 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5652 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5653 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5654 cmdc |= NQTXC_CMD_TCP;
5655 } else {
5656 cmdc |= NQTXC_CMD_UDP;
5657 }
5658 cmdc |= NQTXC_CMD_IP6;
5659 *fieldsp |= NQTXD_FIELDS_TUXSM;
5660 }
5661
5662 /* Fill in the context descriptor. */
5663 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5664 htole32(vl_len);
5665 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5666 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5667 htole32(cmdc);
5668 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5669 htole32(mssidx);
5670 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5671 DPRINTF(WM_DEBUG_TX,
5672 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5673 sc->sc_txnext, 0, vl_len));
5674 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5675 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
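5675a 	/* The context descriptor occupies one ring slot of its own. */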
5676 txs->txs_ndesc++;
5677 return 0;
5678 }
5679
5680 /*
5681 * wm_nq_start: [ifnet interface function]
5682 *
5683 * Start packet transmission on the interface for NEWQUEUE devices
5684 */
5685 static void
5686 wm_nq_start(struct ifnet *ifp)
5687 {
5688 struct wm_softc *sc = ifp->if_softc;
5689
5690 WM_TX_LOCK(sc);
5691 if (!sc->sc_stopping)
5692 wm_nq_start_locked(ifp);
5693 WM_TX_UNLOCK(sc);
5694 }
5695
5696 static void
5697 wm_nq_start_locked(struct ifnet *ifp)
5698 {
5699 struct wm_softc *sc = ifp->if_softc;
5700 struct mbuf *m0;
5701 struct m_tag *mtag;
5702 struct wm_txsoft *txs;
5703 bus_dmamap_t dmamap;
5704 int error, nexttx, lasttx = -1, seg, segs_needed;
5705 bool do_csum, sent;
5706
5707 KASSERT(WM_TX_LOCKED(sc));
5708
5709 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5710 return;
5711
5712 sent = false;
5713
5714 /*
5715 * Loop through the send queue, setting up transmit descriptors
5716 * until we drain the queue, or use up all available transmit
5717 * descriptors.
5718 */
5719 for (;;) {
5720 m0 = NULL;
5721
5722 /* Get a work queue entry. */
5723 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5724 wm_txeof(sc);
5725 if (sc->sc_txsfree == 0) {
5726 DPRINTF(WM_DEBUG_TX,
5727 ("%s: TX: no free job descriptors\n",
5728 device_xname(sc->sc_dev)));
5729 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5730 break;
5731 }
5732 }
5733
5734 /* Grab a packet off the queue. */
5735 IFQ_DEQUEUE(&ifp->if_snd, m0);
5736 if (m0 == NULL)
5737 break;
5738
5739 DPRINTF(WM_DEBUG_TX,
5740 ("%s: TX: have packet to transmit: %p\n",
5741 device_xname(sc->sc_dev), m0));
5742
5743 txs = &sc->sc_txsoft[sc->sc_txsnext];
5744 dmamap = txs->txs_dmamap;
5745
5746 /*
5747 * Load the DMA map. If this fails, the packet either
5748 * didn't fit in the allotted number of segments, or we
5749 * were short on resources. For the too-many-segments
5750 * case, we simply report an error and drop the packet,
5751 * since we can't sanely copy a jumbo packet to a single
5752 * buffer.
5753 */
5754 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5755 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5756 if (error) {
5757 if (error == EFBIG) {
5758 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5759 log(LOG_ERR, "%s: Tx packet consumes too many "
5760 "DMA segments, dropping...\n",
5761 device_xname(sc->sc_dev));
5762 wm_dump_mbuf_chain(sc, m0);
5763 m_freem(m0);
5764 continue;
5765 }
5766 /* Short on resources, just stop for now. */
5767 DPRINTF(WM_DEBUG_TX,
5768 ("%s: TX: dmamap load failed: %d\n",
5769 device_xname(sc->sc_dev), error));
5770 break;
5771 }
5772
5773 segs_needed = dmamap->dm_nsegs;
5774
5775 /*
5776 * Ensure we have enough descriptors free to describe
5777 * the packet. Note, we always reserve one descriptor
5778 * at the end of the ring due to the semantics of the
5779 * TDT register, plus one more in the event we need
5780 * to load offload context.
5781 */
5782 if (segs_needed > sc->sc_txfree - 2) {
5783 /*
5784 * Not enough free descriptors to transmit this
5785 * packet. We haven't committed anything yet,
5786 * so just unload the DMA map, put the packet
5787 			 * back on the queue, and punt. Notify the upper
5788 * layer that there are no more slots left.
5789 */
5790 DPRINTF(WM_DEBUG_TX,
5791 ("%s: TX: need %d (%d) descriptors, have %d\n",
5792 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5793 segs_needed, sc->sc_txfree - 1));
5794 ifp->if_flags |= IFF_OACTIVE;
5795 bus_dmamap_unload(sc->sc_dmat, dmamap);
5796 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5797 break;
5798 }
5799
5800 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5801
5802 DPRINTF(WM_DEBUG_TX,
5803 ("%s: TX: packet has %d (%d) DMA segments\n",
5804 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5805
5806 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5807
5808 /*
5809 * Store a pointer to the packet so that we can free it
5810 * later.
5811 *
5812 * Initially, we consider the number of descriptors the
5813 		 * packet uses to be the number of DMA segments. This may be
5814 * incremented by 1 if we do checksum offload (a descriptor
5815 * is used to set the checksum context).
5816 */
5817 txs->txs_mbuf = m0;
5818 txs->txs_firstdesc = sc->sc_txnext;
5819 txs->txs_ndesc = segs_needed;
5820
5821 /* Set up offload parameters for this packet. */
5822 uint32_t cmdlen, fields, dcmdlen;
5823 if (m0->m_pkthdr.csum_flags &
5824 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5825 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5826 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5827 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5828 &do_csum) != 0) {
5829 /* Error message already displayed. */
5830 bus_dmamap_unload(sc->sc_dmat, dmamap);
5831 continue;
5832 }
5833 } else {
5834 do_csum = false;
5835 cmdlen = 0;
5836 fields = 0;
5837 }
5838
5839 /* Sync the DMA map. */
5840 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5841 BUS_DMASYNC_PREWRITE);
5842
5843 /* Initialize the first transmit descriptor. */
5844 nexttx = sc->sc_txnext;
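5844a 		/*
5844b 		 * Without offload a legacy descriptor suffices; with
5844c 		 * offload the first data descriptor must use the
5844d 		 * advanced (DEXT) format so that it picks up the
5844e 		 * context set up by wm_nq_tx_offload().
5844f 		 */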
5845 if (!do_csum) {
5846 /* setup a legacy descriptor */
5847 wm_set_dma_addr(
5848 &sc->sc_txdescs[nexttx].wtx_addr,
5849 dmamap->dm_segs[0].ds_addr);
5850 sc->sc_txdescs[nexttx].wtx_cmdlen =
5851 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5852 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5853 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5854 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5855 NULL) {
5856 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5857 htole32(WTX_CMD_VLE);
5858 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5859 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5860 } else {
5861 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5862 }
5863 dcmdlen = 0;
5864 } else {
5865 /* setup an advanced data descriptor */
5866 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5867 htole64(dmamap->dm_segs[0].ds_addr);
5868 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5869 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5870 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
5871 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5872 htole32(fields);
5873 DPRINTF(WM_DEBUG_TX,
5874 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5875 device_xname(sc->sc_dev), nexttx,
5876 (uint64_t)dmamap->dm_segs[0].ds_addr));
5877 DPRINTF(WM_DEBUG_TX,
5878 ("\t 0x%08x%08x\n", fields,
5879 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5880 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5881 }
5882
5883 lasttx = nexttx;
5884 nexttx = WM_NEXTTX(sc, nexttx);
5885 /*
5886 		 * Fill in the next descriptors. The legacy and advanced
5887 		 * formats are the same here.
5888 */
5889 for (seg = 1; seg < dmamap->dm_nsegs;
5890 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5891 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5892 htole64(dmamap->dm_segs[seg].ds_addr);
5893 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5894 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5895 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5896 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5897 lasttx = nexttx;
5898
5899 DPRINTF(WM_DEBUG_TX,
5900 ("%s: TX: desc %d: %#" PRIx64 ", "
5901 "len %#04zx\n",
5902 device_xname(sc->sc_dev), nexttx,
5903 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5904 dmamap->dm_segs[seg].ds_len));
5905 }
5906
5907 KASSERT(lasttx != -1);
5908
5909 /*
5910 * Set up the command byte on the last descriptor of
5911 * the packet. If we're in the interrupt delay window,
5912 * delay the interrupt.
5913 */
5914 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5915 (NQTX_CMD_EOP | NQTX_CMD_RS));
5916 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5917 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5918
5919 txs->txs_lastdesc = lasttx;
5920
5921 DPRINTF(WM_DEBUG_TX,
5922 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5923 device_xname(sc->sc_dev),
5924 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5925
5926 /* Sync the descriptors we're using. */
5927 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5928 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5929
5930 /* Give the packet to the chip. */
5931 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5932 sent = true;
5933
5934 DPRINTF(WM_DEBUG_TX,
5935 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5936
5937 DPRINTF(WM_DEBUG_TX,
5938 ("%s: TX: finished transmitting packet, job %d\n",
5939 device_xname(sc->sc_dev), sc->sc_txsnext));
5940
5941 /* Advance the tx pointer. */
5942 sc->sc_txfree -= txs->txs_ndesc;
5943 sc->sc_txnext = nexttx;
5944
5945 sc->sc_txsfree--;
5946 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5947
5948 /* Pass the packet to any BPF listeners. */
5949 bpf_mtap(ifp, m0);
5950 }
5951
5952 if (m0 != NULL) {
5953 ifp->if_flags |= IFF_OACTIVE;
5954 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5955 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5956 m_freem(m0);
5957 }
5958
5959 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5960 /* No more slots; notify upper layer. */
5961 ifp->if_flags |= IFF_OACTIVE;
5962 }
5963
5964 if (sent) {
5965 /* Set a watchdog timer in case the chip flakes out. */
5966 ifp->if_timer = 5;
5967 }
5968 }
5969
5970 /* Interrupt */
5971
5972 /*
5973 * wm_txeof:
5974 *
5975 * Helper; handle transmit interrupts.
5976 */
5977 static int
5978 wm_txeof(struct wm_softc *sc)
5979 {
5980 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5981 struct wm_txsoft *txs;
5982 bool processed = false;
5983 int count = 0;
5984 int i;
5985 uint8_t status;
5986
5987 if (sc->sc_stopping)
5988 return 0;
5989
5990 ifp->if_flags &= ~IFF_OACTIVE;
5991
5992 /*
5993 * Go through the Tx list and free mbufs for those
5994 * frames which have been transmitted.
5995 */
5996 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5997 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5998 txs = &sc->sc_txsoft[i];
5999
6000 DPRINTF(WM_DEBUG_TX,
6001 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
6002
6003 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
6004 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6005
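6005a 		/*
6005b 		 * Read back the status the chip wrote; WTX_ST_DD on the
6005c 		 * last descriptor marks the job as complete.
6005d 		 */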
6006 status =
6007 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
6008 if ((status & WTX_ST_DD) == 0) {
6009 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
6010 BUS_DMASYNC_PREREAD);
6011 break;
6012 }
6013
6014 processed = true;
6015 count++;
6016 DPRINTF(WM_DEBUG_TX,
6017 ("%s: TX: job %d done: descs %d..%d\n",
6018 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
6019 txs->txs_lastdesc));
6020
6021 /*
6022 * XXX We should probably be using the statistics
6023 * XXX registers, but I don't know if they exist
6024 * XXX on chips before the i82544.
6025 */
6026
6027 #ifdef WM_EVENT_COUNTERS
6028 if (status & WTX_ST_TU)
6029 WM_EVCNT_INCR(&sc->sc_ev_tu);
6030 #endif /* WM_EVENT_COUNTERS */
6031
6032 if (status & (WTX_ST_EC|WTX_ST_LC)) {
6033 ifp->if_oerrors++;
6034 if (status & WTX_ST_LC)
6035 log(LOG_WARNING, "%s: late collision\n",
6036 device_xname(sc->sc_dev));
6037 else if (status & WTX_ST_EC) {
6038 ifp->if_collisions += 16;
6039 log(LOG_WARNING, "%s: excessive collisions\n",
6040 device_xname(sc->sc_dev));
6041 }
6042 } else
6043 ifp->if_opackets++;
6044
6045 sc->sc_txfree += txs->txs_ndesc;
6046 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
6047 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6048 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
6049 m_freem(txs->txs_mbuf);
6050 txs->txs_mbuf = NULL;
6051 }
6052
6053 /* Update the dirty transmit buffer pointer. */
6054 sc->sc_txsdirty = i;
6055 DPRINTF(WM_DEBUG_TX,
6056 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
6057
6058 if (count != 0)
6059 rnd_add_uint32(&sc->rnd_source, count);
6060
6061 /*
6062 * If there are no more pending transmissions, cancel the watchdog
6063 * timer.
6064 */
6065 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
6066 ifp->if_timer = 0;
6067
6068 return processed;
6069 }
6070
6071 /*
6072 * wm_rxeof:
6073 *
6074 * Helper; handle receive interrupts.
6075 */
6076 static void
6077 wm_rxeof(struct wm_softc *sc)
6078 {
6079 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6080 struct wm_rxsoft *rxs;
6081 struct mbuf *m;
6082 int i, len;
6083 int count = 0;
6084 uint8_t status, errors;
6085 uint16_t vlantag;
6086
6087 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
6088 rxs = &sc->sc_rxsoft[i];
6089
6090 DPRINTF(WM_DEBUG_RX,
6091 ("%s: RX: checking descriptor %d\n",
6092 device_xname(sc->sc_dev), i));
6093
6094 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6095
6096 status = sc->sc_rxdescs[i].wrx_status;
6097 errors = sc->sc_rxdescs[i].wrx_errors;
6098 len = le16toh(sc->sc_rxdescs[i].wrx_len);
6099 vlantag = sc->sc_rxdescs[i].wrx_special;
6100
6101 if ((status & WRX_ST_DD) == 0) {
6102 /* We have processed all of the receive descriptors. */
6103 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
6104 break;
6105 }
6106
6107 count++;
6108 if (__predict_false(sc->sc_rxdiscard)) {
6109 DPRINTF(WM_DEBUG_RX,
6110 ("%s: RX: discarding contents of descriptor %d\n",
6111 device_xname(sc->sc_dev), i));
6112 WM_INIT_RXDESC(sc, i);
6113 if (status & WRX_ST_EOP) {
6114 /* Reset our state. */
6115 DPRINTF(WM_DEBUG_RX,
6116 ("%s: RX: resetting rxdiscard -> 0\n",
6117 device_xname(sc->sc_dev)));
6118 sc->sc_rxdiscard = 0;
6119 }
6120 continue;
6121 }
6122
6123 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6124 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
6125
6126 m = rxs->rxs_mbuf;
6127
6128 /*
6129 * Add a new receive buffer to the ring, unless of
6130 * course the length is zero. Treat the latter as a
6131 * failed mapping.
6132 */
6133 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
6134 /*
6135 * Failed, throw away what we've done so
6136 * far, and discard the rest of the packet.
6137 */
6138 ifp->if_ierrors++;
6139 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6140 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
6141 WM_INIT_RXDESC(sc, i);
6142 if ((status & WRX_ST_EOP) == 0)
6143 sc->sc_rxdiscard = 1;
6144 if (sc->sc_rxhead != NULL)
6145 m_freem(sc->sc_rxhead);
6146 WM_RXCHAIN_RESET(sc);
6147 DPRINTF(WM_DEBUG_RX,
6148 ("%s: RX: Rx buffer allocation failed, "
6149 "dropping packet%s\n", device_xname(sc->sc_dev),
6150 sc->sc_rxdiscard ? " (discard)" : ""));
6151 continue;
6152 }
6153
6154 m->m_len = len;
6155 sc->sc_rxlen += len;
6156 DPRINTF(WM_DEBUG_RX,
6157 ("%s: RX: buffer at %p len %d\n",
6158 device_xname(sc->sc_dev), m->m_data, len));
6159
6160 /* If this is not the end of the packet, keep looking. */
6161 if ((status & WRX_ST_EOP) == 0) {
6162 WM_RXCHAIN_LINK(sc, m);
6163 DPRINTF(WM_DEBUG_RX,
6164 ("%s: RX: not yet EOP, rxlen -> %d\n",
6165 device_xname(sc->sc_dev), sc->sc_rxlen));
6166 continue;
6167 }
6168
6169 /*
6170 		 * Okay, we have the entire packet now. The chip is
6171 		 * configured to include the FCS except on I350, I354
6172 		 * and I21[01] (not all chips can be configured to strip
6173 		 * it), so we need to trim it. We may also need to adjust
6174 		 * the length of the previous mbuf in the chain if the
6175 		 * current mbuf is too short.
6176 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
6177 		 * register is always set on I350, so we don't trim there.
6178 */
6179 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
6180 && (sc->sc_type != WM_T_I210)
6181 && (sc->sc_type != WM_T_I211)) {
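6181a 			/*
6181b 			 * If the last mbuf holds fewer than ETHER_CRC_LEN
6181c 			 * bytes, part of the FCS sits in the previous
6181d 			 * mbuf; trim the remainder from its tail.
6181e 			 */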
6182 if (m->m_len < ETHER_CRC_LEN) {
6183 sc->sc_rxtail->m_len
6184 -= (ETHER_CRC_LEN - m->m_len);
6185 m->m_len = 0;
6186 } else
6187 m->m_len -= ETHER_CRC_LEN;
6188 len = sc->sc_rxlen - ETHER_CRC_LEN;
6189 } else
6190 len = sc->sc_rxlen;
6191
6192 WM_RXCHAIN_LINK(sc, m);
6193
6194 *sc->sc_rxtailp = NULL;
6195 m = sc->sc_rxhead;
6196
6197 WM_RXCHAIN_RESET(sc);
6198
6199 DPRINTF(WM_DEBUG_RX,
6200 ("%s: RX: have entire packet, len -> %d\n",
6201 device_xname(sc->sc_dev), len));
6202
6203 /* If an error occurred, update stats and drop the packet. */
6204 if (errors &
6205 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
6206 if (errors & WRX_ER_SE)
6207 log(LOG_WARNING, "%s: symbol error\n",
6208 device_xname(sc->sc_dev));
6209 else if (errors & WRX_ER_SEQ)
6210 log(LOG_WARNING, "%s: receive sequence error\n",
6211 device_xname(sc->sc_dev));
6212 else if (errors & WRX_ER_CE)
6213 log(LOG_WARNING, "%s: CRC error\n",
6214 device_xname(sc->sc_dev));
6215 m_freem(m);
6216 continue;
6217 }
6218
6219 /* No errors. Receive the packet. */
6220 m->m_pkthdr.rcvif = ifp;
6221 m->m_pkthdr.len = len;
6222
6223 /*
6224 * If VLANs are enabled, VLAN packets have been unwrapped
6225 * for us. Associate the tag with the packet.
6226 */
6227 		/* XXX should check for i350 and i354 */
6228 if ((status & WRX_ST_VP) != 0) {
6229 VLAN_INPUT_TAG(ifp, m,
6230 le16toh(vlantag),
6231 continue);
6232 }
6233
6234 /* Set up checksum info for this packet. */
6235 if ((status & WRX_ST_IXSM) == 0) {
6236 if (status & WRX_ST_IPCS) {
6237 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
6238 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
6239 if (errors & WRX_ER_IPE)
6240 m->m_pkthdr.csum_flags |=
6241 M_CSUM_IPv4_BAD;
6242 }
6243 if (status & WRX_ST_TCPCS) {
6244 /*
6245 * Note: we don't know if this was TCP or UDP,
6246 * so we just set both bits, and expect the
6247 * upper layers to deal.
6248 */
6249 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
6250 m->m_pkthdr.csum_flags |=
6251 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6252 M_CSUM_TCPv6 | M_CSUM_UDPv6;
6253 if (errors & WRX_ER_TCPE)
6254 m->m_pkthdr.csum_flags |=
6255 M_CSUM_TCP_UDP_BAD;
6256 }
6257 }
6258
6259 ifp->if_ipackets++;
6260
6261 WM_RX_UNLOCK(sc);
6262
6263 /* Pass this up to any BPF listeners. */
6264 bpf_mtap(ifp, m);
6265
6266 /* Pass it on. */
6267 (*ifp->if_input)(ifp, m);
6268
6269 WM_RX_LOCK(sc);
6270
6271 if (sc->sc_stopping)
6272 break;
6273 }
6274
6275 /* Update the receive pointer. */
6276 sc->sc_rxptr = i;
6277 if (count != 0)
6278 rnd_add_uint32(&sc->rnd_source, count);
6279
6280 DPRINTF(WM_DEBUG_RX,
6281 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
6282 }
6283
6284 /*
6285 * wm_linkintr_gmii:
6286 *
6287 * Helper; handle link interrupts for GMII.
6288 */
6289 static void
6290 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
6291 {
6292
6293 KASSERT(WM_TX_LOCKED(sc));
6294
6295 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6296 __func__));
6297
6298 if (icr & ICR_LSC) {
6299 DPRINTF(WM_DEBUG_LINK,
6300 ("%s: LINK: LSC -> mii_pollstat\n",
6301 device_xname(sc->sc_dev)));
6302 mii_pollstat(&sc->sc_mii);
6303 if (sc->sc_type == WM_T_82543) {
6304 int miistatus, active;
6305
6306 /*
6307 * With 82543, we need to force speed and
6308 * duplex on the MAC equal to what the PHY
6309 * speed and duplex configuration is.
6310 */
6311 miistatus = sc->sc_mii.mii_media_status;
6312
6313 if (miistatus & IFM_ACTIVE) {
6314 active = sc->sc_mii.mii_media_active;
6315 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6316 switch (IFM_SUBTYPE(active)) {
6317 case IFM_10_T:
6318 sc->sc_ctrl |= CTRL_SPEED_10;
6319 break;
6320 case IFM_100_TX:
6321 sc->sc_ctrl |= CTRL_SPEED_100;
6322 break;
6323 case IFM_1000_T:
6324 sc->sc_ctrl |= CTRL_SPEED_1000;
6325 break;
6326 default:
6327 /*
6328 					 * Fiber?
6329 					 * Should not enter here.
6330 */
6331 printf("unknown media (%x)\n",
6332 active);
6333 break;
6334 }
6335 if (active & IFM_FDX)
6336 sc->sc_ctrl |= CTRL_FD;
6337 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6338 }
6339 } else if ((sc->sc_type == WM_T_ICH8)
6340 && (sc->sc_phytype == WMPHY_IGP_3)) {
6341 wm_kmrn_lock_loss_workaround_ich8lan(sc);
6342 } else if (sc->sc_type == WM_T_PCH) {
6343 wm_k1_gig_workaround_hv(sc,
6344 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
6345 }
6346
6347 if ((sc->sc_phytype == WMPHY_82578)
6348 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
6349 == IFM_1000_T)) {
6350
6351 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
6352 delay(200*1000); /* XXX too big */
6353
6354 /* Link stall fix for link up */
6355 wm_gmii_hv_writereg(sc->sc_dev, 1,
6356 HV_MUX_DATA_CTRL,
6357 HV_MUX_DATA_CTRL_GEN_TO_MAC
6358 | HV_MUX_DATA_CTRL_FORCE_SPEED);
6359 wm_gmii_hv_writereg(sc->sc_dev, 1,
6360 HV_MUX_DATA_CTRL,
6361 HV_MUX_DATA_CTRL_GEN_TO_MAC);
6362 }
6363 }
6364 } else if (icr & ICR_RXSEQ) {
6365 DPRINTF(WM_DEBUG_LINK,
6366 ("%s: LINK Receive sequence error\n",
6367 device_xname(sc->sc_dev)));
6368 }
6369 }
6370
6371 /*
6372 * wm_linkintr_tbi:
6373 *
6374 * Helper; handle link interrupts for TBI mode.
6375 */
6376 static void
6377 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
6378 {
6379 uint32_t status;
6380
6381 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6382 __func__));
6383
6384 status = CSR_READ(sc, WMREG_STATUS);
6385 if (icr & ICR_LSC) {
6386 if (status & STATUS_LU) {
6387 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
6388 device_xname(sc->sc_dev),
6389 (status & STATUS_FD) ? "FDX" : "HDX"));
6390 /*
6391 * NOTE: CTRL will update TFCE and RFCE automatically,
6392 * so we should update sc->sc_ctrl
6393 */
6394
6395 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6396 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6397 sc->sc_fcrtl &= ~FCRTL_XONE;
6398 if (status & STATUS_FD)
6399 sc->sc_tctl |=
6400 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6401 else
6402 sc->sc_tctl |=
6403 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6404 if (sc->sc_ctrl & CTRL_TFCE)
6405 sc->sc_fcrtl |= FCRTL_XONE;
6406 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6407 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6408 WMREG_OLD_FCRTL : WMREG_FCRTL,
6409 sc->sc_fcrtl);
6410 sc->sc_tbi_linkup = 1;
6411 } else {
6412 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
6413 device_xname(sc->sc_dev)));
6414 sc->sc_tbi_linkup = 0;
6415 }
6416 /* Update LED */
6417 wm_tbi_serdes_set_linkled(sc);
6418 } else if (icr & ICR_RXSEQ) {
6419 DPRINTF(WM_DEBUG_LINK,
6420 ("%s: LINK: Receive sequence error\n",
6421 device_xname(sc->sc_dev)));
6422 }
6423 }
6424
6425 /*
6426 * wm_linkintr_serdes:
6427 *
6428  *	Helper; handle link interrupts for SERDES mode.
6429 */
6430 static void
6431 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
6432 {
6433 struct mii_data *mii = &sc->sc_mii;
6434 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6435 uint32_t pcs_adv, pcs_lpab, reg;
6436
6437 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6438 __func__));
6439
6440 if (icr & ICR_LSC) {
6441 /* Check PCS */
6442 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6443 if ((reg & PCS_LSTS_LINKOK) != 0) {
6444 mii->mii_media_status |= IFM_ACTIVE;
6445 sc->sc_tbi_linkup = 1;
6446 } else {
6447 			mii->mii_media_active |= IFM_NONE;
6448 sc->sc_tbi_linkup = 0;
6449 wm_tbi_serdes_set_linkled(sc);
6450 return;
6451 }
6452 mii->mii_media_active |= IFM_1000_SX;
6453 if ((reg & PCS_LSTS_FDX) != 0)
6454 mii->mii_media_active |= IFM_FDX;
6455 else
6456 mii->mii_media_active |= IFM_HDX;
6457 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6458 /* Check flow */
6459 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6460 if ((reg & PCS_LSTS_AN_COMP) == 0) {
6461 DPRINTF(WM_DEBUG_LINK,
6462 ("XXX LINKOK but not ACOMP\n"));
6463 return;
6464 }
6465 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
6466 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
6467 DPRINTF(WM_DEBUG_LINK,
6468 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
6469 if ((pcs_adv & TXCW_SYM_PAUSE)
6470 && (pcs_lpab & TXCW_SYM_PAUSE)) {
6471 mii->mii_media_active |= IFM_FLOW
6472 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
6473 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
6474 && (pcs_adv & TXCW_ASYM_PAUSE)
6475 && (pcs_lpab & TXCW_SYM_PAUSE)
6476 && (pcs_lpab & TXCW_ASYM_PAUSE))
6477 mii->mii_media_active |= IFM_FLOW
6478 | IFM_ETH_TXPAUSE;
6479 else if ((pcs_adv & TXCW_SYM_PAUSE)
6480 && (pcs_adv & TXCW_ASYM_PAUSE)
6481 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
6482 && (pcs_lpab & TXCW_ASYM_PAUSE))
6483 mii->mii_media_active |= IFM_FLOW
6484 | IFM_ETH_RXPAUSE;
6485 }
6486 /* Update LED */
6487 wm_tbi_serdes_set_linkled(sc);
6488 } else {
6489 DPRINTF(WM_DEBUG_LINK,
6490 ("%s: LINK: Receive sequence error\n",
6491 device_xname(sc->sc_dev)));
6492 }
6493 }
6494
6495 /*
6496 * wm_linkintr:
6497 *
6498 * Helper; handle link interrupts.
6499 */
6500 static void
6501 wm_linkintr(struct wm_softc *sc, uint32_t icr)
6502 {
6503
6504 if (sc->sc_flags & WM_F_HAS_MII)
6505 wm_linkintr_gmii(sc, icr);
6506 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
6507 && (sc->sc_type >= WM_T_82575))
6508 wm_linkintr_serdes(sc, icr);
6509 else
6510 wm_linkintr_tbi(sc, icr);
6511 }
6512
6513 /*
6514 * wm_intr_legacy:
6515 *
6516 * Interrupt service routine for INTx and MSI.
6517 */
6518 static int
6519 wm_intr_legacy(void *arg)
6520 {
6521 struct wm_softc *sc = arg;
6522 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6523 uint32_t icr, rndval = 0;
6524 int handled = 0;
6525
6526 DPRINTF(WM_DEBUG_TX,
6527 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
6528 while (1 /* CONSTCOND */) {
6529 icr = CSR_READ(sc, WMREG_ICR);
6530 if ((icr & sc->sc_icr) == 0)
6531 break;
6532 if (rndval == 0)
6533 rndval = icr;
6534
6535 WM_RX_LOCK(sc);
6536
6537 if (sc->sc_stopping) {
6538 WM_RX_UNLOCK(sc);
6539 break;
6540 }
6541
6542 handled = 1;
6543
6544 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6545 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
6546 DPRINTF(WM_DEBUG_RX,
6547 ("%s: RX: got Rx intr 0x%08x\n",
6548 device_xname(sc->sc_dev),
6549 icr & (ICR_RXDMT0|ICR_RXT0)));
6550 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6551 }
6552 #endif
6553 wm_rxeof(sc);
6554
6555 WM_RX_UNLOCK(sc);
6556 WM_TX_LOCK(sc);
6557
6558 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6559 if (icr & ICR_TXDW) {
6560 DPRINTF(WM_DEBUG_TX,
6561 ("%s: TX: got TXDW interrupt\n",
6562 device_xname(sc->sc_dev)));
6563 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6564 }
6565 #endif
6566 wm_txeof(sc);
6567
6568 if (icr & (ICR_LSC|ICR_RXSEQ)) {
6569 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6570 wm_linkintr(sc, icr);
6571 }
6572
6573 WM_TX_UNLOCK(sc);
6574
6575 if (icr & ICR_RXO) {
6576 #if defined(WM_DEBUG)
6577 log(LOG_WARNING, "%s: Receive overrun\n",
6578 device_xname(sc->sc_dev));
6579 #endif /* defined(WM_DEBUG) */
6580 }
6581 }
6582
6583 rnd_add_uint32(&sc->rnd_source, rndval);
6584
6585 if (handled) {
6586 /* Try to get more packets going. */
6587 ifp->if_start(ifp);
6588 }
6589
6590 return handled;
6591 }
6592
6593 #ifdef WM_MSI_MSIX
6594 /*
6595 * wm_txintr_msix:
6596 *
6597 * Interrupt service routine for TX complete interrupt for MSI-X.
6598 */
6599 static int
6600 wm_txintr_msix(void *arg)
6601 {
6602 struct wm_softc *sc = arg;
6603 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6604 int handled = 0;
6605
6606 DPRINTF(WM_DEBUG_TX,
6607 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
6608
6609 if (sc->sc_type == WM_T_82574)
6610 CSR_WRITE(sc, WMREG_IMC, ICR_TXQ0); /* 82574 only */
6611 else if (sc->sc_type == WM_T_82575)
6612 CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE0);
6613 else
6614 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_TXINTR_IDX);
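6614a 	/* The Tx queue interrupt stays masked until we re-enable it below. */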
6615
6616 WM_TX_LOCK(sc);
6617
6618 if (sc->sc_stopping)
6619 goto out;
6620
6621 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6622 handled = wm_txeof(sc);
6623
6624 out:
6625 WM_TX_UNLOCK(sc);
6626
6627 if (sc->sc_type == WM_T_82574)
6628 CSR_WRITE(sc, WMREG_IMS, ICR_TXQ0); /* 82574 only */
6629 else if (sc->sc_type == WM_T_82575)
6630 CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE0);
6631 else
6632 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_TXINTR_IDX);
6633
6634 if (handled) {
6635 /* Try to get more packets going. */
6636 ifp->if_start(ifp);
6637 }
6638
6639 return handled;
6640 }
6641
6642 /*
6643 * wm_rxintr_msix:
6644 *
6645 * Interrupt service routine for RX interrupt for MSI-X.
6646 */
6647 static int
6648 wm_rxintr_msix(void *arg)
6649 {
6650 struct wm_softc *sc = arg;
6651
6652 	DPRINTF(WM_DEBUG_RX,
6653 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
6654
6655 if (sc->sc_type == WM_T_82574)
6656 CSR_WRITE(sc, WMREG_IMC, ICR_RXQ0); /* 82574 only */
6657 else if (sc->sc_type == WM_T_82575)
6658 CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE0);
6659 else
6660 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_RXINTR_IDX);
6661
6662 WM_RX_LOCK(sc);
6663
6664 if (sc->sc_stopping)
6665 goto out;
6666
6667 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6668 wm_rxeof(sc);
6669
6670 out:
6671 WM_RX_UNLOCK(sc);
6672
6673 if (sc->sc_type == WM_T_82574)
6674 CSR_WRITE(sc, WMREG_IMS, ICR_RXQ0);
6675 else if (sc->sc_type == WM_T_82575)
6676 CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE0);
6677 else
6678 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_RXINTR_IDX);
6679
6680 return 1;
6681 }
6682
6683 /*
6684 * wm_linkintr_msix:
6685 *
6686 * Interrupt service routine for link status change for MSI-X.
6687 */
6688 static int
6689 wm_linkintr_msix(void *arg)
6690 {
6691 struct wm_softc *sc = arg;
6692
6693 	DPRINTF(WM_DEBUG_LINK,
6694 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
6695
6696 if (sc->sc_type == WM_T_82574)
6697 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER); /* 82574 only */
6698 else if (sc->sc_type == WM_T_82575)
6699 CSR_WRITE(sc, WMREG_EIMC, EITR_OTHER);
6700 else
6701 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_LINKINTR_IDX);
6702 WM_TX_LOCK(sc);
6703 if (sc->sc_stopping)
6704 goto out;
6705
6706 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6707 wm_linkintr(sc, ICR_LSC);
6708
6709 out:
6710 WM_TX_UNLOCK(sc);
6711
6712 if (sc->sc_type == WM_T_82574)
6713 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
6714 else if (sc->sc_type == WM_T_82575)
6715 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
6716 else
6717 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_LINKINTR_IDX);
6718
6719 return 1;
6720 }
6721 #endif /* WM_MSI_MSIX */
6722
6723 /*
6724 * Media related.
6725 * GMII, SGMII, TBI (and SERDES)
6726 */
6727
6728 /* Common */
6729
6730 /*
6731 * wm_tbi_serdes_set_linkled:
6732 *
6733 * Update the link LED on TBI and SERDES devices.
6734 */
6735 static void
6736 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
6737 {
6738
6739 if (sc->sc_tbi_linkup)
6740 sc->sc_ctrl |= CTRL_SWDPIN(0);
6741 else
6742 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6743
6744 /* 82540 or newer devices are active low */
6745 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6746
6747 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6748 }
6749
6750 /* GMII related */
6751
6752 /*
6753 * wm_gmii_reset:
6754 *
6755 * Reset the PHY.
6756 */
6757 static void
6758 wm_gmii_reset(struct wm_softc *sc)
6759 {
6760 uint32_t reg;
6761 int rv;
6762
6763 /* get phy semaphore */
6764 switch (sc->sc_type) {
6765 case WM_T_82571:
6766 case WM_T_82572:
6767 case WM_T_82573:
6768 case WM_T_82574:
6769 case WM_T_82583:
6770 /* XXX should get sw semaphore, too */
6771 rv = wm_get_swsm_semaphore(sc);
6772 break;
6773 case WM_T_82575:
6774 case WM_T_82576:
6775 case WM_T_82580:
6776 case WM_T_I350:
6777 case WM_T_I354:
6778 case WM_T_I210:
6779 case WM_T_I211:
6780 case WM_T_80003:
6781 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6782 break;
6783 case WM_T_ICH8:
6784 case WM_T_ICH9:
6785 case WM_T_ICH10:
6786 case WM_T_PCH:
6787 case WM_T_PCH2:
6788 case WM_T_PCH_LPT:
6789 rv = wm_get_swfwhw_semaphore(sc);
6790 break;
6791 default:
6792 		/* Nothing to do */
6793 rv = 0;
6794 break;
6795 }
6796 if (rv != 0) {
6797 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6798 __func__);
6799 return;
6800 }
6801
6802 switch (sc->sc_type) {
6803 case WM_T_82542_2_0:
6804 case WM_T_82542_2_1:
6805 /* null */
6806 break;
6807 case WM_T_82543:
6808 /*
6809 * With 82543, we need to force speed and duplex on the MAC
6810 * equal to what the PHY speed and duplex configuration is.
6811 * In addition, we need to perform a hardware reset on the PHY
6812 * to take it out of reset.
6813 */
6814 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6815 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6816
6817 /* The PHY reset pin is active-low. */
6818 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6819 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6820 CTRL_EXT_SWDPIN(4));
6821 reg |= CTRL_EXT_SWDPIO(4);
6822
6823 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6824 CSR_WRITE_FLUSH(sc);
6825 delay(10*1000);
6826
6827 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6828 CSR_WRITE_FLUSH(sc);
6829 delay(150);
6830 #if 0
6831 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6832 #endif
6833 delay(20*1000); /* XXX extra delay to get PHY ID? */
6834 break;
6835 case WM_T_82544: /* reset 10000us */
6836 case WM_T_82540:
6837 case WM_T_82545:
6838 case WM_T_82545_3:
6839 case WM_T_82546:
6840 case WM_T_82546_3:
6841 case WM_T_82541:
6842 case WM_T_82541_2:
6843 case WM_T_82547:
6844 case WM_T_82547_2:
6845 case WM_T_82571: /* reset 100us */
6846 case WM_T_82572:
6847 case WM_T_82573:
6848 case WM_T_82574:
6849 case WM_T_82575:
6850 case WM_T_82576:
6851 case WM_T_82580:
6852 case WM_T_I350:
6853 case WM_T_I354:
6854 case WM_T_I210:
6855 case WM_T_I211:
6856 case WM_T_82583:
6857 case WM_T_80003:
6858 /* generic reset */
6859 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6860 CSR_WRITE_FLUSH(sc);
6861 delay(20000);
6862 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6863 CSR_WRITE_FLUSH(sc);
6864 delay(20000);
6865
6866 if ((sc->sc_type == WM_T_82541)
6867 || (sc->sc_type == WM_T_82541_2)
6868 || (sc->sc_type == WM_T_82547)
6869 || (sc->sc_type == WM_T_82547_2)) {
6870 			/* Workarounds for IGP are done in igp_reset() */
6871 /* XXX add code to set LED after phy reset */
6872 }
6873 break;
6874 case WM_T_ICH8:
6875 case WM_T_ICH9:
6876 case WM_T_ICH10:
6877 case WM_T_PCH:
6878 case WM_T_PCH2:
6879 case WM_T_PCH_LPT:
6880 /* generic reset */
6881 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6882 CSR_WRITE_FLUSH(sc);
6883 delay(100);
6884 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6885 CSR_WRITE_FLUSH(sc);
6886 delay(150);
6887 break;
6888 default:
6889 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6890 __func__);
6891 break;
6892 }
6893
6894 /* release PHY semaphore */
6895 switch (sc->sc_type) {
6896 case WM_T_82571:
6897 case WM_T_82572:
6898 case WM_T_82573:
6899 case WM_T_82574:
6900 case WM_T_82583:
6901 /* XXX should put sw semaphore, too */
6902 wm_put_swsm_semaphore(sc);
6903 break;
6904 case WM_T_82575:
6905 case WM_T_82576:
6906 case WM_T_82580:
6907 case WM_T_I350:
6908 case WM_T_I354:
6909 case WM_T_I210:
6910 case WM_T_I211:
6911 case WM_T_80003:
6912 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6913 break;
6914 case WM_T_ICH8:
6915 case WM_T_ICH9:
6916 case WM_T_ICH10:
6917 case WM_T_PCH:
6918 case WM_T_PCH2:
6919 case WM_T_PCH_LPT:
6920 wm_put_swfwhw_semaphore(sc);
6921 break;
6922 default:
6923 		/* Nothing to do */
6925 break;
6926 }
6927
6928 /* get_cfg_done */
6929 wm_get_cfg_done(sc);
6930
6931 /* extra setup */
6932 switch (sc->sc_type) {
6933 case WM_T_82542_2_0:
6934 case WM_T_82542_2_1:
6935 case WM_T_82543:
6936 case WM_T_82544:
6937 case WM_T_82540:
6938 case WM_T_82545:
6939 case WM_T_82545_3:
6940 case WM_T_82546:
6941 case WM_T_82546_3:
6942 case WM_T_82541_2:
6943 case WM_T_82547_2:
6944 case WM_T_82571:
6945 case WM_T_82572:
6946 case WM_T_82573:
6947 case WM_T_82574:
6948 case WM_T_82575:
6949 case WM_T_82576:
6950 case WM_T_82580:
6951 case WM_T_I350:
6952 case WM_T_I354:
6953 case WM_T_I210:
6954 case WM_T_I211:
6955 case WM_T_82583:
6956 case WM_T_80003:
6957 /* null */
6958 break;
6959 case WM_T_82541:
6960 case WM_T_82547:
6961 		/* XXX Configure the activity LED after PHY reset */
6962 break;
6963 case WM_T_ICH8:
6964 case WM_T_ICH9:
6965 case WM_T_ICH10:
6966 case WM_T_PCH:
6967 case WM_T_PCH2:
6968 case WM_T_PCH_LPT:
6969 		/* Allow time for h/w to get to a quiescent state after reset */
6970 delay(10*1000);
6971
6972 if (sc->sc_type == WM_T_PCH)
6973 wm_hv_phy_workaround_ich8lan(sc);
6974
6975 if (sc->sc_type == WM_T_PCH2)
6976 wm_lv_phy_workaround_ich8lan(sc);
6977
6978 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6979 /*
6980 * dummy read to clear the phy wakeup bit after lcd
6981 * reset
6982 */
6983 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6984 }
6985
6986 /*
6987 		 * XXX Configure the LCD with the extended configuration region
6988 * in NVM
6989 */
6990
6991 /* Configure the LCD with the OEM bits in NVM */
6992 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6993 || (sc->sc_type == WM_T_PCH_LPT)) {
6994 /*
6995 * Disable LPLU.
6996 * XXX It seems that 82567 has LPLU, too.
6997 */
6998 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6999 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
7000 reg |= HV_OEM_BITS_ANEGNOW;
7001 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
7002 }
7003 break;
7004 default:
7005 panic("%s: unknown type\n", __func__);
7006 break;
7007 }
7008 }
7009
7010 /*
7011 * wm_get_phy_id_82575:
7012 *
7013 * Return PHY ID. Return -1 if it failed.
7014 */
7015 static int
7016 wm_get_phy_id_82575(struct wm_softc *sc)
7017 {
7018 uint32_t reg;
7019 int phyid = -1;
7020
7021 /* XXX */
7022 if ((sc->sc_flags & WM_F_SGMII) == 0)
7023 return -1;
7024
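7024a 	/*
7024b 	 * When the SGMII PHY is reached over MDIO, its address is
7024c 	 * recorded in MDIC on 82575/82576 and in MDICNFG on 82580
7024d 	 * and later.
7024e 	 */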
7025 if (wm_sgmii_uses_mdio(sc)) {
7026 switch (sc->sc_type) {
7027 case WM_T_82575:
7028 case WM_T_82576:
7029 reg = CSR_READ(sc, WMREG_MDIC);
7030 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
7031 break;
7032 case WM_T_82580:
7033 case WM_T_I350:
7034 case WM_T_I354:
7035 case WM_T_I210:
7036 case WM_T_I211:
7037 reg = CSR_READ(sc, WMREG_MDICNFG);
7038 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
7039 break;
7040 default:
7041 return -1;
7042 }
7043 }
7044
7045 return phyid;
7046 }
7047
7048
7049 /*
7050 * wm_gmii_mediainit:
7051 *
7052 * Initialize media for use on 1000BASE-T devices.
7053 */
7054 static void
7055 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
7056 {
7057 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7058 struct mii_data *mii = &sc->sc_mii;
7059 uint32_t reg;
7060
7061 /* We have GMII. */
7062 sc->sc_flags |= WM_F_HAS_MII;
7063
7064 if (sc->sc_type == WM_T_80003)
7065 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7066 else
7067 sc->sc_tipg = TIPG_1000T_DFLT;
7068
7069 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
7070 if ((sc->sc_type == WM_T_82580)
7071 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
7072 || (sc->sc_type == WM_T_I211)) {
7073 reg = CSR_READ(sc, WMREG_PHPM);
7074 reg &= ~PHPM_GO_LINK_D;
7075 CSR_WRITE(sc, WMREG_PHPM, reg);
7076 }
7077
7078 /*
7079 * Let the chip set speed/duplex on its own based on
7080 * signals from the PHY.
7081 * XXXbouyer - I'm not sure this is right for the 80003,
7082 * the em driver only sets CTRL_SLU here - but it seems to work.
7083 */
7084 sc->sc_ctrl |= CTRL_SLU;
7085 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7086
7087 /* Initialize our media structures and probe the GMII. */
7088 mii->mii_ifp = ifp;
7089
7090 /*
7091 * Determine the PHY access method.
7092 *
7093 * For SGMII, use SGMII specific method.
7094 *
7095 * For some devices, we can determine the PHY access method
7096 * from sc_type.
7097 *
7098 * For ICH and PCH variants, it's difficult to determine the PHY
7099 * access method by sc_type, so use the PCI product ID for some
7100 * devices.
7101 	 * For other ICH8 variants, try igp's method first. If the PHY
7102 	 * can't be detected, fall back to bm's method.
7103 */
7104 switch (prodid) {
7105 case PCI_PRODUCT_INTEL_PCH_M_LM:
7106 case PCI_PRODUCT_INTEL_PCH_M_LC:
7107 /* 82577 */
7108 sc->sc_phytype = WMPHY_82577;
7109 break;
7110 case PCI_PRODUCT_INTEL_PCH_D_DM:
7111 case PCI_PRODUCT_INTEL_PCH_D_DC:
7112 /* 82578 */
7113 sc->sc_phytype = WMPHY_82578;
7114 break;
7115 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
7116 case PCI_PRODUCT_INTEL_PCH2_LV_V:
7117 /* 82579 */
7118 sc->sc_phytype = WMPHY_82579;
7119 break;
7120 case PCI_PRODUCT_INTEL_82801I_BM:
7121 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
7122 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
7123 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
7124 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
7125 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
7126 /* 82567 */
7127 sc->sc_phytype = WMPHY_BM;
7128 mii->mii_readreg = wm_gmii_bm_readreg;
7129 mii->mii_writereg = wm_gmii_bm_writereg;
7130 break;
7131 default:
7132 if (((sc->sc_flags & WM_F_SGMII) != 0)
7133 && !wm_sgmii_uses_mdio(sc)){
7134 /* SGMII */
7135 mii->mii_readreg = wm_sgmii_readreg;
7136 mii->mii_writereg = wm_sgmii_writereg;
7137 } else if (sc->sc_type >= WM_T_80003) {
7138 /* 80003 */
7139 mii->mii_readreg = wm_gmii_i80003_readreg;
7140 mii->mii_writereg = wm_gmii_i80003_writereg;
7141 } else if (sc->sc_type >= WM_T_I210) {
7142 /* I210 and I211 */
7143 mii->mii_readreg = wm_gmii_gs40g_readreg;
7144 mii->mii_writereg = wm_gmii_gs40g_writereg;
7145 } else if (sc->sc_type >= WM_T_82580) {
7146 /* 82580, I350 and I354 */
7147 sc->sc_phytype = WMPHY_82580;
7148 mii->mii_readreg = wm_gmii_82580_readreg;
7149 mii->mii_writereg = wm_gmii_82580_writereg;
7150 } else if (sc->sc_type >= WM_T_82544) {
7151 			/* 82544, 8254[0], 8254[56], 8254[17], 8257[1234] and 82583 */
7152 mii->mii_readreg = wm_gmii_i82544_readreg;
7153 mii->mii_writereg = wm_gmii_i82544_writereg;
7154 } else {
7155 mii->mii_readreg = wm_gmii_i82543_readreg;
7156 mii->mii_writereg = wm_gmii_i82543_writereg;
7157 }
7158 break;
7159 }
7160 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
7161 /* All PCH* use _hv_ */
7162 mii->mii_readreg = wm_gmii_hv_readreg;
7163 mii->mii_writereg = wm_gmii_hv_writereg;
7164 }
7165 mii->mii_statchg = wm_gmii_statchg;
7166
7167 wm_gmii_reset(sc);
7168
7169 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7170 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
7171 wm_gmii_mediastatus);
7172
7173 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
7174 || (sc->sc_type == WM_T_82580)
7175 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
7176 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
7177 if ((sc->sc_flags & WM_F_SGMII) == 0) {
7178 /* Attach only one port */
7179 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
7180 MII_OFFSET_ANY, MIIF_DOPAUSE);
7181 } else {
7182 int i, id;
7183 uint32_t ctrl_ext;
7184
7185 id = wm_get_phy_id_82575(sc);
7186 if (id != -1) {
7187 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
7188 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
7189 }
7190 if ((id == -1)
7191 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
7192 /* Power on sgmii phy if it is disabled */
7193 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7194 CSR_WRITE(sc, WMREG_CTRL_EXT,
7195 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
7196 CSR_WRITE_FLUSH(sc);
7197 delay(300*1000); /* XXX too long */
7198
7199 				/* Try PHY addresses 1 through 7 */
7200 for (i = 1; i < 8; i++)
7201 mii_attach(sc->sc_dev, &sc->sc_mii,
7202 0xffffffff, i, MII_OFFSET_ANY,
7203 MIIF_DOPAUSE);
7204
7205 /* restore previous sfp cage power state */
7206 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7207 }
7208 }
7209 } else {
7210 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7211 MII_OFFSET_ANY, MIIF_DOPAUSE);
7212 }
7213
7214 /*
7215 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
7216 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
7217 */
7218 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
7219 (LIST_FIRST(&mii->mii_phys) == NULL)) {
7220 wm_set_mdio_slow_mode_hv(sc);
7221 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7222 MII_OFFSET_ANY, MIIF_DOPAUSE);
7223 }
7224
7225 /*
7226 * (For ICH8 variants)
7227 * If PHY detection failed, use BM's r/w function and retry.
7228 */
7229 if (LIST_FIRST(&mii->mii_phys) == NULL) {
7230 /* if failed, retry with *_bm_* */
7231 mii->mii_readreg = wm_gmii_bm_readreg;
7232 mii->mii_writereg = wm_gmii_bm_writereg;
7233
7234 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7235 MII_OFFSET_ANY, MIIF_DOPAUSE);
7236 }
7237
7238 if (LIST_FIRST(&mii->mii_phys) == NULL) {
7239 		/* No PHY was found */
7240 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
7241 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
7242 sc->sc_phytype = WMPHY_NONE;
7243 } else {
7244 /*
7245 * PHY Found!
7246 * Check PHY type.
7247 */
7248 uint32_t model;
7249 struct mii_softc *child;
7250
7251 child = LIST_FIRST(&mii->mii_phys);
7252 if (device_is_a(child->mii_dev, "igphy")) {
7253 struct igphy_softc *isc = (struct igphy_softc *)child;
7254
7255 model = isc->sc_mii.mii_mpd_model;
7256 if (model == MII_MODEL_yyINTEL_I82566)
7257 sc->sc_phytype = WMPHY_IGP_3;
7258 }
7259
7260 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
7261 }
7262 }
7263
7264 /*
7265 * wm_gmii_mediachange: [ifmedia interface function]
7266 *
7267 * Set hardware to newly-selected media on a 1000BASE-T device.
7268 */
7269 static int
7270 wm_gmii_mediachange(struct ifnet *ifp)
7271 {
7272 struct wm_softc *sc = ifp->if_softc;
7273 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7274 int rc;
7275
7276 if ((ifp->if_flags & IFF_UP) == 0)
7277 return 0;
7278
7279 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7280 sc->sc_ctrl |= CTRL_SLU;
7281 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7282 || (sc->sc_type > WM_T_82543)) {
7283 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
7284 } else {
7285 sc->sc_ctrl &= ~CTRL_ASDE;
7286 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7287 if (ife->ifm_media & IFM_FDX)
7288 sc->sc_ctrl |= CTRL_FD;
7289 switch (IFM_SUBTYPE(ife->ifm_media)) {
7290 case IFM_10_T:
7291 sc->sc_ctrl |= CTRL_SPEED_10;
7292 break;
7293 case IFM_100_TX:
7294 sc->sc_ctrl |= CTRL_SPEED_100;
7295 break;
7296 case IFM_1000_T:
7297 sc->sc_ctrl |= CTRL_SPEED_1000;
7298 break;
7299 default:
7300 panic("wm_gmii_mediachange: bad media 0x%x",
7301 ife->ifm_media);
7302 }
7303 }
7304 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7305 if (sc->sc_type <= WM_T_82543)
7306 wm_gmii_reset(sc);
7307
7308 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
7309 return 0;
7310 return rc;
7311 }
7312
7313 /*
7314 * wm_gmii_mediastatus: [ifmedia interface function]
7315 *
7316 * Get the current interface media status on a 1000BASE-T device.
7317 */
7318 static void
7319 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7320 {
7321 struct wm_softc *sc = ifp->if_softc;
7322
7323 ether_mediastatus(ifp, ifmr);
7324 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
7325 | sc->sc_flowflags;
7326 }
7327
7328 #define MDI_IO CTRL_SWDPIN(2)
7329 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
7330 #define MDI_CLK CTRL_SWDPIN(3)
7331
7332 static void
7333 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
7334 {
7335 uint32_t i, v;
7336
7337 v = CSR_READ(sc, WMREG_CTRL);
7338 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
7339 v |= MDI_DIR | CTRL_SWDPIO(3);
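7339a 	/*
7339b 	 * Bit-bang the value out MSB first: drive MDI_IO, then pulse
7339c 	 * MDI_CLK with roughly 10us of setup and hold on each edge.
7339d 	 */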
7340
7341 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
7342 if (data & i)
7343 v |= MDI_IO;
7344 else
7345 v &= ~MDI_IO;
7346 CSR_WRITE(sc, WMREG_CTRL, v);
7347 CSR_WRITE_FLUSH(sc);
7348 delay(10);
7349 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7350 CSR_WRITE_FLUSH(sc);
7351 delay(10);
7352 CSR_WRITE(sc, WMREG_CTRL, v);
7353 CSR_WRITE_FLUSH(sc);
7354 delay(10);
7355 }
7356 }
7357
7358 static uint32_t
7359 wm_i82543_mii_recvbits(struct wm_softc *sc)
7360 {
7361 uint32_t v, i, data = 0;
7362
7363 v = CSR_READ(sc, WMREG_CTRL);
7364 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
7365 v |= CTRL_SWDPIO(3);
7366
7367 CSR_WRITE(sc, WMREG_CTRL, v);
7368 CSR_WRITE_FLUSH(sc);
7369 delay(10);
7370 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7371 CSR_WRITE_FLUSH(sc);
7372 delay(10);
7373 CSR_WRITE(sc, WMREG_CTRL, v);
7374 CSR_WRITE_FLUSH(sc);
7375 delay(10);
7376
7377 for (i = 0; i < 16; i++) {
7378 data <<= 1;
7379 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7380 CSR_WRITE_FLUSH(sc);
7381 delay(10);
7382 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
7383 data |= 1;
7384 CSR_WRITE(sc, WMREG_CTRL, v);
7385 CSR_WRITE_FLUSH(sc);
7386 delay(10);
7387 }
7388
7389 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7390 CSR_WRITE_FLUSH(sc);
7391 delay(10);
7392 CSR_WRITE(sc, WMREG_CTRL, v);
7393 CSR_WRITE_FLUSH(sc);
7394 delay(10);
7395
7396 return data;
7397 }
7398
7399 #undef MDI_IO
7400 #undef MDI_DIR
7401 #undef MDI_CLK
7402
7403 /*
7404 * wm_gmii_i82543_readreg: [mii interface function]
7405 *
7406 * Read a PHY register on the GMII (i82543 version).
7407 */
7408 static int
7409 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
7410 {
7411 struct wm_softc *sc = device_private(self);
7412 int rv;
7413
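7413a 	/*
7413b 	 * A Clause 22 MDIO read: a preamble of 32 ones, then the
7413c 	 * 14-bit start/opcode/PHY/register header, followed by a
7413d 	 * 16-bit data phase clocked in from the PHY.
7413e 	 */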
7414 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
7415 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
7416 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
7417 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
7418
7419 DPRINTF(WM_DEBUG_GMII,
7420 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
7421 device_xname(sc->sc_dev), phy, reg, rv));
7422
7423 return rv;
7424 }
7425
7426 /*
7427 * wm_gmii_i82543_writereg: [mii interface function]
7428 *
7429 * Write a PHY register on the GMII (i82543 version).
7430 */
7431 static void
7432 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
7433 {
7434 struct wm_softc *sc = device_private(self);
7435
7436 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
7437 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
7438 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
7439 (MII_COMMAND_START << 30), 32);
7440 }
7441
7442 /*
7443 * wm_gmii_i82544_readreg: [mii interface function]
7444 *
7445 * Read a PHY register on the GMII.
7446 */
7447 static int
7448 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
7449 {
7450 struct wm_softc *sc = device_private(self);
7451 uint32_t mdic = 0;
7452 int i, rv;
7453
7454 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
7455 MDIC_REGADD(reg));
7456
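7456a 	/* Poll for MDIC_READY; each iteration waits 50us. */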
7457 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7458 mdic = CSR_READ(sc, WMREG_MDIC);
7459 if (mdic & MDIC_READY)
7460 break;
7461 delay(50);
7462 }
7463
7464 if ((mdic & MDIC_READY) == 0) {
7465 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
7466 device_xname(sc->sc_dev), phy, reg);
7467 rv = 0;
7468 } else if (mdic & MDIC_E) {
7469 #if 0 /* This is normal if no PHY is present. */
7470 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
7471 device_xname(sc->sc_dev), phy, reg);
7472 #endif
7473 rv = 0;
7474 } else {
7475 rv = MDIC_DATA(mdic);
7476 if (rv == 0xffff)
7477 rv = 0;
7478 }
7479
7480 return rv;
7481 }
7482
7483 /*
7484 * wm_gmii_i82544_writereg: [mii interface function]
7485 *
7486 * Write a PHY register on the GMII.
7487 */
7488 static void
7489 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
7490 {
7491 struct wm_softc *sc = device_private(self);
7492 uint32_t mdic = 0;
7493 int i;
7494
7495 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
7496 MDIC_REGADD(reg) | MDIC_DATA(val));
7497
7498 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7499 mdic = CSR_READ(sc, WMREG_MDIC);
7500 if (mdic & MDIC_READY)
7501 break;
7502 delay(50);
7503 }
7504
7505 if ((mdic & MDIC_READY) == 0)
7506 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
7507 device_xname(sc->sc_dev), phy, reg);
7508 else if (mdic & MDIC_E)
7509 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
7510 device_xname(sc->sc_dev), phy, reg);
7511 }
7512
7513 /*
7514 * wm_gmii_i80003_readreg: [mii interface function]
7515 *
7516 * Read a PHY register on the Kumeran bus.
7517 * This could be handled by the PHY layer if we didn't have to lock the
7518 * resource ...
7519 */
7520 static int
7521 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
7522 {
7523 struct wm_softc *sc = device_private(self);
7524 int sem;
7525 int rv;
7526
7527 if (phy != 1) /* Only one PHY on Kumeran bus */
7528 return 0;
7529
7530 sem = swfwphysem[sc->sc_funcid];
7531 if (wm_get_swfw_semaphore(sc, sem)) {
7532 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7533 __func__);
7534 return 0;
7535 }
7536
7537 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7538 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7539 reg >> GG82563_PAGE_SHIFT);
7540 } else {
7541 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7542 reg >> GG82563_PAGE_SHIFT);
7543 }
7544 /* Wait an extra 200us to work around a bug in the MDIC ready bit */
7545 delay(200);
7546 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7547 delay(200);
7548
7549 wm_put_swfw_semaphore(sc, sem);
7550 return rv;
7551 }
7552
7553 /*
7554 * wm_gmii_i80003_writereg: [mii interface function]
7555 *
7556 * Write a PHY register on the Kumeran bus.
7557 * This could be handled by the PHY layer if we didn't have to lock the
7558 * resource ...
7559 */
7560 static void
7561 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
7562 {
7563 struct wm_softc *sc = device_private(self);
7564 int sem;
7565
7566 if (phy != 1) /* Only one PHY on Kumeran bus */
7567 return;
7568
7569 sem = swfwphysem[sc->sc_funcid];
7570 if (wm_get_swfw_semaphore(sc, sem)) {
7571 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7572 __func__);
7573 return;
7574 }
7575
7576 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7577 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7578 reg >> GG82563_PAGE_SHIFT);
7579 } else {
7580 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7581 reg >> GG82563_PAGE_SHIFT);
7582 }
7583 /* Wait an extra 200us to work around a bug in the MDIC ready bit */
7584 delay(200);
7585 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7586 delay(200);
7587
7588 wm_put_swfw_semaphore(sc, sem);
7589 }
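
/*
 * Illustrative note: for the GG82563 the "reg" argument above encodes
 * both a page and an in-page offset:
 *
 *	reg = (page << GG82563_PAGE_SHIFT) | offset
 *
 * The helpers first write the page number to GG82563_PHY_PAGE_SELECT
 * (or to the _ALT register for high offsets) and then access
 * (reg & GG82563_MAX_REG_ADDRESS) as the offset within that page.
 */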
7590
7591 /*
7592 * wm_gmii_bm_readreg: [mii interface function]
7593 *
7594 * Read a PHY register on the BM PHY.
7595 * This could be handled by the PHY layer if we didn't have to lock the
7596 * resource ...
7597 */
7598 static int
7599 wm_gmii_bm_readreg(device_t self, int phy, int reg)
7600 {
7601 struct wm_softc *sc = device_private(self);
7602 int sem;
7603 int rv;
7604
7605 sem = swfwphysem[sc->sc_funcid];
7606 if (wm_get_swfw_semaphore(sc, sem)) {
7607 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7608 __func__);
7609 return 0;
7610 }
7611
7612 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7613 if (phy == 1)
7614 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7615 reg);
7616 else
7617 wm_gmii_i82544_writereg(self, phy,
7618 GG82563_PHY_PAGE_SELECT,
7619 reg >> GG82563_PAGE_SHIFT);
7620 }
7621
7622 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7623 wm_put_swfw_semaphore(sc, sem);
7624 return rv;
7625 }
7626
7627 /*
7628 * wm_gmii_bm_writereg: [mii interface function]
7629 *
7630 * Write a PHY register on the BM PHY.
7631 * This could be handled by the PHY layer if we didn't have to lock the
7632 * resource ...
7633 */
7634 static void
7635 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7636 {
7637 struct wm_softc *sc = device_private(self);
7638 int sem;
7639
7640 sem = swfwphysem[sc->sc_funcid];
7641 if (wm_get_swfw_semaphore(sc, sem)) {
7642 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7643 __func__);
7644 return;
7645 }
7646
7647 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7648 if (phy == 1)
7649 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7650 reg);
7651 else
7652 wm_gmii_i82544_writereg(self, phy,
7653 GG82563_PHY_PAGE_SELECT,
7654 reg >> GG82563_PAGE_SHIFT);
7655 }
7656
7657 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7658 wm_put_swfw_semaphore(sc, sem);
7659 }
7660
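/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write (rd == 0) a BM PHY wakeup register.
 *	Wakeup registers live on page 800; page 769 is used to enable
 *	the access and is restored afterwards.
 */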
7661 static void
7662 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7663 {
7664 struct wm_softc *sc = device_private(self);
7665 uint16_t regnum = BM_PHY_REG_NUM(offset);
7666 uint16_t wuce;
7667
7668 /* XXX Gig must be disabled for MDIO accesses to page 800 */
7669 if (sc->sc_type == WM_T_PCH) {
7670 /* XXX The e1000 driver does nothing here... why? */
7671 }
7672
7673 /* Set page 769 */
7674 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7675 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7676
7677 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7678
7679 wuce &= ~BM_WUC_HOST_WU_BIT;
7680 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7681 wuce | BM_WUC_ENABLE_BIT);
7682
7683 /* Select page 800 */
7684 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7685 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7686
7687 /* Write page 800 */
7688 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7689
7690 if (rd)
7691 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7692 else
7693 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7694
7695 /* Set page 769 */
7696 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7697 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7698
7699 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7700 }
7701
7702 /*
7703 * wm_gmii_hv_readreg: [mii interface function]
7704 *
7705 * Read a PHY register on the HV (PCH family) PHY.
7706 * This could be handled by the PHY layer if we didn't have to lock the
7707 * resource ...
7708 */
7709 static int
7710 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7711 {
7712 struct wm_softc *sc = device_private(self);
7713 uint16_t page = BM_PHY_REG_PAGE(reg);
7714 uint16_t regnum = BM_PHY_REG_NUM(reg);
7715 uint16_t val;
7716 int rv;
7717
7718 if (wm_get_swfwhw_semaphore(sc)) {
7719 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7720 __func__);
7721 return 0;
7722 }
7723
7724 /* XXX Workaround failure in MDIO access while cable is disconnected */
7725 if (sc->sc_phytype == WMPHY_82577) {
7726 /* XXX must write */
7727 }
7728
7729 /* Page 800 works differently from the rest, so it has its own function */
7730 if (page == BM_WUC_PAGE) {
7731 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
 wm_put_swfwhw_semaphore(sc); /* Don't leak the semaphore */
7732 return val;
7733 }
7734
7735 /*
7736 * Pages lower than 768 work differently from the rest, so they
7737 * would need their own function.
7738 */
7739 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7740 printf("gmii_hv_readreg!!!\n");
 wm_put_swfwhw_semaphore(sc); /* Don't leak the semaphore */
7741 return 0;
7742 }
7743
7744 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7745 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7746 page << BME1000_PAGE_SHIFT);
7747 }
7748
7749 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7750 wm_put_swfwhw_semaphore(sc);
7751 return rv;
7752 }
7753
7754 /*
7755 * wm_gmii_hv_writereg: [mii interface function]
7756 *
7757 * Write a PHY register on the HV (PCH family) PHY.
7758 * This could be handled by the PHY layer if we didn't have to lock the
7759 * resource ...
7760 */
7761 static void
7762 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7763 {
7764 struct wm_softc *sc = device_private(self);
7765 uint16_t page = BM_PHY_REG_PAGE(reg);
7766 uint16_t regnum = BM_PHY_REG_NUM(reg);
7767
7768 if (wm_get_swfwhw_semaphore(sc)) {
7769 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7770 __func__);
7771 return;
7772 }
7773
7774 /* XXX Workaround failure in MDIO access while cable is disconnected */
7775
7776 /* Page 800 works differently from the rest, so it has its own function */
7777 if (page == BM_WUC_PAGE) {
7778 uint16_t tmp;
7779
7780 tmp = val;
7781 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
 wm_put_swfwhw_semaphore(sc); /* Don't leak the semaphore */
7782 return;
7783 }
7784
7785 /*
7786 * Pages lower than 768 work differently from the rest, so they
7787 * would need their own function.
7788 */
7789 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7790 printf("gmii_hv_writereg!!!\n");
 wm_put_swfwhw_semaphore(sc); /* Don't leak the semaphore */
7791 return;
7792 }
7793
7794 /*
7795 * XXX Workaround MDIO accesses being disabled after entering IEEE
7796 * Power Down (whenever bit 11 of the PHY control register is set)
7797 */
7798
7799 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7800 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7801 page << BME1000_PAGE_SHIFT);
7802 }
7803
7804 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7805 wm_put_swfwhw_semaphore(sc);
7806 }
7807
7808 /*
7809 * wm_gmii_82580_readreg: [mii interface function]
7810 *
7811 * Read a PHY register on the 82580 and I350.
7812 * This could be handled by the PHY layer if we didn't have to lock the
7813 * resource ...
7814 */
7815 static int
7816 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7817 {
7818 struct wm_softc *sc = device_private(self);
7819 int sem;
7820 int rv;
7821
7822 sem = swfwphysem[sc->sc_funcid];
7823 if (wm_get_swfw_semaphore(sc, sem)) {
7824 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7825 __func__);
7826 return 0;
7827 }
7828
7829 rv = wm_gmii_i82544_readreg(self, phy, reg);
7830
7831 wm_put_swfw_semaphore(sc, sem);
7832 return rv;
7833 }
7834
7835 /*
7836 * wm_gmii_82580_writereg: [mii interface function]
7837 *
7838 * Write a PHY register on the 82580 and I350.
7839 * This could be handled by the PHY layer if we didn't have to lock the
7840 * resource ...
7841 */
7842 static void
7843 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7844 {
7845 struct wm_softc *sc = device_private(self);
7846 int sem;
7847
7848 sem = swfwphysem[sc->sc_funcid];
7849 if (wm_get_swfw_semaphore(sc, sem)) {
7850 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7851 __func__);
7852 return;
7853 }
7854
7855 wm_gmii_i82544_writereg(self, phy, reg, val);
7856
7857 wm_put_swfw_semaphore(sc, sem);
7858 }
7859
7860 /*
7861 * wm_gmii_gs40g_readreg: [mii interface function]
7862 *
7863 * Read a PHY register on the I210 and I211.
7864 * This could be handled by the PHY layer if we didn't have to lock the
7865 * resource ...
7866 */
7867 static int
7868 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
7869 {
7870 struct wm_softc *sc = device_private(self);
7871 int sem;
7872 int page, offset;
7873 int rv;
7874
7875 /* Acquire semaphore */
7876 sem = swfwphysem[sc->sc_funcid];
7877 if (wm_get_swfw_semaphore(sc, sem)) {
7878 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7879 __func__);
7880 return 0;
7881 }
7882
7883 /* Page select */
7884 page = reg >> GS40G_PAGE_SHIFT;
7885 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
7886
7887 /* Read reg */
7888 offset = reg & GS40G_OFFSET_MASK;
7889 rv = wm_gmii_i82544_readreg(self, phy, offset);
7890
7891 wm_put_swfw_semaphore(sc, sem);
7892 return rv;
7893 }
7894
7895 /*
7896 * wm_gmii_gs40g_writereg: [mii interface function]
7897 *
7898 * Write a PHY register on the I210 and I211.
7899 * This could be handled by the PHY layer if we didn't have to lock the
7900 * resource ...
7901 */
7902 static void
7903 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
7904 {
7905 struct wm_softc *sc = device_private(self);
7906 int sem;
7907 int page, offset;
7908
7909 /* Acquire semaphore */
7910 sem = swfwphysem[sc->sc_funcid];
7911 if (wm_get_swfw_semaphore(sc, sem)) {
7912 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7913 __func__);
7914 return;
7915 }
7916
7917 /* Page select */
7918 page = reg >> GS40G_PAGE_SHIFT;
7919 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
7920
7921 /* Write reg */
7922 offset = reg & GS40G_OFFSET_MASK;
7923 wm_gmii_i82544_writereg(self, phy, offset, val);
7924
7925 /* Release semaphore */
7926 wm_put_swfw_semaphore(sc, sem);
7927 }
7928
7929 /*
7930 * wm_gmii_statchg: [mii interface function]
7931 *
7932 * Callback from MII layer when media changes.
7933 */
7934 static void
7935 wm_gmii_statchg(struct ifnet *ifp)
7936 {
7937 struct wm_softc *sc = ifp->if_softc;
7938 struct mii_data *mii = &sc->sc_mii;
7939
7940 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7941 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7942 sc->sc_fcrtl &= ~FCRTL_XONE;
7943
7944 /*
7945 * Get flow control negotiation result.
7946 */
7947 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7948 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7949 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7950 mii->mii_media_active &= ~IFM_ETH_FMASK;
7951 }
7952
7953 if (sc->sc_flowflags & IFM_FLOW) {
7954 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7955 sc->sc_ctrl |= CTRL_TFCE;
7956 sc->sc_fcrtl |= FCRTL_XONE;
7957 }
7958 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7959 sc->sc_ctrl |= CTRL_RFCE;
7960 }
7961
7962 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7963 DPRINTF(WM_DEBUG_LINK,
7964 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7965 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7966 } else {
7967 DPRINTF(WM_DEBUG_LINK,
7968 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7969 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7970 }
7971
7972 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7973 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7974 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7975 : WMREG_FCRTL, sc->sc_fcrtl);
7976 if (sc->sc_type == WM_T_80003) {
7977 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7978 case IFM_1000_T:
7979 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7980 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7981 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7982 break;
7983 default:
7984 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7985 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7986 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7987 break;
7988 }
7989 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7990 }
7991 }
7992
7993 /*
7994 * wm_kmrn_readreg:
7995 *
7996 * Read a Kumeran register
7997 */
7998 static int
7999 wm_kmrn_readreg(struct wm_softc *sc, int reg)
8000 {
8001 int rv;
8002
8003 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8004 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8005 aprint_error_dev(sc->sc_dev,
8006 "%s: failed to get semaphore\n", __func__);
8007 return 0;
8008 }
8009 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8010 if (wm_get_swfwhw_semaphore(sc)) {
8011 aprint_error_dev(sc->sc_dev,
8012 "%s: failed to get semaphore\n", __func__);
8013 return 0;
8014 }
8015 }
8016
8017 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8018 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8019 KUMCTRLSTA_REN);
8020 CSR_WRITE_FLUSH(sc);
8021 delay(2);
8022
8023 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
8024
8025 if (sc->sc_flags & WM_F_LOCK_SWFW)
8026 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8027 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8028 wm_put_swfwhw_semaphore(sc);
8029
8030 return rv;
8031 }
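
/*
 * Illustrative note: Kumeran accesses go through the single
 * KUMCTRLSTA register.  Matching the code above, a read is started by
 * writing the register offset together with the REN bit:
 *
 *	((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET)
 *	    | KUMCTRLSTA_REN
 *
 * and after a 2us delay the low 16 bits (KUMCTRLSTA_MASK) hold the
 * result.  A write places the data in those bits instead of setting
 * REN, as in wm_kmrn_writereg() below.
 */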
8032
8033 /*
8034 * wm_kmrn_writereg:
8035 *
8036 * Write a Kumeran register
8037 */
8038 static void
8039 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
8040 {
8041
8042 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8043 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8044 aprint_error_dev(sc->sc_dev,
8045 "%s: failed to get semaphore\n", __func__);
8046 return;
8047 }
8048 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8049 if (wm_get_swfwhw_semaphore(sc)) {
8050 aprint_error_dev(sc->sc_dev,
8051 "%s: failed to get semaphore\n", __func__);
8052 return;
8053 }
8054 }
8055
8056 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8057 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8058 (val & KUMCTRLSTA_MASK));
8059
8060 if (sc->sc_flags & WM_F_LOCK_SWFW)
8061 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8062 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8063 wm_put_swfwhw_semaphore(sc);
8064 }
8065
8066 /* SGMII related */
8067
8068 /*
8069 * wm_sgmii_uses_mdio
8070 *
8071 * Check whether the transaction is to the internal PHY or the external
8072 * MDIO interface. Return true if it's MDIO.
8073 */
8074 static bool
8075 wm_sgmii_uses_mdio(struct wm_softc *sc)
8076 {
8077 uint32_t reg;
8078 bool ismdio = false;
8079
8080 switch (sc->sc_type) {
8081 case WM_T_82575:
8082 case WM_T_82576:
8083 reg = CSR_READ(sc, WMREG_MDIC);
8084 ismdio = ((reg & MDIC_DEST) != 0);
8085 break;
8086 case WM_T_82580:
8087 case WM_T_I350:
8088 case WM_T_I354:
8089 case WM_T_I210:
8090 case WM_T_I211:
8091 reg = CSR_READ(sc, WMREG_MDICNFG);
8092 ismdio = ((reg & MDICNFG_DEST) != 0);
8093 break;
8094 default:
8095 break;
8096 }
8097
8098 return ismdio;
8099 }
8100
8101 /*
8102 * wm_sgmii_readreg: [mii interface function]
8103 *
8104 * Read a PHY register on the SGMII
8105 * This could be handled by the PHY layer if we didn't have to lock the
8106 * resource ...
8107 */
8108 static int
8109 wm_sgmii_readreg(device_t self, int phy, int reg)
8110 {
8111 struct wm_softc *sc = device_private(self);
8112 uint32_t i2ccmd;
8113 int i, rv;
8114
8115 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8116 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8117 __func__);
8118 return 0;
8119 }
8120
8121 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8122 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8123 | I2CCMD_OPCODE_READ;
8124 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8125
8126 /* Poll the ready bit */
8127 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8128 delay(50);
8129 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8130 if (i2ccmd & I2CCMD_READY)
8131 break;
8132 }
8133 if ((i2ccmd & I2CCMD_READY) == 0)
8134 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
8135 if ((i2ccmd & I2CCMD_ERROR) != 0)
8136 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8137
8138 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
8139
8140 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8141 return rv;
8142 }
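
/*
 * Illustrative note: the I2CCMD data field is big-endian on the wire,
 * so the swap above restores host order.  For example, if the PHY
 * returns 0x1234, the data field reads as 0x3412 and the swap yields
 * 0x1234 again.  The write path below performs the mirror-image swap
 * before issuing the command.
 */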
8143
8144 /*
8145 * wm_sgmii_writereg: [mii interface function]
8146 *
8147 * Write a PHY register on the SGMII.
8148 * This could be handled by the PHY layer if we didn't have to lock the
8149 * resource ...
8150 */
8151 static void
8152 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
8153 {
8154 struct wm_softc *sc = device_private(self);
8155 uint32_t i2ccmd;
8156 int i;
8157 int val_swapped;
8158
8159 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8160 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8161 __func__);
8162 return;
8163 }
8164 /* Swap the data bytes for the I2C interface */
8165 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
8166 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8167 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8168 | I2CCMD_OPCODE_WRITE | val_swapped;
8169 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8170
8171 /* Poll the ready bit */
8172 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8173 delay(50);
8174 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8175 if (i2ccmd & I2CCMD_READY)
8176 break;
8177 }
8178 if ((i2ccmd & I2CCMD_READY) == 0)
8179 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
8180 if ((i2ccmd & I2CCMD_ERROR) != 0)
8181 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8182
8183 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8184 }
8185
8186 /* TBI related */
8187
8188 /*
8189 * wm_tbi_mediainit:
8190 *
8191 * Initialize media for use on 1000BASE-X devices.
8192 */
8193 static void
8194 wm_tbi_mediainit(struct wm_softc *sc)
8195 {
8196 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8197 const char *sep = "";
8198
8199 if (sc->sc_type < WM_T_82543)
8200 sc->sc_tipg = TIPG_WM_DFLT;
8201 else
8202 sc->sc_tipg = TIPG_LG_DFLT;
8203
8204 sc->sc_tbi_serdes_anegticks = 5;
8205
8206 /* Initialize our media structures */
8207 sc->sc_mii.mii_ifp = ifp;
8208 sc->sc_ethercom.ec_mii = &sc->sc_mii;
8209
8210 if ((sc->sc_type >= WM_T_82575)
8211 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
8212 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
8213 wm_serdes_mediachange, wm_serdes_mediastatus);
8214 else
8215 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
8216 wm_tbi_mediachange, wm_tbi_mediastatus);
8217
8218 /*
8219 * SWD Pins:
8220 *
8221 * 0 = Link LED (output)
8222 * 1 = Loss Of Signal (input)
8223 */
8224 sc->sc_ctrl |= CTRL_SWDPIO(0);
8225
8226 /* XXX Perhaps this is only for TBI */
8227 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
8228 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
8229
8230 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
8231 sc->sc_ctrl &= ~CTRL_LRST;
8232
8233 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8234
8235 #define ADD(ss, mm, dd) \
8236 do { \
8237 aprint_normal("%s%s", sep, ss); \
8238 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
8239 sep = ", "; \
8240 } while (/*CONSTCOND*/0)
8241
8242 aprint_normal_dev(sc->sc_dev, "");
8243
8244 /* Only 82545 is LX */
8245 if (sc->sc_type == WM_T_82545) {
8246 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
8247 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
8248 } else {
8249 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
8250 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
8251 }
8252 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
8253 aprint_normal("\n");
8254
8255 #undef ADD
8256
8257 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
8258 }
8259
8260 /*
8261 * wm_tbi_mediachange: [ifmedia interface function]
8262 *
8263 * Set hardware to newly-selected media on a 1000BASE-X device.
8264 */
8265 static int
8266 wm_tbi_mediachange(struct ifnet *ifp)
8267 {
8268 struct wm_softc *sc = ifp->if_softc;
8269 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8270 uint32_t status;
8271 int i;
8272
8273 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
8274 /* XXX need some work for >= 82571 and < 82575 */
8275 if (sc->sc_type < WM_T_82575)
8276 return 0;
8277 }
8278
8279 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8280 || (sc->sc_type >= WM_T_82575))
8281 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8282
8283 sc->sc_ctrl &= ~CTRL_LRST;
8284 sc->sc_txcw = TXCW_ANE;
8285 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8286 sc->sc_txcw |= TXCW_FD | TXCW_HD;
8287 else if (ife->ifm_media & IFM_FDX)
8288 sc->sc_txcw |= TXCW_FD;
8289 else
8290 sc->sc_txcw |= TXCW_HD;
8291
8292 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
8293 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
8294
8295 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
8296 device_xname(sc->sc_dev), sc->sc_txcw));
8297 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8298 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8299 CSR_WRITE_FLUSH(sc);
8300 delay(1000);
8301
8302 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
8303 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
8304
8305 /*
8306 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
8307 * optics detect a signal; on older chips the sense is inverted.
8308 */
8309 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
8310 /* Have signal; wait for the link to come up. */
8311 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
8312 delay(10000);
8313 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
8314 break;
8315 }
8316
8317 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
8318 device_xname(sc->sc_dev),i));
8319
8320 status = CSR_READ(sc, WMREG_STATUS);
8321 DPRINTF(WM_DEBUG_LINK,
8322 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
8323 device_xname(sc->sc_dev),status, STATUS_LU));
8324 if (status & STATUS_LU) {
8325 /* Link is up. */
8326 DPRINTF(WM_DEBUG_LINK,
8327 ("%s: LINK: set media -> link up %s\n",
8328 device_xname(sc->sc_dev),
8329 (status & STATUS_FD) ? "FDX" : "HDX"));
8330
8331 /*
8332 * NOTE: CTRL will update TFCE and RFCE automatically,
8333 * so we should update sc->sc_ctrl
8334 */
8335 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
8336 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8337 sc->sc_fcrtl &= ~FCRTL_XONE;
8338 if (status & STATUS_FD)
8339 sc->sc_tctl |=
8340 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8341 else
8342 sc->sc_tctl |=
8343 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8344 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
8345 sc->sc_fcrtl |= FCRTL_XONE;
8346 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8347 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
8348 WMREG_OLD_FCRTL : WMREG_FCRTL,
8349 sc->sc_fcrtl);
8350 sc->sc_tbi_linkup = 1;
8351 } else {
8352 if (i == WM_LINKUP_TIMEOUT)
8353 wm_check_for_link(sc);
8354 /* Link is down. */
8355 DPRINTF(WM_DEBUG_LINK,
8356 ("%s: LINK: set media -> link down\n",
8357 device_xname(sc->sc_dev)));
8358 sc->sc_tbi_linkup = 0;
8359 }
8360 } else {
8361 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
8362 device_xname(sc->sc_dev)));
8363 sc->sc_tbi_linkup = 0;
8364 }
8365
8366 wm_tbi_serdes_set_linkled(sc);
8367
8368 return 0;
8369 }
8370
8371 /*
8372 * wm_tbi_mediastatus: [ifmedia interface function]
8373 *
8374 * Get the current interface media status on a 1000BASE-X device.
8375 */
8376 static void
8377 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8378 {
8379 struct wm_softc *sc = ifp->if_softc;
8380 uint32_t ctrl, status;
8381
8382 ifmr->ifm_status = IFM_AVALID;
8383 ifmr->ifm_active = IFM_ETHER;
8384
8385 status = CSR_READ(sc, WMREG_STATUS);
8386 if ((status & STATUS_LU) == 0) {
8387 ifmr->ifm_active |= IFM_NONE;
8388 return;
8389 }
8390
8391 ifmr->ifm_status |= IFM_ACTIVE;
8392 /* Only 82545 is LX */
8393 if (sc->sc_type == WM_T_82545)
8394 ifmr->ifm_active |= IFM_1000_LX;
8395 else
8396 ifmr->ifm_active |= IFM_1000_SX;
8397 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
8398 ifmr->ifm_active |= IFM_FDX;
8399 else
8400 ifmr->ifm_active |= IFM_HDX;
8401 ctrl = CSR_READ(sc, WMREG_CTRL);
8402 if (ctrl & CTRL_RFCE)
8403 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
8404 if (ctrl & CTRL_TFCE)
8405 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
8406 }
8407
8408 /* XXX TBI only */
8409 static int
8410 wm_check_for_link(struct wm_softc *sc)
8411 {
8412 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8413 uint32_t rxcw;
8414 uint32_t ctrl;
8415 uint32_t status;
8416 uint32_t sig;
8417
8418 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
8419 /* XXX need some work for >= 82571 */
8420 if (sc->sc_type >= WM_T_82571) {
8421 sc->sc_tbi_linkup = 1;
8422 return 0;
8423 }
8424 }
8425
8426 rxcw = CSR_READ(sc, WMREG_RXCW);
8427 ctrl = CSR_READ(sc, WMREG_CTRL);
8428 status = CSR_READ(sc, WMREG_STATUS);
8429
8430 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8431
8432 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8433 device_xname(sc->sc_dev), __func__,
8434 ((ctrl & CTRL_SWDPIN(1)) == sig),
8435 ((status & STATUS_LU) != 0),
8436 ((rxcw & RXCW_C) != 0)
8437 ));
8438
8439 /*
8440 * SWDPIN LU RXCW
8441 * 0 0 0
8442 * 0 0 1 (should not happen)
8443 * 0 1 0 (should not happen)
8444 * 0 1 1 (should not happen)
8445 * 1 0 0 Disable autonego and force linkup
8446 * 1 0 1 got /C/ but not linkup yet
8447 * 1 1 0 (linkup)
8448 * 1 1 1 If IFM_AUTO, back to autonego
8449 *
8450 */
8451 if (((ctrl & CTRL_SWDPIN(1)) == sig)
8452 && ((status & STATUS_LU) == 0)
8453 && ((rxcw & RXCW_C) == 0)) {
8454 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8455 __func__));
8456 sc->sc_tbi_linkup = 0;
8457 /* Disable auto-negotiation in the TXCW register */
8458 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8459
8460 /*
8461 * Force link-up and also force full-duplex.
8462 *
8463 * NOTE: the hardware updates TFCE and RFCE in CTRL
8464 * automatically, so we should update sc->sc_ctrl.
8465 */
8466 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8467 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8468 } else if (((status & STATUS_LU) != 0)
8469 && ((rxcw & RXCW_C) != 0)
8470 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8471 sc->sc_tbi_linkup = 1;
8472 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8473 __func__));
8474 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8475 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8476 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8477 && ((rxcw & RXCW_C) != 0)) {
8478 DPRINTF(WM_DEBUG_LINK, ("/C/"));
8479 } else {
8480 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8481 status));
8482 }
8483
8484 return 0;
8485 }
8486
8487 /*
8488 * wm_tbi_tick:
8489 *
8490 * Check the link on TBI devices.
8491 * This function acts as mii_tick().
8492 */
8493 static void
8494 wm_tbi_tick(struct wm_softc *sc)
8495 {
8496 struct mii_data *mii = &sc->sc_mii;
8497 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8498 uint32_t status;
8499
8500 KASSERT(WM_TX_LOCKED(sc));
8501
8502 status = CSR_READ(sc, WMREG_STATUS);
8503
8504 /* XXX is this needed? */
8505 (void)CSR_READ(sc, WMREG_RXCW);
8506 (void)CSR_READ(sc, WMREG_CTRL);
8507
8508 /* set link status */
8509 if ((status & STATUS_LU) == 0) {
8510 DPRINTF(WM_DEBUG_LINK,
8511 ("%s: LINK: checklink -> down\n",
8512 device_xname(sc->sc_dev)));
8513 sc->sc_tbi_linkup = 0;
8514 } else if (sc->sc_tbi_linkup == 0) {
8515 DPRINTF(WM_DEBUG_LINK,
8516 ("%s: LINK: checklink -> up %s\n",
8517 device_xname(sc->sc_dev),
8518 (status & STATUS_FD) ? "FDX" : "HDX"));
8519 sc->sc_tbi_linkup = 1;
8520 sc->sc_tbi_serdes_ticks = 0;
8521 }
8522
8523 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
8524 goto setled;
8525
8526 if ((status & STATUS_LU) == 0) {
8527 sc->sc_tbi_linkup = 0;
8528 /* If the timer expired, retry autonegotiation */
8529 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8530 && (++sc->sc_tbi_serdes_ticks
8531 >= sc->sc_tbi_serdes_anegticks)) {
8532 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8533 sc->sc_tbi_serdes_ticks = 0;
8534 /*
8535 * Reset the link, and let autonegotiation do
8536 * its thing
8537 */
8538 sc->sc_ctrl |= CTRL_LRST;
8539 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8540 CSR_WRITE_FLUSH(sc);
8541 delay(1000);
8542 sc->sc_ctrl &= ~CTRL_LRST;
8543 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8544 CSR_WRITE_FLUSH(sc);
8545 delay(1000);
8546 CSR_WRITE(sc, WMREG_TXCW,
8547 sc->sc_txcw & ~TXCW_ANE);
8548 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8549 }
8550 }
8551
8552 setled:
8553 wm_tbi_serdes_set_linkled(sc);
8554 }
8555
8556 /* SERDES related */
8557 static void
8558 wm_serdes_power_up_link_82575(struct wm_softc *sc)
8559 {
8560 uint32_t reg;
8561
8562 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
8563 && ((sc->sc_flags & WM_F_SGMII) == 0))
8564 return;
8565
8566 reg = CSR_READ(sc, WMREG_PCS_CFG);
8567 reg |= PCS_CFG_PCS_EN;
8568 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
8569
8570 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8571 reg &= ~CTRL_EXT_SWDPIN(3);
8572 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8573 CSR_WRITE_FLUSH(sc);
8574 }
8575
8576 static int
8577 wm_serdes_mediachange(struct ifnet *ifp)
8578 {
8579 struct wm_softc *sc = ifp->if_softc;
8580 bool pcs_autoneg = true; /* XXX */
8581 uint32_t ctrl_ext, pcs_lctl, reg;
8582
8583 /* XXX Currently, this function is not called on 8257[12] */
8584 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8585 || (sc->sc_type >= WM_T_82575))
8586 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8587
8588 wm_serdes_power_up_link_82575(sc);
8589
8590 sc->sc_ctrl |= CTRL_SLU;
8591
8592 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
8593 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
8594
8595 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8596 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
8597 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
8598 case CTRL_EXT_LINK_MODE_SGMII:
8599 pcs_autoneg = true;
8600 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
8601 break;
8602 case CTRL_EXT_LINK_MODE_1000KX:
8603 pcs_autoneg = false;
8604 /* FALLTHROUGH */
8605 default:
8606 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
8607 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
8608 pcs_autoneg = false;
8609 }
8610 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
8611 | CTRL_FRCFDX;
8612 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
8613 }
8614 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8615
8616 if (pcs_autoneg) {
8617 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
8618 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
8619
8620 reg = CSR_READ(sc, WMREG_PCS_ANADV);
8621 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
8622 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
8623 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
8624 } else
8625 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
8626
8627 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
8628
8629
8630 return 0;
8631 }
8632
8633 static void
8634 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8635 {
8636 struct wm_softc *sc = ifp->if_softc;
8637 struct mii_data *mii = &sc->sc_mii;
8638 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8639 uint32_t pcs_adv, pcs_lpab, reg;
8640
8641 ifmr->ifm_status = IFM_AVALID;
8642 ifmr->ifm_active = IFM_ETHER;
8643
8644 /* Check PCS */
8645 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8646 if ((reg & PCS_LSTS_LINKOK) == 0) {
8647 ifmr->ifm_active |= IFM_NONE;
8648 sc->sc_tbi_linkup = 0;
8649 goto setled;
8650 }
8651
8652 sc->sc_tbi_linkup = 1;
8653 ifmr->ifm_status |= IFM_ACTIVE;
8654 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
8655 if ((reg & PCS_LSTS_FDX) != 0)
8656 ifmr->ifm_active |= IFM_FDX;
8657 else
8658 ifmr->ifm_active |= IFM_HDX;
8659 mii->mii_media_active &= ~IFM_ETH_FMASK;
8660 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
8661 /* Check flow */
8662 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8663 if ((reg & PCS_LSTS_AN_COMP) == 0) {
8664 printf("XXX LINKOK but not ACOMP\n");
8665 goto setled;
8666 }
8667 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
8668 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
8669 printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
8670 if ((pcs_adv & TXCW_SYM_PAUSE)
8671 && (pcs_lpab & TXCW_SYM_PAUSE)) {
8672 mii->mii_media_active |= IFM_FLOW
8673 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
8674 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
8675 && (pcs_adv & TXCW_ASYM_PAUSE)
8676 && (pcs_lpab & TXCW_SYM_PAUSE)
8677 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8678 mii->mii_media_active |= IFM_FLOW
8679 | IFM_ETH_TXPAUSE;
8680 } else if ((pcs_adv & TXCW_SYM_PAUSE)
8681 && (pcs_adv & TXCW_ASYM_PAUSE)
8682 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
8683 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8684 mii->mii_media_active |= IFM_FLOW
8685 | IFM_ETH_RXPAUSE;
8686 } else {
8687 }
8688 }
8689 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8690 | (mii->mii_media_active & IFM_ETH_FMASK);
8691 setled:
8692 wm_tbi_serdes_set_linkled(sc);
8693 }
8694
8695 /*
8696 * wm_serdes_tick:
8697 *
8698 * Check the link on serdes devices.
8699 */
8700 static void
8701 wm_serdes_tick(struct wm_softc *sc)
8702 {
8703 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8704 struct mii_data *mii = &sc->sc_mii;
8705 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8706 uint32_t reg;
8707
8708 KASSERT(WM_TX_LOCKED(sc));
8709
8710 mii->mii_media_status = IFM_AVALID;
8711 mii->mii_media_active = IFM_ETHER;
8712
8713 /* Check PCS */
8714 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8715 if ((reg & PCS_LSTS_LINKOK) != 0) {
8716 mii->mii_media_status |= IFM_ACTIVE;
8717 sc->sc_tbi_linkup = 1;
8718 sc->sc_tbi_serdes_ticks = 0;
8719 mii->mii_media_active |= IFM_1000_SX; /* XXX */
8720 if ((reg & PCS_LSTS_FDX) != 0)
8721 mii->mii_media_active |= IFM_FDX;
8722 else
8723 mii->mii_media_active |= IFM_HDX;
8724 } else {
8725 mii->mii_media_status |= IFM_NONE;
8726 sc->sc_tbi_linkup = 0;
8727 /* If the timer expired, retry autonegotiation */
8728 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8729 && (++sc->sc_tbi_serdes_ticks
8730 >= sc->sc_tbi_serdes_anegticks)) {
8731 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8732 sc->sc_tbi_serdes_ticks = 0;
8733 /* XXX */
8734 wm_serdes_mediachange(ifp);
8735 }
8736 }
8737
8738 wm_tbi_serdes_set_linkled(sc);
8739 }
8740
8741 /* SFP related */
8742
8743 static int
8744 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
8745 {
8746 uint32_t i2ccmd;
8747 int i;
8748
8749 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
8750 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8751
8752 /* Poll the ready bit */
8753 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8754 delay(50);
8755 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8756 if (i2ccmd & I2CCMD_READY)
8757 break;
8758 }
8759 if ((i2ccmd & I2CCMD_READY) == 0)
8760 return -1;
8761 if ((i2ccmd & I2CCMD_ERROR) != 0)
8762 return -1;
8763
8764 *data = i2ccmd & 0x00ff;
8765
8766 return 0;
8767 }
8768
8769 static uint32_t
8770 wm_sfp_get_media_type(struct wm_softc *sc)
8771 {
8772 uint32_t ctrl_ext;
8773 uint8_t val = 0;
8774 int timeout = 3;
8775 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
8776 int rv = -1;
8777
8778 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8779 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
8780 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
8781 CSR_WRITE_FLUSH(sc);
8782
8783 /* Read SFP module data */
8784 while (timeout) {
8785 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
8786 if (rv == 0)
8787 break;
8788 delay(100*1000); /* XXX too big */
8789 timeout--;
8790 }
8791 if (rv != 0)
8792 goto out;
8793 switch (val) {
8794 case SFF_SFP_ID_SFF:
8795 aprint_normal_dev(sc->sc_dev,
8796 "Module/Connector soldered to board\n");
8797 break;
8798 case SFF_SFP_ID_SFP:
8799 aprint_normal_dev(sc->sc_dev, "SFP\n");
8800 break;
8801 case SFF_SFP_ID_UNKNOWN:
8802 goto out;
8803 default:
8804 break;
8805 }
8806
8807 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
8808 if (rv != 0) {
8809 goto out;
8810 }
8811
8812 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
8813 mediatype = WM_MEDIATYPE_SERDES;
8814 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
8815 sc->sc_flags |= WM_F_SGMII;
8816 mediatype = WM_MEDIATYPE_COPPER;
8817 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
8818 sc->sc_flags |= WM_F_SGMII;
8819 mediatype = WM_MEDIATYPE_SERDES;
8820 }
8821
8822 out:
8823 /* Restore I2C interface setting */
8824 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8825
8826 return mediatype;
8827 }

8828 /*
8829 * NVM related.
8830 * Microwire, SPI (w/wo EERD) and Flash.
8831 */
8832
8833 /* Both spi and uwire */
8834
8835 /*
8836 * wm_eeprom_sendbits:
8837 *
8838 * Send a series of bits to the EEPROM.
8839 */
8840 static void
8841 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
8842 {
8843 uint32_t reg;
8844 int x;
8845
8846 reg = CSR_READ(sc, WMREG_EECD);
8847
8848 for (x = nbits; x > 0; x--) {
8849 if (bits & (1U << (x - 1)))
8850 reg |= EECD_DI;
8851 else
8852 reg &= ~EECD_DI;
8853 CSR_WRITE(sc, WMREG_EECD, reg);
8854 CSR_WRITE_FLUSH(sc);
8855 delay(2);
8856 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8857 CSR_WRITE_FLUSH(sc);
8858 delay(2);
8859 CSR_WRITE(sc, WMREG_EECD, reg);
8860 CSR_WRITE_FLUSH(sc);
8861 delay(2);
8862 }
8863 }
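
/*
 * Illustrative note: this is the usual serial-EEPROM bit-bang timing.
 * For each bit, DI is set up while SK is low, SK is raised so the part
 * samples DI on the rising edge, then SK is dropped again, with ~2us
 * between transitions.  The receive path below mirrors this, sampling
 * DO after raising SK.
 */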
8864
8865 /*
8866 * wm_eeprom_recvbits:
8867 *
8868 * Receive a series of bits from the EEPROM.
8869 */
8870 static void
8871 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
8872 {
8873 uint32_t reg, val;
8874 int x;
8875
8876 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
8877
8878 val = 0;
8879 for (x = nbits; x > 0; x--) {
8880 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8881 CSR_WRITE_FLUSH(sc);
8882 delay(2);
8883 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
8884 val |= (1U << (x - 1));
8885 CSR_WRITE(sc, WMREG_EECD, reg);
8886 CSR_WRITE_FLUSH(sc);
8887 delay(2);
8888 }
8889 *valp = val;
8890 }
8891
8892 /* Microwire */
8893
8894 /*
8895 * wm_nvm_read_uwire:
8896 *
8897 * Read a word from the EEPROM using the MicroWire protocol.
8898 */
8899 static int
8900 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8901 {
8902 uint32_t reg, val;
8903 int i;
8904
8905 for (i = 0; i < wordcnt; i++) {
8906 /* Clear SK and DI. */
8907 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
8908 CSR_WRITE(sc, WMREG_EECD, reg);
8909
8910 /*
8911 * XXX: workaround for a bug in qemu-0.12.x and prior
8912 * and Xen.
8913 *
8914 * We use this workaround only for 82540 because qemu's
8915 * e1000 acts as an 82540.
8916 */
8917 if (sc->sc_type == WM_T_82540) {
8918 reg |= EECD_SK;
8919 CSR_WRITE(sc, WMREG_EECD, reg);
8920 reg &= ~EECD_SK;
8921 CSR_WRITE(sc, WMREG_EECD, reg);
8922 CSR_WRITE_FLUSH(sc);
8923 delay(2);
8924 }
8925 /* XXX: end of workaround */
8926
8927 /* Set CHIP SELECT. */
8928 reg |= EECD_CS;
8929 CSR_WRITE(sc, WMREG_EECD, reg);
8930 CSR_WRITE_FLUSH(sc);
8931 delay(2);
8932
8933 /* Shift in the READ command. */
8934 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
8935
8936 /* Shift in address. */
8937 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
8938
8939 /* Shift out the data. */
8940 wm_eeprom_recvbits(sc, &val, 16);
8941 data[i] = val & 0xffff;
8942
8943 /* Clear CHIP SELECT. */
8944 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
8945 CSR_WRITE(sc, WMREG_EECD, reg);
8946 CSR_WRITE_FLUSH(sc);
8947 delay(2);
8948 }
8949
8950 return 0;
8951 }
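
#if 0
/*
 * Usage sketch (illustrative only, not compiled): fetch the three
 * words of the primary MAC address via the MicroWire path.  Callers
 * normally go through wm_nvm_read(), which dispatches on NVM type.
 */
static void
wm_uwire_example(struct wm_softc *sc)
{
	uint16_t myea[3];

	if (wm_nvm_read_uwire(sc, NVM_OFF_MACADDR, 3, myea) == 0)
		printf("%s: EA word 0 = 0x%04x\n",
		    device_xname(sc->sc_dev), myea[0]);
}
#endif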
8952
8953 /* SPI */
8954
8955 /*
8956 * Set SPI and FLASH related information from the EECD register.
8957 * For 82541 and 82547, the word size is taken from EEPROM.
8958 */
8959 static int
8960 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
8961 {
8962 int size;
8963 uint32_t reg;
8964 uint16_t data;
8965
8966 reg = CSR_READ(sc, WMREG_EECD);
8967 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
8968
8969 /* Read the size of NVM from EECD by default */
8970 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8971 switch (sc->sc_type) {
8972 case WM_T_82541:
8973 case WM_T_82541_2:
8974 case WM_T_82547:
8975 case WM_T_82547_2:
8976 /* Set dummy value to access EEPROM */
8977 sc->sc_nvm_wordsize = 64;
8978 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
8979 reg = data;
8980 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8981 if (size == 0)
8982 size = 6; /* 64 word size */
8983 else
8984 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
8985 break;
8986 case WM_T_80003:
8987 case WM_T_82571:
8988 case WM_T_82572:
8989 case WM_T_82573: /* SPI case */
8990 case WM_T_82574: /* SPI case */
8991 case WM_T_82583: /* SPI case */
8992 size += NVM_WORD_SIZE_BASE_SHIFT;
8993 if (size > 14)
8994 size = 14;
8995 break;
8996 case WM_T_82575:
8997 case WM_T_82576:
8998 case WM_T_82580:
8999 case WM_T_I350:
9000 case WM_T_I354:
9001 case WM_T_I210:
9002 case WM_T_I211:
9003 size += NVM_WORD_SIZE_BASE_SHIFT;
9004 if (size > 15)
9005 size = 15;
9006 break;
9007 default:
9008 aprint_error_dev(sc->sc_dev,
9009 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
9010 return -1;
9011 break;
9012 }
9013
9014 sc->sc_nvm_wordsize = 1 << size;
9015
9016 return 0;
9017 }
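
/*
 * Worked example (illustrative; assumes NVM_WORD_SIZE_BASE_SHIFT is 6,
 * as in the Intel shared code): on an 82571 whose EECD size field
 * reads 2, size becomes 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256
 * words.  On an 82541 whose EEPROM size word reads 0, the fallback of
 * size = 6 yields the minimum 64-word part.
 */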
9018
9019 /*
9020 * wm_nvm_ready_spi:
9021 *
9022 * Wait for a SPI EEPROM to be ready for commands.
9023 */
9024 static int
9025 wm_nvm_ready_spi(struct wm_softc *sc)
9026 {
9027 uint32_t val;
9028 int usec;
9029
9030 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
9031 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
9032 wm_eeprom_recvbits(sc, &val, 8);
9033 if ((val & SPI_SR_RDY) == 0)
9034 break;
9035 }
9036 if (usec >= SPI_MAX_RETRIES) {
9037 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
9038 return 1;
9039 }
9040 return 0;
9041 }
9042
9043 /*
9044 * wm_nvm_read_spi:
9045 *
9046 * Read a word from the EEPROM using the SPI protocol.
9047 */
9048 static int
9049 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9050 {
9051 uint32_t reg, val;
9052 int i;
9053 uint8_t opc;
9054
9055 /* Clear SK and CS. */
9056 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
9057 CSR_WRITE(sc, WMREG_EECD, reg);
9058 CSR_WRITE_FLUSH(sc);
9059 delay(2);
9060
9061 if (wm_nvm_ready_spi(sc))
9062 return 1;
9063
9064 /* Toggle CS to flush commands. */
9065 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
9066 CSR_WRITE_FLUSH(sc);
9067 delay(2);
9068 CSR_WRITE(sc, WMREG_EECD, reg);
9069 CSR_WRITE_FLUSH(sc);
9070 delay(2);
9071
9072 opc = SPI_OPC_READ;
9073 if (sc->sc_nvm_addrbits == 8 && word >= 128)
9074 opc |= SPI_OPC_A8;
9075
9076 wm_eeprom_sendbits(sc, opc, 8);
9077 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
9078
9079 for (i = 0; i < wordcnt; i++) {
9080 wm_eeprom_recvbits(sc, &val, 16);
9081 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
9082 }
9083
9084 /* Raise CS and clear SK. */
9085 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
9086 CSR_WRITE(sc, WMREG_EECD, reg);
9087 CSR_WRITE_FLUSH(sc);
9088 delay(2);
9089
9090 return 0;
9091 }
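
/*
 * Illustrative note: small SPI parts with 8 address bits can still
 * hold 512 bytes; the SPI_OPC_A8 bit OR'd into the opcode above
 * supplies the ninth address bit for words at 128 and above (byte
 * addresses 256+), which is the usual 25xx-style EEPROM convention.
 */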
9092
9093 /* Using with EERD */
9094
9095 static int
9096 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
9097 {
9098 uint32_t attempts = 100000;
9099 uint32_t i, reg = 0;
9100 int32_t done = -1;
9101
9102 for (i = 0; i < attempts; i++) {
9103 reg = CSR_READ(sc, rw);
9104
9105 if (reg & EERD_DONE) {
9106 done = 0;
9107 break;
9108 }
9109 delay(5);
9110 }
9111
9112 return done;
9113 }
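
/*
 * Illustrative note: with 100000 attempts at 5us apiece, the loop
 * above gives the EERD/EEWR done bit roughly half a second to assert
 * before reporting failure.
 */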
9114
9115 static int
9116 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
9117 uint16_t *data)
9118 {
9119 int i, eerd = 0;
9120 int error = 0;
9121
9122 for (i = 0; i < wordcnt; i++) {
9123 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
9124
9125 CSR_WRITE(sc, WMREG_EERD, eerd);
9126 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
9127 if (error != 0)
9128 break;
9129
9130 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
9131 }
9132
9133 return error;
9134 }
9135
9136 /* Flash */
9137
9138 static int
9139 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
9140 {
9141 uint32_t eecd;
9142 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
9143 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
9144 uint8_t sig_byte = 0;
9145
9146 switch (sc->sc_type) {
9147 case WM_T_ICH8:
9148 case WM_T_ICH9:
9149 eecd = CSR_READ(sc, WMREG_EECD);
9150 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
9151 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
9152 return 0;
9153 }
9154 /* FALLTHROUGH */
9155 default:
9156 /* Default to 0 */
9157 *bank = 0;
9158
9159 /* Check bank 0 */
9160 wm_read_ich8_byte(sc, act_offset, &sig_byte);
9161 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9162 *bank = 0;
9163 return 0;
9164 }
9165
9166 /* Check bank 1 */
9167 wm_read_ich8_byte(sc, act_offset + bank1_offset,
9168 &sig_byte);
9169 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9170 *bank = 1;
9171 return 0;
9172 }
9173 }
9174
9175 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
9176 device_xname(sc->sc_dev)));
9177 return -1;
9178 }
9179
9180 /******************************************************************************
9181 * This function does initial flash setup so that a new read/write/erase cycle
9182 * can be started.
9183 *
9184 * sc - The pointer to the hw structure
9185 ****************************************************************************/
9186 static int32_t
9187 wm_ich8_cycle_init(struct wm_softc *sc)
9188 {
9189 uint16_t hsfsts;
9190 int32_t error = 1;
9191 int32_t i = 0;
9192
9193 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9194
9195 /* Check that the Flash Descriptor Valid bit is set in HW status */
9196 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
9197 return error;
9198 }
9199
9200 /* Clear FCERR in Hw status by writing 1 */
9201 /* Clear DAEL in Hw status by writing a 1 */
9202 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
9203
9204 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9205
9206 /*
9207 * Either we should have a hardware SPI cycle-in-progress bit to
9208 * check against in order to start a new cycle, or the FDONE bit
9209 * should be changed in the hardware so that it is 1 after hardware
9210 * reset, which could then be used to tell whether a cycle is in
9211 * progress or has been completed. We should also have some software
9212 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
9213 * so that access to those bits by two threads can be serialized,
9214 * or some way to keep two threads from starting a cycle at once.
9215 */
9216
9217 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
9218 /*
9219 * There is no cycle running at present, so we can start a
9220 * cycle
9221 */
9222
9223 /* Begin by setting Flash Cycle Done. */
9224 hsfsts |= HSFSTS_DONE;
9225 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9226 error = 0;
9227 } else {
9228 /*
9229 * Otherwise poll for some time so the current cycle has a
9230 * chance to end before giving up.
9231 */
9232 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
9233 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9234 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
9235 error = 0;
9236 break;
9237 }
9238 delay(1);
9239 }
9240 if (error == 0) {
9241 /*
9242 * The previous cycle completed within the timeout; now
9243 * set the Flash Cycle Done bit.
9244 */
9245 hsfsts |= HSFSTS_DONE;
9246 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9247 }
9248 }
9249 return error;
9250 }
9251
9252 /******************************************************************************
9253 * This function starts a flash cycle and waits for its completion
9254 *
9255 * sc - The pointer to the hw structure
9256 ****************************************************************************/
9257 static int32_t
9258 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
9259 {
9260 uint16_t hsflctl;
9261 uint16_t hsfsts;
9262 int32_t error = 1;
9263 uint32_t i = 0;
9264
9265 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
9266 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
9267 hsflctl |= HSFCTL_GO;
9268 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
9269
9270 /* Wait till FDONE bit is set to 1 */
9271 do {
9272 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9273 if (hsfsts & HSFSTS_DONE)
9274 break;
9275 delay(1);
9276 i++;
9277 } while (i < timeout);
9278 if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
9279 error = 0;
9280
9281 return error;
9282 }
9283
9284 /******************************************************************************
9285 * Reads a byte or word from the NVM using the ICH8 flash access registers.
9286 *
9287 * sc - The pointer to the hw structure
9288 * index - The index of the byte or word to read.
9289 * size - Size of data to read, 1=byte 2=word
9290 * data - Pointer to the word to store the value read.
9291 *****************************************************************************/
9292 static int32_t
9293 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
9294 uint32_t size, uint16_t *data)
9295 {
9296 uint16_t hsfsts;
9297 uint16_t hsflctl;
9298 uint32_t flash_linear_address;
9299 uint32_t flash_data = 0;
9300 int32_t error = 1;
9301 int32_t count = 0;
9302
9303 if (size < 1 || size > 2 || data == NULL ||
9304 index > ICH_FLASH_LINEAR_ADDR_MASK)
9305 return error;
9306
9307 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
9308 sc->sc_ich8_flash_base;
9309
9310 do {
9311 delay(1);
9312 /* Steps */
9313 error = wm_ich8_cycle_init(sc);
9314 if (error)
9315 break;
9316
9317 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
9318 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
9319 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
9320 & HSFCTL_BCOUNT_MASK;
9321 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
9322 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
9323
9324 /*
9325 * Write the last 24 bits of index into Flash Linear address
9326 * field in Flash Address
9327 */
9328 /* TODO: TBD maybe check the index against the size of flash */
9329
9330 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
9331
9332 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
9333
9334 /*
9335 * Check if FCERR is set to 1. If it is, clear it and try
9336 * the whole sequence a few more times; otherwise read in
9337 * (shift in) the Flash Data0 register, least significant
9338 * byte first.
9339 */
9340 if (error == 0) {
9341 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
9342 if (size == 1)
9343 *data = (uint8_t)(flash_data & 0x000000FF);
9344 else if (size == 2)
9345 *data = (uint16_t)(flash_data & 0x0000FFFF);
9346 break;
9347 } else {
9348 /*
9349 * If we've gotten here, then things are probably
9350 * completely hosed, but if the error condition is
9351 * detected, it won't hurt to give it another try...
9352 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
9353 */
9354 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9355 if (hsfsts & HSFSTS_ERR) {
9356 /* Repeat for some time before giving up. */
9357 continue;
9358 } else if ((hsfsts & HSFSTS_DONE) == 0)
9359 break;
9360 }
9361 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
9362
9363 return error;
9364 }
9365
9366 /******************************************************************************
9367 * Reads a single byte from the NVM using the ICH8 flash access registers.
9368 *
9369 * sc - pointer to wm_hw structure
9370 * index - The index of the byte to read.
9371 * data - Pointer to a byte to store the value read.
9372 *****************************************************************************/
9373 static int32_t
9374 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
9375 {
9376 int32_t status;
9377 uint16_t word = 0;
9378
9379 status = wm_read_ich8_data(sc, index, 1, &word);
9380 if (status == 0)
9381 *data = (uint8_t)word;
9382 else
9383 *data = 0;
9384
9385 return status;
9386 }
9387
9388 /******************************************************************************
9389 * Reads a word from the NVM using the ICH8 flash access registers.
9390 *
9391 * sc - pointer to wm_hw structure
9392 * index - The starting byte index of the word to read.
9393 * data - Pointer to a word to store the value read.
9394 *****************************************************************************/
9395 static int32_t
9396 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
9397 {
9398 int32_t status;
9399
9400 status = wm_read_ich8_data(sc, index, 2, data);
9401 return status;
9402 }
9403
9404 /******************************************************************************
9405 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
9406 * register.
9407 *
9408 * sc - Struct containing variables accessed by shared code
9409 * offset - offset of word in the EEPROM to read
9410 * data - word read from the EEPROM
9411 * words - number of words to read
9412 *****************************************************************************/
9413 static int
9414 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
9415 {
9416 int32_t error = 0;
9417 uint32_t flash_bank = 0;
9418 uint32_t act_offset = 0;
9419 uint32_t bank_offset = 0;
9420 uint16_t word = 0;
9421 uint16_t i = 0;
9422
9423 /*
9424 * We need to know which is the valid flash bank. In the event
9425 * that we didn't allocate eeprom_shadow_ram, we may not be
9426 * managing flash_bank. So it cannot be trusted and needs
9427 * to be updated with each read.
9428 */
9429 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
9430 if (error) {
9431 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
9432 device_xname(sc->sc_dev)));
9433 flash_bank = 0;
9434 }
9435
9436 /*
9437 * Adjust offset appropriately if we're on bank 1 - adjust for word
9438 * size
9439 */
9440 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
9441
9442 error = wm_get_swfwhw_semaphore(sc);
9443 if (error) {
9444 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9445 __func__);
9446 return error;
9447 }
9448
9449 for (i = 0; i < words; i++) {
9450 /* The NVM part needs a byte offset, hence * 2 */
9451 act_offset = bank_offset + ((offset + i) * 2);
9452 error = wm_read_ich8_word(sc, act_offset, &word);
9453 if (error) {
9454 aprint_error_dev(sc->sc_dev,
9455 "%s: failed to read NVM\n", __func__);
9456 break;
9457 }
9458 data[i] = word;
9459 }
9460
9461 wm_put_swfwhw_semaphore(sc);
9462 return error;
9463 }
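
/*
 * Worked example (illustrative, for a hypothetical flash bank size of
 * 0x1000 words): reading NVM word 0x10 from bank 1 uses
 * bank_offset = 1 * (0x1000 * 2) = 0x2000 and
 * act_offset = 0x2000 + 0x10 * 2 = 0x2020, since the flash layer is
 * byte-addressed while NVM offsets are in words.
 */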
9464
9465 /* iNVM */
9466
9467 static int
9468 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
9469 {
9470 int32_t rv = 0;
9471 uint32_t invm_dword;
9472 uint16_t i;
9473 uint8_t record_type, word_address;
9474
9475 for (i = 0; i < INVM_SIZE; i++) {
9476 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
9477 /* Get record type */
9478 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
9479 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
9480 break;
9481 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
9482 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
9483 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
9484 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
9485 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
9486 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
9487 if (word_address == address) {
9488 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
9489 rv = 0;
9490 break;
9491 }
9492 }
9493 }
9494
9495 return rv;
9496 }
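
/*
 * Sketch of the walk above, assuming the usual i210/I211 iNVM layout:
 * the "NVM" is a small array of 32-bit one-time-programmable records,
 * each tagged with a record type. CSR-autoload and RSA-key records are
 * skipped over by their fixed sizes, an uninitialized record ends the
 * walk, and a word-autoload record carries an emulated NVM word
 * address plus 16 bits of data, which is what this function returns.
 */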
9497
9498 static int
9499 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
9500 {
9501 int rv = 0;
9502 int i;
9503
9504 for (i = 0; i < words; i++) {
9505 switch (offset + i) {
9506 case NVM_OFF_MACADDR:
9507 case NVM_OFF_MACADDR1:
9508 case NVM_OFF_MACADDR2:
9509 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
9510 if (rv != 0) {
9511 data[i] = 0xffff;
9512 rv = -1;
9513 }
9514 break;
9515 case NVM_OFF_CFG2:
9516 rv = wm_nvm_read_word_invm(sc, offset, data);
9517 if (rv != 0) {
9518 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
9519 rv = 0;
9520 }
9521 break;
9522 case NVM_OFF_CFG4:
9523 rv = wm_nvm_read_word_invm(sc, offset, data);
9524 if (rv != 0) {
9525 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
9526 rv = 0;
9527 }
9528 break;
9529 case NVM_OFF_LED_1_CFG:
9530 rv = wm_nvm_read_word_invm(sc, offset, data);
9531 if (rv != 0) {
9532 *data = NVM_LED_1_CFG_DEFAULT_I211;
9533 rv = 0;
9534 }
9535 break;
9536 case NVM_OFF_LED_0_2_CFG:
9537 rv = wm_nvm_read_word_invm(sc, offset, data);
9538 if (rv != 0) {
9539 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
9540 rv = 0;
9541 }
9542 break;
9543 case NVM_OFF_ID_LED_SETTINGS:
9544 rv = wm_nvm_read_word_invm(sc, offset, data);
9545 if (rv != 0) {
9546 *data = ID_LED_RESERVED_FFFF;
9547 rv = 0;
9548 }
9549 break;
9550 default:
9551 DPRINTF(WM_DEBUG_NVM,
9552 ("NVM word 0x%02x is not mapped.\n", offset));
9553 *data = NVM_RESERVED_WORD;
9554 break;
9555 }
9556 }
9557
9558 return rv;
9559 }
9560
9561 /* Locking, NVM type detection, checksum validation, version and read */
9562
9563 /*
9564 * wm_nvm_acquire:
9565 *
9566 * Perform the EEPROM handshake required on some chips.
9567 */
9568 static int
9569 wm_nvm_acquire(struct wm_softc *sc)
9570 {
9571 uint32_t reg;
9572 int x;
9573 int ret = 0;
9574
9575 	/* Always succeeds; flash-type NVM takes its lock in the read path */
9576 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9577 return 0;
9578
9579 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
9580 ret = wm_get_swfwhw_semaphore(sc);
9581 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
9582 /* This will also do wm_get_swsm_semaphore() if needed */
9583 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
9584 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
9585 ret = wm_get_swsm_semaphore(sc);
9586 }
9587
9588 if (ret) {
9589 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9590 __func__);
9591 return 1;
9592 }
9593
9594 if (sc->sc_flags & WM_F_LOCK_EECD) {
9595 reg = CSR_READ(sc, WMREG_EECD);
9596
9597 /* Request EEPROM access. */
9598 reg |= EECD_EE_REQ;
9599 CSR_WRITE(sc, WMREG_EECD, reg);
9600
9601 /* ..and wait for it to be granted. */
9602 for (x = 0; x < 1000; x++) {
9603 reg = CSR_READ(sc, WMREG_EECD);
9604 if (reg & EECD_EE_GNT)
9605 break;
9606 delay(5);
9607 }
9608 if ((reg & EECD_EE_GNT) == 0) {
9609 aprint_error_dev(sc->sc_dev,
9610 "could not acquire EEPROM GNT\n");
9611 reg &= ~EECD_EE_REQ;
9612 CSR_WRITE(sc, WMREG_EECD, reg);
9613 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9614 wm_put_swfwhw_semaphore(sc);
9615 if (sc->sc_flags & WM_F_LOCK_SWFW)
9616 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9617 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9618 wm_put_swsm_semaphore(sc);
9619 return 1;
9620 }
9621 }
9622
9623 return 0;
9624 }
9625
9626 /*
9627 * wm_nvm_release:
9628 *
9629 * Release the EEPROM mutex.
9630 */
9631 static void
9632 wm_nvm_release(struct wm_softc *sc)
9633 {
9634 uint32_t reg;
9635
9636 	/* Nothing to do; flash-type NVM releases its lock in the read path */
9637 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9638 return;
9639
9640 if (sc->sc_flags & WM_F_LOCK_EECD) {
9641 reg = CSR_READ(sc, WMREG_EECD);
9642 reg &= ~EECD_EE_REQ;
9643 CSR_WRITE(sc, WMREG_EECD, reg);
9644 }
9645
9646 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9647 wm_put_swfwhw_semaphore(sc);
9648 if (sc->sc_flags & WM_F_LOCK_SWFW)
9649 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9650 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9651 wm_put_swsm_semaphore(sc);
9652 }
9653
9654 static int
9655 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
9656 {
9657 uint32_t eecd = 0;
9658
9659 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
9660 || sc->sc_type == WM_T_82583) {
9661 eecd = CSR_READ(sc, WMREG_EECD);
9662
9663 /* Isolate bits 15 & 16 */
9664 eecd = ((eecd >> 15) & 0x03);
9665
9666 /* If both bits are set, device is Flash type */
9667 if (eecd == 0x03)
9668 return 0;
9669 }
9670 return 1;
9671 }
9672
9673 static int
9674 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
9675 {
9676 uint32_t eec;
9677
9678 eec = CSR_READ(sc, WMREG_EEC);
9679 if ((eec & EEC_FLASH_DETECTED) != 0)
9680 return 1;
9681
9682 return 0;
9683 }
9684
9685 /*
9686 * wm_nvm_validate_checksum
9687 *
9688 * The checksum is defined as the sum of the first 64 (16 bit) words.
9689 */
9690 static int
9691 wm_nvm_validate_checksum(struct wm_softc *sc)
9692 {
9693 uint16_t checksum;
9694 uint16_t eeprom_data;
9695 #ifdef WM_DEBUG
9696 uint16_t csum_wordaddr, valid_checksum;
9697 #endif
9698 int i;
9699
9700 checksum = 0;
9701
9702 /* Don't check for I211 */
9703 if (sc->sc_type == WM_T_I211)
9704 return 0;
9705
9706 #ifdef WM_DEBUG
9707 if (sc->sc_type == WM_T_PCH_LPT) {
9708 csum_wordaddr = NVM_OFF_COMPAT;
9709 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
9710 } else {
9711 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
9712 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
9713 }
9714
9715 /* Dump EEPROM image for debug */
9716 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9717 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9718 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
9719 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
9720 if ((eeprom_data & valid_checksum) == 0) {
9721 DPRINTF(WM_DEBUG_NVM,
9722 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
9723 device_xname(sc->sc_dev), eeprom_data,
9724 valid_checksum));
9725 }
9726 }
9727
9728 if ((wm_debug & WM_DEBUG_NVM) != 0) {
9729 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
9730 for (i = 0; i < NVM_SIZE; i++) {
9731 if (wm_nvm_read(sc, i, 1, &eeprom_data))
9732 printf("XXXX ");
9733 else
9734 printf("%04hx ", eeprom_data);
9735 if (i % 8 == 7)
9736 printf("\n");
9737 }
9738 }
9739
9740 #endif /* WM_DEBUG */
9741
9742 for (i = 0; i < NVM_SIZE; i++) {
9743 if (wm_nvm_read(sc, i, 1, &eeprom_data))
9744 return 1;
9745 checksum += eeprom_data;
9746 }
9747
9748 if (checksum != (uint16_t) NVM_CHECKSUM) {
9749 #ifdef WM_DEBUG
9750 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
9751 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
9752 #endif
9753 }
9754
9755 return 0;
9756 }
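
/*
 * A minimal standalone sketch of the rule above (editorial example,
 * not driver code): an image is considered valid when the 16-bit sum
 * of its first NVM_SIZE words equals NVM_CHECKSUM, so vendors store a
 * pad word that makes the sum come out to that constant.
 */
#if 0
static int
nvm_image_sum_ok(const uint16_t *image)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < NVM_SIZE; i++)
		sum += image[i];	/* sum modulo 2^16 */

	return sum == (uint16_t)NVM_CHECKSUM;
}
#endif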
9757
9758 static void
9759 wm_nvm_version_invm(struct wm_softc *sc)
9760 {
9761 uint32_t dword;
9762
9763 /*
9764 	 * Linux's code to decode the version is very strange, so we don't
9765 	 * follow that algorithm and simply use word 61 as the documentation
9766 	 * describes. Perhaps it's not perfect, though...
9767 *
9768 * Example:
9769 *
9770 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
9771 */
9772 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
9773 dword = __SHIFTOUT(dword, INVM_VER_1);
9774 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
9775 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
9776 }
9777
9778 static void
9779 wm_nvm_version(struct wm_softc *sc)
9780 {
9781 uint16_t major, minor, build, patch;
9782 uint16_t uid0, uid1;
9783 uint16_t nvm_data;
9784 uint16_t off;
9785 bool check_version = false;
9786 bool check_optionrom = false;
9787 bool have_build = false;
9788
9789 /*
9790 * Version format:
9791 *
9792 * XYYZ
9793 * X0YZ
9794 * X0YY
9795 *
9796 * Example:
9797 *
9798 	 * 82571	0x50a2	5.10.2?	(the spec update mentions 5.6-5.10)
9799 	 * 82571	0x50a6	5.10.6?
9800 	 * 82572	0x506a	5.6.10?
9801 	 * 82572EI	0x5069	5.6.9?
9802 	 * 82574L	0x1080	1.8.0?	(the spec update mentions 2.1.4)
9803 	 *		0x2013	2.1.3?
9804 	 * 82583	0x10a0	1.10.0?	(the document says it's the default value)
9805 */
9806 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
9807 switch (sc->sc_type) {
9808 case WM_T_82571:
9809 case WM_T_82572:
9810 case WM_T_82574:
9811 check_version = true;
9812 check_optionrom = true;
9813 have_build = true;
9814 break;
9815 case WM_T_82575:
9816 case WM_T_82576:
9817 case WM_T_82580:
9818 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
9819 check_version = true;
9820 break;
9821 case WM_T_I211:
9822 wm_nvm_version_invm(sc);
9823 goto printver;
9824 case WM_T_I210:
9825 if (!wm_nvm_get_flash_presence_i210(sc)) {
9826 wm_nvm_version_invm(sc);
9827 goto printver;
9828 }
9829 /* FALLTHROUGH */
9830 case WM_T_I350:
9831 case WM_T_I354:
9832 check_version = true;
9833 check_optionrom = true;
9834 break;
9835 default:
9836 return;
9837 }
9838 if (check_version) {
9839 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
9840 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
9841 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
9842 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
9843 build = nvm_data & NVM_BUILD_MASK;
9844 have_build = true;
9845 } else
9846 minor = nvm_data & 0x00ff;
9847
9848 		/* The minor value is BCD; convert it to decimal */
9849 minor = (minor / 16) * 10 + (minor % 16);
9850 sc->sc_nvm_ver_major = major;
9851 sc->sc_nvm_ver_minor = minor;
9852
9853 printver:
9854 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
9855 sc->sc_nvm_ver_minor);
9856 if (have_build)
9857 aprint_verbose(".%d", build);
9858 }
9859 if (check_optionrom) {
9860 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
9861 /* Option ROM Version */
9862 if ((off != 0x0000) && (off != 0xffff)) {
9863 off += NVM_COMBO_VER_OFF;
9864 wm_nvm_read(sc, off + 1, 1, &uid1);
9865 wm_nvm_read(sc, off, 1, &uid0);
9866 if ((uid0 != 0) && (uid0 != 0xffff)
9867 && (uid1 != 0) && (uid1 != 0xffff)) {
9868 /* 16bits */
9869 major = uid0 >> 8;
9870 build = (uid0 << 8) | (uid1 >> 8);
9871 patch = uid1 & 0x00ff;
9872 aprint_verbose(", option ROM Version %d.%d.%d",
9873 major, build, patch);
9874 }
9875 }
9876 }
9877
9878 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
	/* Re-read UID1; the option ROM probe above may have clobbered it */
	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
9879 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
9880 }
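
/*
 * A standalone sketch of the decode above (editorial example; the
 * masks are inferred from the version table in the leading comment):
 * 0x50a2 splits into major 0x5, minor 0x0a and build 0x2, and the
 * BCD-style minor conversion yields 5.10.2; 0x2013 becomes 2.1.3 the
 * same way.
 */
#if 0
static uint32_t
nvm_version_example(void)
{
	uint16_t w = 0x50a2;		/* the NVM_OFF_VERSION word */
	uint16_t major = (w & 0xf000) >> 12;
	uint16_t minor = (w & 0x0ff0) >> 4;
	uint16_t build = w & 0x000f;

	/* The minor value is BCD: 0x10 means decimal 10 */
	minor = (minor / 16) * 10 + (minor % 16);

	/* 5.10.2 */
	return ((uint32_t)major << 16) | (minor << 8) | build;
}
#endif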
9881
9882 /*
9883 * wm_nvm_read:
9884 *
9885 * Read data from the serial EEPROM.
9886 */
9887 static int
9888 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9889 {
9890 int rv;
9891
9892 if (sc->sc_flags & WM_F_EEPROM_INVALID)
9893 return 1;
9894
9895 if (wm_nvm_acquire(sc))
9896 return 1;
9897
9898 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9899 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9900 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
9901 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
9902 else if (sc->sc_flags & WM_F_EEPROM_INVM)
9903 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
9904 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
9905 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
9906 else if (sc->sc_flags & WM_F_EEPROM_SPI)
9907 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
9908 else
9909 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
9910
9911 wm_nvm_release(sc);
9912 return rv;
9913 }
9914
9915 /*
9916 * Hardware semaphores.
9917  * Very complex...
9918 */
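
/*
 * Rough map of the locks that follow: SWSM.SMBI is the host-side
 * semaphore, SWSM.SWESMBI arbitrates between software and firmware,
 * SW_FW_SYNC hands out individual shared resources under those two,
 * and EXTCNFCTR.MDIO_SW_OWNERSHIP covers the ICH/PCH (and, separately,
 * the 82573) variants.
 */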
9919
9920 static int
9921 wm_get_swsm_semaphore(struct wm_softc *sc)
9922 {
9923 int32_t timeout;
9924 uint32_t swsm;
9925
9926 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9927 /* Get the SW semaphore. */
9928 timeout = sc->sc_nvm_wordsize + 1;
9929 while (timeout) {
9930 swsm = CSR_READ(sc, WMREG_SWSM);
9931
9932 if ((swsm & SWSM_SMBI) == 0)
9933 break;
9934
9935 delay(50);
9936 timeout--;
9937 }
9938
9939 if (timeout == 0) {
9940 aprint_error_dev(sc->sc_dev,
9941 "could not acquire SWSM SMBI\n");
9942 return 1;
9943 }
9944 }
9945
9946 /* Get the FW semaphore. */
9947 timeout = sc->sc_nvm_wordsize + 1;
9948 while (timeout) {
9949 swsm = CSR_READ(sc, WMREG_SWSM);
9950 swsm |= SWSM_SWESMBI;
9951 CSR_WRITE(sc, WMREG_SWSM, swsm);
9952 /* If we managed to set the bit we got the semaphore. */
9953 swsm = CSR_READ(sc, WMREG_SWSM);
9954 if (swsm & SWSM_SWESMBI)
9955 break;
9956
9957 delay(50);
9958 timeout--;
9959 }
9960
9961 if (timeout == 0) {
9962 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
9963 /* Release semaphores */
9964 wm_put_swsm_semaphore(sc);
9965 return 1;
9966 }
9967 return 0;
9968 }
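
/*
 * Note on the SMBI poll above: on this hardware, reading SWSM sets
 * SMBI as a side effect when it was clear, so the read that observes
 * a clear bit has itself taken the semaphore; wm_put_swsm_semaphore()
 * then clears both bits to release it.
 */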
9969
9970 static void
9971 wm_put_swsm_semaphore(struct wm_softc *sc)
9972 {
9973 uint32_t swsm;
9974
9975 swsm = CSR_READ(sc, WMREG_SWSM);
9976 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
9977 CSR_WRITE(sc, WMREG_SWSM, swsm);
9978 }
9979
9980 static int
9981 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
9982 {
9983 uint32_t swfw_sync;
9984 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
9985 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
9986 	int timeout;
9987
9988 for (timeout = 0; timeout < 200; timeout++) {
9989 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9990 if (wm_get_swsm_semaphore(sc)) {
9991 aprint_error_dev(sc->sc_dev,
9992 "%s: failed to get semaphore\n",
9993 __func__);
9994 return 1;
9995 }
9996 }
9997 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
9998 if ((swfw_sync & (swmask | fwmask)) == 0) {
9999 swfw_sync |= swmask;
10000 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10001 if (sc->sc_flags & WM_F_LOCK_SWSM)
10002 wm_put_swsm_semaphore(sc);
10003 return 0;
10004 }
10005 if (sc->sc_flags & WM_F_LOCK_SWSM)
10006 wm_put_swsm_semaphore(sc);
10007 delay(5000);
10008 }
10009 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
10010 device_xname(sc->sc_dev), mask, swfw_sync);
10011 return 1;
10012 }
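
/*
 * Sketch of the protocol above: SW_FW_SYNC carries one software bit
 * and one firmware bit per shared resource (the software bits in the
 * low half of the register, the firmware bits in the high half, hence
 * SWFW_SOFT_SHIFT and SWFW_FIRM_SHIFT). A resource is claimed by
 * setting its software bit, but only while neither side holds it, and
 * the read-modify-write itself is serialized by the SWSM semaphore.
 */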
10013
10014 static void
10015 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10016 {
10017 uint32_t swfw_sync;
10018
10019 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10020 while (wm_get_swsm_semaphore(sc) != 0)
10021 continue;
10022 }
10023 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10024 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
10025 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10026 if (sc->sc_flags & WM_F_LOCK_SWSM)
10027 wm_put_swsm_semaphore(sc);
10028 }
10029
10030 static int
10031 wm_get_swfwhw_semaphore(struct wm_softc *sc)
10032 {
10033 uint32_t ext_ctrl;
10034 	int timeout;
10035
10036 for (timeout = 0; timeout < 200; timeout++) {
10037 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10038 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
10039 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10040
10041 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10042 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
10043 return 0;
10044 delay(5000);
10045 }
10046 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
10047 device_xname(sc->sc_dev), ext_ctrl);
10048 return 1;
10049 }
10050
10051 static void
10052 wm_put_swfwhw_semaphore(struct wm_softc *sc)
10053 {
10054 uint32_t ext_ctrl;
10055 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10056 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10057 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10058 }
10059
10060 static int
10061 wm_get_hw_semaphore_82573(struct wm_softc *sc)
10062 {
10063 int i = 0;
10064 uint32_t reg;
10065
10066 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10067 do {
10068 CSR_WRITE(sc, WMREG_EXTCNFCTR,
10069 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
10070 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10071 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
10072 break;
10073 delay(2*1000);
10074 i++;
10075 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
10076
10077 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
10078 wm_put_hw_semaphore_82573(sc);
10079 log(LOG_ERR, "%s: Driver can't access the PHY\n",
10080 device_xname(sc->sc_dev));
10081 return -1;
10082 }
10083
10084 return 0;
10085 }
10086
10087 static void
10088 wm_put_hw_semaphore_82573(struct wm_softc *sc)
10089 {
10090 uint32_t reg;
10091
10092 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10093 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10094 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10095 }
10096
10097 /*
10098 * Management mode and power management related subroutines.
10099 * BMC, AMT, suspend/resume and EEE.
10100 */
10101
10102 static int
10103 wm_check_mng_mode(struct wm_softc *sc)
10104 {
10105 int rv;
10106
10107 switch (sc->sc_type) {
10108 case WM_T_ICH8:
10109 case WM_T_ICH9:
10110 case WM_T_ICH10:
10111 case WM_T_PCH:
10112 case WM_T_PCH2:
10113 case WM_T_PCH_LPT:
10114 rv = wm_check_mng_mode_ich8lan(sc);
10115 break;
10116 case WM_T_82574:
10117 case WM_T_82583:
10118 rv = wm_check_mng_mode_82574(sc);
10119 break;
10120 case WM_T_82571:
10121 case WM_T_82572:
10122 case WM_T_82573:
10123 case WM_T_80003:
10124 rv = wm_check_mng_mode_generic(sc);
10125 break;
10126 default:
10127 		/* nothing to do */
10128 rv = 0;
10129 break;
10130 }
10131
10132 return rv;
10133 }
10134
10135 static int
10136 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
10137 {
10138 uint32_t fwsm;
10139
10140 fwsm = CSR_READ(sc, WMREG_FWSM);
10141
10142 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
10143 return 1;
10144
10145 return 0;
10146 }
10147
10148 static int
10149 wm_check_mng_mode_82574(struct wm_softc *sc)
10150 {
10151 uint16_t data;
10152
10153 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
10154
10155 if ((data & NVM_CFG2_MNGM_MASK) != 0)
10156 return 1;
10157
10158 return 0;
10159 }
10160
10161 static int
10162 wm_check_mng_mode_generic(struct wm_softc *sc)
10163 {
10164 uint32_t fwsm;
10165
10166 fwsm = CSR_READ(sc, WMREG_FWSM);
10167
10168 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
10169 return 1;
10170
10171 return 0;
10172 }
10173
10174 static int
10175 wm_enable_mng_pass_thru(struct wm_softc *sc)
10176 {
10177 uint32_t manc, fwsm, factps;
10178
10179 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
10180 return 0;
10181
10182 manc = CSR_READ(sc, WMREG_MANC);
10183
10184 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
10185 device_xname(sc->sc_dev), manc));
10186 if ((manc & MANC_RECV_TCO_EN) == 0)
10187 return 0;
10188
10189 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
10190 fwsm = CSR_READ(sc, WMREG_FWSM);
10191 factps = CSR_READ(sc, WMREG_FACTPS);
10192 if (((factps & FACTPS_MNGCG) == 0)
10193 && ((fwsm & FWSM_MODE_MASK)
10194 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
10195 return 1;
10196 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
10197 uint16_t data;
10198
10199 factps = CSR_READ(sc, WMREG_FACTPS);
10200 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
10201 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
10202 device_xname(sc->sc_dev), factps, data));
10203 if (((factps & FACTPS_MNGCG) == 0)
10204 && ((data & NVM_CFG2_MNGM_MASK)
10205 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
10206 return 1;
10207 } else if (((manc & MANC_SMBUS_EN) != 0)
10208 && ((manc & MANC_ASF_EN) == 0))
10209 return 1;
10210
10211 return 0;
10212 }
10213
10214 static int
10215 wm_check_reset_block(struct wm_softc *sc)
10216 {
10217 uint32_t reg;
10218
10219 switch (sc->sc_type) {
10220 case WM_T_ICH8:
10221 case WM_T_ICH9:
10222 case WM_T_ICH10:
10223 case WM_T_PCH:
10224 case WM_T_PCH2:
10225 case WM_T_PCH_LPT:
10226 reg = CSR_READ(sc, WMREG_FWSM);
10227 if ((reg & FWSM_RSPCIPHY) != 0)
10228 return 0;
10229 else
10230 return -1;
10231 break;
10232 case WM_T_82571:
10233 case WM_T_82572:
10234 case WM_T_82573:
10235 case WM_T_82574:
10236 case WM_T_82583:
10237 case WM_T_80003:
10238 reg = CSR_READ(sc, WMREG_MANC);
10239 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
10240 return -1;
10241 else
10242 return 0;
10243 break;
10244 default:
10245 		/* No reset block on other chips */
10246 break;
10247 }
10248
10249 return 0;
10250 }
10251
10252 static void
10253 wm_get_hw_control(struct wm_softc *sc)
10254 {
10255 uint32_t reg;
10256
10257 switch (sc->sc_type) {
10258 case WM_T_82573:
10259 reg = CSR_READ(sc, WMREG_SWSM);
10260 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
10261 break;
10262 case WM_T_82571:
10263 case WM_T_82572:
10264 case WM_T_82574:
10265 case WM_T_82583:
10266 case WM_T_80003:
10267 case WM_T_ICH8:
10268 case WM_T_ICH9:
10269 case WM_T_ICH10:
10270 case WM_T_PCH:
10271 case WM_T_PCH2:
10272 case WM_T_PCH_LPT:
10273 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10274 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
10275 break;
10276 default:
10277 break;
10278 }
10279 }
10280
10281 static void
10282 wm_release_hw_control(struct wm_softc *sc)
10283 {
10284 uint32_t reg;
10285
10286 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
10287 return;
10288
10289 if (sc->sc_type == WM_T_82573) {
10290 reg = CSR_READ(sc, WMREG_SWSM);
10291 reg &= ~SWSM_DRV_LOAD;
10292 		CSR_WRITE(sc, WMREG_SWSM, reg);
10293 } else {
10294 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10295 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
10296 }
10297 }
10298
10299 static void
10300 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
10301 {
10302 uint32_t reg;
10303
10304 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10305
10306 if (on != 0)
10307 reg |= EXTCNFCTR_GATE_PHY_CFG;
10308 else
10309 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
10310
10311 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10312 }
10313
10314 static void
10315 wm_smbustopci(struct wm_softc *sc)
10316 {
10317 uint32_t fwsm;
10318
10319 fwsm = CSR_READ(sc, WMREG_FWSM);
10320 if (((fwsm & FWSM_FW_VALID) == 0)
10321 && ((wm_check_reset_block(sc) == 0))) {
10322 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
10323 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
10324 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10325 CSR_WRITE_FLUSH(sc);
10326 delay(10);
10327 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
10328 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10329 CSR_WRITE_FLUSH(sc);
10330 delay(50*1000);
10331
10332 /*
10333 * Gate automatic PHY configuration by hardware on non-managed
10334 * 82579
10335 */
10336 if (sc->sc_type == WM_T_PCH2)
10337 wm_gate_hw_phy_config_ich8lan(sc, 1);
10338 }
10339 }
10340
10341 static void
10342 wm_init_manageability(struct wm_softc *sc)
10343 {
10344
10345 if (sc->sc_flags & WM_F_HAS_MANAGE) {
10346 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
10347 uint32_t manc = CSR_READ(sc, WMREG_MANC);
10348
10349 /* Disable hardware interception of ARP */
10350 manc &= ~MANC_ARP_EN;
10351
10352 /* Enable receiving management packets to the host */
10353 if (sc->sc_type >= WM_T_82571) {
10354 manc |= MANC_EN_MNG2HOST;
10355 manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
10356 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
10357 }
10358
10359 CSR_WRITE(sc, WMREG_MANC, manc);
10360 }
10361 }
10362
10363 static void
10364 wm_release_manageability(struct wm_softc *sc)
10365 {
10366
10367 if (sc->sc_flags & WM_F_HAS_MANAGE) {
10368 uint32_t manc = CSR_READ(sc, WMREG_MANC);
10369
10370 manc |= MANC_ARP_EN;
10371 if (sc->sc_type >= WM_T_82571)
10372 manc &= ~MANC_EN_MNG2HOST;
10373
10374 CSR_WRITE(sc, WMREG_MANC, manc);
10375 }
10376 }
10377
10378 static void
10379 wm_get_wakeup(struct wm_softc *sc)
10380 {
10381
10382 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
10383 switch (sc->sc_type) {
10384 case WM_T_82573:
10385 case WM_T_82583:
10386 sc->sc_flags |= WM_F_HAS_AMT;
10387 /* FALLTHROUGH */
10388 case WM_T_80003:
10389 case WM_T_82541:
10390 case WM_T_82547:
10391 case WM_T_82571:
10392 case WM_T_82572:
10393 case WM_T_82574:
10394 case WM_T_82575:
10395 case WM_T_82576:
10396 case WM_T_82580:
10397 case WM_T_I350:
10398 case WM_T_I354:
10399 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
10400 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
10401 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
10402 break;
10403 case WM_T_ICH8:
10404 case WM_T_ICH9:
10405 case WM_T_ICH10:
10406 case WM_T_PCH:
10407 case WM_T_PCH2:
10408 case WM_T_PCH_LPT:
10409 sc->sc_flags |= WM_F_HAS_AMT;
10410 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
10411 break;
10412 default:
10413 break;
10414 }
10415
10416 /* 1: HAS_MANAGE */
10417 if (wm_enable_mng_pass_thru(sc) != 0)
10418 sc->sc_flags |= WM_F_HAS_MANAGE;
10419
10420 #ifdef WM_DEBUG
10421 printf("\n");
10422 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
10423 printf("HAS_AMT,");
10424 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
10425 printf("ARC_SUBSYS_VALID,");
10426 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
10427 printf("ASF_FIRMWARE_PRES,");
10428 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
10429 printf("HAS_MANAGE,");
10430 printf("\n");
10431 #endif
10432 /*
10433 	 * Note that the WOL flags are set after the EEPROM-related
10434 	 * reset handling has run
10435 */
10436 }
10437
10438 #ifdef WM_WOL
10439 /* WOL in the newer chipset interfaces (pchlan) */
10440 static void
10441 wm_enable_phy_wakeup(struct wm_softc *sc)
10442 {
10443 #if 0
10444 uint16_t preg;
10445
10446 /* Copy MAC RARs to PHY RARs */
10447
10448 /* Copy MAC MTA to PHY MTA */
10449
10450 /* Configure PHY Rx Control register */
10451
10452 /* Enable PHY wakeup in MAC register */
10453
10454 /* Configure and enable PHY wakeup in PHY registers */
10455
10456 /* Activate PHY wakeup */
10457
10458 /* XXX */
10459 #endif
10460 }
10461
10462 /* Power down workaround on D3 */
10463 static void
10464 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
10465 {
10466 uint32_t reg;
10467 int i;
10468
10469 for (i = 0; i < 2; i++) {
10470 /* Disable link */
10471 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10472 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
10473 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10474
10475 /*
10476 * Call gig speed drop workaround on Gig disable before
10477 * accessing any PHY registers
10478 */
10479 if (sc->sc_type == WM_T_ICH8)
10480 wm_gig_downshift_workaround_ich8lan(sc);
10481
10482 /* Write VR power-down enable */
10483 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
10484 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
10485 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
10486 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
10487
10488 /* Read it back and test */
10489 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
10490 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
10491 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
10492 break;
10493
10494 /* Issue PHY reset and repeat at most one more time */
10495 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10496 }
10497 }
10498
10499 static void
10500 wm_enable_wakeup(struct wm_softc *sc)
10501 {
10502 uint32_t reg, pmreg;
10503 pcireg_t pmode;
10504
10505 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
10506 &pmreg, NULL) == 0)
10507 return;
10508
10509 /* Advertise the wakeup capability */
10510 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
10511 | CTRL_SWDPIN(3));
10512 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
10513
10514 /* ICH workaround */
10515 switch (sc->sc_type) {
10516 case WM_T_ICH8:
10517 case WM_T_ICH9:
10518 case WM_T_ICH10:
10519 case WM_T_PCH:
10520 case WM_T_PCH2:
10521 case WM_T_PCH_LPT:
10522 /* Disable gig during WOL */
10523 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10524 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
10525 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10526 if (sc->sc_type == WM_T_PCH)
10527 wm_gmii_reset(sc);
10528
10529 /* Power down workaround */
10530 if (sc->sc_phytype == WMPHY_82577) {
10531 struct mii_softc *child;
10532
10533 /* Assume that the PHY is copper */
10534 child = LIST_FIRST(&sc->sc_mii.mii_phys);
10535 if (child->mii_mpd_rev <= 2)
10536 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
10537 (768 << 5) | 25, 0x0444); /* magic num */
10538 }
10539 break;
10540 default:
10541 break;
10542 }
10543
10544 /* Keep the laser running on fiber adapters */
10545 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
10546 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
10547 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10548 reg |= CTRL_EXT_SWDPIN(3);
10549 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10550 }
10551
10552 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
10553 #if 0 /* for the multicast packet */
10554 reg |= WUFC_MC;
10555 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
10556 #endif
10557
10558 if (sc->sc_type == WM_T_PCH) {
10559 wm_enable_phy_wakeup(sc);
10560 } else {
10561 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
10562 CSR_WRITE(sc, WMREG_WUFC, reg);
10563 }
10564
10565 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10566 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10567 || (sc->sc_type == WM_T_PCH2))
10568 && (sc->sc_phytype == WMPHY_IGP_3))
10569 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
10570
10571 /* Request PME */
10572 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
10573 #if 0
10574 /* Disable WOL */
10575 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
10576 #else
10577 /* For WOL */
10578 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
10579 #endif
10580 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
10581 }
10582 #endif /* WM_WOL */
10583
10584 /* EEE */
10585
10586 static void
10587 wm_set_eee_i350(struct wm_softc *sc)
10588 {
10589 uint32_t ipcnfg, eeer;
10590
10591 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
10592 eeer = CSR_READ(sc, WMREG_EEER);
10593
10594 if ((sc->sc_flags & WM_F_EEE) != 0) {
10595 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
10596 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
10597 | EEER_LPI_FC);
10598 } else {
10599 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
10600 ipcnfg &= ~IPCNFG_10BASE_TE;
10601 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
10602 | EEER_LPI_FC);
10603 }
10604
10605 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
10606 CSR_WRITE(sc, WMREG_EEER, eeer);
10607 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
10608 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
10609 }
10610
10611 /*
10612 * Workarounds (mainly PHY related).
10613 * Basically, PHY's workarounds are in the PHY drivers.
10614 */
10615
10616 /* Work-around for 82566 Kumeran PCS lock loss */
10617 static void
10618 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
10619 {
10620 int miistatus, active, i;
10621 int reg;
10622
10623 miistatus = sc->sc_mii.mii_media_status;
10624
10625 /* If the link is not up, do nothing */
10626 	if ((miistatus & IFM_ACTIVE) == 0)
10627 return;
10628
10629 active = sc->sc_mii.mii_media_active;
10630
10631 	/* Nothing to do if the link speed is not 1Gbps */
10632 if (IFM_SUBTYPE(active) != IFM_1000_T)
10633 return;
10634
10635 for (i = 0; i < 10; i++) {
10636 /* read twice */
10637 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
10638 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
10639 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
10640 goto out; /* GOOD! */
10641
10642 /* Reset the PHY */
10643 wm_gmii_reset(sc);
10644 delay(5*1000);
10645 }
10646
10647 /* Disable GigE link negotiation */
10648 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10649 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
10650 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10651
10652 /*
10653 * Call gig speed drop workaround on Gig disable before accessing
10654 * any PHY registers.
10655 */
10656 wm_gig_downshift_workaround_ich8lan(sc);
10657
10658 out:
10659 return;
10660 }
10661
10662 /* Workaround: without this, WOL from S5 stops working */
10663 static void
10664 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
10665 {
10666 uint16_t kmrn_reg;
10667
10668 /* Only for igp3 */
10669 if (sc->sc_phytype == WMPHY_IGP_3) {
10670 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
10671 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
10672 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
10673 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
10674 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
10675 }
10676 }
10677
10678 /*
10679 * Workaround for pch's PHYs
10680  * XXX should be moved to a new PHY driver?
10681 */
10682 static void
10683 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
10684 {
10685 if (sc->sc_phytype == WMPHY_82577)
10686 wm_set_mdio_slow_mode_hv(sc);
10687
10688 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
10689
10690 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
10691
10692 /* 82578 */
10693 if (sc->sc_phytype == WMPHY_82578) {
10694 /* PCH rev. < 3 */
10695 if (sc->sc_rev < 3) {
10696 /* XXX 6 bit shift? Why? Is it page2? */
10697 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
10698 0x66c0);
10699 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
10700 0xffff);
10701 }
10702
10703 /* XXX phy rev. < 2 */
10704 }
10705
10706 /* Select page 0 */
10707
10708 /* XXX acquire semaphore */
10709 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
10710 /* XXX release semaphore */
10711
10712 /*
10713 * Configure the K1 Si workaround during phy reset assuming there is
10714 * link so that it disables K1 if link is in 1Gbps.
10715 */
10716 wm_k1_gig_workaround_hv(sc, 1);
10717 }
10718
10719 static void
10720 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
10721 {
10722
10723 wm_set_mdio_slow_mode_hv(sc);
10724 }
10725
10726 static void
10727 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
10728 {
10729 int k1_enable = sc->sc_nvm_k1_enabled;
10730
10731 /* XXX acquire semaphore */
10732
10733 if (link) {
10734 k1_enable = 0;
10735
10736 /* Link stall fix for link up */
10737 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
10738 } else {
10739 /* Link stall fix for link down */
10740 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
10741 }
10742
10743 wm_configure_k1_ich8lan(sc, k1_enable);
10744
10745 /* XXX release semaphore */
10746 }
10747
10748 static void
10749 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
10750 {
10751 uint32_t reg;
10752
10753 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
10754 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
10755 reg | HV_KMRN_MDIO_SLOW);
10756 }
10757
10758 static void
10759 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
10760 {
10761 uint32_t ctrl, ctrl_ext, tmp;
10762 uint16_t kmrn_reg;
10763
10764 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
10765
10766 if (k1_enable)
10767 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
10768 else
10769 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
10770
10771 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
10772
10773 delay(20);
10774
10775 ctrl = CSR_READ(sc, WMREG_CTRL);
10776 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10777
10778 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
10779 tmp |= CTRL_FRCSPD;
10780
10781 CSR_WRITE(sc, WMREG_CTRL, tmp);
10782 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
10783 CSR_WRITE_FLUSH(sc);
10784 delay(20);
10785
10786 CSR_WRITE(sc, WMREG_CTRL, ctrl);
10787 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10788 CSR_WRITE_FLUSH(sc);
10789 delay(20);
10790 }
10791
10792 /* Special case - the 82575 needs manual init ... */
10793 static void
10794 wm_reset_init_script_82575(struct wm_softc *sc)
10795 {
10796 /*
10797 	 * Remark: this is untested code - we have no board without an EEPROM.
10798 	 * It is the same setup as mentioned in the FreeBSD driver for the i82575.
10799 */
10800
10801 /* SerDes configuration via SERDESCTRL */
10802 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
10803 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
10804 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
10805 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
10806
10807 /* CCM configuration via CCMCTL register */
10808 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
10809 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
10810
10811 /* PCIe lanes configuration */
10812 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
10813 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
10814 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
10815 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
10816
10817 /* PCIe PLL Configuration */
10818 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
10819 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
10820 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
10821 }
10822
10823 static void
10824 wm_reset_mdicnfg_82580(struct wm_softc *sc)
10825 {
10826 uint32_t reg;
10827 uint16_t nvmword;
10828 int rv;
10829
10830 if ((sc->sc_flags & WM_F_SGMII) == 0)
10831 return;
10832
10833 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
10834 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
10835 if (rv != 0) {
10836 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
10837 __func__);
10838 return;
10839 }
10840
10841 reg = CSR_READ(sc, WMREG_MDICNFG);
10842 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
10843 reg |= MDICNFG_DEST;
10844 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
10845 reg |= MDICNFG_COM_MDIO;
10846 CSR_WRITE(sc, WMREG_MDICNFG, reg);
10847 }
10848
10849 /*
10850 * I210 Errata 25 and I211 Errata 10
10851 * Slow System Clock.
10852 */
10853 static void
10854 wm_pll_workaround_i210(struct wm_softc *sc)
10855 {
10856 uint32_t mdicnfg, wuc;
10857 uint32_t reg;
10858 pcireg_t pcireg;
10859 uint32_t pmreg;
10860 uint16_t nvmword, tmp_nvmword;
10861 int phyval;
10862 bool wa_done = false;
10863 int i;
10864
10865 /* Save WUC and MDICNFG registers */
10866 wuc = CSR_READ(sc, WMREG_WUC);
10867 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
10868
10869 reg = mdicnfg & ~MDICNFG_DEST;
10870 CSR_WRITE(sc, WMREG_MDICNFG, reg);
10871
10872 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
10873 nvmword = INVM_DEFAULT_AL;
10874 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
10875
10876 /* Get Power Management cap offset */
10877 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
10878 &pmreg, NULL) == 0)
10879 return;
10880 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
10881 phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
10882 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
10883
10884 if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
10885 break; /* OK */
10886 }
10887
10888 wa_done = true;
10889 /* Directly reset the internal PHY */
10890 reg = CSR_READ(sc, WMREG_CTRL);
10891 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
10892
10893 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10894 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
10895 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10896
10897 CSR_WRITE(sc, WMREG_WUC, 0);
10898 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
10899 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
10900
10901 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
10902 pmreg + PCI_PMCSR);
10903 pcireg |= PCI_PMCSR_STATE_D3;
10904 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
10905 pmreg + PCI_PMCSR, pcireg);
10906 delay(1000);
10907 pcireg &= ~PCI_PMCSR_STATE_D3;
10908 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
10909 pmreg + PCI_PMCSR, pcireg);
10910
10911 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
10912 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
10913
10914 /* Restore WUC register */
10915 CSR_WRITE(sc, WMREG_WUC, wuc);
10916 }
10917
10918 /* Restore MDICNFG setting */
10919 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
10920 if (wa_done)
10921 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
10922 }
10923