     1 /*	$NetBSD: if_wm.c,v 1.348 2015/09/28 06:04:04 knakahara Exp $	*/
2
3 /*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*******************************************************************************
39
40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved.
42
43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met:
45
46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer.
48
49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution.
52
53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission.
56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE.
68
69 *******************************************************************************/
70 /*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 *
73 * TODO (in order of importance):
74 *
75 * - Check XXX'ed comments
76 * - EEE (Energy Efficiency Ethernet)
77 * - Multi queue
78 * - Image Unique ID
79 * - LPLU other than PCH*
80 * - Virtual Function
81 * - Set LED correctly (based on contents in EEPROM)
82 * - Rework how parameters are loaded from the EEPROM.
83 */
84
85 #include <sys/cdefs.h>
86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.348 2015/09/28 06:04:04 knakahara Exp $");
87
88 #ifdef _KERNEL_OPT
89 #include "opt_net_mpsafe.h"
90 #endif
91
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/callout.h>
95 #include <sys/mbuf.h>
96 #include <sys/malloc.h>
97 #include <sys/kernel.h>
98 #include <sys/socket.h>
99 #include <sys/ioctl.h>
100 #include <sys/errno.h>
101 #include <sys/device.h>
102 #include <sys/queue.h>
103 #include <sys/syslog.h>
104 #include <sys/interrupt.h>
105
106 #include <sys/rndsource.h>
107
108 #include <net/if.h>
109 #include <net/if_dl.h>
110 #include <net/if_media.h>
111 #include <net/if_ether.h>
112
113 #include <net/bpf.h>
114
115 #include <netinet/in.h> /* XXX for struct ip */
116 #include <netinet/in_systm.h> /* XXX for struct ip */
117 #include <netinet/ip.h> /* XXX for struct ip */
118 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */
119 #include <netinet/tcp.h> /* XXX for struct tcphdr */
120
121 #include <sys/bus.h>
122 #include <sys/intr.h>
123 #include <machine/endian.h>
124
125 #include <dev/mii/mii.h>
126 #include <dev/mii/miivar.h>
127 #include <dev/mii/miidevs.h>
128 #include <dev/mii/mii_bitbang.h>
129 #include <dev/mii/ikphyreg.h>
130 #include <dev/mii/igphyreg.h>
131 #include <dev/mii/igphyvar.h>
132 #include <dev/mii/inbmphyreg.h>
133
134 #include <dev/pci/pcireg.h>
135 #include <dev/pci/pcivar.h>
136 #include <dev/pci/pcidevs.h>
137
138 #include <dev/pci/if_wmreg.h>
139 #include <dev/pci/if_wmvar.h>
140
141 #ifdef WM_DEBUG
142 #define WM_DEBUG_LINK 0x01
143 #define WM_DEBUG_TX 0x02
144 #define WM_DEBUG_RX 0x04
145 #define WM_DEBUG_GMII 0x08
146 #define WM_DEBUG_MANAGE 0x10
147 #define WM_DEBUG_NVM 0x20
148 int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
149 | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
150
151 #define DPRINTF(x, y) if (wm_debug & (x)) printf y
152 #else
153 #define DPRINTF(x, y) /* nothing */
154 #endif /* WM_DEBUG */
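
/*
 * Usage note (illustrative, not part of the original code): DPRINTF()
 * takes the printf() arguments in an extra set of parentheses, so that
 * a single macro parameter can carry a variable-length argument list:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
 *	    device_xname(sc->sc_dev)));
 */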
155
156 #ifdef NET_MPSAFE
157 #define WM_MPSAFE 1
158 #endif
159
160 #ifdef __HAVE_PCI_MSI_MSIX
161 #define WM_MSI_MSIX 1 /* Enable by default */
162 #endif
163
   164 /*
   165  * This device driver divides the interrupts into TX, RX and link state.
   166  * The MSI-X vector index for each is listed below.
   167  */
168 #define WM_MSIX_NINTR 3
169 #define WM_MSIX_TXINTR_IDX 0
170 #define WM_MSIX_RXINTR_IDX 1
171 #define WM_MSIX_LINKINTR_IDX 2
172 #define WM_MAX_NINTR WM_MSIX_NINTR
173
   174 /*
   175  * This device driver sets the affinity of each interrupt as below
   176  * (round-robin). If the number of CPUs is less than the number of
   177  * interrupts, this driver uses the same CPU for multiple interrupts.
   178  */
179 #define WM_MSIX_TXINTR_CPUID 0
180 #define WM_MSIX_RXINTR_CPUID 1
181 #define WM_MSIX_LINKINTR_CPUID 2
182
183 /*
184 * Transmit descriptor list size. Due to errata, we can only have
185 * 256 hardware descriptors in the ring on < 82544, but we use 4096
186 * on >= 82544. We tell the upper layers that they can queue a lot
187 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
188 * of them at a time.
189 *
190 * We allow up to 256 (!) DMA segments per packet. Pathological packet
191 * chains containing many small mbufs have been observed in zero-copy
192 * situations with jumbo frames.
193 */
194 #define WM_NTXSEGS 256
195 #define WM_IFQUEUELEN 256
196 #define WM_TXQUEUELEN_MAX 64
197 #define WM_TXQUEUELEN_MAX_82547 16
198 #define WM_TXQUEUELEN(sc) ((sc)->sc_txnum)
199 #define WM_TXQUEUELEN_MASK(sc) (WM_TXQUEUELEN(sc) - 1)
200 #define WM_TXQUEUE_GC(sc) (WM_TXQUEUELEN(sc) / 8)
201 #define WM_NTXDESC_82542 256
202 #define WM_NTXDESC_82544 4096
203 #define WM_NTXDESC(sc) ((sc)->sc_ntxdesc)
204 #define WM_NTXDESC_MASK(sc) (WM_NTXDESC(sc) - 1)
205 #define WM_TXDESCSIZE(sc) (WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
206 #define WM_NEXTTX(sc, x) (((x) + 1) & WM_NTXDESC_MASK(sc))
207 #define WM_NEXTTXS(sc, x) (((x) + 1) & WM_TXQUEUELEN_MASK(sc))
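
/*
 * Illustrative sketch (not used by the driver): because WM_NTXDESC(sc)
 * and WM_TXQUEUELEN(sc) are powers of two, the WM_NEXTTX()/WM_NEXTTXS()
 * masks are a cheap substitute for a modulo operation:
 *
 *	int idx = WM_NTXDESC(sc) - 1;	// last descriptor in the ring
 *	idx = WM_NEXTTX(sc, idx);	// wraps around to 0
 */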
208
209 #define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */
210
211 /*
212 * Receive descriptor list size. We have one Rx buffer for normal
213 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
214 * packet. We allocate 256 receive descriptors, each with a 2k
215 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
216 */
217 #define WM_NRXDESC 256
218 #define WM_NRXDESC_MASK (WM_NRXDESC - 1)
219 #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
220 #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
221
222 /*
223 * Control structures are DMA'd to the i82542 chip. We allocate them in
224 * a single clump that maps to a single DMA segment to make several things
225 * easier.
226 */
227 struct wm_control_data_82544 {
228 /*
229 * The receive descriptors.
230 */
231 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
232
233 /*
234 * The transmit descriptors. Put these at the end, because
235 * we might use a smaller number of them.
236 */
237 union {
238 wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
239 nq_txdesc_t wcdu_nq_txdescs[WM_NTXDESC_82544];
240 } wdc_u;
241 };
242
243 struct wm_control_data_82542 {
244 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
245 wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
246 };
247
248 #define WM_CDOFF(x) offsetof(struct wm_control_data_82544, x)
249 #define WM_CDTXOFF(x) WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
250 #define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)])
251
252 /*
253 * Software state for transmit jobs.
254 */
255 struct wm_txsoft {
256 struct mbuf *txs_mbuf; /* head of our mbuf chain */
257 bus_dmamap_t txs_dmamap; /* our DMA map */
258 int txs_firstdesc; /* first descriptor in packet */
259 int txs_lastdesc; /* last descriptor in packet */
260 int txs_ndesc; /* # of descriptors used */
261 };
262
263 /*
264 * Software state for receive buffers. Each descriptor gets a
265 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
266 * more than one buffer, we chain them together.
267 */
268 struct wm_rxsoft {
269 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
270 bus_dmamap_t rxs_dmamap; /* our DMA map */
271 };
272
273 #define WM_LINKUP_TIMEOUT 50
274
275 static uint16_t swfwphysem[] = {
276 SWFW_PHY0_SM,
277 SWFW_PHY1_SM,
278 SWFW_PHY2_SM,
279 SWFW_PHY3_SM
280 };
281
282 static const uint32_t wm_82580_rxpbs_table[] = {
283 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
284 };
285
286 /*
287 * Software state per device.
288 */
289 struct wm_softc {
290 device_t sc_dev; /* generic device information */
291 bus_space_tag_t sc_st; /* bus space tag */
292 bus_space_handle_t sc_sh; /* bus space handle */
293 bus_size_t sc_ss; /* bus space size */
294 bus_space_tag_t sc_iot; /* I/O space tag */
295 bus_space_handle_t sc_ioh; /* I/O space handle */
296 bus_size_t sc_ios; /* I/O space size */
297 bus_space_tag_t sc_flasht; /* flash registers space tag */
298 bus_space_handle_t sc_flashh; /* flash registers space handle */
299 bus_size_t sc_flashs; /* flash registers space size */
300 bus_dma_tag_t sc_dmat; /* bus DMA tag */
301
302 struct ethercom sc_ethercom; /* ethernet common data */
303 struct mii_data sc_mii; /* MII/media information */
304
305 pci_chipset_tag_t sc_pc;
306 pcitag_t sc_pcitag;
307 int sc_bus_speed; /* PCI/PCIX bus speed */
308 int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */
309
310 uint16_t sc_pcidevid; /* PCI device ID */
311 wm_chip_type sc_type; /* MAC type */
312 int sc_rev; /* MAC revision */
313 wm_phy_type sc_phytype; /* PHY type */
314 uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/
315 #define WM_MEDIATYPE_UNKNOWN 0x00
316 #define WM_MEDIATYPE_FIBER 0x01
317 #define WM_MEDIATYPE_COPPER 0x02
318 #define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */
319 int sc_funcid; /* unit number of the chip (0 to 3) */
320 int sc_flags; /* flags; see below */
321 int sc_if_flags; /* last if_flags */
322 int sc_flowflags; /* 802.3x flow control flags */
323 int sc_align_tweak;
324
325 void *sc_ihs[WM_MAX_NINTR]; /*
326 * interrupt cookie.
327 * legacy and msi use sc_ihs[0].
328 */
329 pci_intr_handle_t *sc_intrs; /* legacy and msi use sc_intrs[0] */
330 int sc_nintrs; /* number of interrupts */
331
332 callout_t sc_tick_ch; /* tick callout */
333 bool sc_stopping;
334
335 int sc_nvm_ver_major;
336 int sc_nvm_ver_minor;
337 int sc_nvm_addrbits; /* NVM address bits */
338 unsigned int sc_nvm_wordsize; /* NVM word size */
339 int sc_ich8_flash_base;
340 int sc_ich8_flash_bank_size;
341 int sc_nvm_k1_enabled;
342
343 /* Software state for the transmit and receive descriptors. */
344 int sc_txnum; /* must be a power of two */
345 struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
346 struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
347
348 /* Control data structures. */
349 int sc_ntxdesc; /* must be a power of two */
350 struct wm_control_data_82544 *sc_control_data;
351 bus_dmamap_t sc_cddmamap; /* control data DMA map */
352 bus_dma_segment_t sc_cd_seg; /* control data segment */
   353 	int sc_cd_rseg;			/* real number of control segments */
354 size_t sc_cd_size; /* control data size */
355 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
356 #define sc_txdescs sc_control_data->wdc_u.wcdu_txdescs
357 #define sc_nq_txdescs sc_control_data->wdc_u.wcdu_nq_txdescs
358 #define sc_rxdescs sc_control_data->wcd_rxdescs
359
360 #ifdef WM_EVENT_COUNTERS
361 /* Event counters. */
362 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
363 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
364 struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
365 struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */
366 struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */
367 struct evcnt sc_ev_rxintr; /* Rx interrupts */
368 struct evcnt sc_ev_linkintr; /* Link interrupts */
369
370 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
371 struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */
372 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
373 struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */
374 struct evcnt sc_ev_txtusum6; /* TCP/UDP v6 cksums comp. out-bound */
375 struct evcnt sc_ev_txtso; /* TCP seg offload out-bound (IPv4) */
376 struct evcnt sc_ev_txtso6; /* TCP seg offload out-bound (IPv6) */
377 struct evcnt sc_ev_txtsopain; /* painful header manip. for TSO */
378
379 struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
380 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */
381
382 struct evcnt sc_ev_tu; /* Tx underrun */
383
384 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */
385 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */
386 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */
387 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */
388 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */
389 #endif /* WM_EVENT_COUNTERS */
390
391 bus_addr_t sc_tdt_reg; /* offset of TDT register */
392
393 int sc_txfree; /* number of free Tx descriptors */
394 int sc_txnext; /* next ready Tx descriptor */
395
396 int sc_txsfree; /* number of free Tx jobs */
397 int sc_txsnext; /* next free Tx job */
398 int sc_txsdirty; /* dirty Tx jobs */
399
400 /* These 5 variables are used only on the 82547. */
401 int sc_txfifo_size; /* Tx FIFO size */
402 int sc_txfifo_head; /* current head of FIFO */
403 uint32_t sc_txfifo_addr; /* internal address of start of FIFO */
404 int sc_txfifo_stall; /* Tx FIFO is stalled */
405 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */
406
407 bus_addr_t sc_rdt_reg; /* offset of RDT register */
408
409 int sc_rxptr; /* next ready Rx descriptor/queue ent */
410 int sc_rxdiscard;
411 int sc_rxlen;
412 struct mbuf *sc_rxhead;
413 struct mbuf *sc_rxtail;
414 struct mbuf **sc_rxtailp;
415
416 uint32_t sc_ctrl; /* prototype CTRL register */
417 #if 0
418 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
419 #endif
420 uint32_t sc_icr; /* prototype interrupt bits */
421 uint32_t sc_itr; /* prototype intr throttling reg */
422 uint32_t sc_tctl; /* prototype TCTL register */
423 uint32_t sc_rctl; /* prototype RCTL register */
424 uint32_t sc_txcw; /* prototype TXCW register */
425 uint32_t sc_tipg; /* prototype TIPG register */
426 uint32_t sc_fcrtl; /* prototype FCRTL register */
427 uint32_t sc_pba; /* prototype PBA register */
428
429 int sc_tbi_linkup; /* TBI link status */
430 int sc_tbi_serdes_anegticks; /* autonegotiation ticks */
431 int sc_tbi_serdes_ticks; /* tbi ticks */
432
433 int sc_mchash_type; /* multicast filter offset */
434
435 krndsource_t rnd_source; /* random source */
436
437 kmutex_t *sc_tx_lock; /* lock for tx operations */
438 kmutex_t *sc_rx_lock; /* lock for rx operations */
439 };
440
441 #define WM_TX_LOCK(_sc) if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
442 #define WM_TX_UNLOCK(_sc) if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
443 #define WM_TX_LOCKED(_sc) (!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
444 #define WM_RX_LOCK(_sc) if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
445 #define WM_RX_UNLOCK(_sc) if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
446 #define WM_RX_LOCKED(_sc) (!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
447 #define WM_BOTH_LOCK(_sc) do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
448 #define WM_BOTH_UNLOCK(_sc) do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
449 #define WM_BOTH_LOCKED(_sc) (WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
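
/*
 * Illustrative usage sketch (assumed, not taken from this file): the
 * macros degrade to no-ops when the locks were never allocated, so a
 * typical critical section looks the same either way:
 *
 *	WM_TX_LOCK(sc);
 *	KASSERT(WM_TX_LOCKED(sc));
 *	// ... modify Tx ring state ...
 *	WM_TX_UNLOCK(sc);
 */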
450
451 #ifdef WM_MPSAFE
452 #define CALLOUT_FLAGS CALLOUT_MPSAFE
453 #else
454 #define CALLOUT_FLAGS 0
455 #endif
456
457 #define WM_RXCHAIN_RESET(sc) \
458 do { \
459 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
460 *(sc)->sc_rxtailp = NULL; \
461 (sc)->sc_rxlen = 0; \
462 } while (/*CONSTCOND*/0)
463
464 #define WM_RXCHAIN_LINK(sc, m) \
465 do { \
466 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
467 (sc)->sc_rxtailp = &(m)->m_next; \
468 } while (/*CONSTCOND*/0)
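
/*
 * Sketch of how a multi-buffer packet is assembled with the macros
 * above (illustrative; m0 and m1 are hypothetical mbufs):
 *
 *	WM_RXCHAIN_RESET(sc);		// sc_rxhead == NULL, sc_rxlen == 0
 *	WM_RXCHAIN_LINK(sc, m0);	// sc_rxhead == m0
 *	WM_RXCHAIN_LINK(sc, m1);	// m0->m_next == m1, tail updated
 */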
469
470 #ifdef WM_EVENT_COUNTERS
471 #define WM_EVCNT_INCR(ev) (ev)->ev_count++
472 #define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val)
473 #else
474 #define WM_EVCNT_INCR(ev) /* nothing */
475 #define WM_EVCNT_ADD(ev, val) /* nothing */
476 #endif
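
/*
 * Example (illustrative): WM_EVCNT_INCR(&sc->sc_ev_rxintr) bumps the
 * counter when WM_EVENT_COUNTERS is defined and compiles away to
 * nothing otherwise, so callers need no #ifdef of their own.
 */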
477
478 #define CSR_READ(sc, reg) \
479 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
480 #define CSR_WRITE(sc, reg, val) \
481 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
482 #define CSR_WRITE_FLUSH(sc) \
483 (void) CSR_READ((sc), WMREG_STATUS)
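
/*
 * Typical register read-modify-write with a posted-write flush
 * (illustrative sketch; CTRL_SLU is assumed from if_wmreg.h):
 *
 *	uint32_t ctrl = CSR_READ(sc, WMREG_CTRL);
 *	CSR_WRITE(sc, WMREG_CTRL, ctrl | CTRL_SLU);
 *	CSR_WRITE_FLUSH(sc);	// read STATUS to force the write out
 */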
484
485 #define ICH8_FLASH_READ32(sc, reg) \
486 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
487 #define ICH8_FLASH_WRITE32(sc, reg, data) \
488 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
489
490 #define ICH8_FLASH_READ16(sc, reg) \
491 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
492 #define ICH8_FLASH_WRITE16(sc, reg, data) \
493 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
494
495 #define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x)))
496 #define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x)))
497
498 #define WM_CDTXADDR_LO(sc, x) (WM_CDTXADDR((sc), (x)) & 0xffffffffU)
499 #define WM_CDTXADDR_HI(sc, x) \
500 (sizeof(bus_addr_t) == 8 ? \
501 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
502
503 #define WM_CDRXADDR_LO(sc, x) (WM_CDRXADDR((sc), (x)) & 0xffffffffU)
504 #define WM_CDRXADDR_HI(sc, x) \
505 (sizeof(bus_addr_t) == 8 ? \
506 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
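
/*
 * Illustrative sketch (register names assumed from if_wmreg.h): the
 * LO/HI pairs split a descriptor ring base address across the 32-bit
 * register halves, with HI collapsing to 0 on a 32-bit bus_addr_t:
 *
 *	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
 *	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
 */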
507
508 #define WM_CDTXSYNC(sc, x, n, ops) \
509 do { \
510 int __x, __n; \
511 \
512 __x = (x); \
513 __n = (n); \
514 \
515 /* If it will wrap around, sync to the end of the ring. */ \
516 if ((__x + __n) > WM_NTXDESC(sc)) { \
517 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
518 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
519 (WM_NTXDESC(sc) - __x), (ops)); \
520 __n -= (WM_NTXDESC(sc) - __x); \
521 __x = 0; \
522 } \
523 \
524 /* Now sync whatever is left. */ \
525 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
526 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
527 } while (/*CONSTCOND*/0)
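
/*
 * Usage sketch (illustrative): sync "count" descriptors starting at
 * "first"; the macro splits the operation in two when the range wraps
 * past the end of the ring:
 *
 *	WM_CDTXSYNC(sc, first, count, BUS_DMASYNC_PREWRITE);
 */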
528
529 #define WM_CDRXSYNC(sc, x, ops) \
530 do { \
531 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
532 WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
533 } while (/*CONSTCOND*/0)
534
535 #define WM_INIT_RXDESC(sc, x) \
536 do { \
537 struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
538 wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
539 struct mbuf *__m = __rxs->rxs_mbuf; \
540 \
541 /* \
542 * Note: We scoot the packet forward 2 bytes in the buffer \
543 * so that the payload after the Ethernet header is aligned \
544 * to a 4-byte boundary. \
545 * \
546 * XXX BRAINDAMAGE ALERT! \
547 * The stupid chip uses the same size for every buffer, which \
548 * is set in the Receive Control register. We are using the 2K \
549 * size option, but what we REALLY want is (2K - 2)! For this \
550 * reason, we can't "scoot" packets longer than the standard \
551 * Ethernet MTU. On strict-alignment platforms, if the total \
552 * size exceeds (2K - 2) we set align_tweak to 0 and let \
553 * the upper layer copy the headers. \
554 */ \
555 __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
556 \
557 wm_set_dma_addr(&__rxd->wrx_addr, \
558 __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
559 __rxd->wrx_len = 0; \
560 __rxd->wrx_cksum = 0; \
561 __rxd->wrx_status = 0; \
562 __rxd->wrx_errors = 0; \
563 __rxd->wrx_special = 0; \
564 WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
565 \
566 CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
567 } while (/*CONSTCOND*/0)
568
569 /*
570 * Register read/write functions.
571 * Other than CSR_{READ|WRITE}().
572 */
573 #if 0
574 static inline uint32_t wm_io_read(struct wm_softc *, int);
575 #endif
576 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
577 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
578 uint32_t, uint32_t);
579 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
580
581 /*
582 * Device driver interface functions and commonly used functions.
583 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
584 */
585 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
586 static int wm_match(device_t, cfdata_t, void *);
587 static void wm_attach(device_t, device_t, void *);
588 static int wm_detach(device_t, int);
589 static bool wm_suspend(device_t, const pmf_qual_t *);
590 static bool wm_resume(device_t, const pmf_qual_t *);
591 static void wm_watchdog(struct ifnet *);
592 static void wm_tick(void *);
593 static int wm_ifflags_cb(struct ethercom *);
594 static int wm_ioctl(struct ifnet *, u_long, void *);
595 /* MAC address related */
596 static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
597 static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
598 static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
599 static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
600 static void wm_set_filter(struct wm_softc *);
601 /* Reset and init related */
602 static void wm_set_vlan(struct wm_softc *);
603 static void wm_set_pcie_completion_timeout(struct wm_softc *);
604 static void wm_get_auto_rd_done(struct wm_softc *);
605 static void wm_lan_init_done(struct wm_softc *);
606 static void wm_get_cfg_done(struct wm_softc *);
607 static void wm_initialize_hardware_bits(struct wm_softc *);
608 static uint32_t wm_rxpbs_adjust_82580(uint32_t);
609 static void wm_reset(struct wm_softc *);
610 static int wm_add_rxbuf(struct wm_softc *, int);
611 static void wm_rxdrain(struct wm_softc *);
612 static int wm_init(struct ifnet *);
613 static int wm_init_locked(struct ifnet *);
614 static void wm_stop(struct ifnet *, int);
615 static void wm_stop_locked(struct ifnet *, int);
616 static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
617 uint32_t *, uint8_t *);
618 static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
619 static void wm_82547_txfifo_stall(void *);
620 static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
621 /* Start */
622 static void wm_start(struct ifnet *);
623 static void wm_start_locked(struct ifnet *);
624 static int wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
625 uint32_t *, uint32_t *, bool *);
626 static void wm_nq_start(struct ifnet *);
627 static void wm_nq_start_locked(struct ifnet *);
628 /* Interrupt */
629 static int wm_txeof(struct wm_softc *);
630 static void wm_rxeof(struct wm_softc *);
631 static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
632 static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
633 static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
634 static void wm_linkintr(struct wm_softc *, uint32_t);
635 static int wm_intr_legacy(void *);
636 #ifdef WM_MSI_MSIX
637 static int wm_txintr_msix(void *);
638 static int wm_rxintr_msix(void *);
639 static int wm_linkintr_msix(void *);
640 #endif
641
642 /*
643 * Media related.
644 * GMII, SGMII, TBI, SERDES and SFP.
645 */
646 /* Common */
647 static void wm_tbi_serdes_set_linkled(struct wm_softc *);
648 /* GMII related */
649 static void wm_gmii_reset(struct wm_softc *);
650 static int wm_get_phy_id_82575(struct wm_softc *);
651 static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
652 static int wm_gmii_mediachange(struct ifnet *);
653 static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
654 static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
655 static uint32_t wm_i82543_mii_recvbits(struct wm_softc *);
656 static int wm_gmii_i82543_readreg(device_t, int, int);
657 static void wm_gmii_i82543_writereg(device_t, int, int, int);
658 static int wm_gmii_i82544_readreg(device_t, int, int);
659 static void wm_gmii_i82544_writereg(device_t, int, int, int);
660 static int wm_gmii_i80003_readreg(device_t, int, int);
661 static void wm_gmii_i80003_writereg(device_t, int, int, int);
662 static int wm_gmii_bm_readreg(device_t, int, int);
663 static void wm_gmii_bm_writereg(device_t, int, int, int);
664 static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
665 static int wm_gmii_hv_readreg(device_t, int, int);
666 static void wm_gmii_hv_writereg(device_t, int, int, int);
667 static int wm_gmii_82580_readreg(device_t, int, int);
668 static void wm_gmii_82580_writereg(device_t, int, int, int);
669 static int wm_gmii_gs40g_readreg(device_t, int, int);
670 static void wm_gmii_gs40g_writereg(device_t, int, int, int);
671 static void wm_gmii_statchg(struct ifnet *);
672 static int wm_kmrn_readreg(struct wm_softc *, int);
673 static void wm_kmrn_writereg(struct wm_softc *, int, int);
674 /* SGMII */
675 static bool wm_sgmii_uses_mdio(struct wm_softc *);
676 static int wm_sgmii_readreg(device_t, int, int);
677 static void wm_sgmii_writereg(device_t, int, int, int);
678 /* TBI related */
679 static void wm_tbi_mediainit(struct wm_softc *);
680 static int wm_tbi_mediachange(struct ifnet *);
681 static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
682 static int wm_check_for_link(struct wm_softc *);
683 static void wm_tbi_tick(struct wm_softc *);
684 /* SERDES related */
685 static void wm_serdes_power_up_link_82575(struct wm_softc *);
686 static int wm_serdes_mediachange(struct ifnet *);
687 static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
688 static void wm_serdes_tick(struct wm_softc *);
689 /* SFP related */
690 static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
691 static uint32_t wm_sfp_get_media_type(struct wm_softc *);
692
693 /*
694 * NVM related.
695 * Microwire, SPI (w/wo EERD) and Flash.
696 */
697 /* Misc functions */
698 static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
699 static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
700 static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
701 /* Microwire */
702 static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
703 /* SPI */
704 static int wm_nvm_ready_spi(struct wm_softc *);
705 static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
   706 /* Used with EERD */
707 static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
708 static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
709 /* Flash */
710 static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
711 unsigned int *);
712 static int32_t wm_ich8_cycle_init(struct wm_softc *);
713 static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
714 static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
715 uint16_t *);
716 static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
717 static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
718 static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
719 /* iNVM */
720 static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
721 static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
   722 /* Locking, NVM type detection, checksum validation and read */
723 static int wm_nvm_acquire(struct wm_softc *);
724 static void wm_nvm_release(struct wm_softc *);
725 static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
726 static int wm_nvm_get_flash_presence_i210(struct wm_softc *);
727 static int wm_nvm_validate_checksum(struct wm_softc *);
728 static void wm_nvm_version_invm(struct wm_softc *);
729 static void wm_nvm_version(struct wm_softc *);
730 static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
731
   732 /*
   733  * Hardware semaphores.
   734  * Very complex...
   735  */
736 static int wm_get_swsm_semaphore(struct wm_softc *);
737 static void wm_put_swsm_semaphore(struct wm_softc *);
738 static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
739 static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
740 static int wm_get_swfwhw_semaphore(struct wm_softc *);
741 static void wm_put_swfwhw_semaphore(struct wm_softc *);
742 static int wm_get_hw_semaphore_82573(struct wm_softc *);
743 static void wm_put_hw_semaphore_82573(struct wm_softc *);
744
745 /*
746 * Management mode and power management related subroutines.
747 * BMC, AMT, suspend/resume and EEE.
748 */
749 static int wm_check_mng_mode(struct wm_softc *);
750 static int wm_check_mng_mode_ich8lan(struct wm_softc *);
751 static int wm_check_mng_mode_82574(struct wm_softc *);
752 static int wm_check_mng_mode_generic(struct wm_softc *);
753 static int wm_enable_mng_pass_thru(struct wm_softc *);
754 static int wm_check_reset_block(struct wm_softc *);
755 static void wm_get_hw_control(struct wm_softc *);
756 static void wm_release_hw_control(struct wm_softc *);
757 static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
758 static void wm_smbustopci(struct wm_softc *);
759 static void wm_init_manageability(struct wm_softc *);
760 static void wm_release_manageability(struct wm_softc *);
761 static void wm_get_wakeup(struct wm_softc *);
762 #ifdef WM_WOL
763 static void wm_enable_phy_wakeup(struct wm_softc *);
764 static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
765 static void wm_enable_wakeup(struct wm_softc *);
766 #endif
767 /* EEE */
768 static void wm_set_eee_i350(struct wm_softc *);
769
770 /*
771 * Workarounds (mainly PHY related).
   772  * Basically, the PHY workarounds live in the PHY drivers.
773 */
774 static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
775 static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
776 static void wm_hv_phy_workaround_ich8lan(struct wm_softc *);
777 static void wm_lv_phy_workaround_ich8lan(struct wm_softc *);
778 static void wm_k1_gig_workaround_hv(struct wm_softc *, int);
779 static void wm_set_mdio_slow_mode_hv(struct wm_softc *);
780 static void wm_configure_k1_ich8lan(struct wm_softc *, int);
781 static void wm_reset_init_script_82575(struct wm_softc *);
782 static void wm_reset_mdicnfg_82580(struct wm_softc *);
783 static void wm_pll_workaround_i210(struct wm_softc *);
784
785 #ifdef WM_MSI_MSIX
786 struct _msix_matrix {
787 const char *intrname;
788 int(*func)(void *);
789 int intridx;
790 int cpuid;
791 } msix_matrix[WM_MSIX_NINTR] = {
792 { "TX", wm_txintr_msix, WM_MSIX_TXINTR_IDX, WM_MSIX_TXINTR_CPUID },
793 { "RX", wm_rxintr_msix, WM_MSIX_RXINTR_IDX, WM_MSIX_RXINTR_CPUID },
794 { "LINK", wm_linkintr_msix, WM_MSIX_LINKINTR_IDX,
795 WM_MSIX_LINKINTR_CPUID },
796 };
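
/*
 * Illustrative sketch only (the actual code is in wm_attach()): the
 * table above is meant to be walked row by row, establishing one
 * handler per vector and binding it to the listed CPU, roughly:
 *
 *	for (i = 0; i < WM_MSIX_NINTR; i++)
 *		sc->sc_ihs[i] = pci_intr_establish_xname(pc,
 *		    sc->sc_intrs[msix_matrix[i].intridx], IPL_NET,
 *		    msix_matrix[i].func, sc, msix_matrix[i].intrname);
 */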
797 #endif
798
799 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
800 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
801
802 /*
803 * Devices supported by this driver.
804 */
805 static const struct wm_product {
806 pci_vendor_id_t wmp_vendor;
807 pci_product_id_t wmp_product;
808 const char *wmp_name;
809 wm_chip_type wmp_type;
810 uint32_t wmp_flags;
811 #define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN
812 #define WMP_F_FIBER WM_MEDIATYPE_FIBER
813 #define WMP_F_COPPER WM_MEDIATYPE_COPPER
814 #define WMP_F_SERDES WM_MEDIATYPE_SERDES
815 #define WMP_MEDIATYPE(x) ((x) & 0x03)
816 } wm_products[] = {
817 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
818 "Intel i82542 1000BASE-X Ethernet",
819 WM_T_82542_2_1, WMP_F_FIBER },
820
821 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
822 "Intel i82543GC 1000BASE-X Ethernet",
823 WM_T_82543, WMP_F_FIBER },
824
825 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
826 "Intel i82543GC 1000BASE-T Ethernet",
827 WM_T_82543, WMP_F_COPPER },
828
829 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
830 "Intel i82544EI 1000BASE-T Ethernet",
831 WM_T_82544, WMP_F_COPPER },
832
833 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
834 "Intel i82544EI 1000BASE-X Ethernet",
835 WM_T_82544, WMP_F_FIBER },
836
837 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
838 "Intel i82544GC 1000BASE-T Ethernet",
839 WM_T_82544, WMP_F_COPPER },
840
841 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
842 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
843 WM_T_82544, WMP_F_COPPER },
844
845 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
846 "Intel i82540EM 1000BASE-T Ethernet",
847 WM_T_82540, WMP_F_COPPER },
848
849 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
850 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
851 WM_T_82540, WMP_F_COPPER },
852
853 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
854 "Intel i82540EP 1000BASE-T Ethernet",
855 WM_T_82540, WMP_F_COPPER },
856
857 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
858 "Intel i82540EP 1000BASE-T Ethernet",
859 WM_T_82540, WMP_F_COPPER },
860
861 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
862 "Intel i82540EP 1000BASE-T Ethernet",
863 WM_T_82540, WMP_F_COPPER },
864
865 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
866 "Intel i82545EM 1000BASE-T Ethernet",
867 WM_T_82545, WMP_F_COPPER },
868
869 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
870 "Intel i82545GM 1000BASE-T Ethernet",
871 WM_T_82545_3, WMP_F_COPPER },
872
873 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
874 "Intel i82545GM 1000BASE-X Ethernet",
875 WM_T_82545_3, WMP_F_FIBER },
876
877 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
878 "Intel i82545GM Gigabit Ethernet (SERDES)",
879 WM_T_82545_3, WMP_F_SERDES },
880
881 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
882 "Intel i82546EB 1000BASE-T Ethernet",
883 WM_T_82546, WMP_F_COPPER },
884
885 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
886 "Intel i82546EB 1000BASE-T Ethernet",
887 WM_T_82546, WMP_F_COPPER },
888
889 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
890 "Intel i82545EM 1000BASE-X Ethernet",
891 WM_T_82545, WMP_F_FIBER },
892
893 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
894 "Intel i82546EB 1000BASE-X Ethernet",
895 WM_T_82546, WMP_F_FIBER },
896
897 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
898 "Intel i82546GB 1000BASE-T Ethernet",
899 WM_T_82546_3, WMP_F_COPPER },
900
901 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
902 "Intel i82546GB 1000BASE-X Ethernet",
903 WM_T_82546_3, WMP_F_FIBER },
904
905 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
906 "Intel i82546GB Gigabit Ethernet (SERDES)",
907 WM_T_82546_3, WMP_F_SERDES },
908
909 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
910 "i82546GB quad-port Gigabit Ethernet",
911 WM_T_82546_3, WMP_F_COPPER },
912
913 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
914 "i82546GB quad-port Gigabit Ethernet (KSP3)",
915 WM_T_82546_3, WMP_F_COPPER },
916
917 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
918 "Intel PRO/1000MT (82546GB)",
919 WM_T_82546_3, WMP_F_COPPER },
920
921 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
922 "Intel i82541EI 1000BASE-T Ethernet",
923 WM_T_82541, WMP_F_COPPER },
924
925 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
926 "Intel i82541ER (LOM) 1000BASE-T Ethernet",
927 WM_T_82541, WMP_F_COPPER },
928
929 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
930 "Intel i82541EI Mobile 1000BASE-T Ethernet",
931 WM_T_82541, WMP_F_COPPER },
932
933 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
934 "Intel i82541ER 1000BASE-T Ethernet",
935 WM_T_82541_2, WMP_F_COPPER },
936
937 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
938 "Intel i82541GI 1000BASE-T Ethernet",
939 WM_T_82541_2, WMP_F_COPPER },
940
941 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
942 "Intel i82541GI Mobile 1000BASE-T Ethernet",
943 WM_T_82541_2, WMP_F_COPPER },
944
945 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
946 "Intel i82541PI 1000BASE-T Ethernet",
947 WM_T_82541_2, WMP_F_COPPER },
948
949 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
950 "Intel i82547EI 1000BASE-T Ethernet",
951 WM_T_82547, WMP_F_COPPER },
952
953 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
954 "Intel i82547EI Mobile 1000BASE-T Ethernet",
955 WM_T_82547, WMP_F_COPPER },
956
957 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
958 "Intel i82547GI 1000BASE-T Ethernet",
959 WM_T_82547_2, WMP_F_COPPER },
960
961 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
962 "Intel PRO/1000 PT (82571EB)",
963 WM_T_82571, WMP_F_COPPER },
964
965 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
966 "Intel PRO/1000 PF (82571EB)",
967 WM_T_82571, WMP_F_FIBER },
968
969 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
970 "Intel PRO/1000 PB (82571EB)",
971 WM_T_82571, WMP_F_SERDES },
972
973 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
974 "Intel PRO/1000 QT (82571EB)",
975 WM_T_82571, WMP_F_COPPER },
976
977 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
978 "Intel PRO/1000 PT Quad Port Server Adapter",
979 WM_T_82571, WMP_F_COPPER, },
980
981 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
982 "Intel Gigabit PT Quad Port Server ExpressModule",
983 WM_T_82571, WMP_F_COPPER, },
984
985 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
986 "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
987 WM_T_82571, WMP_F_SERDES, },
988
989 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
990 "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
991 WM_T_82571, WMP_F_SERDES, },
992
993 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
994 "Intel 82571EB Quad 1000baseX Ethernet",
995 WM_T_82571, WMP_F_FIBER, },
996
997 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
998 "Intel i82572EI 1000baseT Ethernet",
999 WM_T_82572, WMP_F_COPPER },
1000
1001 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
1002 "Intel i82572EI 1000baseX Ethernet",
1003 WM_T_82572, WMP_F_FIBER },
1004
1005 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
1006 "Intel i82572EI Gigabit Ethernet (SERDES)",
1007 WM_T_82572, WMP_F_SERDES },
1008
1009 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
1010 "Intel i82572EI 1000baseT Ethernet",
1011 WM_T_82572, WMP_F_COPPER },
1012
1013 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
1014 "Intel i82573E",
1015 WM_T_82573, WMP_F_COPPER },
1016
1017 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
1018 "Intel i82573E IAMT",
1019 WM_T_82573, WMP_F_COPPER },
1020
1021 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
1022 "Intel i82573L Gigabit Ethernet",
1023 WM_T_82573, WMP_F_COPPER },
1024
1025 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
1026 "Intel i82574L",
1027 WM_T_82574, WMP_F_COPPER },
1028
1029 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA,
1030 "Intel i82574L",
1031 WM_T_82574, WMP_F_COPPER },
1032
1033 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
1034 "Intel i82583V",
1035 WM_T_82583, WMP_F_COPPER },
1036
1037 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1038 "i80003 dual 1000baseT Ethernet",
1039 WM_T_80003, WMP_F_COPPER },
1040
1041 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1042 "i80003 dual 1000baseX Ethernet",
1043 WM_T_80003, WMP_F_COPPER },
1044
1045 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1046 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1047 WM_T_80003, WMP_F_SERDES },
1048
1049 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1050 "Intel i80003 1000baseT Ethernet",
1051 WM_T_80003, WMP_F_COPPER },
1052
1053 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1054 "Intel i80003 Gigabit Ethernet (SERDES)",
1055 WM_T_80003, WMP_F_SERDES },
1056
1057 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
1058 "Intel i82801H (M_AMT) LAN Controller",
1059 WM_T_ICH8, WMP_F_COPPER },
1060 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
1061 "Intel i82801H (AMT) LAN Controller",
1062 WM_T_ICH8, WMP_F_COPPER },
1063 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
1064 "Intel i82801H LAN Controller",
1065 WM_T_ICH8, WMP_F_COPPER },
1066 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1067 "Intel i82801H (IFE) LAN Controller",
1068 WM_T_ICH8, WMP_F_COPPER },
1069 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
1070 "Intel i82801H (M) LAN Controller",
1071 WM_T_ICH8, WMP_F_COPPER },
1072 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
1073 "Intel i82801H IFE (GT) LAN Controller",
1074 WM_T_ICH8, WMP_F_COPPER },
1075 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
1076 "Intel i82801H IFE (G) LAN Controller",
1077 WM_T_ICH8, WMP_F_COPPER },
1078 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1079 "82801I (AMT) LAN Controller",
1080 WM_T_ICH9, WMP_F_COPPER },
1081 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
1082 "82801I LAN Controller",
1083 WM_T_ICH9, WMP_F_COPPER },
1084 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
1085 "82801I (G) LAN Controller",
1086 WM_T_ICH9, WMP_F_COPPER },
1087 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
1088 "82801I (GT) LAN Controller",
1089 WM_T_ICH9, WMP_F_COPPER },
1090 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
1091 "82801I (C) LAN Controller",
1092 WM_T_ICH9, WMP_F_COPPER },
1093 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
1094 "82801I mobile LAN Controller",
1095 WM_T_ICH9, WMP_F_COPPER },
1096 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
1097 "82801I mobile (V) LAN Controller",
1098 WM_T_ICH9, WMP_F_COPPER },
1099 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1100 "82801I mobile (AMT) LAN Controller",
1101 WM_T_ICH9, WMP_F_COPPER },
1102 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
1103 "82567LM-4 LAN Controller",
1104 WM_T_ICH9, WMP_F_COPPER },
1105 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
1106 "82567V-3 LAN Controller",
1107 WM_T_ICH9, WMP_F_COPPER },
1108 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1109 "82567LM-2 LAN Controller",
1110 WM_T_ICH10, WMP_F_COPPER },
1111 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1112 "82567LF-2 LAN Controller",
1113 WM_T_ICH10, WMP_F_COPPER },
1114 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1115 "82567LM-3 LAN Controller",
1116 WM_T_ICH10, WMP_F_COPPER },
1117 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1118 "82567LF-3 LAN Controller",
1119 WM_T_ICH10, WMP_F_COPPER },
1120 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
1121 "82567V-2 LAN Controller",
1122 WM_T_ICH10, WMP_F_COPPER },
1123 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
1124 "82567V-3? LAN Controller",
1125 WM_T_ICH10, WMP_F_COPPER },
1126 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
1127 "HANKSVILLE LAN Controller",
1128 WM_T_ICH10, WMP_F_COPPER },
1129 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
1130 "PCH LAN (82577LM) Controller",
1131 WM_T_PCH, WMP_F_COPPER },
1132 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
1133 "PCH LAN (82577LC) Controller",
1134 WM_T_PCH, WMP_F_COPPER },
1135 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
1136 "PCH LAN (82578DM) Controller",
1137 WM_T_PCH, WMP_F_COPPER },
1138 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
1139 "PCH LAN (82578DC) Controller",
1140 WM_T_PCH, WMP_F_COPPER },
1141 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
1142 "PCH2 LAN (82579LM) Controller",
1143 WM_T_PCH2, WMP_F_COPPER },
1144 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
1145 "PCH2 LAN (82579V) Controller",
1146 WM_T_PCH2, WMP_F_COPPER },
1147 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
1148 "82575EB dual-1000baseT Ethernet",
1149 WM_T_82575, WMP_F_COPPER },
1150 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1151 "82575EB dual-1000baseX Ethernet (SERDES)",
1152 WM_T_82575, WMP_F_SERDES },
1153 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1154 "82575GB quad-1000baseT Ethernet",
1155 WM_T_82575, WMP_F_COPPER },
1156 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1157 "82575GB quad-1000baseT Ethernet (PM)",
1158 WM_T_82575, WMP_F_COPPER },
1159 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
1160 "82576 1000BaseT Ethernet",
1161 WM_T_82576, WMP_F_COPPER },
1162 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
1163 "82576 1000BaseX Ethernet",
1164 WM_T_82576, WMP_F_FIBER },
1165
1166 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
1167 "82576 gigabit Ethernet (SERDES)",
1168 WM_T_82576, WMP_F_SERDES },
1169
1170 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1171 "82576 quad-1000BaseT Ethernet",
1172 WM_T_82576, WMP_F_COPPER },
1173
1174 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1175 "82576 Gigabit ET2 Quad Port Server Adapter",
1176 WM_T_82576, WMP_F_COPPER },
1177
1178 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
1179 "82576 gigabit Ethernet",
1180 WM_T_82576, WMP_F_COPPER },
1181
1182 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
1183 "82576 gigabit Ethernet (SERDES)",
1184 WM_T_82576, WMP_F_SERDES },
1185 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1186 "82576 quad-gigabit Ethernet (SERDES)",
1187 WM_T_82576, WMP_F_SERDES },
1188
1189 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
1190 "82580 1000BaseT Ethernet",
1191 WM_T_82580, WMP_F_COPPER },
1192 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
1193 "82580 1000BaseX Ethernet",
1194 WM_T_82580, WMP_F_FIBER },
1195
1196 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
1197 "82580 1000BaseT Ethernet (SERDES)",
1198 WM_T_82580, WMP_F_SERDES },
1199
1200 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
1201 "82580 gigabit Ethernet (SGMII)",
1202 WM_T_82580, WMP_F_COPPER },
1203 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1204 "82580 dual-1000BaseT Ethernet",
1205 WM_T_82580, WMP_F_COPPER },
1206
1207 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1208 "82580 quad-1000BaseX Ethernet",
1209 WM_T_82580, WMP_F_FIBER },
1210
1211 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1212 "DH89XXCC Gigabit Ethernet (SGMII)",
1213 WM_T_82580, WMP_F_COPPER },
1214
1215 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1216 "DH89XXCC Gigabit Ethernet (SERDES)",
1217 WM_T_82580, WMP_F_SERDES },
1218
1219 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1220 "DH89XXCC 1000BASE-KX Ethernet",
1221 WM_T_82580, WMP_F_SERDES },
1222
1223 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1224 "DH89XXCC Gigabit Ethernet (SFP)",
1225 WM_T_82580, WMP_F_SERDES },
1226
1227 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1228 "I350 Gigabit Network Connection",
1229 WM_T_I350, WMP_F_COPPER },
1230
1231 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1232 "I350 Gigabit Fiber Network Connection",
1233 WM_T_I350, WMP_F_FIBER },
1234
1235 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1236 "I350 Gigabit Backplane Connection",
1237 WM_T_I350, WMP_F_SERDES },
1238
1239 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
1240 "I350 Quad Port Gigabit Ethernet",
1241 WM_T_I350, WMP_F_SERDES },
1242
1243 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1244 "I350 Gigabit Connection",
1245 WM_T_I350, WMP_F_COPPER },
1246
1247 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
1248 "I354 Gigabit Ethernet (KX)",
1249 WM_T_I354, WMP_F_SERDES },
1250
1251 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1252 "I354 Gigabit Ethernet (SGMII)",
1253 WM_T_I354, WMP_F_COPPER },
1254
1255 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
1256 "I354 Gigabit Ethernet (2.5G)",
1257 WM_T_I354, WMP_F_COPPER },
1258
1259 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1260 "I210-T1 Ethernet Server Adapter",
1261 WM_T_I210, WMP_F_COPPER },
1262
1263 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1264 "I210 Ethernet (Copper OEM)",
1265 WM_T_I210, WMP_F_COPPER },
1266
1267 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1268 "I210 Ethernet (Copper IT)",
1269 WM_T_I210, WMP_F_COPPER },
1270
1271 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1272 "I210 Ethernet (FLASH less)",
1273 WM_T_I210, WMP_F_COPPER },
1274
1275 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1276 "I210 Gigabit Ethernet (Fiber)",
1277 WM_T_I210, WMP_F_FIBER },
1278
1279 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1280 "I210 Gigabit Ethernet (SERDES)",
1281 WM_T_I210, WMP_F_SERDES },
1282
1283 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1284 "I210 Gigabit Ethernet (FLASH less)",
1285 WM_T_I210, WMP_F_SERDES },
1286
1287 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1288 "I210 Gigabit Ethernet (SGMII)",
1289 WM_T_I210, WMP_F_COPPER },
1290
1291 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1292 "I211 Ethernet (COPPER)",
1293 WM_T_I211, WMP_F_COPPER },
1294 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1295 "I217 V Ethernet Connection",
1296 WM_T_PCH_LPT, WMP_F_COPPER },
1297 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1298 "I217 LM Ethernet Connection",
1299 WM_T_PCH_LPT, WMP_F_COPPER },
1300 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1301 "I218 V Ethernet Connection",
1302 WM_T_PCH_LPT, WMP_F_COPPER },
1303 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
1304 "I218 V Ethernet Connection",
1305 WM_T_PCH_LPT, WMP_F_COPPER },
1306 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
1307 "I218 V Ethernet Connection",
1308 WM_T_PCH_LPT, WMP_F_COPPER },
1309 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1310 "I218 LM Ethernet Connection",
1311 WM_T_PCH_LPT, WMP_F_COPPER },
1312 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
1313 "I218 LM Ethernet Connection",
1314 WM_T_PCH_LPT, WMP_F_COPPER },
1315 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
1316 "I218 LM Ethernet Connection",
1317 WM_T_PCH_LPT, WMP_F_COPPER },
1318 { 0, 0,
1319 NULL,
1320 0, 0 },
1321 };
1322
1323 #ifdef WM_EVENT_COUNTERS
1324 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
1325 #endif /* WM_EVENT_COUNTERS */
1326
1327
1328 /*
1329 * Register read/write functions.
1330 * Other than CSR_{READ|WRITE}().
1331 */
1332
1333 #if 0 /* Not currently used */
1334 static inline uint32_t
1335 wm_io_read(struct wm_softc *sc, int reg)
1336 {
1337
1338 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1339 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1340 }
1341 #endif
1342
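/*
 * Indirect register access through the I/O BAR: offset 0 is the
 * address window and offset 4 the data window, so every access is a
 * two-step sequence (write the register offset, then move the data).
 */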
1343 static inline void
1344 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1345 {
1346
1347 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1348 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1349 }
1350
1351 static inline void
1352 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1353 uint32_t data)
1354 {
1355 uint32_t regval;
1356 int i;
1357
1358 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1359
1360 CSR_WRITE(sc, reg, regval);
1361
1362 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1363 delay(5);
1364 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1365 break;
1366 }
1367 if (i == SCTL_CTL_POLL_TIMEOUT) {
1368 aprint_error("%s: WARNING:"
1369 " i82575 reg 0x%08x setup did not indicate ready\n",
1370 device_xname(sc->sc_dev), reg);
1371 }
1372 }
1373
1374 static inline void
1375 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1376 {
1377 wa->wa_low = htole32(v & 0xffffffffU);
1378 if (sizeof(bus_addr_t) == 8)
1379 wa->wa_high = htole32((uint64_t) v >> 32);
1380 else
1381 wa->wa_high = 0;
1382 }
1383
1384 /*
1385 * Device driver interface functions and commonly used functions.
1386 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1387 */
1388
  1389 /* Look up a device in the supported-products table */
1390 static const struct wm_product *
1391 wm_lookup(const struct pci_attach_args *pa)
1392 {
1393 const struct wm_product *wmp;
1394
1395 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1396 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1397 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1398 return wmp;
1399 }
1400 return NULL;
1401 }
1402
1403 /* The match function (ca_match) */
1404 static int
1405 wm_match(device_t parent, cfdata_t cf, void *aux)
1406 {
1407 struct pci_attach_args *pa = aux;
1408
1409 if (wm_lookup(pa) != NULL)
1410 return 1;
1411
1412 return 0;
1413 }
1414
1415 /* The attach function (ca_attach) */
1416 static void
1417 wm_attach(device_t parent, device_t self, void *aux)
1418 {
1419 struct wm_softc *sc = device_private(self);
1420 struct pci_attach_args *pa = aux;
1421 prop_dictionary_t dict;
1422 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1423 pci_chipset_tag_t pc = pa->pa_pc;
1424 #ifndef WM_MSI_MSIX
1425 pci_intr_handle_t ih;
1426 #else
1427 int counts[PCI_INTR_TYPE_SIZE];
1428 pci_intr_type_t max_type;
1429 #endif
1430 const char *intrstr = NULL;
1431 const char *eetype, *xname;
1432 bus_space_tag_t memt;
1433 bus_space_handle_t memh;
1434 bus_size_t memsize;
1435 int memh_valid;
1436 int i, error;
1437 const struct wm_product *wmp;
1438 prop_data_t ea;
1439 prop_number_t pn;
1440 uint8_t enaddr[ETHER_ADDR_LEN];
1441 uint16_t cfg1, cfg2, swdpin, nvmword;
1442 pcireg_t preg, memtype;
1443 uint16_t eeprom_data, apme_mask;
1444 bool force_clear_smbi;
1445 uint32_t link_mode;
1446 uint32_t reg;
1447 char intrbuf[PCI_INTRSTR_LEN];
1448
1449 sc->sc_dev = self;
1450 callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1451 sc->sc_stopping = false;
1452
1453 wmp = wm_lookup(pa);
1454 #ifdef DIAGNOSTIC
1455 if (wmp == NULL) {
1456 printf("\n");
1457 panic("wm_attach: impossible");
1458 }
1459 #endif
1460 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1461
1462 sc->sc_pc = pa->pa_pc;
1463 sc->sc_pcitag = pa->pa_tag;
1464
1465 if (pci_dma64_available(pa))
1466 sc->sc_dmat = pa->pa_dmat64;
1467 else
1468 sc->sc_dmat = pa->pa_dmat;
1469
1470 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1471 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1472 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1473
1474 sc->sc_type = wmp->wmp_type;
1475 if (sc->sc_type < WM_T_82543) {
1476 if (sc->sc_rev < 2) {
1477 aprint_error_dev(sc->sc_dev,
1478 "i82542 must be at least rev. 2\n");
1479 return;
1480 }
1481 if (sc->sc_rev < 3)
1482 sc->sc_type = WM_T_82542_2_0;
1483 }
1484
1485 /*
1486 * Disable MSI for Errata:
1487 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1488 *
1489 * 82544: Errata 25
1490 * 82540: Errata 6 (easy to reproduce device timeout)
1491 * 82545: Errata 4 (easy to reproduce device timeout)
1492 * 82546: Errata 26 (easy to reproduce device timeout)
1493 * 82541: Errata 7 (easy to reproduce device timeout)
1494 *
1495 * "Byte Enables 2 and 3 are not set on MSI writes"
1496 *
1497 * 82571 & 82572: Errata 63
1498 */
1499 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1500 || (sc->sc_type == WM_T_82572))
1501 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1502
1503 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1504 || (sc->sc_type == WM_T_82580)
1505 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1506 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1507 sc->sc_flags |= WM_F_NEWQUEUE;
1508
1509 /* Set device properties (mactype) */
1510 dict = device_properties(sc->sc_dev);
1511 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1512
  1513 	/*
  1514 	 * Map the device. All devices support memory-mapped access,
  1515 	 * and it is really required for normal operation.
  1516 	 */
1517 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1518 switch (memtype) {
1519 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1520 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1521 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1522 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1523 break;
1524 default:
1525 memh_valid = 0;
1526 break;
1527 }
1528
1529 if (memh_valid) {
1530 sc->sc_st = memt;
1531 sc->sc_sh = memh;
1532 sc->sc_ss = memsize;
1533 } else {
1534 aprint_error_dev(sc->sc_dev,
1535 "unable to map device registers\n");
1536 return;
1537 }
1538
1539 /*
1540 * In addition, i82544 and later support I/O mapped indirect
1541 * register access. It is not desirable (nor supported in
1542 * this driver) to use it for normal operation, though it is
1543 * required to work around bugs in some chip versions.
1544 */
1545 if (sc->sc_type >= WM_T_82544) {
1546 /* First we have to find the I/O BAR. */
1547 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1548 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1549 if (memtype == PCI_MAPREG_TYPE_IO)
1550 break;
1551 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1552 PCI_MAPREG_MEM_TYPE_64BIT)
1553 i += 4; /* skip high bits, too */
1554 }
1555 if (i < PCI_MAPREG_END) {
1556 /*
1557 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1558 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1559 * That's no problem, because newer chips don't have
1560 * this bug.
1561 *
1562 * The i8254x apparently doesn't respond when the
1563 * I/O BAR is 0, which looks somewhat like it hasn't
1564 * been configured.
1565 */
1566 preg = pci_conf_read(pc, pa->pa_tag, i);
1567 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1568 aprint_error_dev(sc->sc_dev,
1569 "WARNING: I/O BAR at zero.\n");
1570 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1571 0, &sc->sc_iot, &sc->sc_ioh,
1572 NULL, &sc->sc_ios) == 0) {
1573 sc->sc_flags |= WM_F_IOH_VALID;
1574 } else {
1575 aprint_error_dev(sc->sc_dev,
1576 "WARNING: unable to map I/O space\n");
1577 }
1578 }
1579
1580 }
1581
1582 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1583 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1584 preg |= PCI_COMMAND_MASTER_ENABLE;
1585 if (sc->sc_type < WM_T_82542_2_1)
1586 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1587 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1588
1589 /* power up chip */
1590 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1591 NULL)) && error != EOPNOTSUPP) {
1592 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1593 return;
1594 }
1595
1596 #ifndef WM_MSI_MSIX
1597 /*
1598 * Map and establish our interrupt.
1599 */
1600 if (pci_intr_map(pa, &ih)) {
1601 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1602 return;
1603 }
1604 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
1605 #ifdef WM_MPSAFE
1606 pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
1607 #endif
1608 sc->sc_ihs[0] = pci_intr_establish_xname(pc, ih, IPL_NET,
1609 wm_intr_legacy, sc, device_xname(sc->sc_dev));
1610 if (sc->sc_ihs[0] == NULL) {
1611 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1612 if (intrstr != NULL)
1613 aprint_error(" at %s", intrstr);
1614 aprint_error("\n");
1615 return;
1616 }
1617 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1618 sc->sc_nintrs = 1;
1619 #else /* WM_MSI_MSIX */
1620 /* Allocation settings */
1621 max_type = PCI_INTR_TYPE_MSIX;
1622 counts[PCI_INTR_TYPE_MSIX] = WM_MAX_NINTR;
1623 counts[PCI_INTR_TYPE_MSI] = 1;
1624 counts[PCI_INTR_TYPE_INTX] = 1;
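/* Try MSI-X first; on failure fall back to MSI, then INTx */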
1625
1626 alloc_retry:
1627 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1628 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1629 return;
1630 }
1631
1632 if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1633 void *vih;
1634 kcpuset_t *affinity;
1635 char intr_xname[INTRDEVNAMEBUF];
1636
1637 kcpuset_create(&affinity, false);
1638
1639 for (i = 0; i < WM_MSIX_NINTR; i++) {
1640 intrstr = pci_intr_string(pc,
1641 sc->sc_intrs[msix_matrix[i].intridx], intrbuf,
1642 sizeof(intrbuf));
1643 #ifdef WM_MPSAFE
1644 pci_intr_setattr(pc,
1645 &sc->sc_intrs[msix_matrix[i].intridx],
1646 PCI_INTR_MPSAFE, true);
1647 #endif
1648 memset(intr_xname, 0, sizeof(intr_xname));
1649 strlcat(intr_xname, device_xname(sc->sc_dev),
1650 sizeof(intr_xname));
1651 strlcat(intr_xname, msix_matrix[i].intrname,
1652 sizeof(intr_xname));
1653 vih = pci_intr_establish_xname(pc,
1654 sc->sc_intrs[msix_matrix[i].intridx], IPL_NET,
1655 msix_matrix[i].func, sc, intr_xname);
1656 if (vih == NULL) {
1657 aprint_error_dev(sc->sc_dev,
1658 "unable to establish MSI-X(for %s)%s%s\n",
1659 msix_matrix[i].intrname,
1660 intrstr ? " at " : "",
1661 intrstr ? intrstr : "");
1662 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1663 WM_MSIX_NINTR);
1664 kcpuset_destroy(affinity);
1665
1666 /* Setup for MSI: Disable MSI-X */
1667 max_type = PCI_INTR_TYPE_MSI;
1668 counts[PCI_INTR_TYPE_MSI] = 1;
1669 counts[PCI_INTR_TYPE_INTX] = 1;
1670 goto alloc_retry;
1671 }
1672 kcpuset_zero(affinity);
1673 /* Round-robin affinity */
1674 kcpuset_set(affinity, msix_matrix[i].cpuid % ncpu);
1675 error = interrupt_distribute(vih, affinity, NULL);
1676 if (error == 0) {
1677 aprint_normal_dev(sc->sc_dev,
1678 "for %s interrupting at %s affinity to %u\n",
1679 msix_matrix[i].intrname, intrstr,
1680 msix_matrix[i].cpuid % ncpu);
1681 } else {
1682 aprint_normal_dev(sc->sc_dev,
1683 "for %s interrupting at %s\n",
1684 msix_matrix[i].intrname, intrstr);
1685 }
1686 sc->sc_ihs[msix_matrix[i].intridx] = vih;
1687 }
1688
1689 sc->sc_nintrs = WM_MSIX_NINTR;
1690 kcpuset_destroy(affinity);
1691 } else {
1692 /* MSI or INTx */
1693 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
1694 sizeof(intrbuf));
1695 #ifdef WM_MPSAFE
1696 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
1697 #endif
1698 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
1699 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
1700 if (sc->sc_ihs[0] == NULL) {
1701 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
1702 (pci_intr_type(sc->sc_intrs[0])
1703 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
1704 pci_intr_release(sc->sc_pc, sc->sc_intrs, 1);
1705 switch (pci_intr_type(sc->sc_intrs[0])) {
1706 case PCI_INTR_TYPE_MSI:
1707 /* The next try is for INTx: Disable MSI */
1708 max_type = PCI_INTR_TYPE_INTX;
1709 counts[PCI_INTR_TYPE_INTX] = 1;
1710 goto alloc_retry;
1711 case PCI_INTR_TYPE_INTX:
1712 default:
1713 return;
1714 }
1715 }
1716 aprint_normal_dev(sc->sc_dev, "%s at %s\n",
1717 (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI)
1718 ? "MSI" : "interrupting", intrstr);
1719
1720 sc->sc_nintrs = 1;
1721 }
1722 #endif /* WM_MSI_MSIX */
1723
1724 /*
1725 * Check the function ID (unit number of the chip).
1726 */
1727 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1728 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1729 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1730 || (sc->sc_type == WM_T_82580)
1731 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1732 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1733 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1734 else
1735 sc->sc_funcid = 0;
1736
1737 /*
1738 * Determine a few things about the bus we're connected to.
1739 */
1740 if (sc->sc_type < WM_T_82543) {
1741 /* We don't really know the bus characteristics here. */
1742 sc->sc_bus_speed = 33;
1743 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1744 /*
1745 * CSA (Communication Streaming Architecture) is about as fast
1746 * as a 32-bit 66MHz PCI bus.
1747 */
1748 sc->sc_flags |= WM_F_CSA;
1749 sc->sc_bus_speed = 66;
1750 aprint_verbose_dev(sc->sc_dev,
1751 "Communication Streaming Architecture\n");
1752 if (sc->sc_type == WM_T_82547) {
1753 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1754 callout_setfunc(&sc->sc_txfifo_ch,
1755 wm_82547_txfifo_stall, sc);
1756 aprint_verbose_dev(sc->sc_dev,
1757 "using 82547 Tx FIFO stall work-around\n");
1758 }
1759 } else if (sc->sc_type >= WM_T_82571) {
1760 sc->sc_flags |= WM_F_PCIE;
1761 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1762 && (sc->sc_type != WM_T_ICH10)
1763 && (sc->sc_type != WM_T_PCH)
1764 && (sc->sc_type != WM_T_PCH2)
1765 && (sc->sc_type != WM_T_PCH_LPT)) {
1766 /* ICH* and PCH* have no PCIe capability registers */
1767 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1768 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1769 NULL) == 0)
1770 aprint_error_dev(sc->sc_dev,
1771 "unable to find PCIe capability\n");
1772 }
1773 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1774 } else {
1775 reg = CSR_READ(sc, WMREG_STATUS);
1776 if (reg & STATUS_BUS64)
1777 sc->sc_flags |= WM_F_BUS64;
1778 if ((reg & STATUS_PCIX_MODE) != 0) {
1779 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1780
1781 sc->sc_flags |= WM_F_PCIX;
1782 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1783 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1784 aprint_error_dev(sc->sc_dev,
1785 "unable to find PCIX capability\n");
1786 else if (sc->sc_type != WM_T_82545_3 &&
1787 sc->sc_type != WM_T_82546_3) {
1788 /*
1789 * Work around a problem caused by the BIOS
1790 * setting the max memory read byte count
1791 * incorrectly.
1792 */
1793 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1794 sc->sc_pcixe_capoff + PCIX_CMD);
1795 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1796 sc->sc_pcixe_capoff + PCIX_STATUS);
1797
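/* Both fields encode the byte count as 512 << n */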
1798 bytecnt =
1799 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1800 PCIX_CMD_BYTECNT_SHIFT;
1801 maxb =
1802 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1803 PCIX_STATUS_MAXB_SHIFT;
1804 if (bytecnt > maxb) {
1805 aprint_verbose_dev(sc->sc_dev,
1806 "resetting PCI-X MMRBC: %d -> %d\n",
1807 512 << bytecnt, 512 << maxb);
1808 pcix_cmd = (pcix_cmd &
1809 ~PCIX_CMD_BYTECNT_MASK) |
1810 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1811 pci_conf_write(pa->pa_pc, pa->pa_tag,
1812 sc->sc_pcixe_capoff + PCIX_CMD,
1813 pcix_cmd);
1814 }
1815 }
1816 }
1817 /*
1818 * The quad port adapter is special; it has a PCIX-PCIX
1819 * bridge on the board, and can run the secondary bus at
1820 * a higher speed.
1821 */
1822 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1823 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1824 : 66;
1825 } else if (sc->sc_flags & WM_F_PCIX) {
1826 switch (reg & STATUS_PCIXSPD_MASK) {
1827 case STATUS_PCIXSPD_50_66:
1828 sc->sc_bus_speed = 66;
1829 break;
1830 case STATUS_PCIXSPD_66_100:
1831 sc->sc_bus_speed = 100;
1832 break;
1833 case STATUS_PCIXSPD_100_133:
1834 sc->sc_bus_speed = 133;
1835 break;
1836 default:
1837 aprint_error_dev(sc->sc_dev,
1838 "unknown PCIXSPD %d; assuming 66MHz\n",
1839 reg & STATUS_PCIXSPD_MASK);
1840 sc->sc_bus_speed = 66;
1841 break;
1842 }
1843 } else
1844 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1845 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1846 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1847 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1848 }
1849
1850 /*
1851 * Allocate the control data structures, and create and load the
1852 * DMA map for it.
1853 *
1854 * NOTE: All Tx descriptors must be in the same 4G segment of
1855 * memory. So must Rx descriptors. We simplify by allocating
1856 * both sets within the same 4G segment.
1857 */
1858 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1859 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1860 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1861 sizeof(struct wm_control_data_82542) :
1862 sizeof(struct wm_control_data_82544);
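/*
 * The 4GB boundary argument to bus_dmamem_alloc() below keeps
 * the allocation from crossing a 4GB boundary, satisfying the
 * note above.
 */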
1863 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1864 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1865 &sc->sc_cd_rseg, 0)) != 0) {
1866 aprint_error_dev(sc->sc_dev,
1867 "unable to allocate control data, error = %d\n",
1868 error);
1869 goto fail_0;
1870 }
1871
1872 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1873 sc->sc_cd_rseg, sc->sc_cd_size,
1874 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1875 aprint_error_dev(sc->sc_dev,
1876 "unable to map control data, error = %d\n", error);
1877 goto fail_1;
1878 }
1879
1880 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1881 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1882 aprint_error_dev(sc->sc_dev,
1883 "unable to create control data DMA map, error = %d\n",
1884 error);
1885 goto fail_2;
1886 }
1887
1888 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1889 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1890 aprint_error_dev(sc->sc_dev,
1891 "unable to load control data DMA map, error = %d\n",
1892 error);
1893 goto fail_3;
1894 }
1895
1896 /* Create the transmit buffer DMA maps. */
1897 WM_TXQUEUELEN(sc) =
1898 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1899 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1900 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1901 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1902 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1903 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1904 aprint_error_dev(sc->sc_dev,
1905 "unable to create Tx DMA map %d, error = %d\n",
1906 i, error);
1907 goto fail_4;
1908 }
1909 }
1910
1911 /* Create the receive buffer DMA maps. */
1912 for (i = 0; i < WM_NRXDESC; i++) {
1913 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1914 MCLBYTES, 0, 0,
1915 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1916 aprint_error_dev(sc->sc_dev,
1917 "unable to create Rx DMA map %d error = %d\n",
1918 i, error);
1919 goto fail_5;
1920 }
1921 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1922 }
1923
1924 /* clear interesting stat counters */
1925 CSR_READ(sc, WMREG_COLC);
1926 CSR_READ(sc, WMREG_RXERRC);
1927
1928 /* Switch PHY control from SMBus to PCIe */
1929 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1930 || (sc->sc_type == WM_T_PCH_LPT))
1931 wm_smbustopci(sc);
1932
1933 /* Reset the chip to a known state. */
1934 wm_reset(sc);
1935
1936 /* Get some information about the EEPROM. */
1937 switch (sc->sc_type) {
1938 case WM_T_82542_2_0:
1939 case WM_T_82542_2_1:
1940 case WM_T_82543:
1941 case WM_T_82544:
1942 /* Microwire */
1943 sc->sc_nvm_wordsize = 64;
1944 sc->sc_nvm_addrbits = 6;
1945 break;
1946 case WM_T_82540:
1947 case WM_T_82545:
1948 case WM_T_82545_3:
1949 case WM_T_82546:
1950 case WM_T_82546_3:
1951 /* Microwire */
1952 reg = CSR_READ(sc, WMREG_EECD);
1953 if (reg & EECD_EE_SIZE) {
1954 sc->sc_nvm_wordsize = 256;
1955 sc->sc_nvm_addrbits = 8;
1956 } else {
1957 sc->sc_nvm_wordsize = 64;
1958 sc->sc_nvm_addrbits = 6;
1959 }
1960 sc->sc_flags |= WM_F_LOCK_EECD;
1961 break;
1962 case WM_T_82541:
1963 case WM_T_82541_2:
1964 case WM_T_82547:
1965 case WM_T_82547_2:
1966 sc->sc_flags |= WM_F_LOCK_EECD;
1967 reg = CSR_READ(sc, WMREG_EECD);
1968 if (reg & EECD_EE_TYPE) {
1969 /* SPI */
1970 sc->sc_flags |= WM_F_EEPROM_SPI;
1971 wm_nvm_set_addrbits_size_eecd(sc);
1972 } else {
1973 /* Microwire */
1974 if ((reg & EECD_EE_ABITS) != 0) {
1975 sc->sc_nvm_wordsize = 256;
1976 sc->sc_nvm_addrbits = 8;
1977 } else {
1978 sc->sc_nvm_wordsize = 64;
1979 sc->sc_nvm_addrbits = 6;
1980 }
1981 }
1982 break;
1983 case WM_T_82571:
1984 case WM_T_82572:
1985 /* SPI */
1986 sc->sc_flags |= WM_F_EEPROM_SPI;
1987 wm_nvm_set_addrbits_size_eecd(sc);
1988 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1989 break;
1990 case WM_T_82573:
1991 sc->sc_flags |= WM_F_LOCK_SWSM;
1992 /* FALLTHROUGH */
1993 case WM_T_82574:
1994 case WM_T_82583:
1995 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1996 sc->sc_flags |= WM_F_EEPROM_FLASH;
1997 sc->sc_nvm_wordsize = 2048;
1998 } else {
1999 /* SPI */
2000 sc->sc_flags |= WM_F_EEPROM_SPI;
2001 wm_nvm_set_addrbits_size_eecd(sc);
2002 }
2003 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2004 break;
2005 case WM_T_82575:
2006 case WM_T_82576:
2007 case WM_T_82580:
2008 case WM_T_I350:
2009 case WM_T_I354:
2010 case WM_T_80003:
2011 /* SPI */
2012 sc->sc_flags |= WM_F_EEPROM_SPI;
2013 wm_nvm_set_addrbits_size_eecd(sc);
2014 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
2015 | WM_F_LOCK_SWSM;
2016 break;
2017 case WM_T_ICH8:
2018 case WM_T_ICH9:
2019 case WM_T_ICH10:
2020 case WM_T_PCH:
2021 case WM_T_PCH2:
2022 case WM_T_PCH_LPT:
2023 /* FLASH */
2024 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2025 sc->sc_nvm_wordsize = 2048;
2026 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
2027 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2028 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2029 aprint_error_dev(sc->sc_dev,
2030 "can't map FLASH registers\n");
2031 goto fail_5;
2032 }
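/*
 * GFPREG gives the base and limit of the gigabit NVM region
 * in flash sector units. The region holds two NVM banks, so
 * halve it and convert from bytes to 16-bit words to get the
 * per-bank size.
 */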
2033 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2034 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2035 ICH_FLASH_SECTOR_SIZE;
2036 sc->sc_ich8_flash_bank_size =
2037 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2038 sc->sc_ich8_flash_bank_size -=
2039 (reg & ICH_GFPREG_BASE_MASK);
2040 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2041 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2042 break;
2043 case WM_T_I210:
2044 case WM_T_I211:
2045 if (wm_nvm_get_flash_presence_i210(sc)) {
2046 wm_nvm_set_addrbits_size_eecd(sc);
2047 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2048 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
2049 } else {
2050 sc->sc_nvm_wordsize = INVM_SIZE;
2051 sc->sc_flags |= WM_F_EEPROM_INVM;
2052 sc->sc_flags |= WM_F_LOCK_SWFW;
2053 }
2054 break;
2055 default:
2056 break;
2057 }
2058
2059 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2060 switch (sc->sc_type) {
2061 case WM_T_82571:
2062 case WM_T_82572:
2063 reg = CSR_READ(sc, WMREG_SWSM2);
2064 if ((reg & SWSM2_LOCK) == 0) {
2065 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2066 force_clear_smbi = true;
2067 } else
2068 force_clear_smbi = false;
2069 break;
2070 case WM_T_82573:
2071 case WM_T_82574:
2072 case WM_T_82583:
2073 force_clear_smbi = true;
2074 break;
2075 default:
2076 force_clear_smbi = false;
2077 break;
2078 }
2079 if (force_clear_smbi) {
2080 reg = CSR_READ(sc, WMREG_SWSM);
2081 if ((reg & SWSM_SMBI) != 0)
2082 aprint_error_dev(sc->sc_dev,
2083 "Please update the Bootagent\n");
2084 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2085 }
2086
2087 /*
2088 * Defer printing the EEPROM type until after verifying the checksum.
2089 * This allows the EEPROM type to be printed correctly in the case
2090 * that no EEPROM is attached.
2091 */
2092 /*
2093 * Validate the EEPROM checksum. If the checksum fails, flag
2094 * this for later, so we can fail future reads from the EEPROM.
2095 */
2096 if (wm_nvm_validate_checksum(sc)) {
2097 /*
2098 * Read it again, because some PCI-e parts fail the
2099 * first check due to the link being in sleep state.
2100 */
2101 if (wm_nvm_validate_checksum(sc))
2102 sc->sc_flags |= WM_F_EEPROM_INVALID;
2103 }
2104
2105 /* Set device properties (macflags) */
2106 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2107
2108 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2109 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2110 else {
2111 aprint_verbose_dev(sc->sc_dev, "%u words ",
2112 sc->sc_nvm_wordsize);
2113 if (sc->sc_flags & WM_F_EEPROM_INVM)
2114 aprint_verbose("iNVM");
2115 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2116 aprint_verbose("FLASH(HW)");
2117 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2118 aprint_verbose("FLASH");
2119 else {
2120 if (sc->sc_flags & WM_F_EEPROM_SPI)
2121 eetype = "SPI";
2122 else
2123 eetype = "MicroWire";
2124 aprint_verbose("(%d address bits) %s EEPROM",
2125 sc->sc_nvm_addrbits, eetype);
2126 }
2127 }
2128 wm_nvm_version(sc);
2129 aprint_verbose("\n");
2130
2131 /* Check for I21[01] PLL workaround */
2132 if (sc->sc_type == WM_T_I210)
2133 sc->sc_flags |= WM_F_PLL_WA_I210;
2134 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2135 /* NVM image release 3.25 has a workaround */
2136 if ((sc->sc_nvm_ver_major < 3)
2137 || ((sc->sc_nvm_ver_major == 3)
2138 && (sc->sc_nvm_ver_minor < 25))) {
2139 aprint_verbose_dev(sc->sc_dev,
2140 "ROM image version %d.%d is older than 3.25\n",
2141 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2142 sc->sc_flags |= WM_F_PLL_WA_I210;
2143 }
2144 }
2145 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2146 wm_pll_workaround_i210(sc);
2147
2148 switch (sc->sc_type) {
2149 case WM_T_82571:
2150 case WM_T_82572:
2151 case WM_T_82573:
2152 case WM_T_82574:
2153 case WM_T_82583:
2154 case WM_T_80003:
2155 case WM_T_ICH8:
2156 case WM_T_ICH9:
2157 case WM_T_ICH10:
2158 case WM_T_PCH:
2159 case WM_T_PCH2:
2160 case WM_T_PCH_LPT:
2161 if (wm_check_mng_mode(sc) != 0)
2162 wm_get_hw_control(sc);
2163 break;
2164 default:
2165 break;
2166 }
2167 wm_get_wakeup(sc);
2168 /*
2169 * Read the Ethernet address from the EEPROM, unless it was
2170 * already found in the device properties.
2171 */
2172 ea = prop_dictionary_get(dict, "mac-address");
2173 if (ea != NULL) {
2174 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2175 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2176 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2177 } else {
2178 if (wm_read_mac_addr(sc, enaddr) != 0) {
2179 aprint_error_dev(sc->sc_dev,
2180 "unable to read Ethernet address\n");
2181 goto fail_5;
2182 }
2183 }
2184
2185 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2186 ether_sprintf(enaddr));
2187
2188 /*
2189 * Read the config info from the EEPROM, and set up various
2190 * bits in the control registers based on their contents.
2191 */
2192 pn = prop_dictionary_get(dict, "i82543-cfg1");
2193 if (pn != NULL) {
2194 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2195 cfg1 = (uint16_t) prop_number_integer_value(pn);
2196 } else {
2197 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2198 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2199 goto fail_5;
2200 }
2201 }
2202
2203 pn = prop_dictionary_get(dict, "i82543-cfg2");
2204 if (pn != NULL) {
2205 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2206 cfg2 = (uint16_t) prop_number_integer_value(pn);
2207 } else {
2208 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2209 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2210 goto fail_5;
2211 }
2212 }
2213
2214 /* check for WM_F_WOL */
2215 switch (sc->sc_type) {
2216 case WM_T_82542_2_0:
2217 case WM_T_82542_2_1:
2218 case WM_T_82543:
2219 /* dummy? */
2220 eeprom_data = 0;
2221 apme_mask = NVM_CFG3_APME;
2222 break;
2223 case WM_T_82544:
2224 apme_mask = NVM_CFG2_82544_APM_EN;
2225 eeprom_data = cfg2;
2226 break;
2227 case WM_T_82546:
2228 case WM_T_82546_3:
2229 case WM_T_82571:
2230 case WM_T_82572:
2231 case WM_T_82573:
2232 case WM_T_82574:
2233 case WM_T_82583:
2234 case WM_T_80003:
2235 default:
2236 apme_mask = NVM_CFG3_APME;
2237 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2238 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2239 break;
2240 case WM_T_82575:
2241 case WM_T_82576:
2242 case WM_T_82580:
2243 case WM_T_I350:
2244 case WM_T_I354: /* XXX ok? */
2245 case WM_T_ICH8:
2246 case WM_T_ICH9:
2247 case WM_T_ICH10:
2248 case WM_T_PCH:
2249 case WM_T_PCH2:
2250 case WM_T_PCH_LPT:
2251 /* XXX The funcid should be checked on some devices */
2252 apme_mask = WUC_APME;
2253 eeprom_data = CSR_READ(sc, WMREG_WUC);
2254 break;
2255 }
2256
2257 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2258 if ((eeprom_data & apme_mask) != 0)
2259 sc->sc_flags |= WM_F_WOL;
2260 #ifdef WM_DEBUG
2261 if ((sc->sc_flags & WM_F_WOL) != 0)
2262 printf("WOL\n");
2263 #endif
2264
2265 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2266 /* Check NVM for autonegotiation */
2267 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2268 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2269 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2270 }
2271 }
2272
2273 /*
2274 * XXX need special handling for some multiple port cards
2275 * to disable a particular port.
2276 */
2277
2278 if (sc->sc_type >= WM_T_82544) {
2279 pn = prop_dictionary_get(dict, "i82543-swdpin");
2280 if (pn != NULL) {
2281 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2282 swdpin = (uint16_t) prop_number_integer_value(pn);
2283 } else {
2284 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2285 aprint_error_dev(sc->sc_dev,
2286 "unable to read SWDPIN\n");
2287 goto fail_5;
2288 }
2289 }
2290 }
2291
2292 if (cfg1 & NVM_CFG1_ILOS)
2293 sc->sc_ctrl |= CTRL_ILOS;
2294
2295 /*
2296 * XXX
2297 * This code isn't correct because pins 2 and 3 are located
2298 * in different positions on newer chips. Check all datasheets.
2299 *
2300 * Until this problem is resolved, only do this for chips <= 82580.
2301 */
2302 if (sc->sc_type <= WM_T_82580) {
2303 if (sc->sc_type >= WM_T_82544) {
2304 sc->sc_ctrl |=
2305 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2306 CTRL_SWDPIO_SHIFT;
2307 sc->sc_ctrl |=
2308 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2309 CTRL_SWDPINS_SHIFT;
2310 } else {
2311 sc->sc_ctrl |=
2312 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2313 CTRL_SWDPIO_SHIFT;
2314 }
2315 }
2316
2317 /* XXX For other than 82580? */
2318 if (sc->sc_type == WM_T_82580) {
2319 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2320 printf("CFG3 = %08x\n", (uint32_t)nvmword);
2321 if (nvmword & __BIT(13)) {
2322 printf("SET ILOS\n");
2323 sc->sc_ctrl |= CTRL_ILOS;
2324 }
2325 }
2326
2327 #if 0
2328 if (sc->sc_type >= WM_T_82544) {
2329 if (cfg1 & NVM_CFG1_IPS0)
2330 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2331 if (cfg1 & NVM_CFG1_IPS1)
2332 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2333 sc->sc_ctrl_ext |=
2334 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2335 CTRL_EXT_SWDPIO_SHIFT;
2336 sc->sc_ctrl_ext |=
2337 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2338 CTRL_EXT_SWDPINS_SHIFT;
2339 } else {
2340 sc->sc_ctrl_ext |=
2341 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2342 CTRL_EXT_SWDPIO_SHIFT;
2343 }
2344 #endif
2345
2346 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2347 #if 0
2348 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2349 #endif
2350
2351 /*
2352 * Set up some register offsets that are different between
2353 * the i82542 and the i82543 and later chips.
2354 */
2355 if (sc->sc_type < WM_T_82543) {
2356 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2357 sc->sc_tdt_reg = WMREG_OLD_TDT;
2358 } else {
2359 sc->sc_rdt_reg = WMREG_RDT;
2360 sc->sc_tdt_reg = WMREG_TDT;
2361 }
2362
2363 if (sc->sc_type == WM_T_PCH) {
2364 uint16_t val;
2365
2366 /* Save the NVM K1 bit setting */
2367 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2368
2369 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2370 sc->sc_nvm_k1_enabled = 1;
2371 else
2372 sc->sc_nvm_k1_enabled = 0;
2373 }
2374
2375 /*
2376 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
2377 * media structures accordingly.
2378 */
2379 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2380 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2381 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2382 || sc->sc_type == WM_T_82573
2383 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2384 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2385 wm_gmii_mediainit(sc, wmp->wmp_product);
2386 } else if (sc->sc_type < WM_T_82543 ||
2387 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2388 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2389 aprint_error_dev(sc->sc_dev,
2390 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2391 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2392 }
2393 wm_tbi_mediainit(sc);
2394 } else {
2395 switch (sc->sc_type) {
2396 case WM_T_82575:
2397 case WM_T_82576:
2398 case WM_T_82580:
2399 case WM_T_I350:
2400 case WM_T_I354:
2401 case WM_T_I210:
2402 case WM_T_I211:
2403 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2404 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2405 switch (link_mode) {
2406 case CTRL_EXT_LINK_MODE_1000KX:
2407 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2408 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2409 break;
2410 case CTRL_EXT_LINK_MODE_SGMII:
2411 if (wm_sgmii_uses_mdio(sc)) {
2412 aprint_verbose_dev(sc->sc_dev,
2413 "SGMII(MDIO)\n");
2414 sc->sc_flags |= WM_F_SGMII;
2415 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2416 break;
2417 }
2418 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2419 /*FALLTHROUGH*/
2420 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2421 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2422 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2423 if (link_mode
2424 == CTRL_EXT_LINK_MODE_SGMII) {
2425 sc->sc_mediatype
2426 = WM_MEDIATYPE_COPPER;
2427 sc->sc_flags |= WM_F_SGMII;
2428 } else {
2429 sc->sc_mediatype
2430 = WM_MEDIATYPE_SERDES;
2431 aprint_verbose_dev(sc->sc_dev,
2432 "SERDES\n");
2433 }
2434 break;
2435 }
2436 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2437 aprint_verbose_dev(sc->sc_dev,
2438 "SERDES\n");
2439
2440 /* Change current link mode setting */
2441 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2442 switch (sc->sc_mediatype) {
2443 case WM_MEDIATYPE_COPPER:
2444 reg |= CTRL_EXT_LINK_MODE_SGMII;
2445 break;
2446 case WM_MEDIATYPE_SERDES:
2447 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2448 break;
2449 default:
2450 break;
2451 }
2452 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2453 break;
2454 case CTRL_EXT_LINK_MODE_GMII:
2455 default:
2456 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2457 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2458 break;
2459 }
2460
2461 reg &= ~CTRL_EXT_I2C_ENA;
2462 if ((sc->sc_flags & WM_F_SGMII) != 0)
2463 reg |= CTRL_EXT_I2C_ENA;
2466 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2467
2468 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2469 wm_gmii_mediainit(sc, wmp->wmp_product);
2470 else
2471 wm_tbi_mediainit(sc);
2472 break;
2473 default:
2474 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2475 aprint_error_dev(sc->sc_dev,
2476 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2477 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2478 wm_gmii_mediainit(sc, wmp->wmp_product);
2479 }
2480 }
2481
2482 ifp = &sc->sc_ethercom.ec_if;
2483 xname = device_xname(sc->sc_dev);
2484 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2485 ifp->if_softc = sc;
2486 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2487 ifp->if_ioctl = wm_ioctl;
2488 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2489 ifp->if_start = wm_nq_start;
2490 else
2491 ifp->if_start = wm_start;
2492 ifp->if_watchdog = wm_watchdog;
2493 ifp->if_init = wm_init;
2494 ifp->if_stop = wm_stop;
2495 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2496 IFQ_SET_READY(&ifp->if_snd);
2497
2498 /* Check for jumbo frame */
2499 switch (sc->sc_type) {
2500 case WM_T_82573:
2501 /* XXX limited to 9234 if ASPM is disabled */
2502 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2503 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2504 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2505 break;
2506 case WM_T_82571:
2507 case WM_T_82572:
2508 case WM_T_82574:
2509 case WM_T_82575:
2510 case WM_T_82576:
2511 case WM_T_82580:
2512 case WM_T_I350:
2513 case WM_T_I354: /* XXXX ok? */
2514 case WM_T_I210:
2515 case WM_T_I211:
2516 case WM_T_80003:
2517 case WM_T_ICH9:
2518 case WM_T_ICH10:
2519 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2520 case WM_T_PCH_LPT:
2521 /* XXX limited to 9234 */
2522 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2523 break;
2524 case WM_T_PCH:
2525 /* XXX limited to 4096 */
2526 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2527 break;
2528 case WM_T_82542_2_0:
2529 case WM_T_82542_2_1:
2530 case WM_T_82583:
2531 case WM_T_ICH8:
2532 /* No support for jumbo frame */
2533 break;
2534 default:
2535 /* ETHER_MAX_LEN_JUMBO */
2536 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2537 break;
2538 }
2539
2540 /* If we're an i82543 or greater, we can support VLANs. */
2541 if (sc->sc_type >= WM_T_82543)
2542 sc->sc_ethercom.ec_capabilities |=
2543 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2544
2545 /*
2546 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2547 * on i82543 and later.
2548 */
2549 if (sc->sc_type >= WM_T_82543) {
2550 ifp->if_capabilities |=
2551 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2552 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2553 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2554 IFCAP_CSUM_TCPv6_Tx |
2555 IFCAP_CSUM_UDPv6_Tx;
2556 }
2557
2558 /*
2559 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2560 *
2561 * 82541GI (8086:1076) ... no
2562 * 82572EI (8086:10b9) ... yes
2563 */
2564 if (sc->sc_type >= WM_T_82571) {
2565 ifp->if_capabilities |=
2566 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2567 }
2568
2569 /*
2570 * If we're an i82544 or greater (except the i82547), we can do
2571 * TCP segmentation offload.
2572 */
2573 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2574 ifp->if_capabilities |= IFCAP_TSOv4;
2575 }
2576
2577 if (sc->sc_type >= WM_T_82571) {
2578 ifp->if_capabilities |= IFCAP_TSOv6;
2579 }
2580
2581 #ifdef WM_MPSAFE
2582 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2583 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2584 #else
2585 sc->sc_tx_lock = NULL;
2586 sc->sc_rx_lock = NULL;
2587 #endif
2588
2589 /* Attach the interface. */
2590 if_attach(ifp);
2591 ether_ifattach(ifp, enaddr);
2592 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2593 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2594 RND_FLAG_DEFAULT);
2595
2596 #ifdef WM_EVENT_COUNTERS
2597 /* Attach event counters. */
2598 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2599 NULL, xname, "txsstall");
2600 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2601 NULL, xname, "txdstall");
2602 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2603 NULL, xname, "txfifo_stall");
2604 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2605 NULL, xname, "txdw");
2606 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2607 NULL, xname, "txqe");
2608 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2609 NULL, xname, "rxintr");
2610 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2611 NULL, xname, "linkintr");
2612
2613 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2614 NULL, xname, "rxipsum");
2615 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2616 NULL, xname, "rxtusum");
2617 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2618 NULL, xname, "txipsum");
2619 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2620 NULL, xname, "txtusum");
2621 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2622 NULL, xname, "txtusum6");
2623
2624 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2625 NULL, xname, "txtso");
2626 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2627 NULL, xname, "txtso6");
2628 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2629 NULL, xname, "txtsopain");
2630
2631 for (i = 0; i < WM_NTXSEGS; i++) {
2632 snprintf(wm_txseg_evcnt_names[i],
2633 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2634 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2635 NULL, xname, wm_txseg_evcnt_names[i]);
2636 }
2637
2638 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2639 NULL, xname, "txdrop");
2640
2641 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2642 NULL, xname, "tu");
2643
2644 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2645 NULL, xname, "tx_xoff");
2646 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2647 NULL, xname, "tx_xon");
2648 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2649 NULL, xname, "rx_xoff");
2650 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2651 NULL, xname, "rx_xon");
2652 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2653 NULL, xname, "rx_macctl");
2654 #endif /* WM_EVENT_COUNTERS */
2655
2656 if (pmf_device_register(self, wm_suspend, wm_resume))
2657 pmf_class_network_register(self, ifp);
2658 else
2659 aprint_error_dev(self, "couldn't establish power handler\n");
2660
2661 sc->sc_flags |= WM_F_ATTACHED;
2662 return;
2663
2664 /*
2665 * Free any resources we've allocated during the failed attach
2666 * attempt. Do this in reverse order and fall through.
2667 */
2668 fail_5:
2669 for (i = 0; i < WM_NRXDESC; i++) {
2670 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2671 bus_dmamap_destroy(sc->sc_dmat,
2672 sc->sc_rxsoft[i].rxs_dmamap);
2673 }
2674 fail_4:
2675 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2676 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2677 bus_dmamap_destroy(sc->sc_dmat,
2678 sc->sc_txsoft[i].txs_dmamap);
2679 }
2680 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2681 fail_3:
2682 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2683 fail_2:
2684 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2685 sc->sc_cd_size);
2686 fail_1:
2687 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2688 fail_0:
2689 return;
2690 }
2691
2692 /* The detach function (ca_detach) */
2693 static int
2694 wm_detach(device_t self, int flags __unused)
2695 {
2696 struct wm_softc *sc = device_private(self);
2697 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2698 int i;
2699 #ifndef WM_MPSAFE
2700 int s;
2701 #endif
2702
2703 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2704 return 0;
2705
2706 #ifndef WM_MPSAFE
2707 s = splnet();
2708 #endif
2709 /* Stop the interface. Callouts are stopped in it. */
2710 wm_stop(ifp, 1);
2711
2712 #ifndef WM_MPSAFE
2713 splx(s);
2714 #endif
2715
2716 pmf_device_deregister(self);
2717
2718 /* Tell the firmware about the release */
2719 WM_BOTH_LOCK(sc);
2720 wm_release_manageability(sc);
2721 wm_release_hw_control(sc);
2722 WM_BOTH_UNLOCK(sc);
2723
2724 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2725
2726 /* Delete all remaining media. */
2727 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2728
2729 ether_ifdetach(ifp);
2730 if_detach(ifp);
2731
2733 /* Unload RX dmamaps and free mbufs */
2734 WM_RX_LOCK(sc);
2735 wm_rxdrain(sc);
2736 WM_RX_UNLOCK(sc);
2737 /* Must unlock here */
2738
2739 /* Free the DMA maps; this mirrors the cleanup at the end of wm_attach() */
2740 for (i = 0; i < WM_NRXDESC; i++) {
2741 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2742 bus_dmamap_destroy(sc->sc_dmat,
2743 sc->sc_rxsoft[i].rxs_dmamap);
2744 }
2745 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2746 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2747 bus_dmamap_destroy(sc->sc_dmat,
2748 sc->sc_txsoft[i].txs_dmamap);
2749 }
2750 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2751 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2752 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2753 sc->sc_cd_size);
2754 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2755
2756 /* Disestablish the interrupt handler */
2757 for (i = 0; i < sc->sc_nintrs; i++) {
2758 if (sc->sc_ihs[i] != NULL) {
2759 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2760 sc->sc_ihs[i] = NULL;
2761 }
2762 }
2763 #ifdef WM_MSI_MSIX
2764 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2765 #endif /* WM_MSI_MSIX */
2766
2767 /* Unmap the registers */
2768 if (sc->sc_ss) {
2769 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2770 sc->sc_ss = 0;
2771 }
2772 if (sc->sc_ios) {
2773 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2774 sc->sc_ios = 0;
2775 }
2776 if (sc->sc_flashs) {
2777 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2778 sc->sc_flashs = 0;
2779 }
2780
2781 if (sc->sc_tx_lock)
2782 mutex_obj_free(sc->sc_tx_lock);
2783 if (sc->sc_rx_lock)
2784 mutex_obj_free(sc->sc_rx_lock);
2785
2786 return 0;
2787 }
2788
2789 static bool
2790 wm_suspend(device_t self, const pmf_qual_t *qual)
2791 {
2792 struct wm_softc *sc = device_private(self);
2793
2794 wm_release_manageability(sc);
2795 wm_release_hw_control(sc);
2796 #ifdef WM_WOL
2797 wm_enable_wakeup(sc);
2798 #endif
2799
2800 return true;
2801 }
2802
2803 static bool
2804 wm_resume(device_t self, const pmf_qual_t *qual)
2805 {
2806 struct wm_softc *sc = device_private(self);
2807
2808 wm_init_manageability(sc);
2809
2810 return true;
2811 }
2812
2813 /*
2814 * wm_watchdog: [ifnet interface function]
2815 *
2816 * Watchdog timer handler.
2817 */
2818 static void
2819 wm_watchdog(struct ifnet *ifp)
2820 {
2821 struct wm_softc *sc = ifp->if_softc;
2822
2823 /*
2824 * Since we're using delayed interrupts, sweep up
2825 * before we report an error.
2826 */
2827 WM_TX_LOCK(sc);
2828 wm_txeof(sc);
2829 WM_TX_UNLOCK(sc);
2830
2831 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2832 #ifdef WM_DEBUG
2833 int i, j;
2834 struct wm_txsoft *txs;
2835 #endif
2836 log(LOG_ERR,
2837 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2838 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2839 sc->sc_txnext);
2840 ifp->if_oerrors++;
2841 #ifdef WM_DEBUG
2842 for (i = sc->sc_txsdirty; i != sc->sc_txsnext;
2843 i = WM_NEXTTXS(sc, i)) {
2844 txs = &sc->sc_txsoft[i];
2845 printf("txs %d tx %d -> %d\n",
2846 i, txs->txs_firstdesc, txs->txs_lastdesc);
2847 for (j = txs->txs_firstdesc; ;
2848 j = WM_NEXTTX(sc, j)) {
2849 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2850 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2851 printf("\t %#08x%08x\n",
2852 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2853 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2854 if (j == txs->txs_lastdesc)
2855 break;
2856 }
2857 }
2858 #endif
2859 /* Reset the interface. */
2860 (void) wm_init(ifp);
2861 }
2862
2863 /* Try to get more packets going. */
2864 ifp->if_start(ifp);
2865 }
2866
2867 /*
2868 * wm_tick:
2869 *
2870 * One second timer, used to check link status, sweep up
2871 * completed transmit jobs, etc.
2872 */
2873 static void
2874 wm_tick(void *arg)
2875 {
2876 struct wm_softc *sc = arg;
2877 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2878 #ifndef WM_MPSAFE
2879 int s;
2880
2881 s = splnet();
2882 #endif
2883
2884 WM_TX_LOCK(sc);
2885
2886 if (sc->sc_stopping)
2887 goto out;
2888
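/*
 * The statistics registers are clear-on-read; each read
 * returns the count accumulated since the previous read.
 */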
2889 if (sc->sc_type >= WM_T_82542_2_1) {
2890 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2891 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2892 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2893 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2894 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2895 }
2896
2897 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2898 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2899 + CSR_READ(sc, WMREG_CRCERRS)
2900 + CSR_READ(sc, WMREG_ALGNERRC)
2901 + CSR_READ(sc, WMREG_SYMERRC)
2902 + CSR_READ(sc, WMREG_RXERRC)
2903 + CSR_READ(sc, WMREG_SEC)
2904 + CSR_READ(sc, WMREG_CEXTERR)
2905 + CSR_READ(sc, WMREG_RLEC);
2906 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2907
2908 if (sc->sc_flags & WM_F_HAS_MII)
2909 mii_tick(&sc->sc_mii);
2910 else if ((sc->sc_type >= WM_T_82575)
2911 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2912 wm_serdes_tick(sc);
2913 else
2914 wm_tbi_tick(sc);
2915
2916 out:
2917 WM_TX_UNLOCK(sc);
2918 #ifndef WM_MPSAFE
2919 splx(s);
2920 #endif
2921
2922 if (!sc->sc_stopping)
2923 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2924 }
2925
2926 static int
2927 wm_ifflags_cb(struct ethercom *ec)
2928 {
2929 struct ifnet *ifp = &ec->ec_if;
2930 struct wm_softc *sc = ifp->if_softc;
2931 int change = ifp->if_flags ^ sc->sc_if_flags;
2932 int rc = 0;
2933
2934 WM_BOTH_LOCK(sc);
2935
2936 if (change != 0)
2937 sc->sc_if_flags = ifp->if_flags;
2938
2939 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2940 rc = ENETRESET;
2941 goto out;
2942 }
2943
2944 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2945 wm_set_filter(sc);
2946
2947 wm_set_vlan(sc);
2948
2949 out:
2950 WM_BOTH_UNLOCK(sc);
2951
2952 return rc;
2953 }
2954
2955 /*
2956 * wm_ioctl: [ifnet interface function]
2957 *
2958 * Handle control requests from the operator.
2959 */
2960 static int
2961 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2962 {
2963 struct wm_softc *sc = ifp->if_softc;
2964 struct ifreq *ifr = (struct ifreq *) data;
2965 struct ifaddr *ifa = (struct ifaddr *)data;
2966 struct sockaddr_dl *sdl;
2967 int s, error;
2968
2969 #ifndef WM_MPSAFE
2970 s = splnet();
2971 #endif
2972 switch (cmd) {
2973 case SIOCSIFMEDIA:
2974 case SIOCGIFMEDIA:
2975 WM_BOTH_LOCK(sc);
2976 /* Flow control requires full-duplex mode. */
2977 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2978 (ifr->ifr_media & IFM_FDX) == 0)
2979 ifr->ifr_media &= ~IFM_ETH_FMASK;
2980 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2981 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2982 /* We can do both TXPAUSE and RXPAUSE. */
2983 ifr->ifr_media |=
2984 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2985 }
2986 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2987 }
2988 WM_BOTH_UNLOCK(sc);
2989 #ifdef WM_MPSAFE
2990 s = splnet();
2991 #endif
2992 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2993 #ifdef WM_MPSAFE
2994 splx(s);
2995 #endif
2996 break;
2997 case SIOCINITIFADDR:
2998 WM_BOTH_LOCK(sc);
2999 if (ifa->ifa_addr->sa_family == AF_LINK) {
3000 sdl = satosdl(ifp->if_dl->ifa_addr);
3001 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3002 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3003 /* unicast address is first multicast entry */
3004 wm_set_filter(sc);
3005 error = 0;
3006 WM_BOTH_UNLOCK(sc);
3007 break;
3008 }
3009 WM_BOTH_UNLOCK(sc);
3010 /*FALLTHROUGH*/
3011 default:
3012 #ifdef WM_MPSAFE
3013 s = splnet();
3014 #endif
3015 /* It may call wm_start, so unlock here */
3016 error = ether_ioctl(ifp, cmd, data);
3017 #ifdef WM_MPSAFE
3018 splx(s);
3019 #endif
3020 if (error != ENETRESET)
3021 break;
3022
3023 error = 0;
3024
3025 if (cmd == SIOCSIFCAP) {
3026 error = (*ifp->if_init)(ifp);
3027 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3028 ;
3029 else if (ifp->if_flags & IFF_RUNNING) {
3030 /*
3031 * Multicast list has changed; set the hardware filter
3032 * accordingly.
3033 */
3034 WM_BOTH_LOCK(sc);
3035 wm_set_filter(sc);
3036 WM_BOTH_UNLOCK(sc);
3037 }
3038 break;
3039 }
3040
3041 #ifndef WM_MPSAFE
3042 splx(s);
3043 #endif
3044 return error;
3045 }
3046
3047 /* MAC address related */
3048
3049 /*
3050 * Get the offset of the MAC address and return it.
3051 * If an error occurs, use offset 0.
3052 */
3053 static uint16_t
3054 wm_check_alt_mac_addr(struct wm_softc *sc)
3055 {
3056 uint16_t myea[ETHER_ADDR_LEN / 2];
3057 uint16_t offset = NVM_OFF_MACADDR;
3058
3059 /* Try to read alternative MAC address pointer */
3060 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3061 return 0;
3062
3063 /* Check whether the pointer is valid. */
3064 if ((offset == 0x0000) || (offset == 0xffff))
3065 return 0;
3066
3067 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3068 /*
3069 * Check whether the alternative MAC address is valid.
3070 * Some cards have a non-0xffff pointer but don't actually
3071 * use an alternative MAC address.
3072 *
3073 * A valid unicast address must have the multicast (I/G) bit clear.
3074 */
3075 if (wm_nvm_read(sc, offset, 1, myea) == 0)
3076 if (((myea[0] & 0xff) & 0x01) == 0)
3077 return offset; /* Found */
3078
3079 /* Not found */
3080 return 0;
3081 }
3082
3083 static int
3084 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3085 {
3086 uint16_t myea[ETHER_ADDR_LEN / 2];
3087 uint16_t offset = NVM_OFF_MACADDR;
3088 int do_invert = 0;
3089
3090 switch (sc->sc_type) {
3091 case WM_T_82580:
3092 case WM_T_I350:
3093 case WM_T_I354:
3094 /* EEPROM Top Level Partitioning */
3095 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3096 break;
3097 case WM_T_82571:
3098 case WM_T_82575:
3099 case WM_T_82576:
3100 case WM_T_80003:
3101 case WM_T_I210:
3102 case WM_T_I211:
3103 offset = wm_check_alt_mac_addr(sc);
3104 if (offset == 0)
3105 if ((sc->sc_funcid & 0x01) == 1)
3106 do_invert = 1;
3107 break;
3108 default:
3109 if ((sc->sc_funcid & 0x01) == 1)
3110 do_invert = 1;
3111 break;
3112 }
3113
3114 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
3115 myea) != 0)
3116 goto bad;
3117
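/* Each 16-bit NVM word holds two address bytes, low byte first */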
3118 enaddr[0] = myea[0] & 0xff;
3119 enaddr[1] = myea[0] >> 8;
3120 enaddr[2] = myea[1] & 0xff;
3121 enaddr[3] = myea[1] >> 8;
3122 enaddr[4] = myea[2] & 0xff;
3123 enaddr[5] = myea[2] >> 8;
3124
3125 /*
3126 * Toggle the LSB of the MAC address on the second port
3127 * of some dual port cards.
3128 */
3129 if (do_invert != 0)
3130 enaddr[5] ^= 1;
3131
3132 return 0;
3133
3134 bad:
3135 return -1;
3136 }
3137
3138 /*
3139 * wm_set_ral:
3140 *
3141 * Set an entry in the receive address list.
3142 */
3143 static void
3144 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3145 {
3146 uint32_t ral_lo, ral_hi;
3147
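/*
 * RAL holds the low four bytes of the address; RAH holds the
 * high two bytes plus the Address Valid bit.
 */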
3148 if (enaddr != NULL) {
3149 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3150 (enaddr[3] << 24);
3151 ral_hi = enaddr[4] | (enaddr[5] << 8);
3152 ral_hi |= RAL_AV;
3153 } else {
3154 ral_lo = 0;
3155 ral_hi = 0;
3156 }
3157
3158 if (sc->sc_type >= WM_T_82544) {
3159 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3160 ral_lo);
3161 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3162 ral_hi);
3163 } else {
3164 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3165 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3166 }
3167 }
3168
3169 /*
3170 * wm_mchash:
3171 *
3172 * Compute the hash of the multicast address for the multicast
3173 * filter (4096 bits, or 1024 bits on ICH/PCH variants).
3174 */
3175 static uint32_t
3176 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3177 {
3178 static const int lo_shift[4] = { 4, 3, 2, 0 };
3179 static const int hi_shift[4] = { 4, 5, 6, 8 };
3180 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3181 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3182 uint32_t hash;
3183
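/*
 * The shift tables select which bits of the two high-order
 * address bytes feed the hash; sc_mchash_type picks one of
 * the four supported filter modes.
 */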
3184 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3185 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3186 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3187 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3188 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3189 return (hash & 0x3ff);
3190 }
3191 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3192 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3193
3194 return (hash & 0xfff);
3195 }
3196
3197 /*
3198 * wm_set_filter:
3199 *
3200 * Set up the receive filter.
3201 */
3202 static void
3203 wm_set_filter(struct wm_softc *sc)
3204 {
3205 struct ethercom *ec = &sc->sc_ethercom;
3206 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3207 struct ether_multi *enm;
3208 struct ether_multistep step;
3209 bus_addr_t mta_reg;
3210 uint32_t hash, reg, bit;
3211 int i, size;
3212
3213 if (sc->sc_type >= WM_T_82544)
3214 mta_reg = WMREG_CORDOVA_MTA;
3215 else
3216 mta_reg = WMREG_MTA;
3217
3218 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3219
3220 if (ifp->if_flags & IFF_BROADCAST)
3221 sc->sc_rctl |= RCTL_BAM;
3222 if (ifp->if_flags & IFF_PROMISC) {
3223 sc->sc_rctl |= RCTL_UPE;
3224 goto allmulti;
3225 }
3226
3227 /*
3228 * Set the station address in the first RAL slot, and
3229 * clear the remaining slots.
3230 */
3231 if (sc->sc_type == WM_T_ICH8)
3232 size = WM_RAL_TABSIZE_ICH8 - 1;
3233 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3234 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3235 || (sc->sc_type == WM_T_PCH_LPT))
3236 size = WM_RAL_TABSIZE_ICH8;
3237 else if (sc->sc_type == WM_T_82575)
3238 size = WM_RAL_TABSIZE_82575;
3239 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3240 size = WM_RAL_TABSIZE_82576;
3241 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3242 size = WM_RAL_TABSIZE_I350;
3243 else
3244 size = WM_RAL_TABSIZE;
3245 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3246 for (i = 1; i < size; i++)
3247 wm_set_ral(sc, NULL, i);
3248
3249 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3250 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3251 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3252 size = WM_ICH8_MC_TABSIZE;
3253 else
3254 size = WM_MC_TABSIZE;
3255 /* Clear out the multicast table. */
3256 for (i = 0; i < size; i++)
3257 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3258
3259 ETHER_FIRST_MULTI(step, ec, enm);
3260 while (enm != NULL) {
3261 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3262 /*
3263 * We must listen to a range of multicast addresses.
3264 * For now, just accept all multicasts, rather than
3265 * trying to set only those filter bits needed to match
3266 * the range. (At this time, the only use of address
3267 * ranges is for IP multicast routing, for which the
3268 * range is big enough to require all bits set.)
3269 */
3270 goto allmulti;
3271 }
3272
3273 hash = wm_mchash(sc, enm->enm_addrlo);
3274
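/*
 * The upper bits of the hash select a 32-bit MTA register;
 * the low five bits select a bit within it.
 */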
3275 reg = (hash >> 5);
3276 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3277 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3278 || (sc->sc_type == WM_T_PCH2)
3279 || (sc->sc_type == WM_T_PCH_LPT))
3280 reg &= 0x1f;
3281 else
3282 reg &= 0x7f;
3283 bit = hash & 0x1f;
3284
3285 hash = CSR_READ(sc, mta_reg + (reg << 2));
3286 hash |= 1U << bit;
3287
3288 /* XXX Hardware bug?? */
3289 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3290 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3291 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3292 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3293 } else
3294 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3295
3296 ETHER_NEXT_MULTI(step, enm);
3297 }
3298
3299 ifp->if_flags &= ~IFF_ALLMULTI;
3300 goto setit;
3301
3302 allmulti:
3303 ifp->if_flags |= IFF_ALLMULTI;
3304 sc->sc_rctl |= RCTL_MPE;
3305
3306 setit:
3307 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3308 }
3309
3310 /* Reset and init related */
3311
3312 static void
3313 wm_set_vlan(struct wm_softc *sc)
3314 {
3315 /* Deal with VLAN enables. */
3316 if (VLAN_ATTACHED(&sc->sc_ethercom))
3317 sc->sc_ctrl |= CTRL_VME;
3318 else
3319 sc->sc_ctrl &= ~CTRL_VME;
3320
3321 /* Write the control registers. */
3322 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3323 }
3324
3325 static void
3326 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3327 {
3328 uint32_t gcr;
3329 pcireg_t ctrl2;
3330
3331 gcr = CSR_READ(sc, WMREG_GCR);
3332
3333 /* Only take action if timeout value is defaulted to 0 */
3334 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3335 goto out;
3336
3337 if ((gcr & GCR_CAP_VER2) == 0) {
3338 gcr |= GCR_CMPL_TMOUT_10MS;
3339 goto out;
3340 }
3341
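/*
 * Capability version 2 devices use the standard PCIe Device
 * Control 2 register to set the completion timeout instead.
 */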
3342 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3343 sc->sc_pcixe_capoff + PCIE_DCSR2);
3344 ctrl2 |= WM_PCIE_DCSR2_16MS;
3345 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3346 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3347
3348 out:
3349 /* Disable completion timeout resend */
3350 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3351
3352 CSR_WRITE(sc, WMREG_GCR, gcr);
3353 }
3354
3355 void
3356 wm_get_auto_rd_done(struct wm_softc *sc)
3357 {
3358 int i;
3359
3360 /* wait for eeprom to reload */
3361 switch (sc->sc_type) {
3362 case WM_T_82571:
3363 case WM_T_82572:
3364 case WM_T_82573:
3365 case WM_T_82574:
3366 case WM_T_82583:
3367 case WM_T_82575:
3368 case WM_T_82576:
3369 case WM_T_82580:
3370 case WM_T_I350:
3371 case WM_T_I354:
3372 case WM_T_I210:
3373 case WM_T_I211:
3374 case WM_T_80003:
3375 case WM_T_ICH8:
3376 case WM_T_ICH9:
3377 for (i = 0; i < 10; i++) {
3378 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3379 break;
3380 delay(1000);
3381 }
3382 if (i == 10) {
3383 log(LOG_ERR, "%s: auto read from eeprom failed to "
3384 "complete\n", device_xname(sc->sc_dev));
3385 }
3386 break;
3387 default:
3388 break;
3389 }
3390 }
3391
3392 void
3393 wm_lan_init_done(struct wm_softc *sc)
3394 {
3395 uint32_t reg = 0;
3396 int i;
3397
3398 /* wait for eeprom to reload */
3399 switch (sc->sc_type) {
3400 case WM_T_ICH10:
3401 case WM_T_PCH:
3402 case WM_T_PCH2:
3403 case WM_T_PCH_LPT:
3404 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3405 reg = CSR_READ(sc, WMREG_STATUS);
3406 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3407 break;
3408 delay(100);
3409 }
3410 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3411 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3412 "complete\n", device_xname(sc->sc_dev), __func__);
3413 }
3414 break;
3415 default:
3416 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3417 __func__);
3418 break;
3419 }
3420
3421 reg &= ~STATUS_LAN_INIT_DONE;
3422 CSR_WRITE(sc, WMREG_STATUS, reg);
3423 }
3424
3425 void
3426 wm_get_cfg_done(struct wm_softc *sc)
3427 {
3428 int mask;
3429 uint32_t reg;
3430 int i;
3431
3432 /* wait for eeprom to reload */
3433 switch (sc->sc_type) {
3434 case WM_T_82542_2_0:
3435 case WM_T_82542_2_1:
3436 /* null */
3437 break;
3438 case WM_T_82543:
3439 case WM_T_82544:
3440 case WM_T_82540:
3441 case WM_T_82545:
3442 case WM_T_82545_3:
3443 case WM_T_82546:
3444 case WM_T_82546_3:
3445 case WM_T_82541:
3446 case WM_T_82541_2:
3447 case WM_T_82547:
3448 case WM_T_82547_2:
3449 case WM_T_82573:
3450 case WM_T_82574:
3451 case WM_T_82583:
3452 /* generic */
3453 delay(10*1000);
3454 break;
3455 case WM_T_80003:
3456 case WM_T_82571:
3457 case WM_T_82572:
3458 case WM_T_82575:
3459 case WM_T_82576:
3460 case WM_T_82580:
3461 case WM_T_I350:
3462 case WM_T_I354:
3463 case WM_T_I210:
3464 case WM_T_I211:
3465 if (sc->sc_type == WM_T_82571) {
3466 /* Only 82571 shares port 0 */
3467 mask = EEMNGCTL_CFGDONE_0;
3468 } else
3469 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3470 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3471 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3472 break;
3473 delay(1000);
3474 }
3475 if (i >= WM_PHY_CFG_TIMEOUT) {
3476 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3477 device_xname(sc->sc_dev), __func__));
3478 }
3479 break;
3480 case WM_T_ICH8:
3481 case WM_T_ICH9:
3482 case WM_T_ICH10:
3483 case WM_T_PCH:
3484 case WM_T_PCH2:
3485 case WM_T_PCH_LPT:
3486 delay(10*1000);
3487 if (sc->sc_type >= WM_T_ICH10)
3488 wm_lan_init_done(sc);
3489 else
3490 wm_get_auto_rd_done(sc);
3491
3492 reg = CSR_READ(sc, WMREG_STATUS);
3493 if ((reg & STATUS_PHYRA) != 0)
3494 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3495 break;
3496 default:
3497 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3498 __func__);
3499 break;
3500 }
3501 }
3502
3503 /* Init hardware bits */
3504 void
3505 wm_initialize_hardware_bits(struct wm_softc *sc)
3506 {
3507 uint32_t tarc0, tarc1, reg;
3508
3509 /* For 82571 variant, 80003 and ICHs */
3510 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3511 || (sc->sc_type >= WM_T_80003)) {
3512
3513 /* Transmit Descriptor Control 0 */
3514 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3515 reg |= TXDCTL_COUNT_DESC;
3516 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3517
3518 /* Transmit Descriptor Control 1 */
3519 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3520 reg |= TXDCTL_COUNT_DESC;
3521 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3522
3523 /* TARC0 */
3524 tarc0 = CSR_READ(sc, WMREG_TARC0);
3525 switch (sc->sc_type) {
3526 case WM_T_82571:
3527 case WM_T_82572:
3528 case WM_T_82573:
3529 case WM_T_82574:
3530 case WM_T_82583:
3531 case WM_T_80003:
3532 /* Clear bits 30..27 */
3533 tarc0 &= ~__BITS(30, 27);
3534 break;
3535 default:
3536 break;
3537 }
3538
3539 switch (sc->sc_type) {
3540 case WM_T_82571:
3541 case WM_T_82572:
3542 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3543
3544 tarc1 = CSR_READ(sc, WMREG_TARC1);
3545 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3546 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3547 /* 8257[12] Errata No.7 */
3548 tarc1 |= __BIT(22); /* TARC1 bit 22 */
3549
3550 /* TARC1 bit 28 */
3551 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3552 tarc1 &= ~__BIT(28);
3553 else
3554 tarc1 |= __BIT(28);
3555 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3556
3557 /*
3558 * 8257[12] Errata No.13
3559 * Disable Dynamic Clock Gating.
3560 */
3561 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3562 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3563 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3564 break;
3565 case WM_T_82573:
3566 case WM_T_82574:
3567 case WM_T_82583:
3568 if ((sc->sc_type == WM_T_82574)
3569 || (sc->sc_type == WM_T_82583))
3570 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3571
3572 /* Extended Device Control */
3573 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3574 reg &= ~__BIT(23); /* Clear bit 23 */
3575 reg |= __BIT(22); /* Set bit 22 */
3576 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3577
3578 /* Device Control */
3579 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3580 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3581
3582 /* PCIe Control Register */
3583 if ((sc->sc_type == WM_T_82574)
3584 || (sc->sc_type == WM_T_82583)) {
3585 /*
3586 * Document says this bit must be set for
3587 * proper operation.
3588 */
3589 reg = CSR_READ(sc, WMREG_GCR);
3590 reg |= __BIT(22);
3591 CSR_WRITE(sc, WMREG_GCR, reg);
3592
3593 /*
3594 * Apply a workaround for the hardware erratum
3595 * documented in the errata sheets: some PCIe
3596 * completions are error prone or unreliable,
3597 * particularly with ASPM enabled.
3598 * Without the fix, the issue can cause Tx
3599 * timeouts.
3600 */
3601 reg = CSR_READ(sc, WMREG_GCR2);
3602 reg |= __BIT(0);
3603 CSR_WRITE(sc, WMREG_GCR2, reg);
3604 }
3605 break;
3606 case WM_T_80003:
3607 /* TARC0 */
3608 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3609 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3610 tarc0 &= ~__BIT(20); /* Clear bit 20 */
3611
3612 /* TARC1 bit 28 */
3613 tarc1 = CSR_READ(sc, WMREG_TARC1);
3614 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3615 tarc1 &= ~__BIT(28);
3616 else
3617 tarc1 |= __BIT(28);
3618 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3619 break;
3620 case WM_T_ICH8:
3621 case WM_T_ICH9:
3622 case WM_T_ICH10:
3623 case WM_T_PCH:
3624 case WM_T_PCH2:
3625 case WM_T_PCH_LPT:
3626 /* TARC 0 */
3627 if (sc->sc_type == WM_T_ICH8) {
3628 /* Set TARC0 bits 29 and 28 */
3629 tarc0 |= __BITS(29, 28);
3630 }
3631 /* Set TARC0 bits 23,24,26,27 */
3632 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3633
3634 /* CTRL_EXT */
3635 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3636 reg |= __BIT(22); /* Set bit 22 */
3637 /*
3638 * Enable PHY low-power state when MAC is at D3
3639 * w/o WoL
3640 */
3641 if (sc->sc_type >= WM_T_PCH)
3642 reg |= CTRL_EXT_PHYPDEN;
3643 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3644
3645 /* TARC1 */
3646 tarc1 = CSR_READ(sc, WMREG_TARC1);
3647 /* bit 28 */
3648 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3649 tarc1 &= ~__BIT(28);
3650 else
3651 tarc1 |= __BIT(28);
3652 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3653 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3654
3655 /* Device Status */
3656 if (sc->sc_type == WM_T_ICH8) {
3657 reg = CSR_READ(sc, WMREG_STATUS);
3658 reg &= ~__BIT(31);
3659 CSR_WRITE(sc, WMREG_STATUS, reg);
3661 }
3662
3663 /*
3664 * Work-around descriptor data corruption issue during
3665 * NFS v2 UDP traffic, just disable the NFS filtering
3666 * capability.
3667 */
3668 reg = CSR_READ(sc, WMREG_RFCTL);
3669 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3670 CSR_WRITE(sc, WMREG_RFCTL, reg);
3671 break;
3672 default:
3673 break;
3674 }
3675 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3676
3677 /*
3678 * 8257[12] Errata No.52 and some others.
3679 * Avoid RSS Hash Value bug.
3680 */
3681 switch (sc->sc_type) {
3682 case WM_T_82571:
3683 case WM_T_82572:
3684 case WM_T_82573:
3685 case WM_T_80003:
3686 case WM_T_ICH8:
3687 reg = CSR_READ(sc, WMREG_RFCTL);
3688 reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3689 CSR_WRITE(sc, WMREG_RFCTL, reg);
3690 break;
3691 default:
3692 break;
3693 }
3694 }
3695 }
3696
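/*
 * wm_rxpbs_adjust_82580:
 *
 *	Translate the raw RXPBS register value into a usable packet
 *	buffer size via the 82580 lookup table; out-of-range values
 *	map to 0.
 */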
3697 static uint32_t
3698 wm_rxpbs_adjust_82580(uint32_t val)
3699 {
3700 uint32_t rv = 0;
3701
3702 if (val < __arraycount(wm_82580_rxpbs_table))
3703 rv = wm_82580_rxpbs_table[val];
3704
3705 return rv;
3706 }
3707
3708 /*
3709 * wm_reset:
3710 *
3711 * Reset the i82542 chip.
3712 */
3713 static void
3714 wm_reset(struct wm_softc *sc)
3715 {
3716 int phy_reset = 0;
3717 int error = 0;
3718 uint32_t reg, mask;
3719
3720 /*
3721 * Allocate on-chip memory according to the MTU size.
3722 * The Packet Buffer Allocation register must be written
3723 * before the chip is reset.
3724 */
3725 switch (sc->sc_type) {
3726 case WM_T_82547:
3727 case WM_T_82547_2:
3728 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3729 PBA_22K : PBA_30K;
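/*
 * Whatever the 40K packet buffer has left beyond the Rx
 * allocation (PBA_40K - sc_pba) becomes the Tx FIFO; record
 * its base and byte size for the 82547 FIFO-stall workaround
 * implemented further below.
 */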
3730 sc->sc_txfifo_head = 0;
3731 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3732 sc->sc_txfifo_size =
3733 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3734 sc->sc_txfifo_stall = 0;
3735 break;
3736 case WM_T_82571:
3737 case WM_T_82572:
3738 case WM_T_82575: /* XXX need special handing for jumbo frames */
3739 case WM_T_80003:
3740 sc->sc_pba = PBA_32K;
3741 break;
3742 case WM_T_82573:
3743 sc->sc_pba = PBA_12K;
3744 break;
3745 case WM_T_82574:
3746 case WM_T_82583:
3747 sc->sc_pba = PBA_20K;
3748 break;
3749 case WM_T_82576:
3750 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3751 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3752 break;
3753 case WM_T_82580:
3754 case WM_T_I350:
3755 case WM_T_I354:
3756 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3757 break;
3758 case WM_T_I210:
3759 case WM_T_I211:
3760 sc->sc_pba = PBA_34K;
3761 break;
3762 case WM_T_ICH8:
3763 /* Workaround for a bit corruption issue in FIFO memory */
3764 sc->sc_pba = PBA_8K;
3765 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3766 break;
3767 case WM_T_ICH9:
3768 case WM_T_ICH10:
3769 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3770 PBA_14K : PBA_10K;
3771 break;
3772 case WM_T_PCH:
3773 case WM_T_PCH2:
3774 case WM_T_PCH_LPT:
3775 sc->sc_pba = PBA_26K;
3776 break;
3777 default:
3778 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3779 PBA_40K : PBA_48K;
3780 break;
3781 }
3782 /*
3783 * Only old or non-multiqueue devices have the PBA register
3784 * XXX Need special handling for 82575.
3785 */
3786 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3787 || (sc->sc_type == WM_T_82575))
3788 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3789
3790 /* Prevent the PCI-E bus from sticking */
3791 if (sc->sc_flags & WM_F_PCIE) {
3792 int timeout = 800;
3793
3794 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3795 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3796
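/* Poll up to 800 * 100us = 80ms for master requests to drain. */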
3797 while (timeout--) {
3798 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3799 == 0)
3800 break;
3801 delay(100);
3802 }
3803 }
3804
3805 /* Set the completion timeout for interface */
3806 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3807 || (sc->sc_type == WM_T_82580)
3808 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3809 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3810 wm_set_pcie_completion_timeout(sc);
3811
3812 /* Clear interrupt */
3813 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3814 if (sc->sc_nintrs > 1) {
3815 if (sc->sc_type != WM_T_82574) {
3816 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3817 CSR_WRITE(sc, WMREG_EIAC, 0);
3818 } else {
3819 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3820 }
3821 }
3822
3823 /* Stop the transmit and receive processes. */
3824 CSR_WRITE(sc, WMREG_RCTL, 0);
3825 sc->sc_rctl &= ~RCTL_EN;
3826 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3827 CSR_WRITE_FLUSH(sc);
3828
3829 /* XXX set_tbi_sbp_82543() */
3830
3831 delay(10*1000);
3832
3833 /* Must acquire the MDIO ownership before MAC reset */
3834 switch (sc->sc_type) {
3835 case WM_T_82573:
3836 case WM_T_82574:
3837 case WM_T_82583:
3838 error = wm_get_hw_semaphore_82573(sc);
3839 break;
3840 default:
3841 break;
3842 }
3843
3844 /*
3845 * 82541 Errata 29? & 82547 Errata 28?
3846 * See also the description of the PHY_RST bit in the CTRL register
3847 * in 8254x_GBe_SDM.pdf.
3848 */
3849 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3850 CSR_WRITE(sc, WMREG_CTRL,
3851 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3852 CSR_WRITE_FLUSH(sc);
3853 delay(5000);
3854 }
3855
3856 switch (sc->sc_type) {
3857 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3858 case WM_T_82541:
3859 case WM_T_82541_2:
3860 case WM_T_82547:
3861 case WM_T_82547_2:
3862 /*
3863 * On some chipsets, a reset through a memory-mapped write
3864 * cycle can cause the chip to reset before completing the
3865 * write cycle. This causes major headache that can be
3866 * avoided by issuing the reset via indirect register writes
3867 * through I/O space.
3868 *
3869 * So, if we successfully mapped the I/O BAR at attach time,
3870 * use that. Otherwise, try our luck with a memory-mapped
3871 * reset.
3872 */
3873 if (sc->sc_flags & WM_F_IOH_VALID)
3874 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3875 else
3876 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3877 break;
3878 case WM_T_82545_3:
3879 case WM_T_82546_3:
3880 /* Use the shadow control register on these chips. */
3881 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3882 break;
3883 case WM_T_80003:
3884 mask = swfwphysem[sc->sc_funcid];
3885 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3886 wm_get_swfw_semaphore(sc, mask);
3887 CSR_WRITE(sc, WMREG_CTRL, reg);
3888 wm_put_swfw_semaphore(sc, mask);
3889 break;
3890 case WM_T_ICH8:
3891 case WM_T_ICH9:
3892 case WM_T_ICH10:
3893 case WM_T_PCH:
3894 case WM_T_PCH2:
3895 case WM_T_PCH_LPT:
3896 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3897 if (wm_check_reset_block(sc) == 0) {
3898 /*
3899 * Gate automatic PHY configuration by hardware on
3900 * non-managed 82579
3901 */
3902 if ((sc->sc_type == WM_T_PCH2)
3903 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3904 != 0))
3905 wm_gate_hw_phy_config_ich8lan(sc, 1);
3906
3908 reg |= CTRL_PHY_RESET;
3909 phy_reset = 1;
3910 }
3911 wm_get_swfwhw_semaphore(sc);
3912 CSR_WRITE(sc, WMREG_CTRL, reg);
3913 /* Don't insert a completion barrier while resetting */
3914 delay(20*1000);
3915 wm_put_swfwhw_semaphore(sc);
3916 break;
3917 case WM_T_82580:
3918 case WM_T_I350:
3919 case WM_T_I354:
3920 case WM_T_I210:
3921 case WM_T_I211:
3922 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3923 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3924 CSR_WRITE_FLUSH(sc);
3925 delay(5000);
3926 break;
3927 case WM_T_82542_2_0:
3928 case WM_T_82542_2_1:
3929 case WM_T_82543:
3930 case WM_T_82540:
3931 case WM_T_82545:
3932 case WM_T_82546:
3933 case WM_T_82571:
3934 case WM_T_82572:
3935 case WM_T_82573:
3936 case WM_T_82574:
3937 case WM_T_82575:
3938 case WM_T_82576:
3939 case WM_T_82583:
3940 default:
3941 /* Everything else can safely use the documented method. */
3942 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3943 break;
3944 }
3945
3946 /* Must release the MDIO ownership after MAC reset */
3947 switch (sc->sc_type) {
3948 case WM_T_82573:
3949 case WM_T_82574:
3950 case WM_T_82583:
3951 if (error == 0)
3952 wm_put_hw_semaphore_82573(sc);
3953 break;
3954 default:
3955 break;
3956 }
3957
3958 if (phy_reset != 0)
3959 wm_get_cfg_done(sc);
3960
3961 /* reload EEPROM */
3962 switch (sc->sc_type) {
3963 case WM_T_82542_2_0:
3964 case WM_T_82542_2_1:
3965 case WM_T_82543:
3966 case WM_T_82544:
3967 delay(10);
3968 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3969 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3970 CSR_WRITE_FLUSH(sc);
3971 delay(2000);
3972 break;
3973 case WM_T_82540:
3974 case WM_T_82545:
3975 case WM_T_82545_3:
3976 case WM_T_82546:
3977 case WM_T_82546_3:
3978 delay(5*1000);
3979 /* XXX Disable HW ARPs on ASF enabled adapters */
3980 break;
3981 case WM_T_82541:
3982 case WM_T_82541_2:
3983 case WM_T_82547:
3984 case WM_T_82547_2:
3985 delay(20000);
3986 /* XXX Disable HW ARPs on ASF enabled adapters */
3987 break;
3988 case WM_T_82571:
3989 case WM_T_82572:
3990 case WM_T_82573:
3991 case WM_T_82574:
3992 case WM_T_82583:
3993 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3994 delay(10);
3995 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3996 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3997 CSR_WRITE_FLUSH(sc);
3998 }
3999 /* check EECD_EE_AUTORD */
4000 wm_get_auto_rd_done(sc);
4001 /*
4002 * PHY configuration from NVM starts just after EECD_AUTO_RD
4003 * is set, so give it time to complete.
4004 */
4005 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4006 || (sc->sc_type == WM_T_82583))
4007 delay(25*1000);
4008 break;
4009 case WM_T_82575:
4010 case WM_T_82576:
4011 case WM_T_82580:
4012 case WM_T_I350:
4013 case WM_T_I354:
4014 case WM_T_I210:
4015 case WM_T_I211:
4016 case WM_T_80003:
4017 /* check EECD_EE_AUTORD */
4018 wm_get_auto_rd_done(sc);
4019 break;
4020 case WM_T_ICH8:
4021 case WM_T_ICH9:
4022 case WM_T_ICH10:
4023 case WM_T_PCH:
4024 case WM_T_PCH2:
4025 case WM_T_PCH_LPT:
4026 break;
4027 default:
4028 panic("%s: unknown type\n", __func__);
4029 }
4030
4031 /* Check whether EEPROM is present or not */
4032 switch (sc->sc_type) {
4033 case WM_T_82575:
4034 case WM_T_82576:
4035 case WM_T_82580:
4036 case WM_T_I350:
4037 case WM_T_I354:
4038 case WM_T_ICH8:
4039 case WM_T_ICH9:
4040 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4041 /* Not found */
4042 sc->sc_flags |= WM_F_EEPROM_INVALID;
4043 if (sc->sc_type == WM_T_82575)
4044 wm_reset_init_script_82575(sc);
4045 }
4046 break;
4047 default:
4048 break;
4049 }
4050
4051 if ((sc->sc_type == WM_T_82580)
4052 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4053 /* clear global device reset status bit */
4054 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4055 }
4056
4057 /* Clear any pending interrupt events. */
4058 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4059 reg = CSR_READ(sc, WMREG_ICR);
4060 if (sc->sc_nintrs > 1) {
4061 if (sc->sc_type != WM_T_82574) {
4062 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4063 CSR_WRITE(sc, WMREG_EIAC, 0);
4064 } else
4065 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4066 }
4067
4068 /* reload sc_ctrl */
4069 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4070
4071 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4072 wm_set_eee_i350(sc);
4073
4074 /* dummy read from WUC */
4075 if (sc->sc_type == WM_T_PCH)
4076 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4077 /*
4078 * For PCH, this write will make sure that any noise will be detected
4079 * as a CRC error and be dropped rather than show up as a bad packet
4080 * to the DMA engine
4081 */
4082 if (sc->sc_type == WM_T_PCH)
4083 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4084
4085 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4086 CSR_WRITE(sc, WMREG_WUC, 0);
4087
4088 wm_reset_mdicnfg_82580(sc);
4089
4090 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4091 wm_pll_workaround_i210(sc);
4092 }
4093
4094 /*
4095 * wm_add_rxbuf:
4096 *
4097 * Add a receive buffer to the indicated descriptor.
4098 */
4099 static int
4100 wm_add_rxbuf(struct wm_softc *sc, int idx)
4101 {
4102 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4103 struct mbuf *m;
4104 int error;
4105
4106 KASSERT(WM_RX_LOCKED(sc));
4107
4108 MGETHDR(m, M_DONTWAIT, MT_DATA);
4109 if (m == NULL)
4110 return ENOBUFS;
4111
4112 MCLGET(m, M_DONTWAIT);
4113 if ((m->m_flags & M_EXT) == 0) {
4114 m_freem(m);
4115 return ENOBUFS;
4116 }
4117
4118 if (rxs->rxs_mbuf != NULL)
4119 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4120
4121 rxs->rxs_mbuf = m;
4122
4123 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4124 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4125 BUS_DMA_READ|BUS_DMA_NOWAIT);
4126 if (error) {
4127 /* XXX XXX XXX */
4128 aprint_error_dev(sc->sc_dev,
4129 "unable to load rx DMA map %d, error = %d\n",
4130 idx, error);
4131 panic("wm_add_rxbuf");
4132 }
4133
4134 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4135 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4136
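/*
 * On NEWQUEUE (82575 and newer) chips the descriptor may only
 * be primed once the receiver is enabled; until then
 * wm_init_locked() takes care of it after RCTL.EN is set.
 */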
4137 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4138 if ((sc->sc_rctl & RCTL_EN) != 0)
4139 WM_INIT_RXDESC(sc, idx);
4140 } else
4141 WM_INIT_RXDESC(sc, idx);
4142
4143 return 0;
4144 }
4145
4146 /*
4147 * wm_rxdrain:
4148 *
4149 * Drain the receive queue.
4150 */
4151 static void
4152 wm_rxdrain(struct wm_softc *sc)
4153 {
4154 struct wm_rxsoft *rxs;
4155 int i;
4156
4157 KASSERT(WM_RX_LOCKED(sc));
4158
4159 for (i = 0; i < WM_NRXDESC; i++) {
4160 rxs = &sc->sc_rxsoft[i];
4161 if (rxs->rxs_mbuf != NULL) {
4162 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4163 m_freem(rxs->rxs_mbuf);
4164 rxs->rxs_mbuf = NULL;
4165 }
4166 }
4167 }
4168
4169 /*
4170 * wm_init: [ifnet interface function]
4171 *
4172 * Initialize the interface.
4173 */
4174 static int
4175 wm_init(struct ifnet *ifp)
4176 {
4177 struct wm_softc *sc = ifp->if_softc;
4178 int ret;
4179
4180 WM_BOTH_LOCK(sc);
4181 ret = wm_init_locked(ifp);
4182 WM_BOTH_UNLOCK(sc);
4183
4184 return ret;
4185 }
4186
4187 static int
4188 wm_init_locked(struct ifnet *ifp)
4189 {
4190 struct wm_softc *sc = ifp->if_softc;
4191 struct wm_rxsoft *rxs;
4192 int i, j, trynum, error = 0;
4193 uint32_t reg;
4194
4195 KASSERT(WM_BOTH_LOCKED(sc));
4196 /*
4197 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4198 * There is a small but measurable benefit to avoiding the adjustment
4199 * of the descriptor so that the headers are aligned, for normal mtu,
4200 * on such platforms. One possibility is that the DMA itself is
4201 * slightly more efficient if the front of the entire packet (instead
4202 * of the front of the headers) is aligned.
4203 *
4204 * Note we must always set align_tweak to 0 if we are using
4205 * jumbo frames.
4206 */
4207 #ifdef __NO_STRICT_ALIGNMENT
4208 sc->sc_align_tweak = 0;
4209 #else
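/*
 * The 2-byte tweak is usable only when a full frame (MTU +
 * Ethernet header + CRC) still fits in a cluster shifted by
 * those 2 bytes, i.e. within MCLBYTES - 2.
 */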
4210 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4211 sc->sc_align_tweak = 0;
4212 else
4213 sc->sc_align_tweak = 2;
4214 #endif /* __NO_STRICT_ALIGNMENT */
4215
4216 /* Cancel any pending I/O. */
4217 wm_stop_locked(ifp, 0);
4218
4219 /* update statistics before reset */
4220 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4221 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4222
4223 /* Reset the chip to a known state. */
4224 wm_reset(sc);
4225
4226 switch (sc->sc_type) {
4227 case WM_T_82571:
4228 case WM_T_82572:
4229 case WM_T_82573:
4230 case WM_T_82574:
4231 case WM_T_82583:
4232 case WM_T_80003:
4233 case WM_T_ICH8:
4234 case WM_T_ICH9:
4235 case WM_T_ICH10:
4236 case WM_T_PCH:
4237 case WM_T_PCH2:
4238 case WM_T_PCH_LPT:
4239 if (wm_check_mng_mode(sc) != 0)
4240 wm_get_hw_control(sc);
4241 break;
4242 default:
4243 break;
4244 }
4245
4246 /* Init hardware bits */
4247 wm_initialize_hardware_bits(sc);
4248
4249 /* Reset the PHY. */
4250 if (sc->sc_flags & WM_F_HAS_MII)
4251 wm_gmii_reset(sc);
4252
4253 /* Calculate (E)ITR value */
4254 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4255 sc->sc_itr = 450; /* For EITR */
4256 } else if (sc->sc_type >= WM_T_82543) {
4257 /*
4258 * Set up the interrupt throttling register (units of 256ns)
4259 * Note that a footnote in Intel's documentation says this
4260 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4261 * or 10Mbit mode. Empirically, it appears to be the case
4262 * that that is also true for the 1024ns units of the other
4263 * interrupt-related timer registers -- so, really, we ought
4264 * to divide this value by 4 when the link speed is low.
4265 *
4266 * XXX implement this division at link speed change!
4267 */
4268
4269 /*
4270 * For N interrupts/sec, set this value to:
4271 * 1000000000 / (N * 256). Note that we set the
4272 * absolute and packet timer values to this value
4273 * divided by 4 to get "simple timer" behavior.
4274 */
4275
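/*
 * Worked example of the formula above: sc_itr = 1500 gives
 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec.
 */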
4276 sc->sc_itr = 1500; /* 2604 ints/sec */
4277 }
4278
4279 /* Initialize the transmit descriptor ring. */
4280 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4281 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4282 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4283 sc->sc_txfree = WM_NTXDESC(sc);
4284 sc->sc_txnext = 0;
4285
4286 if (sc->sc_type < WM_T_82543) {
4287 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4288 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4289 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4290 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4291 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4292 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4293 } else {
4294 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4295 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4296 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4297 CSR_WRITE(sc, WMREG_TDH, 0);
4298
4299 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4300 /*
4301 * Don't write TDT before TCTL.EN is set.
4302 * See the documentation.
4303 */
4304 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
4305 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4306 | TXDCTL_WTHRESH(0));
4307 else {
4308 /* ITR / 4 */
4309 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
4310 if (sc->sc_type >= WM_T_82540) {
4311 /* should be same */
4312 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
4313 }
4314
4315 CSR_WRITE(sc, WMREG_TDT, 0);
4316 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
4317 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4318 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4319 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4320 }
4321 }
4322
4323 /* Initialize the transmit job descriptors. */
4324 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4325 sc->sc_txsoft[i].txs_mbuf = NULL;
4326 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4327 sc->sc_txsnext = 0;
4328 sc->sc_txsdirty = 0;
4329
4330 /*
4331 * Initialize the receive descriptor and receive job
4332 * descriptor rings.
4333 */
4334 if (sc->sc_type < WM_T_82543) {
4335 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4336 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4337 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4338 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4339 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4340 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4341
4342 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4343 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4344 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4345 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4346 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4347 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4348 } else {
4349 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4350 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4351 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4352
4353 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4354 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4355 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
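/*
 * SRRCTL takes the Rx buffer size in units of
 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, hence the
 * divisibility check above.
 */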
4356 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4357 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4358 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4359 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4360 | RXDCTL_WTHRESH(1));
4361 } else {
4362 CSR_WRITE(sc, WMREG_RDH, 0);
4363 CSR_WRITE(sc, WMREG_RDT, 0);
4364 /* ITR/4 */
4365 CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
4366 /* MUST be same */
4367 CSR_WRITE(sc, WMREG_RADV, sc->sc_itr);
4368 }
4369 }
4370 for (i = 0; i < WM_NRXDESC; i++) {
4371 rxs = &sc->sc_rxsoft[i];
4372 if (rxs->rxs_mbuf == NULL) {
4373 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4374 log(LOG_ERR, "%s: unable to allocate or map "
4375 "rx buffer %d, error = %d\n",
4376 device_xname(sc->sc_dev), i, error);
4377 /*
4378 * XXX Should attempt to run with fewer receive
4379 * XXX buffers instead of just failing.
4380 */
4381 wm_rxdrain(sc);
4382 goto out;
4383 }
4384 } else {
4385 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4386 WM_INIT_RXDESC(sc, i);
4387 /*
4388 * For 82575 and newer devices, the RX descriptors
4389 * must be initialized after RCTL.EN is set in
4390 * wm_set_filter().
4391 */
4392 }
4393 }
4394 sc->sc_rxptr = 0;
4395 sc->sc_rxdiscard = 0;
4396 WM_RXCHAIN_RESET(sc);
4397
4398 /*
4399 * Clear out the VLAN table -- we don't use it (yet).
4400 */
4401 CSR_WRITE(sc, WMREG_VET, 0);
4402 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4403 trynum = 10; /* Due to hw errata */
4404 else
4405 trynum = 1;
4406 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4407 for (j = 0; j < trynum; j++)
4408 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4409
4410 /*
4411 * Set up flow-control parameters.
4412 *
4413 * XXX Values could probably stand some tuning.
4414 */
4415 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4416 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4417 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4418 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4419 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4420 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4421 }
4422
4423 sc->sc_fcrtl = FCRTL_DFLT;
4424 if (sc->sc_type < WM_T_82543) {
4425 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4426 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4427 } else {
4428 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4429 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4430 }
4431
4432 if (sc->sc_type == WM_T_80003)
4433 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4434 else
4435 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4436
4437 /* Writes the control register. */
4438 wm_set_vlan(sc);
4439
4440 if (sc->sc_flags & WM_F_HAS_MII) {
4441 int val;
4442
4443 switch (sc->sc_type) {
4444 case WM_T_80003:
4445 case WM_T_ICH8:
4446 case WM_T_ICH9:
4447 case WM_T_ICH10:
4448 case WM_T_PCH:
4449 case WM_T_PCH2:
4450 case WM_T_PCH_LPT:
4451 /*
4452 * Set the MAC to wait the maximum time between each
4453 * iteration and increase the max iterations when
4454 * polling the PHY; this fixes erroneous timeouts at
4455 * 10Mbps.
4456 */
4457 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4458 0xFFFF);
4459 val = wm_kmrn_readreg(sc,
4460 KUMCTRLSTA_OFFSET_INB_PARAM);
4461 val |= 0x3F;
4462 wm_kmrn_writereg(sc,
4463 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4464 break;
4465 default:
4466 break;
4467 }
4468
4469 if (sc->sc_type == WM_T_80003) {
4470 val = CSR_READ(sc, WMREG_CTRL_EXT);
4471 val &= ~CTRL_EXT_LINK_MODE_MASK;
4472 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4473
4474 /* Bypass RX and TX FIFO's */
4475 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4476 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4477 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4478 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4479 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4480 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4481 }
4482 }
4483 #if 0
4484 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4485 #endif
4486
4487 /* Set up checksum offload parameters. */
4488 reg = CSR_READ(sc, WMREG_RXCSUM);
4489 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4490 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4491 reg |= RXCSUM_IPOFL;
4492 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4493 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4494 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4495 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4496 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4497
4498 /* Set up MSI-X */
4499 if (sc->sc_nintrs > 1) {
4500 uint32_t ivar;
4501
4502 if (sc->sc_type == WM_T_82575) {
4503 /* Interrupt control */
4504 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4505 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4506 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4507
4508 /* TX */
4509 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_TXINTR_IDX),
4510 EITR_TX_QUEUE0);
4511 /* RX */
4512 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_RXINTR_IDX),
4513 EITR_RX_QUEUE0);
4514 /* Link status */
4515 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_LINKINTR_IDX),
4516 EITR_OTHER);
4517 } else if (sc->sc_type == WM_T_82574) {
4518 /* Interrupt control */
4519 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4520 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4521 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4522
4523 /* TX, RX and Link status */
4524 ivar = __SHIFTIN((IVAR_VALID_82574 | WM_MSIX_TXINTR_IDX),
4525 IVAR_TX_MASK_Q_82574(0));
4526 ivar |= __SHIFTIN((IVAR_VALID_82574
4527 | WM_MSIX_RXINTR_IDX),
4528 IVAR_RX_MASK_Q_82574(0));
4529 ivar |= __SHIFTIN((IVAR_VALID_82574 | WM_MSIX_LINKINTR_IDX),
4530 IVAR_OTHER_MASK);
4531 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4532 } else {
4533 /* Interrupt control */
4534 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
4535 | GPIE_MULTI_MSIX | GPIE_EIAME
4536 | GPIE_PBA);
4537
4538 switch (sc->sc_type) {
4539 case WM_T_82580:
4540 case WM_T_I350:
4541 case WM_T_I354:
4542 case WM_T_I210:
4543 case WM_T_I211:
4544 /* TX */
4545 ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
4546 ivar &= ~IVAR_TX_MASK_Q(0);
4547 ivar |= __SHIFTIN(
4548 (WM_MSIX_TXINTR_IDX | IVAR_VALID),
4549 IVAR_TX_MASK_Q(0));
4550 CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
4551
4552 /* RX */
4553 ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
4554 ivar &= ~IVAR_RX_MASK_Q(0);
4555 ivar |= __SHIFTIN(
4556 (WM_MSIX_RXINTR_IDX | IVAR_VALID),
4557 IVAR_RX_MASK_Q(0));
4558 CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
4559 break;
4560 case WM_T_82576:
4561 /* TX */
4562 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
4563 ivar &= ~IVAR_TX_MASK_Q_82576(0);
4564 ivar |= __SHIFTIN(
4565 (WM_MSIX_TXINTR_IDX | IVAR_VALID),
4566 IVAR_TX_MASK_Q_82576(0));
4567 CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
4568
4569 /* RX */
4570 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
4571 ivar &= ~IVAR_RX_MASK_Q_82576(0);
4572 ivar |= __SHIFTIN(
4573 (WM_MSIX_RXINTR_IDX | IVAR_VALID),
4574 IVAR_RX_MASK_Q_82576(0));
4575 CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
4576 break;
4577 default:
4578 break;
4579 }
4580
4581 /* Link status */
4582 ivar = __SHIFTIN((WM_MSIX_LINKINTR_IDX | IVAR_VALID),
4583 IVAR_MISC_OTHER);
4584 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4585 }
4586 }
4587
4588 /* Set up the interrupt registers. */
4589 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4590 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4591 ICR_RXO | ICR_RXT0;
4592 if (sc->sc_nintrs > 1) {
4593 uint32_t mask;
4594 switch (sc->sc_type) {
4595 case WM_T_82574:
4596 CSR_WRITE(sc, WMREG_EIAC_82574,
4597 WMREG_EIAC_82574_MSIX_MASK);
4598 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4599 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4600 break;
4601 default:
4602 if (sc->sc_type == WM_T_82575)
4603 mask = EITR_RX_QUEUE0 | EITR_TX_QUEUE0
4604 | EITR_OTHER;
4605 else
4606 mask = (1 << WM_MSIX_RXINTR_IDX)
4607 | (1 << WM_MSIX_TXINTR_IDX)
4608 | (1 << WM_MSIX_LINKINTR_IDX);
4609 CSR_WRITE(sc, WMREG_EIAC, mask);
4610 CSR_WRITE(sc, WMREG_EIAM, mask);
4611 CSR_WRITE(sc, WMREG_EIMS, mask);
4612 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4613 break;
4614 }
4615 } else
4616 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4617
4618 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4619 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4620 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4621 reg = CSR_READ(sc, WMREG_KABGTXD);
4622 reg |= KABGTXD_BGSQLBIAS;
4623 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4624 }
4625
4626 /* Set up the inter-packet gap. */
4627 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4628
4629 if (sc->sc_type >= WM_T_82543) {
4630 /*
4631 * XXX 82574 has both ITR and EITR. SET EITR when we use
4632 * the multi queue function with MSI-X.
4633 */
4634 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4635 CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
4636 else
4637 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4638 }
4639
4640 /* Set the VLAN ethernetype. */
4641 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4642
4643 /*
4644 * Set up the transmit control register; we start out with
4645 * a collision distance suitable for FDX, but update it when
4646 * we resolve the media type.
4647 */
4648 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4649 | TCTL_CT(TX_COLLISION_THRESHOLD)
4650 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4651 if (sc->sc_type >= WM_T_82571)
4652 sc->sc_tctl |= TCTL_MULR;
4653 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4654
4655 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4656 /* Write TDT after TCTL.EN is set. See the documentation. */
4657 CSR_WRITE(sc, WMREG_TDT, 0);
4658 }
4659
4660 if (sc->sc_type == WM_T_80003) {
4661 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4662 reg &= ~TCTL_EXT_GCEX_MASK;
4663 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4664 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4665 }
4666
4667 /* Set the media. */
4668 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4669 goto out;
4670
4671 /* Configure for OS presence */
4672 wm_init_manageability(sc);
4673
4674 /*
4675 * Set up the receive control register; we actually program
4676 * the register when we set the receive filter. Use multicast
4677 * address offset type 0.
4678 *
4679 * Only the i82544 has the ability to strip the incoming
4680 * CRC, so we don't enable that feature.
4681 */
4682 sc->sc_mchash_type = 0;
4683 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4684 | RCTL_MO(sc->sc_mchash_type);
4685
4686 /*
4687 * The I350 has a bug where it always strips the CRC whether
4688 * asked to or not. So ask for stripped CRC here and cope in rxeof.
4689 */
4690 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4691 || (sc->sc_type == WM_T_I210))
4692 sc->sc_rctl |= RCTL_SECRC;
4693
4694 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4695 && (ifp->if_mtu > ETHERMTU)) {
4696 sc->sc_rctl |= RCTL_LPE;
4697 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4698 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4699 }
4700
4701 if (MCLBYTES == 2048) {
4702 sc->sc_rctl |= RCTL_2k;
4703 } else {
4704 if (sc->sc_type >= WM_T_82543) {
4705 switch (MCLBYTES) {
4706 case 4096:
4707 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4708 break;
4709 case 8192:
4710 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4711 break;
4712 case 16384:
4713 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4714 break;
4715 default:
4716 panic("wm_init: MCLBYTES %d unsupported",
4717 MCLBYTES);
4718 break;
4719 }
4720 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4721 }
4722
4723 /* Set the receive filter. */
4724 wm_set_filter(sc);
4725
4726 /* Enable ECC */
4727 switch (sc->sc_type) {
4728 case WM_T_82571:
4729 reg = CSR_READ(sc, WMREG_PBA_ECC);
4730 reg |= PBA_ECC_CORR_EN;
4731 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4732 break;
4733 case WM_T_PCH_LPT:
4734 reg = CSR_READ(sc, WMREG_PBECCSTS);
4735 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4736 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4737
4738 reg = CSR_READ(sc, WMREG_CTRL);
4739 reg |= CTRL_MEHE;
4740 CSR_WRITE(sc, WMREG_CTRL, reg);
4741 break;
4742 default:
4743 break;
4744 }
4745
4746 /* On 575 and later set RDT only if RX enabled */
4747 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4748 for (i = 0; i < WM_NRXDESC; i++)
4749 WM_INIT_RXDESC(sc, i);
4750
4751 sc->sc_stopping = false;
4752
4753 /* Start the one second link check clock. */
4754 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4755
4756 /* ...all done! */
4757 ifp->if_flags |= IFF_RUNNING;
4758 ifp->if_flags &= ~IFF_OACTIVE;
4759
4760 out:
4761 sc->sc_if_flags = ifp->if_flags;
4762 if (error)
4763 log(LOG_ERR, "%s: interface not running\n",
4764 device_xname(sc->sc_dev));
4765 return error;
4766 }
4767
4768 /*
4769 * wm_stop: [ifnet interface function]
4770 *
4771 * Stop transmission on the interface.
4772 */
4773 static void
4774 wm_stop(struct ifnet *ifp, int disable)
4775 {
4776 struct wm_softc *sc = ifp->if_softc;
4777
4778 WM_BOTH_LOCK(sc);
4779 wm_stop_locked(ifp, disable);
4780 WM_BOTH_UNLOCK(sc);
4781 }
4782
4783 static void
4784 wm_stop_locked(struct ifnet *ifp, int disable)
4785 {
4786 struct wm_softc *sc = ifp->if_softc;
4787 struct wm_txsoft *txs;
4788 int i;
4789
4790 KASSERT(WM_BOTH_LOCKED(sc));
4791
4792 sc->sc_stopping = true;
4793
4794 /* Stop the one second clock. */
4795 callout_stop(&sc->sc_tick_ch);
4796
4797 /* Stop the 82547 Tx FIFO stall check timer. */
4798 if (sc->sc_type == WM_T_82547)
4799 callout_stop(&sc->sc_txfifo_ch);
4800
4801 if (sc->sc_flags & WM_F_HAS_MII) {
4802 /* Down the MII. */
4803 mii_down(&sc->sc_mii);
4804 } else {
4805 #if 0
4806 /* Should we clear PHY's status properly? */
4807 wm_reset(sc);
4808 #endif
4809 }
4810
4811 /* Stop the transmit and receive processes. */
4812 CSR_WRITE(sc, WMREG_TCTL, 0);
4813 CSR_WRITE(sc, WMREG_RCTL, 0);
4814 sc->sc_rctl &= ~RCTL_EN;
4815
4816 /*
4817 * Clear the interrupt mask to ensure the device cannot assert its
4818 * interrupt line.
4819 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
4820 * service any currently pending or shared interrupt.
4821 */
4822 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4823 sc->sc_icr = 0;
4824 if (sc->sc_nintrs > 1) {
4825 if (sc->sc_type != WM_T_82574) {
4826 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4827 CSR_WRITE(sc, WMREG_EIAC, 0);
4828 } else
4829 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4830 }
4831
4832 /* Release any queued transmit buffers. */
4833 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4834 txs = &sc->sc_txsoft[i];
4835 if (txs->txs_mbuf != NULL) {
4836 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4837 m_freem(txs->txs_mbuf);
4838 txs->txs_mbuf = NULL;
4839 }
4840 }
4841
4842 /* Mark the interface as down and cancel the watchdog timer. */
4843 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4844 ifp->if_timer = 0;
4845
4846 if (disable)
4847 wm_rxdrain(sc);
4848
4849 #if 0 /* notyet */
4850 if (sc->sc_type >= WM_T_82544)
4851 CSR_WRITE(sc, WMREG_WUC, 0);
4852 #endif
4853 }
4854
4855 /*
4856 * wm_tx_offload:
4857 *
4858 * Set up TCP/IP checksumming parameters for the
4859 * specified packet.
4860 */
4861 static int
4862 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4863 uint8_t *fieldsp)
4864 {
4865 struct mbuf *m0 = txs->txs_mbuf;
4866 struct livengood_tcpip_ctxdesc *t;
4867 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4868 uint32_t ipcse;
4869 struct ether_header *eh;
4870 int offset, iphl;
4871 uint8_t fields;
4872
4873 /*
4874 * XXX It would be nice if the mbuf pkthdr had offset
4875 * fields for the protocol headers.
4876 */
4877
4878 eh = mtod(m0, struct ether_header *);
4879 switch (htons(eh->ether_type)) {
4880 case ETHERTYPE_IP:
4881 case ETHERTYPE_IPV6:
4882 offset = ETHER_HDR_LEN;
4883 break;
4884
4885 case ETHERTYPE_VLAN:
4886 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4887 break;
4888
4889 default:
4890 /*
4891 * Don't support this protocol or encapsulation.
4892 */
4893 *fieldsp = 0;
4894 *cmdp = 0;
4895 return 0;
4896 }
4897
4898 if ((m0->m_pkthdr.csum_flags &
4899 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4900 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4901 } else {
4902 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4903 }
4904 ipcse = offset + iphl - 1;
4905
4906 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4907 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4908 seg = 0;
4909 fields = 0;
4910
4911 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4912 int hlen = offset + iphl;
4913 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4914
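/*
 * TSO setup: zero the IP length field and seed th_sum with a
 * pseudo-header checksum computed without the length; the
 * hardware is then expected to fill in the length and checksum
 * of each segment it emits.
 */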
4915 if (__predict_false(m0->m_len <
4916 (hlen + sizeof(struct tcphdr)))) {
4917 /*
4918 * TCP/IP headers are not in the first mbuf; we need
4919 * to do this the slow and painful way. Let's just
4920 * hope this doesn't happen very often.
4921 */
4922 struct tcphdr th;
4923
4924 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4925
4926 m_copydata(m0, hlen, sizeof(th), &th);
4927 if (v4) {
4928 struct ip ip;
4929
4930 m_copydata(m0, offset, sizeof(ip), &ip);
4931 ip.ip_len = 0;
4932 m_copyback(m0,
4933 offset + offsetof(struct ip, ip_len),
4934 sizeof(ip.ip_len), &ip.ip_len);
4935 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4936 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4937 } else {
4938 struct ip6_hdr ip6;
4939
4940 m_copydata(m0, offset, sizeof(ip6), &ip6);
4941 ip6.ip6_plen = 0;
4942 m_copyback(m0,
4943 offset + offsetof(struct ip6_hdr, ip6_plen),
4944 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4945 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4946 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4947 }
4948 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4949 sizeof(th.th_sum), &th.th_sum);
4950
4951 hlen += th.th_off << 2;
4952 } else {
4953 /*
4954 * TCP/IP headers are in the first mbuf; we can do
4955 * this the easy way.
4956 */
4957 struct tcphdr *th;
4958
4959 if (v4) {
4960 struct ip *ip =
4961 (void *)(mtod(m0, char *) + offset);
4962 th = (void *)(mtod(m0, char *) + hlen);
4963
4964 ip->ip_len = 0;
4965 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4966 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4967 } else {
4968 struct ip6_hdr *ip6 =
4969 (void *)(mtod(m0, char *) + offset);
4970 th = (void *)(mtod(m0, char *) + hlen);
4971
4972 ip6->ip6_plen = 0;
4973 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4974 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4975 }
4976 hlen += th->th_off << 2;
4977 }
4978
4979 if (v4) {
4980 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4981 cmdlen |= WTX_TCPIP_CMD_IP;
4982 } else {
4983 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4984 ipcse = 0;
4985 }
4986 cmd |= WTX_TCPIP_CMD_TSE;
4987 cmdlen |= WTX_TCPIP_CMD_TSE |
4988 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4989 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4990 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4991 }
4992
4993 /*
4994 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4995 * offload feature, if we load the context descriptor, we
4996 * MUST provide valid values for IPCSS and TUCSS fields.
4997 */
4998
4999 ipcs = WTX_TCPIP_IPCSS(offset) |
5000 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
5001 WTX_TCPIP_IPCSE(ipcse);
5002 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
5003 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
5004 fields |= WTX_IXSM;
5005 }
5006
5007 offset += iphl;
5008
5009 if (m0->m_pkthdr.csum_flags &
5010 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
5011 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5012 fields |= WTX_TXSM;
5013 tucs = WTX_TCPIP_TUCSS(offset) |
5014 WTX_TCPIP_TUCSO(offset +
5015 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
5016 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5017 } else if ((m0->m_pkthdr.csum_flags &
5018 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
5019 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5020 fields |= WTX_TXSM;
5021 tucs = WTX_TCPIP_TUCSS(offset) |
5022 WTX_TCPIP_TUCSO(offset +
5023 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
5024 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5025 } else {
5026 /* Just initialize it to a valid TCP context. */
5027 tucs = WTX_TCPIP_TUCSS(offset) |
5028 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
5029 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5030 }
5031
5032 /* Fill in the context descriptor. */
5033 t = (struct livengood_tcpip_ctxdesc *)
5034 &sc->sc_txdescs[sc->sc_txnext];
5035 t->tcpip_ipcs = htole32(ipcs);
5036 t->tcpip_tucs = htole32(tucs);
5037 t->tcpip_cmdlen = htole32(cmdlen);
5038 t->tcpip_seg = htole32(seg);
5039 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5040
5041 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5042 txs->txs_ndesc++;
5043
5044 *cmdp = cmd;
5045 *fieldsp = fields;
5046
5047 return 0;
5048 }
5049
5050 static void
5051 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5052 {
5053 struct mbuf *m;
5054 int i;
5055
5056 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5057 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5058 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5059 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5060 m->m_data, m->m_len, m->m_flags);
5061 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5062 i, i == 1 ? "" : "s");
5063 }
5064
5065 /*
5066 * wm_82547_txfifo_stall:
5067 *
5068 * Callout used to wait for the 82547 Tx FIFO to drain,
5069 * reset the FIFO pointers, and restart packet transmission.
5070 */
5071 static void
5072 wm_82547_txfifo_stall(void *arg)
5073 {
5074 struct wm_softc *sc = arg;
5075 #ifndef WM_MPSAFE
5076 int s;
5077
5078 s = splnet();
5079 #endif
5080 WM_TX_LOCK(sc);
5081
5082 if (sc->sc_stopping)
5083 goto out;
5084
5085 if (sc->sc_txfifo_stall) {
5086 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
5087 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5088 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5089 /*
5090 * Packets have drained. Stop transmitter, reset
5091 * FIFO pointers, restart transmitter, and kick
5092 * the packet queue.
5093 */
5094 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5095 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5096 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
5097 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
5098 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
5099 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
5100 CSR_WRITE(sc, WMREG_TCTL, tctl);
5101 CSR_WRITE_FLUSH(sc);
5102
5103 sc->sc_txfifo_head = 0;
5104 sc->sc_txfifo_stall = 0;
5105 wm_start_locked(&sc->sc_ethercom.ec_if);
5106 } else {
5107 /*
5108 * Still waiting for packets to drain; try again in
5109 * another tick.
5110 */
5111 callout_schedule(&sc->sc_txfifo_ch, 1);
5112 }
5113 }
5114
5115 out:
5116 WM_TX_UNLOCK(sc);
5117 #ifndef WM_MPSAFE
5118 splx(s);
5119 #endif
5120 }
5121
5122 /*
5123 * wm_82547_txfifo_bugchk:
5124 *
5125 * Check for bug condition in the 82547 Tx FIFO. We need to
5126 * prevent enqueueing a packet that would wrap around the end
5127 * of the Tx FIFO ring buffer, otherwise the chip will croak.
5128 *
5129 * We do this by checking the amount of space before the end
5130 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5131 * the Tx FIFO, wait for all remaining packets to drain, reset
5132 * the internal FIFO pointers to the beginning, and restart
5133 * transmission on the interface.
5134 */
5135 #define WM_FIFO_HDR 0x10
5136 #define WM_82547_PAD_LEN 0x3e0
5137 static int
5138 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5139 {
5140 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
5141 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
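/*
 * Each packet occupies its length plus a WM_FIFO_HDR (16-byte)
 * header in the FIFO, rounded up to WM_FIFO_HDR granularity;
 * e.g. a 60-byte runt accounts for roundup(60 + 16, 16) = 80
 * bytes.
 */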
5142
5143 /* Just return if already stalled. */
5144 if (sc->sc_txfifo_stall)
5145 return 1;
5146
5147 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5148 /* Stall only occurs in half-duplex mode. */
5149 goto send_packet;
5150 }
5151
5152 if (len >= WM_82547_PAD_LEN + space) {
5153 sc->sc_txfifo_stall = 1;
5154 callout_schedule(&sc->sc_txfifo_ch, 1);
5155 return 1;
5156 }
5157
5158 send_packet:
5159 sc->sc_txfifo_head += len;
5160 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
5161 sc->sc_txfifo_head -= sc->sc_txfifo_size;
5162
5163 return 0;
5164 }
5165
5166 /*
5167 * wm_start: [ifnet interface function]
5168 *
5169 * Start packet transmission on the interface.
5170 */
5171 static void
5172 wm_start(struct ifnet *ifp)
5173 {
5174 struct wm_softc *sc = ifp->if_softc;
5175
5176 WM_TX_LOCK(sc);
5177 if (!sc->sc_stopping)
5178 wm_start_locked(ifp);
5179 WM_TX_UNLOCK(sc);
5180 }
5181
5182 static void
5183 wm_start_locked(struct ifnet *ifp)
5184 {
5185 struct wm_softc *sc = ifp->if_softc;
5186 struct mbuf *m0;
5187 struct m_tag *mtag;
5188 struct wm_txsoft *txs;
5189 bus_dmamap_t dmamap;
5190 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
5191 bus_addr_t curaddr;
5192 bus_size_t seglen, curlen;
5193 uint32_t cksumcmd;
5194 uint8_t cksumfields;
5195
5196 KASSERT(WM_TX_LOCKED(sc));
5197
5198 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5199 return;
5200
5201 /* Remember the previous number of free descriptors. */
5202 ofree = sc->sc_txfree;
5203
5204 /*
5205 * Loop through the send queue, setting up transmit descriptors
5206 * until we drain the queue, or use up all available transmit
5207 * descriptors.
5208 */
5209 for (;;) {
5210 m0 = NULL;
5211
5212 /* Get a work queue entry. */
5213 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5214 wm_txeof(sc);
5215 if (sc->sc_txsfree == 0) {
5216 DPRINTF(WM_DEBUG_TX,
5217 ("%s: TX: no free job descriptors\n",
5218 device_xname(sc->sc_dev)));
5219 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5220 break;
5221 }
5222 }
5223
5224 /* Grab a packet off the queue. */
5225 IFQ_DEQUEUE(&ifp->if_snd, m0);
5226 if (m0 == NULL)
5227 break;
5228
5229 DPRINTF(WM_DEBUG_TX,
5230 ("%s: TX: have packet to transmit: %p\n",
5231 device_xname(sc->sc_dev), m0));
5232
5233 txs = &sc->sc_txsoft[sc->sc_txsnext];
5234 dmamap = txs->txs_dmamap;
5235
5236 use_tso = (m0->m_pkthdr.csum_flags &
5237 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
5238
5239 /*
5240 * So says the Linux driver:
5241 * The controller does a simple calculation to make sure
5242 * there is enough room in the FIFO before initiating the
5243 * DMA for each buffer. The calc is:
5244 * 4 = ceil(buffer len / MSS)
5245 * To make sure we don't overrun the FIFO, adjust the max
5246 * buffer len if the MSS drops.
5247 */
5248 dmamap->dm_maxsegsz =
5249 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
5250 ? m0->m_pkthdr.segsz << 2
5251 : WTX_MAX_LEN;
5252
5253 /*
5254 * Load the DMA map. If this fails, the packet either
5255 * didn't fit in the allotted number of segments, or we
5256 * were short on resources. For the too-many-segments
5257 * case, we simply report an error and drop the packet,
5258 * since we can't sanely copy a jumbo packet to a single
5259 * buffer.
5260 */
5261 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5262 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5263 if (error) {
5264 if (error == EFBIG) {
5265 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5266 log(LOG_ERR, "%s: Tx packet consumes too many "
5267 "DMA segments, dropping...\n",
5268 device_xname(sc->sc_dev));
5269 wm_dump_mbuf_chain(sc, m0);
5270 m_freem(m0);
5271 continue;
5272 }
5273 /* Short on resources, just stop for now. */
5274 DPRINTF(WM_DEBUG_TX,
5275 ("%s: TX: dmamap load failed: %d\n",
5276 device_xname(sc->sc_dev), error));
5277 break;
5278 }
5279
5280 segs_needed = dmamap->dm_nsegs;
5281 if (use_tso) {
5282 /* For sentinel descriptor; see below. */
5283 segs_needed++;
5284 }
5285
5286 /*
5287 * Ensure we have enough descriptors free to describe
5288 * the packet. Note, we always reserve one descriptor
5289 * at the end of the ring due to the semantics of the
5290 * TDT register, plus one more in the event we need
5291 * to load offload context.
5292 */
5293 if (segs_needed > sc->sc_txfree - 2) {
5294 /*
5295 * Not enough free descriptors to transmit this
5296 * packet. We haven't committed anything yet,
5297 * so just unload the DMA map, put the packet
5298 * back on the queue, and punt. Notify the upper
5299 * layer that there are no more slots left.
5300 */
5301 DPRINTF(WM_DEBUG_TX,
5302 ("%s: TX: need %d (%d) descriptors, have %d\n",
5303 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5304 segs_needed, sc->sc_txfree - 1));
5305 ifp->if_flags |= IFF_OACTIVE;
5306 bus_dmamap_unload(sc->sc_dmat, dmamap);
5307 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5308 break;
5309 }
5310
5311 /*
5312 * Check for 82547 Tx FIFO bug. We need to do this
5313 * once we know we can transmit the packet, since we
5314 * do some internal FIFO space accounting here.
5315 */
5316 if (sc->sc_type == WM_T_82547 &&
5317 wm_82547_txfifo_bugchk(sc, m0)) {
5318 DPRINTF(WM_DEBUG_TX,
5319 ("%s: TX: 82547 Tx FIFO bug detected\n",
5320 device_xname(sc->sc_dev)));
5321 ifp->if_flags |= IFF_OACTIVE;
5322 bus_dmamap_unload(sc->sc_dmat, dmamap);
5323 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
5324 break;
5325 }
5326
5327 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5328
5329 DPRINTF(WM_DEBUG_TX,
5330 ("%s: TX: packet has %d (%d) DMA segments\n",
5331 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5332
5333 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5334
5335 /*
5336 * Store a pointer to the packet so that we can free it
5337 * later.
5338 *
5339 * Initially, we consider the number of descriptors the
5340 * packet uses the number of DMA segments. This may be
5341 * incremented by 1 if we do checksum offload (a descriptor
5342 * is used to set the checksum context).
5343 */
5344 txs->txs_mbuf = m0;
5345 txs->txs_firstdesc = sc->sc_txnext;
5346 txs->txs_ndesc = segs_needed;
5347
5348 /* Set up offload parameters for this packet. */
5349 if (m0->m_pkthdr.csum_flags &
5350 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5351 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5352 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5353 if (wm_tx_offload(sc, txs, &cksumcmd,
5354 &cksumfields) != 0) {
5355 /* Error message already displayed. */
5356 bus_dmamap_unload(sc->sc_dmat, dmamap);
5357 continue;
5358 }
5359 } else {
5360 cksumcmd = 0;
5361 cksumfields = 0;
5362 }
5363
5364 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
5365
5366 /* Sync the DMA map. */
5367 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5368 BUS_DMASYNC_PREWRITE);
5369
5370 /* Initialize the transmit descriptor. */
5371 for (nexttx = sc->sc_txnext, seg = 0;
5372 seg < dmamap->dm_nsegs; seg++) {
5373 for (seglen = dmamap->dm_segs[seg].ds_len,
5374 curaddr = dmamap->dm_segs[seg].ds_addr;
5375 seglen != 0;
5376 curaddr += curlen, seglen -= curlen,
5377 nexttx = WM_NEXTTX(sc, nexttx)) {
5378 curlen = seglen;
5379
5380 /*
5381 * So says the Linux driver:
5382 * Work around for premature descriptor
5383 * write-backs in TSO mode. Append a
5384 * 4-byte sentinel descriptor.
5385 */
5386 if (use_tso &&
5387 seg == dmamap->dm_nsegs - 1 &&
5388 curlen > 8)
5389 curlen -= 4;
5390
5391 wm_set_dma_addr(
5392 &sc->sc_txdescs[nexttx].wtx_addr,
5393 curaddr);
5394 sc->sc_txdescs[nexttx].wtx_cmdlen =
5395 htole32(cksumcmd | curlen);
5396 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
5397 0;
5398 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
5399 cksumfields;
5400 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5401 lasttx = nexttx;
5402
5403 DPRINTF(WM_DEBUG_TX,
5404 ("%s: TX: desc %d: low %#" PRIx64 ", "
5405 "len %#04zx\n",
5406 device_xname(sc->sc_dev), nexttx,
5407 (uint64_t)curaddr, curlen));
5408 }
5409 }
5410
5411 KASSERT(lasttx != -1);
5412
5413 /*
5414 * Set up the command byte on the last descriptor of
5415 * the packet. If we're in the interrupt delay window,
5416 * delay the interrupt.
5417 */
5418 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5419 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5420
5421 /*
5422 * If VLANs are enabled and the packet has a VLAN tag, set
5423 * up the descriptor to encapsulate the packet for us.
5424 *
5425 * This is only valid on the last descriptor of the packet.
5426 */
5427 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5428 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5429 htole32(WTX_CMD_VLE);
5430 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
5431 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5432 }
5433
5434 txs->txs_lastdesc = lasttx;
5435
5436 DPRINTF(WM_DEBUG_TX,
5437 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5438 device_xname(sc->sc_dev),
5439 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5440
5441 /* Sync the descriptors we're using. */
5442 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5443 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5444
5445 /* Give the packet to the chip. */
5446 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5447
5448 DPRINTF(WM_DEBUG_TX,
5449 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5450
5451 DPRINTF(WM_DEBUG_TX,
5452 ("%s: TX: finished transmitting packet, job %d\n",
5453 device_xname(sc->sc_dev), sc->sc_txsnext));
5454
5455 /* Advance the tx pointer. */
5456 sc->sc_txfree -= txs->txs_ndesc;
5457 sc->sc_txnext = nexttx;
5458
5459 sc->sc_txsfree--;
5460 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5461
5462 /* Pass the packet to any BPF listeners. */
5463 bpf_mtap(ifp, m0);
5464 }
5465
5466 if (m0 != NULL) {
5467 ifp->if_flags |= IFF_OACTIVE;
5468 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5469 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5470 m_freem(m0);
5471 }
5472
5473 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5474 /* No more slots; notify upper layer. */
5475 ifp->if_flags |= IFF_OACTIVE;
5476 }
5477
5478 if (sc->sc_txfree != ofree) {
5479 /* Set a watchdog timer in case the chip flakes out. */
5480 ifp->if_timer = 5;
5481 }
5482 }
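/*
 * Editor's sketch (not part of the driver): the TSO sentinel logic in the
 * descriptor loop above shortens the last DMA segment by 4 bytes, so the
 * inner loop's "seglen -= curlen" leaves a 4-byte remainder that becomes
 * one extra sentinel descriptor on the next iteration.  A minimal model
 * of that split, assuming only standard C:
 */
#if 0	/* illustrative example only */
static int
tso_sentinel_split(int last_seg_len, int *desc_len)
{
	int n = 0, seglen = last_seg_len, curlen;

	while (seglen != 0) {
		curlen = seglen;
		if (curlen > 8)
			curlen -= 4;	/* hold back a 4-byte sentinel */
		desc_len[n++] = curlen;
		seglen -= curlen;
	}
	return n;	/* last_seg_len == 1500 -> {1496, 4}, n == 2 */
}
#endif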
5483
5484 /*
5485 * wm_nq_tx_offload:
5486 *
5487 * Set up TCP/IP checksumming parameters for the
5488 * specified packet, for NEWQUEUE devices
5489 */
5490 static int
5491 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
5492 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
5493 {
5494 struct mbuf *m0 = txs->txs_mbuf;
5495 struct m_tag *mtag;
5496 uint32_t vl_len, mssidx, cmdc;
5497 struct ether_header *eh;
5498 int offset, iphl;
5499
5500 /*
5501 * XXX It would be nice if the mbuf pkthdr had offset
5502 * fields for the protocol headers.
5503 */
5504 *cmdlenp = 0;
5505 *fieldsp = 0;
5506
5507 eh = mtod(m0, struct ether_header *);
5508 switch (htons(eh->ether_type)) {
5509 case ETHERTYPE_IP:
5510 case ETHERTYPE_IPV6:
5511 offset = ETHER_HDR_LEN;
5512 break;
5513
5514 case ETHERTYPE_VLAN:
5515 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5516 break;
5517
5518 default:
5519 /* Don't support this protocol or encapsulation. */
5520 *do_csum = false;
5521 return 0;
5522 }
5523 *do_csum = true;
5524 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
5525 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
5526
5527 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
5528 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
5529
5530 if ((m0->m_pkthdr.csum_flags &
5531 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
5532 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5533 } else {
5534 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5535 }
5536 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
5537 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
5538
5539 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5540 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
5541 << NQTXC_VLLEN_VLAN_SHIFT);
5542 *cmdlenp |= NQTX_CMD_VLE;
5543 }
5544
5545 mssidx = 0;
5546
5547 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5548 int hlen = offset + iphl;
5549 int tcp_hlen;
5550 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5551
5552 if (__predict_false(m0->m_len <
5553 (hlen + sizeof(struct tcphdr)))) {
5554 /*
5555 * TCP/IP headers are not in the first mbuf; we need
5556 * to do this the slow and painful way. Let's just
5557 * hope this doesn't happen very often.
5558 */
5559 struct tcphdr th;
5560
5561 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5562
5563 m_copydata(m0, hlen, sizeof(th), &th);
5564 if (v4) {
5565 struct ip ip;
5566
5567 m_copydata(m0, offset, sizeof(ip), &ip);
5568 ip.ip_len = 0;
5569 m_copyback(m0,
5570 offset + offsetof(struct ip, ip_len),
5571 sizeof(ip.ip_len), &ip.ip_len);
5572 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5573 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5574 } else {
5575 struct ip6_hdr ip6;
5576
5577 m_copydata(m0, offset, sizeof(ip6), &ip6);
5578 ip6.ip6_plen = 0;
5579 m_copyback(m0,
5580 offset + offsetof(struct ip6_hdr, ip6_plen),
5581 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5582 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5583 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5584 }
5585 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5586 sizeof(th.th_sum), &th.th_sum);
5587
5588 tcp_hlen = th.th_off << 2;
5589 } else {
5590 /*
5591 * TCP/IP headers are in the first mbuf; we can do
5592 * this the easy way.
5593 */
5594 struct tcphdr *th;
5595
5596 if (v4) {
5597 struct ip *ip =
5598 (void *)(mtod(m0, char *) + offset);
5599 th = (void *)(mtod(m0, char *) + hlen);
5600
5601 ip->ip_len = 0;
5602 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5603 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5604 } else {
5605 struct ip6_hdr *ip6 =
5606 (void *)(mtod(m0, char *) + offset);
5607 th = (void *)(mtod(m0, char *) + hlen);
5608
5609 ip6->ip6_plen = 0;
5610 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5611 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5612 }
5613 tcp_hlen = th->th_off << 2;
5614 }
5615 hlen += tcp_hlen;
5616 *cmdlenp |= NQTX_CMD_TSE;
5617
5618 if (v4) {
5619 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5620 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5621 } else {
5622 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5623 *fieldsp |= NQTXD_FIELDS_TUXSM;
5624 }
5625 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5626 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5627 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5628 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5629 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5630 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
5631 } else {
5632 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5633 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5634 }
5635
5636 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5637 *fieldsp |= NQTXD_FIELDS_IXSM;
5638 cmdc |= NQTXC_CMD_IP4;
5639 }
5640
5641 if (m0->m_pkthdr.csum_flags &
5642 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5643 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5644 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5645 cmdc |= NQTXC_CMD_TCP;
5646 } else {
5647 cmdc |= NQTXC_CMD_UDP;
5648 }
5649 cmdc |= NQTXC_CMD_IP4;
5650 *fieldsp |= NQTXD_FIELDS_TUXSM;
5651 }
5652 if (m0->m_pkthdr.csum_flags &
5653 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5654 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5655 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5656 cmdc |= NQTXC_CMD_TCP;
5657 } else {
5658 cmdc |= NQTXC_CMD_UDP;
5659 }
5660 cmdc |= NQTXC_CMD_IP6;
5661 *fieldsp |= NQTXD_FIELDS_TUXSM;
5662 }
5663
5664 /* Fill in the context descriptor. */
5665 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5666 htole32(vl_len);
5667 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5668 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5669 htole32(cmdc);
5670 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5671 htole32(mssidx);
5672 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5673 DPRINTF(WM_DEBUG_TX,
5674 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5675 sc->sc_txnext, 0, vl_len));
5676 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5677 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5678 txs->txs_ndesc++;
5679 return 0;
5680 }
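/*
 * Editor's sketch (not part of the driver): how the function above packs
 * the NEWQUEUE context descriptor's vl_len word, using the same wmreg.h
 * shift/mask macros.  The sample values (14-byte Ethernet header, 20-byte
 * IPv4 header) are illustrative only.
 */
#if 0	/* illustrative example only */
static uint32_t
nq_pack_vl_len(uint32_t maclen, uint32_t iplen, uint32_t vlan)
{
	uint32_t vl_len;

	vl_len  = maclen << NQTXC_VLLEN_MACLEN_SHIFT;	/* e.g. 14 */
	vl_len |= iplen << NQTXC_VLLEN_IPLEN_SHIFT;	/* e.g. 20 */
	vl_len |= (vlan & NQTXC_VLLEN_VLAN_MASK)
	    << NQTXC_VLLEN_VLAN_SHIFT;			/* 0 if untagged */
	return vl_len;
}
#endif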
5681
5682 /*
5683 * wm_nq_start: [ifnet interface function]
5684 *
5685 * Start packet transmission on the interface for NEWQUEUE devices
5686 */
5687 static void
5688 wm_nq_start(struct ifnet *ifp)
5689 {
5690 struct wm_softc *sc = ifp->if_softc;
5691
5692 WM_TX_LOCK(sc);
5693 if (!sc->sc_stopping)
5694 wm_nq_start_locked(ifp);
5695 WM_TX_UNLOCK(sc);
5696 }
5697
5698 static void
5699 wm_nq_start_locked(struct ifnet *ifp)
5700 {
5701 struct wm_softc *sc = ifp->if_softc;
5702 struct mbuf *m0;
5703 struct m_tag *mtag;
5704 struct wm_txsoft *txs;
5705 bus_dmamap_t dmamap;
5706 int error, nexttx, lasttx = -1, seg, segs_needed;
5707 bool do_csum, sent;
5708
5709 KASSERT(WM_TX_LOCKED(sc));
5710
5711 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5712 return;
5713
5714 sent = false;
5715
5716 /*
5717 * Loop through the send queue, setting up transmit descriptors
5718 * until we drain the queue, or use up all available transmit
5719 * descriptors.
5720 */
5721 for (;;) {
5722 m0 = NULL;
5723
5724 /* Get a work queue entry. */
5725 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5726 wm_txeof(sc);
5727 if (sc->sc_txsfree == 0) {
5728 DPRINTF(WM_DEBUG_TX,
5729 ("%s: TX: no free job descriptors\n",
5730 device_xname(sc->sc_dev)));
5731 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5732 break;
5733 }
5734 }
5735
5736 /* Grab a packet off the queue. */
5737 IFQ_DEQUEUE(&ifp->if_snd, m0);
5738 if (m0 == NULL)
5739 break;
5740
5741 DPRINTF(WM_DEBUG_TX,
5742 ("%s: TX: have packet to transmit: %p\n",
5743 device_xname(sc->sc_dev), m0));
5744
5745 txs = &sc->sc_txsoft[sc->sc_txsnext];
5746 dmamap = txs->txs_dmamap;
5747
5748 /*
5749 * Load the DMA map. If this fails, the packet either
5750 * didn't fit in the allotted number of segments, or we
5751 * were short on resources. For the too-many-segments
5752 * case, we simply report an error and drop the packet,
5753 * since we can't sanely copy a jumbo packet to a single
5754 * buffer.
5755 */
5756 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5757 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5758 if (error) {
5759 if (error == EFBIG) {
5760 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5761 log(LOG_ERR, "%s: Tx packet consumes too many "
5762 "DMA segments, dropping...\n",
5763 device_xname(sc->sc_dev));
5764 wm_dump_mbuf_chain(sc, m0);
5765 m_freem(m0);
5766 continue;
5767 }
5768 /* Short on resources, just stop for now. */
5769 DPRINTF(WM_DEBUG_TX,
5770 ("%s: TX: dmamap load failed: %d\n",
5771 device_xname(sc->sc_dev), error));
5772 break;
5773 }
5774
5775 segs_needed = dmamap->dm_nsegs;
5776
5777 /*
5778 * Ensure we have enough descriptors free to describe
5779 * the packet. Note, we always reserve one descriptor
5780 * at the end of the ring due to the semantics of the
5781 * TDT register, plus one more in the event we need
5782 * to load offload context.
5783 */
5784 if (segs_needed > sc->sc_txfree - 2) {
5785 /*
5786 * Not enough free descriptors to transmit this
5787 * packet. We haven't committed anything yet,
5788 * so just unload the DMA map, put the packet
5789 			 * back on the queue, and punt. Notify the upper
5790 * layer that there are no more slots left.
5791 */
5792 DPRINTF(WM_DEBUG_TX,
5793 ("%s: TX: need %d (%d) descriptors, have %d\n",
5794 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5795 segs_needed, sc->sc_txfree - 1));
5796 ifp->if_flags |= IFF_OACTIVE;
5797 bus_dmamap_unload(sc->sc_dmat, dmamap);
5798 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5799 break;
5800 }
5801
5802 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5803
5804 DPRINTF(WM_DEBUG_TX,
5805 ("%s: TX: packet has %d (%d) DMA segments\n",
5806 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5807
5808 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5809
5810 /*
5811 * Store a pointer to the packet so that we can free it
5812 * later.
5813 *
5814 * Initially, we consider the number of descriptors the
5815 		 * packet uses to be the number of DMA segments. This may be
5816 * incremented by 1 if we do checksum offload (a descriptor
5817 * is used to set the checksum context).
5818 */
5819 txs->txs_mbuf = m0;
5820 txs->txs_firstdesc = sc->sc_txnext;
5821 txs->txs_ndesc = segs_needed;
5822
5823 /* Set up offload parameters for this packet. */
5824 uint32_t cmdlen, fields, dcmdlen;
5825 if (m0->m_pkthdr.csum_flags &
5826 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5827 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5828 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5829 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5830 &do_csum) != 0) {
5831 /* Error message already displayed. */
5832 bus_dmamap_unload(sc->sc_dmat, dmamap);
5833 continue;
5834 }
5835 } else {
5836 do_csum = false;
5837 cmdlen = 0;
5838 fields = 0;
5839 }
5840
5841 /* Sync the DMA map. */
5842 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5843 BUS_DMASYNC_PREWRITE);
5844
5845 /* Initialize the first transmit descriptor. */
5846 nexttx = sc->sc_txnext;
5847 if (!do_csum) {
5848 /* setup a legacy descriptor */
5849 wm_set_dma_addr(
5850 &sc->sc_txdescs[nexttx].wtx_addr,
5851 dmamap->dm_segs[0].ds_addr);
5852 sc->sc_txdescs[nexttx].wtx_cmdlen =
5853 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5854 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5855 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5856 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5857 NULL) {
5858 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5859 htole32(WTX_CMD_VLE);
5860 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5861 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5862 } else {
5863 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5864 }
5865 dcmdlen = 0;
5866 } else {
5867 /* setup an advanced data descriptor */
5868 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5869 htole64(dmamap->dm_segs[0].ds_addr);
5870 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5871 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5872 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
5873 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5874 htole32(fields);
5875 DPRINTF(WM_DEBUG_TX,
5876 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5877 device_xname(sc->sc_dev), nexttx,
5878 (uint64_t)dmamap->dm_segs[0].ds_addr));
5879 DPRINTF(WM_DEBUG_TX,
5880 ("\t 0x%08x%08x\n", fields,
5881 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5882 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5883 }
5884
5885 lasttx = nexttx;
5886 nexttx = WM_NEXTTX(sc, nexttx);
5887 /*
5888 		 * Fill in the next descriptors. The legacy and advanced
5889 		 * formats are the same from here on.
5890 */
5891 for (seg = 1; seg < dmamap->dm_nsegs;
5892 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5893 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5894 htole64(dmamap->dm_segs[seg].ds_addr);
5895 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5896 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5897 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5898 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5899 lasttx = nexttx;
5900
5901 DPRINTF(WM_DEBUG_TX,
5902 ("%s: TX: desc %d: %#" PRIx64 ", "
5903 "len %#04zx\n",
5904 device_xname(sc->sc_dev), nexttx,
5905 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5906 dmamap->dm_segs[seg].ds_len));
5907 }
5908
5909 KASSERT(lasttx != -1);
5910
5911 /*
5912 * Set up the command byte on the last descriptor of
5913 * the packet. If we're in the interrupt delay window,
5914 * delay the interrupt.
5915 */
5916 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5917 (NQTX_CMD_EOP | NQTX_CMD_RS));
5918 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5919 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5920
5921 txs->txs_lastdesc = lasttx;
5922
5923 DPRINTF(WM_DEBUG_TX,
5924 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5925 device_xname(sc->sc_dev),
5926 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5927
5928 /* Sync the descriptors we're using. */
5929 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5930 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5931
5932 /* Give the packet to the chip. */
5933 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5934 sent = true;
5935
5936 DPRINTF(WM_DEBUG_TX,
5937 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5938
5939 DPRINTF(WM_DEBUG_TX,
5940 ("%s: TX: finished transmitting packet, job %d\n",
5941 device_xname(sc->sc_dev), sc->sc_txsnext));
5942
5943 /* Advance the tx pointer. */
5944 sc->sc_txfree -= txs->txs_ndesc;
5945 sc->sc_txnext = nexttx;
5946
5947 sc->sc_txsfree--;
5948 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5949
5950 /* Pass the packet to any BPF listeners. */
5951 bpf_mtap(ifp, m0);
5952 }
5953
5954 if (m0 != NULL) {
5955 ifp->if_flags |= IFF_OACTIVE;
5956 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5957 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5958 m_freem(m0);
5959 }
5960
5961 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5962 /* No more slots; notify upper layer. */
5963 ifp->if_flags |= IFF_OACTIVE;
5964 }
5965
5966 if (sent) {
5967 /* Set a watchdog timer in case the chip flakes out. */
5968 ifp->if_timer = 5;
5969 }
5970 }
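/*
 * Editor's sketch (not part of the driver) of the free-descriptor test
 * above: one ring slot always stays unused so that writing TDT never
 * makes it equal to TDH with a full ring, and a second slot is reserved
 * in case an offload context descriptor must be loaded.
 */
#if 0	/* illustrative example only */
static bool
tx_ring_has_room(int txfree, int segs_needed)
{
	/* 1 slot for TDT semantics + 1 for a context descriptor */
	return segs_needed <= txfree - 2;
}
#endif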
5971
5972 /* Interrupt */
5973
5974 /*
5975 * wm_txeof:
5976 *
5977 * Helper; handle transmit interrupts.
5978 */
5979 static int
5980 wm_txeof(struct wm_softc *sc)
5981 {
5982 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5983 struct wm_txsoft *txs;
5984 bool processed = false;
5985 int count = 0;
5986 int i;
5987 uint8_t status;
5988
5989 if (sc->sc_stopping)
5990 return 0;
5991
5992 ifp->if_flags &= ~IFF_OACTIVE;
5993
5994 /*
5995 * Go through the Tx list and free mbufs for those
5996 * frames which have been transmitted.
5997 */
5998 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5999 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
6000 txs = &sc->sc_txsoft[i];
6001
6002 DPRINTF(WM_DEBUG_TX,
6003 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
6004
6005 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
6006 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6007
6008 status =
6009 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
6010 if ((status & WTX_ST_DD) == 0) {
6011 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
6012 BUS_DMASYNC_PREREAD);
6013 break;
6014 }
6015
6016 processed = true;
6017 count++;
6018 DPRINTF(WM_DEBUG_TX,
6019 ("%s: TX: job %d done: descs %d..%d\n",
6020 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
6021 txs->txs_lastdesc));
6022
6023 /*
6024 * XXX We should probably be using the statistics
6025 * XXX registers, but I don't know if they exist
6026 * XXX on chips before the i82544.
6027 */
6028
6029 #ifdef WM_EVENT_COUNTERS
6030 if (status & WTX_ST_TU)
6031 WM_EVCNT_INCR(&sc->sc_ev_tu);
6032 #endif /* WM_EVENT_COUNTERS */
6033
6034 if (status & (WTX_ST_EC|WTX_ST_LC)) {
6035 ifp->if_oerrors++;
6036 if (status & WTX_ST_LC)
6037 log(LOG_WARNING, "%s: late collision\n",
6038 device_xname(sc->sc_dev));
6039 else if (status & WTX_ST_EC) {
6040 ifp->if_collisions += 16;
6041 log(LOG_WARNING, "%s: excessive collisions\n",
6042 device_xname(sc->sc_dev));
6043 }
6044 } else
6045 ifp->if_opackets++;
6046
6047 sc->sc_txfree += txs->txs_ndesc;
6048 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
6049 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6050 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
6051 m_freem(txs->txs_mbuf);
6052 txs->txs_mbuf = NULL;
6053 }
6054
6055 /* Update the dirty transmit buffer pointer. */
6056 sc->sc_txsdirty = i;
6057 DPRINTF(WM_DEBUG_TX,
6058 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
6059
6060 if (count != 0)
6061 rnd_add_uint32(&sc->rnd_source, count);
6062
6063 /*
6064 * If there are no more pending transmissions, cancel the watchdog
6065 * timer.
6066 */
6067 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
6068 ifp->if_timer = 0;
6069
6070 return processed;
6071 }
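/*
 * Editor's sketch (not part of the driver) of the DD-bit handshake in
 * wm_txeof() above: when the done bit is still clear, the last descriptor
 * is handed back to the device with a PREREAD sync so the next poll reads
 * fresh DMA data instead of a stale cached copy.
 */
#if 0	/* illustrative example only */
	status = sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
	if ((status & WTX_ST_DD) == 0) {
		/* not done yet: re-arm the sync and retry on a later call */
		WM_CDTXSYNC(sc, txs->txs_lastdesc, 1, BUS_DMASYNC_PREREAD);
		break;
	}
#endif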
6072
6073 /*
6074 * wm_rxeof:
6075 *
6076 * Helper; handle receive interrupts.
6077 */
6078 static void
6079 wm_rxeof(struct wm_softc *sc)
6080 {
6081 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6082 struct wm_rxsoft *rxs;
6083 struct mbuf *m;
6084 int i, len;
6085 int count = 0;
6086 uint8_t status, errors;
6087 uint16_t vlantag;
6088
6089 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
6090 rxs = &sc->sc_rxsoft[i];
6091
6092 DPRINTF(WM_DEBUG_RX,
6093 ("%s: RX: checking descriptor %d\n",
6094 device_xname(sc->sc_dev), i));
6095
6096 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6097
6098 status = sc->sc_rxdescs[i].wrx_status;
6099 errors = sc->sc_rxdescs[i].wrx_errors;
6100 len = le16toh(sc->sc_rxdescs[i].wrx_len);
6101 vlantag = sc->sc_rxdescs[i].wrx_special;
6102
6103 if ((status & WRX_ST_DD) == 0) {
6104 /* We have processed all of the receive descriptors. */
6105 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
6106 break;
6107 }
6108
6109 count++;
6110 if (__predict_false(sc->sc_rxdiscard)) {
6111 DPRINTF(WM_DEBUG_RX,
6112 ("%s: RX: discarding contents of descriptor %d\n",
6113 device_xname(sc->sc_dev), i));
6114 WM_INIT_RXDESC(sc, i);
6115 if (status & WRX_ST_EOP) {
6116 /* Reset our state. */
6117 DPRINTF(WM_DEBUG_RX,
6118 ("%s: RX: resetting rxdiscard -> 0\n",
6119 device_xname(sc->sc_dev)));
6120 sc->sc_rxdiscard = 0;
6121 }
6122 continue;
6123 }
6124
6125 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6126 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
6127
6128 m = rxs->rxs_mbuf;
6129
6130 /*
6131 * Add a new receive buffer to the ring, unless of
6132 * course the length is zero. Treat the latter as a
6133 * failed mapping.
6134 */
6135 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
6136 /*
6137 * Failed, throw away what we've done so
6138 * far, and discard the rest of the packet.
6139 */
6140 ifp->if_ierrors++;
6141 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6142 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
6143 WM_INIT_RXDESC(sc, i);
6144 if ((status & WRX_ST_EOP) == 0)
6145 sc->sc_rxdiscard = 1;
6146 if (sc->sc_rxhead != NULL)
6147 m_freem(sc->sc_rxhead);
6148 WM_RXCHAIN_RESET(sc);
6149 DPRINTF(WM_DEBUG_RX,
6150 ("%s: RX: Rx buffer allocation failed, "
6151 "dropping packet%s\n", device_xname(sc->sc_dev),
6152 sc->sc_rxdiscard ? " (discard)" : ""));
6153 continue;
6154 }
6155
6156 m->m_len = len;
6157 sc->sc_rxlen += len;
6158 DPRINTF(WM_DEBUG_RX,
6159 ("%s: RX: buffer at %p len %d\n",
6160 device_xname(sc->sc_dev), m->m_data, len));
6161
6162 /* If this is not the end of the packet, keep looking. */
6163 if ((status & WRX_ST_EOP) == 0) {
6164 WM_RXCHAIN_LINK(sc, m);
6165 DPRINTF(WM_DEBUG_RX,
6166 ("%s: RX: not yet EOP, rxlen -> %d\n",
6167 device_xname(sc->sc_dev), sc->sc_rxlen));
6168 continue;
6169 }
6170
6171 /*
6172 		 * Okay, we have the entire packet now. The chip is
6173 		 * configured to include the FCS except on I350, I354 and
6174 		 * I21[01] (not all chips can be configured to strip it),
6175 		 * so we need to trim it. We may also need to adjust the
6176 		 * length of the previous mbuf in the chain if the current
6177 		 * mbuf is too short.
6178 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
6179 		 * register is always set on I350, so we don't trim it.
6180 */
6181 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
6182 && (sc->sc_type != WM_T_I210)
6183 && (sc->sc_type != WM_T_I211)) {
6184 if (m->m_len < ETHER_CRC_LEN) {
6185 sc->sc_rxtail->m_len
6186 -= (ETHER_CRC_LEN - m->m_len);
6187 m->m_len = 0;
6188 } else
6189 m->m_len -= ETHER_CRC_LEN;
6190 len = sc->sc_rxlen - ETHER_CRC_LEN;
6191 } else
6192 len = sc->sc_rxlen;
6193
6194 WM_RXCHAIN_LINK(sc, m);
6195
6196 *sc->sc_rxtailp = NULL;
6197 m = sc->sc_rxhead;
6198
6199 WM_RXCHAIN_RESET(sc);
6200
6201 DPRINTF(WM_DEBUG_RX,
6202 ("%s: RX: have entire packet, len -> %d\n",
6203 device_xname(sc->sc_dev), len));
6204
6205 /* If an error occurred, update stats and drop the packet. */
6206 if (errors &
6207 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
6208 if (errors & WRX_ER_SE)
6209 log(LOG_WARNING, "%s: symbol error\n",
6210 device_xname(sc->sc_dev));
6211 else if (errors & WRX_ER_SEQ)
6212 log(LOG_WARNING, "%s: receive sequence error\n",
6213 device_xname(sc->sc_dev));
6214 else if (errors & WRX_ER_CE)
6215 log(LOG_WARNING, "%s: CRC error\n",
6216 device_xname(sc->sc_dev));
6217 m_freem(m);
6218 continue;
6219 }
6220
6221 /* No errors. Receive the packet. */
6222 m->m_pkthdr.rcvif = ifp;
6223 m->m_pkthdr.len = len;
6224
6225 /*
6226 * If VLANs are enabled, VLAN packets have been unwrapped
6227 * for us. Associate the tag with the packet.
6228 */
6229 		/* XXX should check for i350 and i354 */
6230 if ((status & WRX_ST_VP) != 0) {
6231 VLAN_INPUT_TAG(ifp, m,
6232 le16toh(vlantag),
6233 continue);
6234 }
6235
6236 /* Set up checksum info for this packet. */
6237 if ((status & WRX_ST_IXSM) == 0) {
6238 if (status & WRX_ST_IPCS) {
6239 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
6240 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
6241 if (errors & WRX_ER_IPE)
6242 m->m_pkthdr.csum_flags |=
6243 M_CSUM_IPv4_BAD;
6244 }
6245 if (status & WRX_ST_TCPCS) {
6246 /*
6247 * Note: we don't know if this was TCP or UDP,
6248 * so we just set both bits, and expect the
6249 * upper layers to deal.
6250 */
6251 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
6252 m->m_pkthdr.csum_flags |=
6253 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6254 M_CSUM_TCPv6 | M_CSUM_UDPv6;
6255 if (errors & WRX_ER_TCPE)
6256 m->m_pkthdr.csum_flags |=
6257 M_CSUM_TCP_UDP_BAD;
6258 }
6259 }
6260
6261 ifp->if_ipackets++;
6262
6263 WM_RX_UNLOCK(sc);
6264
6265 /* Pass this up to any BPF listeners. */
6266 bpf_mtap(ifp, m);
6267
6268 /* Pass it on. */
6269 (*ifp->if_input)(ifp, m);
6270
6271 WM_RX_LOCK(sc);
6272
6273 if (sc->sc_stopping)
6274 break;
6275 }
6276
6277 /* Update the receive pointer. */
6278 sc->sc_rxptr = i;
6279 if (count != 0)
6280 rnd_add_uint32(&sc->rnd_source, count);
6281
6282 DPRINTF(WM_DEBUG_RX,
6283 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
6284 }
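/*
 * Editor's worked example (not part of the driver) for the FCS trim in
 * wm_rxeof() above.  With ETHER_CRC_LEN == 4, if the final mbuf carries
 * only part of the CRC, the previous mbuf must give up the rest:
 *
 *	m->m_len == 2: rxtail loses 2 bytes, m->m_len becomes 0
 *	m->m_len >= 4: only m loses 4 bytes
 */
#if 0	/* illustrative example only */
	if (m->m_len < ETHER_CRC_LEN) {
		sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
		m->m_len = 0;
	} else
		m->m_len -= ETHER_CRC_LEN;
#endif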
6285
6286 /*
6287 * wm_linkintr_gmii:
6288 *
6289 * Helper; handle link interrupts for GMII.
6290 */
6291 static void
6292 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
6293 {
6294
6295 KASSERT(WM_TX_LOCKED(sc));
6296
6297 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6298 __func__));
6299
6300 if (icr & ICR_LSC) {
6301 DPRINTF(WM_DEBUG_LINK,
6302 ("%s: LINK: LSC -> mii_pollstat\n",
6303 device_xname(sc->sc_dev)));
6304 mii_pollstat(&sc->sc_mii);
6305 if (sc->sc_type == WM_T_82543) {
6306 int miistatus, active;
6307
6308 /*
6309 			 * With 82543, we need to force the MAC's
6310 			 * speed and duplex to match the PHY's speed
6311 			 * and duplex configuration.
6312 */
6313 miistatus = sc->sc_mii.mii_media_status;
6314
6315 if (miistatus & IFM_ACTIVE) {
6316 active = sc->sc_mii.mii_media_active;
6317 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6318 switch (IFM_SUBTYPE(active)) {
6319 case IFM_10_T:
6320 sc->sc_ctrl |= CTRL_SPEED_10;
6321 break;
6322 case IFM_100_TX:
6323 sc->sc_ctrl |= CTRL_SPEED_100;
6324 break;
6325 case IFM_1000_T:
6326 sc->sc_ctrl |= CTRL_SPEED_1000;
6327 break;
6328 default:
6329 /*
6330 * fiber?
6331 					 * Should not enter here.
6332 */
6333 printf("unknown media (%x)\n",
6334 active);
6335 break;
6336 }
6337 if (active & IFM_FDX)
6338 sc->sc_ctrl |= CTRL_FD;
6339 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6340 }
6341 } else if ((sc->sc_type == WM_T_ICH8)
6342 && (sc->sc_phytype == WMPHY_IGP_3)) {
6343 wm_kmrn_lock_loss_workaround_ich8lan(sc);
6344 } else if (sc->sc_type == WM_T_PCH) {
6345 wm_k1_gig_workaround_hv(sc,
6346 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
6347 }
6348
6349 if ((sc->sc_phytype == WMPHY_82578)
6350 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
6351 == IFM_1000_T)) {
6352
6353 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
6354 delay(200*1000); /* XXX too big */
6355
6356 /* Link stall fix for link up */
6357 wm_gmii_hv_writereg(sc->sc_dev, 1,
6358 HV_MUX_DATA_CTRL,
6359 HV_MUX_DATA_CTRL_GEN_TO_MAC
6360 | HV_MUX_DATA_CTRL_FORCE_SPEED);
6361 wm_gmii_hv_writereg(sc->sc_dev, 1,
6362 HV_MUX_DATA_CTRL,
6363 HV_MUX_DATA_CTRL_GEN_TO_MAC);
6364 }
6365 }
6366 } else if (icr & ICR_RXSEQ) {
6367 DPRINTF(WM_DEBUG_LINK,
6368 		    ("%s: LINK: Receive sequence error\n",
6369 device_xname(sc->sc_dev)));
6370 }
6371 }
6372
6373 /*
6374 * wm_linkintr_tbi:
6375 *
6376 * Helper; handle link interrupts for TBI mode.
6377 */
6378 static void
6379 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
6380 {
6381 uint32_t status;
6382
6383 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6384 __func__));
6385
6386 status = CSR_READ(sc, WMREG_STATUS);
6387 if (icr & ICR_LSC) {
6388 if (status & STATUS_LU) {
6389 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
6390 device_xname(sc->sc_dev),
6391 (status & STATUS_FD) ? "FDX" : "HDX"));
6392 /*
6393 * NOTE: CTRL will update TFCE and RFCE automatically,
6394 * so we should update sc->sc_ctrl
6395 */
6396
6397 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6398 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6399 sc->sc_fcrtl &= ~FCRTL_XONE;
6400 if (status & STATUS_FD)
6401 sc->sc_tctl |=
6402 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6403 else
6404 sc->sc_tctl |=
6405 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6406 if (sc->sc_ctrl & CTRL_TFCE)
6407 sc->sc_fcrtl |= FCRTL_XONE;
6408 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6409 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6410 WMREG_OLD_FCRTL : WMREG_FCRTL,
6411 sc->sc_fcrtl);
6412 sc->sc_tbi_linkup = 1;
6413 } else {
6414 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
6415 device_xname(sc->sc_dev)));
6416 sc->sc_tbi_linkup = 0;
6417 }
6418 /* Update LED */
6419 wm_tbi_serdes_set_linkled(sc);
6420 } else if (icr & ICR_RXSEQ) {
6421 DPRINTF(WM_DEBUG_LINK,
6422 ("%s: LINK: Receive sequence error\n",
6423 device_xname(sc->sc_dev)));
6424 }
6425 }
6426
6427 /*
6428 * wm_linkintr_serdes:
6429 *
6430  *	Helper; handle link interrupts for SERDES mode.
6431 */
6432 static void
6433 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
6434 {
6435 struct mii_data *mii = &sc->sc_mii;
6436 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6437 uint32_t pcs_adv, pcs_lpab, reg;
6438
6439 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6440 __func__));
6441
6442 if (icr & ICR_LSC) {
6443 /* Check PCS */
6444 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6445 if ((reg & PCS_LSTS_LINKOK) != 0) {
6446 mii->mii_media_status |= IFM_ACTIVE;
6447 sc->sc_tbi_linkup = 1;
6448 } else {
6449 mii->mii_media_status |= IFM_NONE;
6450 sc->sc_tbi_linkup = 0;
6451 wm_tbi_serdes_set_linkled(sc);
6452 return;
6453 }
6454 mii->mii_media_active |= IFM_1000_SX;
6455 if ((reg & PCS_LSTS_FDX) != 0)
6456 mii->mii_media_active |= IFM_FDX;
6457 else
6458 mii->mii_media_active |= IFM_HDX;
6459 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6460 /* Check flow */
6461 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6462 if ((reg & PCS_LSTS_AN_COMP) == 0) {
6463 DPRINTF(WM_DEBUG_LINK,
6464 ("XXX LINKOK but not ACOMP\n"));
6465 return;
6466 }
6467 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
6468 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
6469 DPRINTF(WM_DEBUG_LINK,
6470 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
6471 if ((pcs_adv & TXCW_SYM_PAUSE)
6472 && (pcs_lpab & TXCW_SYM_PAUSE)) {
6473 mii->mii_media_active |= IFM_FLOW
6474 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
6475 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
6476 && (pcs_adv & TXCW_ASYM_PAUSE)
6477 && (pcs_lpab & TXCW_SYM_PAUSE)
6478 && (pcs_lpab & TXCW_ASYM_PAUSE))
6479 mii->mii_media_active |= IFM_FLOW
6480 | IFM_ETH_TXPAUSE;
6481 else if ((pcs_adv & TXCW_SYM_PAUSE)
6482 && (pcs_adv & TXCW_ASYM_PAUSE)
6483 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
6484 && (pcs_lpab & TXCW_ASYM_PAUSE))
6485 mii->mii_media_active |= IFM_FLOW
6486 | IFM_ETH_RXPAUSE;
6487 }
6488 /* Update LED */
6489 wm_tbi_serdes_set_linkled(sc);
6490 } else {
6491 DPRINTF(WM_DEBUG_LINK,
6492 ("%s: LINK: Receive sequence error\n",
6493 device_xname(sc->sc_dev)));
6494 }
6495 }
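/*
 * Editor's sketch (not part of the driver): the flow-control resolution
 * above follows the IEEE 802.3 Annex 28B pause rules.  Restated as a
 * small pure function over the local and link-partner SYM/ASYM pause
 * advertisement bits:
 */
#if 0	/* illustrative example only */
static int
resolve_pause(bool adv_sym, bool adv_asym, bool lp_sym, bool lp_asym)
{
	if (adv_sym && lp_sym)
		return IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
	if (!adv_sym && adv_asym && lp_sym && lp_asym)
		return IFM_FLOW | IFM_ETH_TXPAUSE;
	if (adv_sym && adv_asym && !lp_sym && lp_asym)
		return IFM_FLOW | IFM_ETH_RXPAUSE;
	return 0;	/* no pause in either direction */
}
#endif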
6496
6497 /*
6498 * wm_linkintr:
6499 *
6500 * Helper; handle link interrupts.
6501 */
6502 static void
6503 wm_linkintr(struct wm_softc *sc, uint32_t icr)
6504 {
6505
6506 if (sc->sc_flags & WM_F_HAS_MII)
6507 wm_linkintr_gmii(sc, icr);
6508 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
6509 && (sc->sc_type >= WM_T_82575))
6510 wm_linkintr_serdes(sc, icr);
6511 else
6512 wm_linkintr_tbi(sc, icr);
6513 }
6514
6515 /*
6516 * wm_intr_legacy:
6517 *
6518 * Interrupt service routine for INTx and MSI.
6519 */
6520 static int
6521 wm_intr_legacy(void *arg)
6522 {
6523 struct wm_softc *sc = arg;
6524 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6525 uint32_t icr, rndval = 0;
6526 int handled = 0;
6527
6528 DPRINTF(WM_DEBUG_TX,
6529 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
6530 while (1 /* CONSTCOND */) {
6531 icr = CSR_READ(sc, WMREG_ICR);
6532 if ((icr & sc->sc_icr) == 0)
6533 break;
6534 if (rndval == 0)
6535 rndval = icr;
6536
6537 WM_RX_LOCK(sc);
6538
6539 if (sc->sc_stopping) {
6540 WM_RX_UNLOCK(sc);
6541 break;
6542 }
6543
6544 handled = 1;
6545
6546 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6547 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
6548 DPRINTF(WM_DEBUG_RX,
6549 ("%s: RX: got Rx intr 0x%08x\n",
6550 device_xname(sc->sc_dev),
6551 icr & (ICR_RXDMT0|ICR_RXT0)));
6552 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6553 }
6554 #endif
6555 wm_rxeof(sc);
6556
6557 WM_RX_UNLOCK(sc);
6558 WM_TX_LOCK(sc);
6559
6560 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6561 if (icr & ICR_TXDW) {
6562 DPRINTF(WM_DEBUG_TX,
6563 ("%s: TX: got TXDW interrupt\n",
6564 device_xname(sc->sc_dev)));
6565 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6566 }
6567 #endif
6568 wm_txeof(sc);
6569
6570 if (icr & (ICR_LSC|ICR_RXSEQ)) {
6571 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6572 wm_linkintr(sc, icr);
6573 }
6574
6575 WM_TX_UNLOCK(sc);
6576
6577 if (icr & ICR_RXO) {
6578 #if defined(WM_DEBUG)
6579 log(LOG_WARNING, "%s: Receive overrun\n",
6580 device_xname(sc->sc_dev));
6581 #endif /* defined(WM_DEBUG) */
6582 }
6583 }
6584
6585 rnd_add_uint32(&sc->rnd_source, rndval);
6586
6587 if (handled) {
6588 /* Try to get more packets going. */
6589 ifp->if_start(ifp);
6590 }
6591
6592 return handled;
6593 }
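/*
 * Editor's sketch (not part of the driver): on these chips a read of ICR
 * clears the asserted cause bits, so the service loop above simply
 * re-reads ICR until none of the causes this driver enabled remain set.
 */
#if 0	/* illustrative example only */
	for (;;) {
		icr = CSR_READ(sc, WMREG_ICR);	/* read-to-clear */
		if ((icr & sc->sc_icr) == 0)
			break;			/* nothing left for us */
		/* ... service rx/tx/link causes ... */
	}
#endif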
6594
6595 #ifdef WM_MSI_MSIX
6596 /*
6597 * wm_txintr_msix:
6598 *
6599 * Interrupt service routine for TX complete interrupt for MSI-X.
6600 */
6601 static int
6602 wm_txintr_msix(void *arg)
6603 {
6604 struct wm_softc *sc = arg;
6605 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6606 int handled = 0;
6607
6608 DPRINTF(WM_DEBUG_TX,
6609 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
6610
6611 if (sc->sc_type == WM_T_82574)
6612 CSR_WRITE(sc, WMREG_IMC, ICR_TXQ0); /* 82574 only */
6613 else if (sc->sc_type == WM_T_82575)
6614 CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE0);
6615 else
6616 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_TXINTR_IDX);
6617
6618 WM_TX_LOCK(sc);
6619
6620 if (sc->sc_stopping)
6621 goto out;
6622
6623 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6624 handled = wm_txeof(sc);
6625
6626 out:
6627 WM_TX_UNLOCK(sc);
6628
6629 if (sc->sc_type == WM_T_82574)
6630 CSR_WRITE(sc, WMREG_IMS, ICR_TXQ0); /* 82574 only */
6631 else if (sc->sc_type == WM_T_82575)
6632 CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE0);
6633 else
6634 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_TXINTR_IDX);
6635
6636 if (handled) {
6637 /* Try to get more packets going. */
6638 ifp->if_start(ifp);
6639 }
6640
6641 return handled;
6642 }
6643
6644 /*
6645 * wm_rxintr_msix:
6646 *
6647 * Interrupt service routine for RX interrupt for MSI-X.
6648 */
6649 static int
6650 wm_rxintr_msix(void *arg)
6651 {
6652 struct wm_softc *sc = arg;
6653
6654 DPRINTF(WM_DEBUG_TX,
6655 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
6656
6657 if (sc->sc_type == WM_T_82574)
6658 CSR_WRITE(sc, WMREG_IMC, ICR_RXQ0); /* 82574 only */
6659 else if (sc->sc_type == WM_T_82575)
6660 CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE0);
6661 else
6662 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_RXINTR_IDX);
6663
6664 WM_RX_LOCK(sc);
6665
6666 if (sc->sc_stopping)
6667 goto out;
6668
6669 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6670 wm_rxeof(sc);
6671
6672 out:
6673 WM_RX_UNLOCK(sc);
6674
6675 if (sc->sc_type == WM_T_82574)
6676 CSR_WRITE(sc, WMREG_IMS, ICR_RXQ0);
6677 else if (sc->sc_type == WM_T_82575)
6678 CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE0);
6679 else
6680 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_RXINTR_IDX);
6681
6682 return 1;
6683 }
6684
6685 /*
6686 * wm_linkintr_msix:
6687 *
6688 * Interrupt service routine for link status change for MSI-X.
6689 */
6690 static int
6691 wm_linkintr_msix(void *arg)
6692 {
6693 struct wm_softc *sc = arg;
6694
6695 DPRINTF(WM_DEBUG_TX,
6696 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
6697
6698 if (sc->sc_type == WM_T_82574)
6699 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER); /* 82574 only */
6700 else if (sc->sc_type == WM_T_82575)
6701 CSR_WRITE(sc, WMREG_EIMC, EITR_OTHER);
6702 else
6703 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_LINKINTR_IDX);
6704 WM_TX_LOCK(sc);
6705 if (sc->sc_stopping)
6706 goto out;
6707
6708 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6709 wm_linkintr(sc, ICR_LSC);
6710
6711 out:
6712 WM_TX_UNLOCK(sc);
6713
6714 if (sc->sc_type == WM_T_82574)
6715 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
6716 else if (sc->sc_type == WM_T_82575)
6717 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
6718 else
6719 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_LINKINTR_IDX);
6720
6721 return 1;
6722 }
6723 #endif /* WM_MSI_MSIX */
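/*
 * Editor's sketch (not part of the driver): all three MSI-X handlers
 * above share one pattern -- mask only this vector's cause, service it
 * under the appropriate lock, then unmask, so the other vectors keep
 * running concurrently.  WMREG_EIMC/WMREG_EIMS are the per-vector mask
 * clear/set registers used above; "my_vector" is a placeholder.
 */
#if 0	/* illustrative example only */
	CSR_WRITE(sc, WMREG_EIMC, 1 << my_vector);	/* mask this vector */
	WM_TX_LOCK(sc);
	/* ... service ... */
	WM_TX_UNLOCK(sc);
	CSR_WRITE(sc, WMREG_EIMS, 1 << my_vector);	/* unmask */
#endif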
6724
6725 /*
6726 * Media related.
6727 * GMII, SGMII, TBI (and SERDES)
6728 */
6729
6730 /* Common */
6731
6732 /*
6733 * wm_tbi_serdes_set_linkled:
6734 *
6735 * Update the link LED on TBI and SERDES devices.
6736 */
6737 static void
6738 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
6739 {
6740
6741 if (sc->sc_tbi_linkup)
6742 sc->sc_ctrl |= CTRL_SWDPIN(0);
6743 else
6744 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6745
6746 /* 82540 or newer devices are active low */
6747 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6748
6749 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6750 }
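/*
 * Editor's sketch (not part of the driver) of the polarity handling
 * above: the LED pin is active-low on 82540 and newer, so the "link up"
 * bit is computed once and then conditionally inverted with an XOR
 * rather than duplicating the if/else.
 */
#if 0	/* illustrative example only */
	uint32_t bit = linkup ? CTRL_SWDPIN(0) : 0;
	bit ^= active_low ? CTRL_SWDPIN(0) : 0;	/* invert for active-low */
#endif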
6751
6752 /* GMII related */
6753
6754 /*
6755 * wm_gmii_reset:
6756 *
6757 * Reset the PHY.
6758 */
6759 static void
6760 wm_gmii_reset(struct wm_softc *sc)
6761 {
6762 uint32_t reg;
6763 int rv;
6764
6765 /* get phy semaphore */
6766 switch (sc->sc_type) {
6767 case WM_T_82571:
6768 case WM_T_82572:
6769 case WM_T_82573:
6770 case WM_T_82574:
6771 case WM_T_82583:
6772 /* XXX should get sw semaphore, too */
6773 rv = wm_get_swsm_semaphore(sc);
6774 break;
6775 case WM_T_82575:
6776 case WM_T_82576:
6777 case WM_T_82580:
6778 case WM_T_I350:
6779 case WM_T_I354:
6780 case WM_T_I210:
6781 case WM_T_I211:
6782 case WM_T_80003:
6783 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6784 break;
6785 case WM_T_ICH8:
6786 case WM_T_ICH9:
6787 case WM_T_ICH10:
6788 case WM_T_PCH:
6789 case WM_T_PCH2:
6790 case WM_T_PCH_LPT:
6791 rv = wm_get_swfwhw_semaphore(sc);
6792 break;
6793 default:
6794 		/* nothing to do */
6795 rv = 0;
6796 break;
6797 }
6798 if (rv != 0) {
6799 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6800 __func__);
6801 return;
6802 }
6803
6804 switch (sc->sc_type) {
6805 case WM_T_82542_2_0:
6806 case WM_T_82542_2_1:
6807 /* null */
6808 break;
6809 case WM_T_82543:
6810 /*
6811 	 * With 82543, we need to force the MAC's speed and duplex to
6812 	 * match the PHY's speed and duplex configuration.
6813 * In addition, we need to perform a hardware reset on the PHY
6814 * to take it out of reset.
6815 */
6816 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6817 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6818
6819 /* The PHY reset pin is active-low. */
6820 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6821 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6822 CTRL_EXT_SWDPIN(4));
6823 reg |= CTRL_EXT_SWDPIO(4);
6824
6825 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6826 CSR_WRITE_FLUSH(sc);
6827 delay(10*1000);
6828
6829 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6830 CSR_WRITE_FLUSH(sc);
6831 delay(150);
6832 #if 0
6833 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6834 #endif
6835 delay(20*1000); /* XXX extra delay to get PHY ID? */
6836 break;
6837 case WM_T_82544: /* reset 10000us */
6838 case WM_T_82540:
6839 case WM_T_82545:
6840 case WM_T_82545_3:
6841 case WM_T_82546:
6842 case WM_T_82546_3:
6843 case WM_T_82541:
6844 case WM_T_82541_2:
6845 case WM_T_82547:
6846 case WM_T_82547_2:
6847 case WM_T_82571: /* reset 100us */
6848 case WM_T_82572:
6849 case WM_T_82573:
6850 case WM_T_82574:
6851 case WM_T_82575:
6852 case WM_T_82576:
6853 case WM_T_82580:
6854 case WM_T_I350:
6855 case WM_T_I354:
6856 case WM_T_I210:
6857 case WM_T_I211:
6858 case WM_T_82583:
6859 case WM_T_80003:
6860 /* generic reset */
6861 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6862 CSR_WRITE_FLUSH(sc);
6863 delay(20000);
6864 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6865 CSR_WRITE_FLUSH(sc);
6866 delay(20000);
6867
6868 if ((sc->sc_type == WM_T_82541)
6869 || (sc->sc_type == WM_T_82541_2)
6870 || (sc->sc_type == WM_T_82547)
6871 || (sc->sc_type == WM_T_82547_2)) {
6872 			/* workarounds for igp are done in igp_reset() */
6873 /* XXX add code to set LED after phy reset */
6874 }
6875 break;
6876 case WM_T_ICH8:
6877 case WM_T_ICH9:
6878 case WM_T_ICH10:
6879 case WM_T_PCH:
6880 case WM_T_PCH2:
6881 case WM_T_PCH_LPT:
6882 /* generic reset */
6883 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6884 CSR_WRITE_FLUSH(sc);
6885 delay(100);
6886 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6887 CSR_WRITE_FLUSH(sc);
6888 delay(150);
6889 break;
6890 default:
6891 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6892 __func__);
6893 break;
6894 }
6895
6896 /* release PHY semaphore */
6897 switch (sc->sc_type) {
6898 case WM_T_82571:
6899 case WM_T_82572:
6900 case WM_T_82573:
6901 case WM_T_82574:
6902 case WM_T_82583:
6903 /* XXX should put sw semaphore, too */
6904 wm_put_swsm_semaphore(sc);
6905 break;
6906 case WM_T_82575:
6907 case WM_T_82576:
6908 case WM_T_82580:
6909 case WM_T_I350:
6910 case WM_T_I354:
6911 case WM_T_I210:
6912 case WM_T_I211:
6913 case WM_T_80003:
6914 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6915 break;
6916 case WM_T_ICH8:
6917 case WM_T_ICH9:
6918 case WM_T_ICH10:
6919 case WM_T_PCH:
6920 case WM_T_PCH2:
6921 case WM_T_PCH_LPT:
6922 wm_put_swfwhw_semaphore(sc);
6923 break;
6924 default:
6925 		/* nothing to do */
6926 rv = 0;
6927 break;
6928 }
6929
6930 /* get_cfg_done */
6931 wm_get_cfg_done(sc);
6932
6933 /* extra setup */
6934 switch (sc->sc_type) {
6935 case WM_T_82542_2_0:
6936 case WM_T_82542_2_1:
6937 case WM_T_82543:
6938 case WM_T_82544:
6939 case WM_T_82540:
6940 case WM_T_82545:
6941 case WM_T_82545_3:
6942 case WM_T_82546:
6943 case WM_T_82546_3:
6944 case WM_T_82541_2:
6945 case WM_T_82547_2:
6946 case WM_T_82571:
6947 case WM_T_82572:
6948 case WM_T_82573:
6949 case WM_T_82574:
6950 case WM_T_82575:
6951 case WM_T_82576:
6952 case WM_T_82580:
6953 case WM_T_I350:
6954 case WM_T_I354:
6955 case WM_T_I210:
6956 case WM_T_I211:
6957 case WM_T_82583:
6958 case WM_T_80003:
6959 /* null */
6960 break;
6961 case WM_T_82541:
6962 case WM_T_82547:
6963 		/* XXX Configure activity LED after PHY reset */
6964 break;
6965 case WM_T_ICH8:
6966 case WM_T_ICH9:
6967 case WM_T_ICH10:
6968 case WM_T_PCH:
6969 case WM_T_PCH2:
6970 case WM_T_PCH_LPT:
6971 		/* Allow time for h/w to get to a quiescent state after reset */
6972 delay(10*1000);
6973
6974 if (sc->sc_type == WM_T_PCH)
6975 wm_hv_phy_workaround_ich8lan(sc);
6976
6977 if (sc->sc_type == WM_T_PCH2)
6978 wm_lv_phy_workaround_ich8lan(sc);
6979
6980 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6981 /*
6982 * dummy read to clear the phy wakeup bit after lcd
6983 * reset
6984 */
6985 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6986 }
6987
6988 /*
6989 		 * XXX Configure the LCD with the extended configuration region
6990 * in NVM
6991 */
6992
6993 /* Configure the LCD with the OEM bits in NVM */
6994 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6995 || (sc->sc_type == WM_T_PCH_LPT)) {
6996 /*
6997 * Disable LPLU.
6998 * XXX It seems that 82567 has LPLU, too.
6999 */
7000 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
7001 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
7002 reg |= HV_OEM_BITS_ANEGNOW;
7003 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
7004 }
7005 break;
7006 default:
7007 panic("%s: unknown type\n", __func__);
7008 break;
7009 }
7010 }
7011
7012 /*
7013 * wm_get_phy_id_82575:
7014 *
7015 * Return PHY ID. Return -1 if it failed.
7016 */
7017 static int
7018 wm_get_phy_id_82575(struct wm_softc *sc)
7019 {
7020 uint32_t reg;
7021 int phyid = -1;
7022
7023 /* XXX */
7024 if ((sc->sc_flags & WM_F_SGMII) == 0)
7025 return -1;
7026
7027 if (wm_sgmii_uses_mdio(sc)) {
7028 switch (sc->sc_type) {
7029 case WM_T_82575:
7030 case WM_T_82576:
7031 reg = CSR_READ(sc, WMREG_MDIC);
7032 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
7033 break;
7034 case WM_T_82580:
7035 case WM_T_I350:
7036 case WM_T_I354:
7037 case WM_T_I210:
7038 case WM_T_I211:
7039 reg = CSR_READ(sc, WMREG_MDICNFG);
7040 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
7041 break;
7042 default:
7043 return -1;
7044 }
7045 }
7046
7047 return phyid;
7048 }
7049
7050
7051 /*
7052 * wm_gmii_mediainit:
7053 *
7054 * Initialize media for use on 1000BASE-T devices.
7055 */
7056 static void
7057 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
7058 {
7059 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7060 struct mii_data *mii = &sc->sc_mii;
7061 uint32_t reg;
7062
7063 /* We have GMII. */
7064 sc->sc_flags |= WM_F_HAS_MII;
7065
7066 if (sc->sc_type == WM_T_80003)
7067 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7068 else
7069 sc->sc_tipg = TIPG_1000T_DFLT;
7070
7071 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
7072 if ((sc->sc_type == WM_T_82580)
7073 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
7074 || (sc->sc_type == WM_T_I211)) {
7075 reg = CSR_READ(sc, WMREG_PHPM);
7076 reg &= ~PHPM_GO_LINK_D;
7077 CSR_WRITE(sc, WMREG_PHPM, reg);
7078 }
7079
7080 /*
7081 * Let the chip set speed/duplex on its own based on
7082 * signals from the PHY.
7083 * XXXbouyer - I'm not sure this is right for the 80003,
7084 * the em driver only sets CTRL_SLU here - but it seems to work.
7085 */
7086 sc->sc_ctrl |= CTRL_SLU;
7087 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7088
7089 /* Initialize our media structures and probe the GMII. */
7090 mii->mii_ifp = ifp;
7091
7092 /*
7093 * Determine the PHY access method.
7094 *
7095 * For SGMII, use SGMII specific method.
7096 *
7097 * For some devices, we can determine the PHY access method
7098 * from sc_type.
7099 *
7100 * For ICH and PCH variants, it's difficult to determine the PHY
7101 * access method by sc_type, so use the PCI product ID for some
7102 * devices.
7103 * For other ICH8 variants, try to use igp's method. If the PHY
7104  * can't be detected, then use bm's method.
7105 */
7106 switch (prodid) {
7107 case PCI_PRODUCT_INTEL_PCH_M_LM:
7108 case PCI_PRODUCT_INTEL_PCH_M_LC:
7109 /* 82577 */
7110 sc->sc_phytype = WMPHY_82577;
7111 break;
7112 case PCI_PRODUCT_INTEL_PCH_D_DM:
7113 case PCI_PRODUCT_INTEL_PCH_D_DC:
7114 /* 82578 */
7115 sc->sc_phytype = WMPHY_82578;
7116 break;
7117 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
7118 case PCI_PRODUCT_INTEL_PCH2_LV_V:
7119 /* 82579 */
7120 sc->sc_phytype = WMPHY_82579;
7121 break;
7122 case PCI_PRODUCT_INTEL_82801I_BM:
7123 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
7124 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
7125 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
7126 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
7127 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
7128 /* 82567 */
7129 sc->sc_phytype = WMPHY_BM;
7130 mii->mii_readreg = wm_gmii_bm_readreg;
7131 mii->mii_writereg = wm_gmii_bm_writereg;
7132 break;
7133 default:
7134 if (((sc->sc_flags & WM_F_SGMII) != 0)
7135 		    && !wm_sgmii_uses_mdio(sc)) {
7136 /* SGMII */
7137 mii->mii_readreg = wm_sgmii_readreg;
7138 mii->mii_writereg = wm_sgmii_writereg;
7139 } else if (sc->sc_type >= WM_T_80003) {
7140 /* 80003 */
7141 mii->mii_readreg = wm_gmii_i80003_readreg;
7142 mii->mii_writereg = wm_gmii_i80003_writereg;
7143 } else if (sc->sc_type >= WM_T_I210) {
7144 /* I210 and I211 */
7145 mii->mii_readreg = wm_gmii_gs40g_readreg;
7146 mii->mii_writereg = wm_gmii_gs40g_writereg;
7147 } else if (sc->sc_type >= WM_T_82580) {
7148 /* 82580, I350 and I354 */
7149 sc->sc_phytype = WMPHY_82580;
7150 mii->mii_readreg = wm_gmii_82580_readreg;
7151 mii->mii_writereg = wm_gmii_82580_writereg;
7152 } else if (sc->sc_type >= WM_T_82544) {
7153 			/* 8254[014567], 8257[1234] and 82583 */
7154 mii->mii_readreg = wm_gmii_i82544_readreg;
7155 mii->mii_writereg = wm_gmii_i82544_writereg;
7156 } else {
7157 mii->mii_readreg = wm_gmii_i82543_readreg;
7158 mii->mii_writereg = wm_gmii_i82543_writereg;
7159 }
7160 break;
7161 }
7162 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
7163 /* All PCH* use _hv_ */
7164 mii->mii_readreg = wm_gmii_hv_readreg;
7165 mii->mii_writereg = wm_gmii_hv_writereg;
7166 }
7167 mii->mii_statchg = wm_gmii_statchg;
7168
7169 wm_gmii_reset(sc);
7170
7171 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7172 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
7173 wm_gmii_mediastatus);
7174
7175 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
7176 || (sc->sc_type == WM_T_82580)
7177 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
7178 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
7179 if ((sc->sc_flags & WM_F_SGMII) == 0) {
7180 /* Attach only one port */
7181 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
7182 MII_OFFSET_ANY, MIIF_DOPAUSE);
7183 } else {
7184 int i, id;
7185 uint32_t ctrl_ext;
7186
7187 id = wm_get_phy_id_82575(sc);
7188 if (id != -1) {
7189 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
7190 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
7191 }
7192 if ((id == -1)
7193 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
7194 /* Power on sgmii phy if it is disabled */
7195 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7196 CSR_WRITE(sc, WMREG_CTRL_EXT,
7197 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
7198 CSR_WRITE_FLUSH(sc);
7199 delay(300*1000); /* XXX too long */
7200
7201 /* from 1 to 8 */
7202 for (i = 1; i < 8; i++)
7203 mii_attach(sc->sc_dev, &sc->sc_mii,
7204 0xffffffff, i, MII_OFFSET_ANY,
7205 MIIF_DOPAUSE);
7206
7207 /* restore previous sfp cage power state */
7208 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7209 }
7210 }
7211 } else {
7212 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7213 MII_OFFSET_ANY, MIIF_DOPAUSE);
7214 }
7215
7216 /*
7217 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
7218 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
7219 */
7220 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
7221 (LIST_FIRST(&mii->mii_phys) == NULL)) {
7222 wm_set_mdio_slow_mode_hv(sc);
7223 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7224 MII_OFFSET_ANY, MIIF_DOPAUSE);
7225 }
7226
7227 /*
7228 * (For ICH8 variants)
7229 * If PHY detection failed, use BM's r/w function and retry.
7230 */
7231 if (LIST_FIRST(&mii->mii_phys) == NULL) {
7232 /* if failed, retry with *_bm_* */
7233 mii->mii_readreg = wm_gmii_bm_readreg;
7234 mii->mii_writereg = wm_gmii_bm_writereg;
7235
7236 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7237 MII_OFFSET_ANY, MIIF_DOPAUSE);
7238 }
7239
7240 if (LIST_FIRST(&mii->mii_phys) == NULL) {
7241 		/* No PHY was found */
7242 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
7243 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
7244 sc->sc_phytype = WMPHY_NONE;
7245 } else {
7246 /*
7247 * PHY Found!
7248 * Check PHY type.
7249 */
7250 uint32_t model;
7251 struct mii_softc *child;
7252
7253 child = LIST_FIRST(&mii->mii_phys);
7254 if (device_is_a(child->mii_dev, "igphy")) {
7255 struct igphy_softc *isc = (struct igphy_softc *)child;
7256
7257 model = isc->sc_mii.mii_mpd_model;
7258 if (model == MII_MODEL_yyINTEL_I82566)
7259 sc->sc_phytype = WMPHY_IGP_3;
7260 }
7261
7262 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
7263 }
7264 }
7265
7266 /*
7267 * wm_gmii_mediachange: [ifmedia interface function]
7268 *
7269 * Set hardware to newly-selected media on a 1000BASE-T device.
7270 */
7271 static int
7272 wm_gmii_mediachange(struct ifnet *ifp)
7273 {
7274 struct wm_softc *sc = ifp->if_softc;
7275 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7276 int rc;
7277
7278 if ((ifp->if_flags & IFF_UP) == 0)
7279 return 0;
7280
7281 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7282 sc->sc_ctrl |= CTRL_SLU;
7283 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7284 || (sc->sc_type > WM_T_82543)) {
7285 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
7286 } else {
7287 sc->sc_ctrl &= ~CTRL_ASDE;
7288 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7289 if (ife->ifm_media & IFM_FDX)
7290 sc->sc_ctrl |= CTRL_FD;
7291 switch (IFM_SUBTYPE(ife->ifm_media)) {
7292 case IFM_10_T:
7293 sc->sc_ctrl |= CTRL_SPEED_10;
7294 break;
7295 case IFM_100_TX:
7296 sc->sc_ctrl |= CTRL_SPEED_100;
7297 break;
7298 case IFM_1000_T:
7299 sc->sc_ctrl |= CTRL_SPEED_1000;
7300 break;
7301 default:
7302 panic("wm_gmii_mediachange: bad media 0x%x",
7303 ife->ifm_media);
7304 }
7305 }
7306 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7307 if (sc->sc_type <= WM_T_82543)
7308 wm_gmii_reset(sc);
7309
7310 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
7311 return 0;
7312 return rc;
7313 }
7314
7315 /*
7316 * wm_gmii_mediastatus: [ifmedia interface function]
7317 *
7318 * Get the current interface media status on a 1000BASE-T device.
7319 */
7320 static void
7321 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7322 {
7323 struct wm_softc *sc = ifp->if_softc;
7324
7325 ether_mediastatus(ifp, ifmr);
7326 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
7327 | sc->sc_flowflags;
7328 }
7329
7330 #define MDI_IO CTRL_SWDPIN(2)
7331 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
7332 #define MDI_CLK CTRL_SWDPIN(3)
7333
7334 static void
7335 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
7336 {
7337 uint32_t i, v;
7338
7339 v = CSR_READ(sc, WMREG_CTRL);
7340 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
7341 v |= MDI_DIR | CTRL_SWDPIO(3);
7342
7343 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
7344 if (data & i)
7345 v |= MDI_IO;
7346 else
7347 v &= ~MDI_IO;
7348 CSR_WRITE(sc, WMREG_CTRL, v);
7349 CSR_WRITE_FLUSH(sc);
7350 delay(10);
7351 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7352 CSR_WRITE_FLUSH(sc);
7353 delay(10);
7354 CSR_WRITE(sc, WMREG_CTRL, v);
7355 CSR_WRITE_FLUSH(sc);
7356 delay(10);
7357 }
7358 }
7359
7360 static uint32_t
7361 wm_i82543_mii_recvbits(struct wm_softc *sc)
7362 {
7363 uint32_t v, i, data = 0;
7364
7365 v = CSR_READ(sc, WMREG_CTRL);
7366 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
7367 v |= CTRL_SWDPIO(3);
7368
7369 CSR_WRITE(sc, WMREG_CTRL, v);
7370 CSR_WRITE_FLUSH(sc);
7371 delay(10);
7372 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7373 CSR_WRITE_FLUSH(sc);
7374 delay(10);
7375 CSR_WRITE(sc, WMREG_CTRL, v);
7376 CSR_WRITE_FLUSH(sc);
7377 delay(10);
7378
7379 for (i = 0; i < 16; i++) {
7380 data <<= 1;
7381 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7382 CSR_WRITE_FLUSH(sc);
7383 delay(10);
7384 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
7385 data |= 1;
7386 CSR_WRITE(sc, WMREG_CTRL, v);
7387 CSR_WRITE_FLUSH(sc);
7388 delay(10);
7389 }
7390
7391 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7392 CSR_WRITE_FLUSH(sc);
7393 delay(10);
7394 CSR_WRITE(sc, WMREG_CTRL, v);
7395 CSR_WRITE_FLUSH(sc);
7396 delay(10);
7397
7398 return data;
7399 }
7400
7401 #undef MDI_IO
7402 #undef MDI_DIR
7403 #undef MDI_CLK
7404
7405 /*
7406 * wm_gmii_i82543_readreg: [mii interface function]
7407 *
7408 * Read a PHY register on the GMII (i82543 version).
7409 */
7410 static int
7411 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
7412 {
7413 struct wm_softc *sc = device_private(self);
7414 int rv;
7415
7416 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
7417 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
7418 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
7419 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
7420
7421 DPRINTF(WM_DEBUG_GMII,
7422 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
7423 device_xname(sc->sc_dev), phy, reg, rv));
7424
7425 return rv;
7426 }
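/*
 * Editor's sketch (not part of the driver): the shifts above assemble a
 * standard IEEE 802.3 clause 22 MDIO frame, clocked out MSB first after
 * a 32-bit preamble of ones:
 *
 *	read:  ST(01) OP(10) PHY[4:0] REG[4:0]            -> 14 bits sent,
 *	       then turnaround + 16 data bits are clocked back in
 *	write: ST(01) OP(01) PHY[4:0] REG[4:0] TA(10) DATA[15:0] -> 32 bits
 */
#if 0	/* illustrative example only */
static uint32_t
mdio_read_frame(int phy, int reg)
{
	return (MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10) |
	    (phy << 5) | reg;
}
#endif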
7427
7428 /*
7429 * wm_gmii_i82543_writereg: [mii interface function]
7430 *
7431 * Write a PHY register on the GMII (i82543 version).
7432 */
7433 static void
7434 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
7435 {
7436 struct wm_softc *sc = device_private(self);
7437
7438 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
7439 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
7440 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
7441 (MII_COMMAND_START << 30), 32);
7442 }
7443
7444 /*
7445 * wm_gmii_i82544_readreg: [mii interface function]
7446 *
7447 * Read a PHY register on the GMII.
7448 */
7449 static int
7450 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
7451 {
7452 struct wm_softc *sc = device_private(self);
7453 uint32_t mdic = 0;
7454 int i, rv;
7455
7456 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
7457 MDIC_REGADD(reg));
7458
7459 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7460 mdic = CSR_READ(sc, WMREG_MDIC);
7461 if (mdic & MDIC_READY)
7462 break;
7463 delay(50);
7464 }
7465
7466 if ((mdic & MDIC_READY) == 0) {
7467 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
7468 device_xname(sc->sc_dev), phy, reg);
7469 rv = 0;
7470 } else if (mdic & MDIC_E) {
7471 #if 0 /* This is normal if no PHY is present. */
7472 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
7473 device_xname(sc->sc_dev), phy, reg);
7474 #endif
7475 rv = 0;
7476 } else {
7477 rv = MDIC_DATA(mdic);
7478 if (rv == 0xffff)
7479 rv = 0;
7480 }
7481
7482 return rv;
7483 }
7484
7485 /*
7486 * wm_gmii_i82544_writereg: [mii interface function]
7487 *
7488 * Write a PHY register on the GMII.
7489 */
7490 static void
7491 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
7492 {
7493 struct wm_softc *sc = device_private(self);
7494 uint32_t mdic = 0;
7495 int i;
7496
7497 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
7498 MDIC_REGADD(reg) | MDIC_DATA(val));
7499
7500 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7501 mdic = CSR_READ(sc, WMREG_MDIC);
7502 if (mdic & MDIC_READY)
7503 break;
7504 delay(50);
7505 }
7506
7507 if ((mdic & MDIC_READY) == 0)
7508 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
7509 device_xname(sc->sc_dev), phy, reg);
7510 else if (mdic & MDIC_E)
7511 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
7512 device_xname(sc->sc_dev), phy, reg);
7513 }
7514
7515 /*
7516 * wm_gmii_i80003_readreg: [mii interface function]
7517 *
7518  *	Read a PHY register on the Kumeran interface (80003).
7519  *	This could be handled by the PHY layer if we didn't have to lock the
7520  *	resource ...
7521 */
7522 static int
7523 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
7524 {
7525 struct wm_softc *sc = device_private(self);
7526 int sem;
7527 int rv;
7528
7529 	if (phy != 1) /* only one PHY on the Kumeran bus */
7530 return 0;
7531
7532 sem = swfwphysem[sc->sc_funcid];
7533 if (wm_get_swfw_semaphore(sc, sem)) {
7534 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7535 __func__);
7536 return 0;
7537 }
7538
7539 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7540 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7541 reg >> GG82563_PAGE_SHIFT);
7542 } else {
7543 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7544 reg >> GG82563_PAGE_SHIFT);
7545 }
7546 	/* Wait another 200us to work around a bug with the MDIC ready bit */
7547 delay(200);
7548 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7549 delay(200);
7550
7551 wm_put_swfw_semaphore(sc, sem);
7552 return rv;
7553 }
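
/*
 * Illustrative sketch: GG82563 register arguments encode the page in the
 * bits above GG82563_PAGE_SHIFT, i.e. reg = (page << GG82563_PAGE_SHIFT) |
 * offset.  Assuming a hypothetical register at offset 10 on page 2, a
 * caller would pass:
 *
 *	rv = wm_gmii_i80003_readreg(self, 1,
 *	    (2 << GG82563_PAGE_SHIFT) | 10);
 *
 * which writes 2 to the page-select register and then reads offset 10 on
 * that page.
 */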
7554
7555 /*
7556 * wm_gmii_i80003_writereg: [mii interface function]
7557 *
7558  *	Write a PHY register on the Kumeran interface (80003).
7559  *	This could be handled by the PHY layer if we didn't have to lock the
7560  *	resource ...
7561 */
7562 static void
7563 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
7564 {
7565 struct wm_softc *sc = device_private(self);
7566 int sem;
7567
7568 	if (phy != 1) /* only one PHY on the Kumeran bus */
7569 return;
7570
7571 sem = swfwphysem[sc->sc_funcid];
7572 if (wm_get_swfw_semaphore(sc, sem)) {
7573 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7574 __func__);
7575 return;
7576 }
7577
7578 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7579 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7580 reg >> GG82563_PAGE_SHIFT);
7581 } else {
7582 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7583 reg >> GG82563_PAGE_SHIFT);
7584 }
7585 	/* Wait another 200us to work around a bug with the MDIC ready bit */
7586 delay(200);
7587 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7588 delay(200);
7589
7590 wm_put_swfw_semaphore(sc, sem);
7591 }
7592
7593 /*
7594 * wm_gmii_bm_readreg: [mii interface function]
7595 *
7596  *	Read a PHY register on the BM PHY.
7597  *	This could be handled by the PHY layer if we didn't have to lock the
7598  *	resource ...
7599 */
7600 static int
7601 wm_gmii_bm_readreg(device_t self, int phy, int reg)
7602 {
7603 struct wm_softc *sc = device_private(self);
7604 int sem;
7605 int rv;
7606
7607 sem = swfwphysem[sc->sc_funcid];
7608 if (wm_get_swfw_semaphore(sc, sem)) {
7609 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7610 __func__);
7611 return 0;
7612 }
7613
7614 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7615 if (phy == 1)
7616 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7617 reg);
7618 else
7619 wm_gmii_i82544_writereg(self, phy,
7620 GG82563_PHY_PAGE_SELECT,
7621 reg >> GG82563_PAGE_SHIFT);
7622 }
7623
7624 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7625 wm_put_swfw_semaphore(sc, sem);
7626 return rv;
7627 }
7628
7629 /*
7630 * wm_gmii_bm_writereg: [mii interface function]
7631 *
7632  *	Write a PHY register on the BM PHY.
7633  *	This could be handled by the PHY layer if we didn't have to lock the
7634  *	resource ...
7635 */
7636 static void
7637 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7638 {
7639 struct wm_softc *sc = device_private(self);
7640 int sem;
7641
7642 sem = swfwphysem[sc->sc_funcid];
7643 if (wm_get_swfw_semaphore(sc, sem)) {
7644 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7645 __func__);
7646 return;
7647 }
7648
7649 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7650 if (phy == 1)
7651 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7652 reg);
7653 else
7654 wm_gmii_i82544_writereg(self, phy,
7655 GG82563_PHY_PAGE_SELECT,
7656 reg >> GG82563_PAGE_SHIFT);
7657 }
7658
7659 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7660 wm_put_swfw_semaphore(sc, sem);
7661 }
7662
7663 static void
7664 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7665 {
7666 struct wm_softc *sc = device_private(self);
7667 uint16_t regnum = BM_PHY_REG_NUM(offset);
7668 uint16_t wuce;
7669
7670 /* XXX Gig must be disabled for MDIO accesses to page 800 */
7671 if (sc->sc_type == WM_T_PCH) {
7672 		/* XXX The e1000 driver does nothing here... why? */
7673 }
7674
7675 /* Set page 769 */
7676 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7677 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7678
7679 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7680
7681 wuce &= ~BM_WUC_HOST_WU_BIT;
7682 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7683 wuce | BM_WUC_ENABLE_BIT);
7684
7685 /* Select page 800 */
7686 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7687 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7688
7689 /* Write page 800 */
7690 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7691
7692 if (rd)
7693 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7694 else
7695 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7696
7697 /* Set page 769 */
7698 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7699 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7700
7701 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7702 }
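
/*
 * Access sketch: the sequence above is page-select 769, save and enable
 * the wakeup-register window (BM_WUC_ENABLE_REG), page-select 800, write
 * the register number through the address opcode, move the data through
 * the data opcode, and finally restore the original enable bits.  A
 * hypothetical read of a page-800 register "offset" would be:
 *
 *	int16_t v;
 *	wm_access_phy_wakeup_reg_bm(self, offset, &v, 1);
 */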
7703
7704 /*
7705 * wm_gmii_hv_readreg: [mii interface function]
7706 *
7707  *	Read a PHY register on the HV (PCH family) PHY.
7708  *	This could be handled by the PHY layer if we didn't have to lock the
7709  *	resource ...
7710 */
7711 static int
7712 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7713 {
7714 struct wm_softc *sc = device_private(self);
7715 uint16_t page = BM_PHY_REG_PAGE(reg);
7716 uint16_t regnum = BM_PHY_REG_NUM(reg);
7717 uint16_t val;
7718 int rv;
7719
7720 if (wm_get_swfwhw_semaphore(sc)) {
7721 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7722 __func__);
7723 return 0;
7724 }
7725
7726 /* XXX Workaround failure in MDIO access while cable is disconnected */
7727 if (sc->sc_phytype == WMPHY_82577) {
7728 /* XXX must write */
7729 }
7730
7731 	/* Page 800 works differently from the rest, so it has its own function */
7732 if (page == BM_WUC_PAGE) {
7733 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7734 		return val;
7735 }
7736
7737 /*
7738 	 * Pages below 768 work differently from the rest and would need
7739 	 * their own access function (not implemented here)
7740 */
7741 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7742 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7743 		return 0;
7744 }
7745
7746 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7747 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7748 page << BME1000_PAGE_SHIFT);
7749 }
7750
7751 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7752 wm_put_swfwhw_semaphore(sc);
7753 return rv;
7754 }
7755
7756 /*
7757 * wm_gmii_hv_writereg: [mii interface function]
7758 *
7759  *	Write a PHY register on the HV (PCH family) PHY.
7760  *	This could be handled by the PHY layer if we didn't have to lock the
7761  *	resource ...
7762 */
7763 static void
7764 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7765 {
7766 struct wm_softc *sc = device_private(self);
7767 uint16_t page = BM_PHY_REG_PAGE(reg);
7768 uint16_t regnum = BM_PHY_REG_NUM(reg);
7769
7770 if (wm_get_swfwhw_semaphore(sc)) {
7771 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7772 __func__);
7773 return;
7774 }
7775
7776 /* XXX Workaround failure in MDIO access while cable is disconnected */
7777
7778 	/* Page 800 works differently from the rest, so it has its own function */
7779 if (page == BM_WUC_PAGE) {
7780 uint16_t tmp;
7781
7782 tmp = val;
7783 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7784 		return;
7785 }
7786
7787 /*
7788 	 * Pages below 768 work differently from the rest and would need
7789 	 * their own access function (not implemented here)
7790 */
7791 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7792 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7793 		return;
7794 }
7795
7796 /*
7797 * XXX Workaround MDIO accesses being disabled after entering IEEE
7798 * Power Down (whenever bit 11 of the PHY control register is set)
7799 */
7800
7801 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7802 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7803 page << BME1000_PAGE_SHIFT);
7804 }
7805
7806 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7807 wm_put_swfwhw_semaphore(sc);
7808 }
7809
7810 /*
7811 * wm_gmii_82580_readreg: [mii interface function]
7812 *
7813 * Read a PHY register on the 82580 and I350.
7814 * This could be handled by the PHY layer if we didn't have to lock the
7815  *	resource ...
7816 */
7817 static int
7818 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7819 {
7820 struct wm_softc *sc = device_private(self);
7821 int sem;
7822 int rv;
7823
7824 sem = swfwphysem[sc->sc_funcid];
7825 if (wm_get_swfw_semaphore(sc, sem)) {
7826 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7827 __func__);
7828 return 0;
7829 }
7830
7831 rv = wm_gmii_i82544_readreg(self, phy, reg);
7832
7833 wm_put_swfw_semaphore(sc, sem);
7834 return rv;
7835 }
7836
7837 /*
7838 * wm_gmii_82580_writereg: [mii interface function]
7839 *
7840 * Write a PHY register on the 82580 and I350.
7841 * This could be handled by the PHY layer if we didn't have to lock the
7842  *	resource ...
7843 */
7844 static void
7845 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7846 {
7847 struct wm_softc *sc = device_private(self);
7848 int sem;
7849
7850 sem = swfwphysem[sc->sc_funcid];
7851 if (wm_get_swfw_semaphore(sc, sem)) {
7852 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7853 __func__);
7854 return;
7855 }
7856
7857 wm_gmii_i82544_writereg(self, phy, reg, val);
7858
7859 wm_put_swfw_semaphore(sc, sem);
7860 }
7861
7862 /*
7863 * wm_gmii_gs40g_readreg: [mii interface function]
7864 *
7865  *	Read a PHY register on the I210 and I211.
7866  *	This could be handled by the PHY layer if we didn't have to lock the
7867  *	resource ...
7868 */
7869 static int
7870 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
7871 {
7872 struct wm_softc *sc = device_private(self);
7873 int sem;
7874 int page, offset;
7875 int rv;
7876
7877 /* Acquire semaphore */
7878 sem = swfwphysem[sc->sc_funcid];
7879 if (wm_get_swfw_semaphore(sc, sem)) {
7880 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7881 __func__);
7882 return 0;
7883 }
7884
7885 /* Page select */
7886 page = reg >> GS40G_PAGE_SHIFT;
7887 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
7888
7889 /* Read reg */
7890 offset = reg & GS40G_OFFSET_MASK;
7891 rv = wm_gmii_i82544_readreg(self, phy, offset);
7892
7893 wm_put_swfw_semaphore(sc, sem);
7894 return rv;
7895 }
7896
7897 /*
7898 * wm_gmii_gs40g_writereg: [mii interface function]
7899 *
7900 * Write a PHY register on the I210 and I211.
7901 * This could be handled by the PHY layer if we didn't have to lock the
7902  *	resource ...
7903 */
7904 static void
7905 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
7906 {
7907 struct wm_softc *sc = device_private(self);
7908 int sem;
7909 int page, offset;
7910
7911 /* Acquire semaphore */
7912 sem = swfwphysem[sc->sc_funcid];
7913 if (wm_get_swfw_semaphore(sc, sem)) {
7914 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7915 __func__);
7916 return;
7917 }
7918
7919 /* Page select */
7920 page = reg >> GS40G_PAGE_SHIFT;
7921 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
7922
7923 /* Write reg */
7924 offset = reg & GS40G_OFFSET_MASK;
7925 wm_gmii_i82544_writereg(self, phy, offset, val);
7926
7927 /* Release semaphore */
7928 wm_put_swfw_semaphore(sc, sem);
7929 }
7930
7931 /*
7932 * wm_gmii_statchg: [mii interface function]
7933 *
7934 * Callback from MII layer when media changes.
7935 */
7936 static void
7937 wm_gmii_statchg(struct ifnet *ifp)
7938 {
7939 struct wm_softc *sc = ifp->if_softc;
7940 struct mii_data *mii = &sc->sc_mii;
7941
7942 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7943 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7944 sc->sc_fcrtl &= ~FCRTL_XONE;
7945
7946 /*
7947 * Get flow control negotiation result.
7948 */
7949 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7950 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7951 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7952 mii->mii_media_active &= ~IFM_ETH_FMASK;
7953 }
7954
7955 if (sc->sc_flowflags & IFM_FLOW) {
7956 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7957 sc->sc_ctrl |= CTRL_TFCE;
7958 sc->sc_fcrtl |= FCRTL_XONE;
7959 }
7960 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7961 sc->sc_ctrl |= CTRL_RFCE;
7962 }
7963
7964 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7965 DPRINTF(WM_DEBUG_LINK,
7966 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7967 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7968 } else {
7969 DPRINTF(WM_DEBUG_LINK,
7970 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7971 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7972 }
7973
7974 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7975 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7976 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7977 : WMREG_FCRTL, sc->sc_fcrtl);
7978 if (sc->sc_type == WM_T_80003) {
7979 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7980 case IFM_1000_T:
7981 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7982 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7983 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7984 break;
7985 default:
7986 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7987 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7988 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7989 break;
7990 }
7991 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7992 }
7993 }
7994
7995 /*
7996 * wm_kmrn_readreg:
7997 *
7998 * Read a kumeran register
7999 */
8000 static int
8001 wm_kmrn_readreg(struct wm_softc *sc, int reg)
8002 {
8003 int rv;
8004
8005 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8006 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8007 aprint_error_dev(sc->sc_dev,
8008 "%s: failed to get semaphore\n", __func__);
8009 return 0;
8010 }
8011 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8012 if (wm_get_swfwhw_semaphore(sc)) {
8013 aprint_error_dev(sc->sc_dev,
8014 "%s: failed to get semaphore\n", __func__);
8015 return 0;
8016 }
8017 }
8018
8019 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8020 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8021 KUMCTRLSTA_REN);
8022 CSR_WRITE_FLUSH(sc);
8023 delay(2);
8024
8025 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
8026
8027 if (sc->sc_flags & WM_F_LOCK_SWFW)
8028 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8029 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8030 wm_put_swfwhw_semaphore(sc);
8031
8032 return rv;
8033 }
8034
8035 /*
8036 * wm_kmrn_writereg:
8037 *
8038 * Write a kumeran register
8039 */
8040 static void
8041 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
8042 {
8043
8044 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8045 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8046 aprint_error_dev(sc->sc_dev,
8047 "%s: failed to get semaphore\n", __func__);
8048 return;
8049 }
8050 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8051 if (wm_get_swfwhw_semaphore(sc)) {
8052 aprint_error_dev(sc->sc_dev,
8053 "%s: failed to get semaphore\n", __func__);
8054 return;
8055 }
8056 }
8057
8058 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8059 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8060 (val & KUMCTRLSTA_MASK));
8061
8062 if (sc->sc_flags & WM_F_LOCK_SWFW)
8063 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8064 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8065 wm_put_swfwhw_semaphore(sc);
8066 }
8067
8068 /* SGMII related */
8069
8070 /*
8071 * wm_sgmii_uses_mdio
8072 *
8073 * Check whether the transaction is to the internal PHY or the external
8074 * MDIO interface. Return true if it's MDIO.
8075 */
8076 static bool
8077 wm_sgmii_uses_mdio(struct wm_softc *sc)
8078 {
8079 uint32_t reg;
8080 bool ismdio = false;
8081
8082 switch (sc->sc_type) {
8083 case WM_T_82575:
8084 case WM_T_82576:
8085 reg = CSR_READ(sc, WMREG_MDIC);
8086 ismdio = ((reg & MDIC_DEST) != 0);
8087 break;
8088 case WM_T_82580:
8089 case WM_T_I350:
8090 case WM_T_I354:
8091 case WM_T_I210:
8092 case WM_T_I211:
8093 reg = CSR_READ(sc, WMREG_MDICNFG);
8094 ismdio = ((reg & MDICNFG_DEST) != 0);
8095 break;
8096 default:
8097 break;
8098 }
8099
8100 return ismdio;
8101 }
8102
8103 /*
8104 * wm_sgmii_readreg: [mii interface function]
8105 *
8106 * Read a PHY register on the SGMII
8107 * This could be handled by the PHY layer if we didn't have to lock the
8108  *	resource ...
8109 */
8110 static int
8111 wm_sgmii_readreg(device_t self, int phy, int reg)
8112 {
8113 struct wm_softc *sc = device_private(self);
8114 uint32_t i2ccmd;
8115 int i, rv;
8116
8117 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8118 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8119 __func__);
8120 return 0;
8121 }
8122
8123 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8124 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8125 | I2CCMD_OPCODE_READ;
8126 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8127
8128 /* Poll the ready bit */
8129 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8130 delay(50);
8131 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8132 if (i2ccmd & I2CCMD_READY)
8133 break;
8134 }
8135 if ((i2ccmd & I2CCMD_READY) == 0)
8136 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
8137 if ((i2ccmd & I2CCMD_ERROR) != 0)
8138 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8139
8140 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
8141
8142 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8143 return rv;
8144 }
8145
8146 /*
8147 * wm_sgmii_writereg: [mii interface function]
8148 *
8149 * Write a PHY register on the SGMII.
8150 * This could be handled by the PHY layer if we didn't have to lock the
8151  *	resource ...
8152 */
8153 static void
8154 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
8155 {
8156 struct wm_softc *sc = device_private(self);
8157 uint32_t i2ccmd;
8158 int i;
8159 int val_swapped;
8160
8161 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8162 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8163 __func__);
8164 return;
8165 }
8166 /* Swap the data bytes for the I2C interface */
8167 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
8168 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8169 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8170 | I2CCMD_OPCODE_WRITE | val_swapped;
8171 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8172
8173 /* Poll the ready bit */
8174 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8175 delay(50);
8176 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8177 if (i2ccmd & I2CCMD_READY)
8178 break;
8179 }
8180 if ((i2ccmd & I2CCMD_READY) == 0)
8181 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
8182 if ((i2ccmd & I2CCMD_ERROR) != 0)
8183 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8184
8185 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8186 }
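
/*
 * Byte-order sketch: the I2CCMD data field is byte-swapped relative to
 * host order, so the 16-bit value is swapped before a write (and
 * symmetrically after a read).  For example:
 *
 *	val         = 0x1234;
 *	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
 *	// val_swapped == 0x3412
 */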
8187
8188 /* TBI related */
8189
8190 /*
8191 * wm_tbi_mediainit:
8192 *
8193 * Initialize media for use on 1000BASE-X devices.
8194 */
8195 static void
8196 wm_tbi_mediainit(struct wm_softc *sc)
8197 {
8198 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8199 const char *sep = "";
8200
8201 if (sc->sc_type < WM_T_82543)
8202 sc->sc_tipg = TIPG_WM_DFLT;
8203 else
8204 sc->sc_tipg = TIPG_LG_DFLT;
8205
8206 sc->sc_tbi_serdes_anegticks = 5;
8207
8208 /* Initialize our media structures */
8209 sc->sc_mii.mii_ifp = ifp;
8210 sc->sc_ethercom.ec_mii = &sc->sc_mii;
8211
8212 if ((sc->sc_type >= WM_T_82575)
8213 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
8214 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
8215 wm_serdes_mediachange, wm_serdes_mediastatus);
8216 else
8217 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
8218 wm_tbi_mediachange, wm_tbi_mediastatus);
8219
8220 /*
8221 * SWD Pins:
8222 *
8223 * 0 = Link LED (output)
8224 * 1 = Loss Of Signal (input)
8225 */
8226 sc->sc_ctrl |= CTRL_SWDPIO(0);
8227
8228 /* XXX Perhaps this is only for TBI */
8229 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
8230 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
8231
8232 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
8233 sc->sc_ctrl &= ~CTRL_LRST;
8234
8235 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8236
8237 #define ADD(ss, mm, dd) \
8238 do { \
8239 aprint_normal("%s%s", sep, ss); \
8240 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
8241 sep = ", "; \
8242 } while (/*CONSTCOND*/0)
8243
8244 aprint_normal_dev(sc->sc_dev, "");
8245
8246 /* Only 82545 is LX */
8247 if (sc->sc_type == WM_T_82545) {
8248 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
8249 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
8250 } else {
8251 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
8252 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
8253 }
8254 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
8255 aprint_normal("\n");
8256
8257 #undef ADD
8258
8259 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
8260 }
8261
8262 /*
8263 * wm_tbi_mediachange: [ifmedia interface function]
8264 *
8265 * Set hardware to newly-selected media on a 1000BASE-X device.
8266 */
8267 static int
8268 wm_tbi_mediachange(struct ifnet *ifp)
8269 {
8270 struct wm_softc *sc = ifp->if_softc;
8271 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8272 uint32_t status;
8273 int i;
8274
8275 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
8276 /* XXX need some work for >= 82571 and < 82575 */
8277 if (sc->sc_type < WM_T_82575)
8278 return 0;
8279 }
8280
8281 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8282 || (sc->sc_type >= WM_T_82575))
8283 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8284
8285 sc->sc_ctrl &= ~CTRL_LRST;
8286 sc->sc_txcw = TXCW_ANE;
8287 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8288 sc->sc_txcw |= TXCW_FD | TXCW_HD;
8289 else if (ife->ifm_media & IFM_FDX)
8290 sc->sc_txcw |= TXCW_FD;
8291 else
8292 sc->sc_txcw |= TXCW_HD;
8293
8294 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
8295 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
8296
8297 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
8298 device_xname(sc->sc_dev), sc->sc_txcw));
8299 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8300 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8301 CSR_WRITE_FLUSH(sc);
8302 delay(1000);
8303
8304 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
8305 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
8306
8307 /*
8308 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the optics
8309 	 * detect a signal; on older chips the pin reads 0 when a signal is present.
8310 */
8311 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
8312 /* Have signal; wait for the link to come up. */
8313 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
8314 delay(10000);
8315 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
8316 break;
8317 }
8318
8319 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
8320 device_xname(sc->sc_dev),i));
8321
8322 status = CSR_READ(sc, WMREG_STATUS);
8323 DPRINTF(WM_DEBUG_LINK,
8324 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
8325 device_xname(sc->sc_dev),status, STATUS_LU));
8326 if (status & STATUS_LU) {
8327 /* Link is up. */
8328 DPRINTF(WM_DEBUG_LINK,
8329 ("%s: LINK: set media -> link up %s\n",
8330 device_xname(sc->sc_dev),
8331 (status & STATUS_FD) ? "FDX" : "HDX"));
8332
8333 /*
8334 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
8335 			 * automatically, so re-read CTRL into sc->sc_ctrl
8336 */
8337 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
8338 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8339 sc->sc_fcrtl &= ~FCRTL_XONE;
8340 if (status & STATUS_FD)
8341 sc->sc_tctl |=
8342 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8343 else
8344 sc->sc_tctl |=
8345 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8346 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
8347 sc->sc_fcrtl |= FCRTL_XONE;
8348 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8349 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
8350 WMREG_OLD_FCRTL : WMREG_FCRTL,
8351 sc->sc_fcrtl);
8352 sc->sc_tbi_linkup = 1;
8353 } else {
8354 if (i == WM_LINKUP_TIMEOUT)
8355 wm_check_for_link(sc);
8356 /* Link is down. */
8357 DPRINTF(WM_DEBUG_LINK,
8358 ("%s: LINK: set media -> link down\n",
8359 device_xname(sc->sc_dev)));
8360 sc->sc_tbi_linkup = 0;
8361 }
8362 } else {
8363 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
8364 device_xname(sc->sc_dev)));
8365 sc->sc_tbi_linkup = 0;
8366 }
8367
8368 wm_tbi_serdes_set_linkled(sc);
8369
8370 return 0;
8371 }
8372
8373 /*
8374 * wm_tbi_mediastatus: [ifmedia interface function]
8375 *
8376 * Get the current interface media status on a 1000BASE-X device.
8377 */
8378 static void
8379 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8380 {
8381 struct wm_softc *sc = ifp->if_softc;
8382 uint32_t ctrl, status;
8383
8384 ifmr->ifm_status = IFM_AVALID;
8385 ifmr->ifm_active = IFM_ETHER;
8386
8387 status = CSR_READ(sc, WMREG_STATUS);
8388 if ((status & STATUS_LU) == 0) {
8389 ifmr->ifm_active |= IFM_NONE;
8390 return;
8391 }
8392
8393 ifmr->ifm_status |= IFM_ACTIVE;
8394 /* Only 82545 is LX */
8395 if (sc->sc_type == WM_T_82545)
8396 ifmr->ifm_active |= IFM_1000_LX;
8397 else
8398 ifmr->ifm_active |= IFM_1000_SX;
8399 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
8400 ifmr->ifm_active |= IFM_FDX;
8401 else
8402 ifmr->ifm_active |= IFM_HDX;
8403 ctrl = CSR_READ(sc, WMREG_CTRL);
8404 if (ctrl & CTRL_RFCE)
8405 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
8406 if (ctrl & CTRL_TFCE)
8407 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
8408 }
8409
8410 /* XXX TBI only */
8411 static int
8412 wm_check_for_link(struct wm_softc *sc)
8413 {
8414 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8415 uint32_t rxcw;
8416 uint32_t ctrl;
8417 uint32_t status;
8418 uint32_t sig;
8419
8420 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
8421 /* XXX need some work for >= 82571 */
8422 if (sc->sc_type >= WM_T_82571) {
8423 sc->sc_tbi_linkup = 1;
8424 return 0;
8425 }
8426 }
8427
8428 rxcw = CSR_READ(sc, WMREG_RXCW);
8429 ctrl = CSR_READ(sc, WMREG_CTRL);
8430 status = CSR_READ(sc, WMREG_STATUS);
8431
8432 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8433
8434 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8435 device_xname(sc->sc_dev), __func__,
8436 ((ctrl & CTRL_SWDPIN(1)) == sig),
8437 ((status & STATUS_LU) != 0),
8438 ((rxcw & RXCW_C) != 0)
8439 ));
8440
8441 /*
8442 * SWDPIN LU RXCW
8443 * 0 0 0
8444 * 0 0 1 (should not happen)
8445 * 0 1 0 (should not happen)
8446 * 0 1 1 (should not happen)
8447 * 1 0 0 Disable autonego and force linkup
8448 * 1 0 1 got /C/ but not linkup yet
8449 * 1 1 0 (linkup)
8450 * 1 1 1 If IFM_AUTO, back to autonego
8451 *
8452 */
8453 if (((ctrl & CTRL_SWDPIN(1)) == sig)
8454 && ((status & STATUS_LU) == 0)
8455 && ((rxcw & RXCW_C) == 0)) {
8456 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8457 __func__));
8458 sc->sc_tbi_linkup = 0;
8459 /* Disable auto-negotiation in the TXCW register */
8460 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8461
8462 /*
8463 * Force link-up and also force full-duplex.
8464 *
8465 		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
8466 		 * automatically, so refresh sc->sc_ctrl from that value
8467 */
8468 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8469 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8470 } else if (((status & STATUS_LU) != 0)
8471 && ((rxcw & RXCW_C) != 0)
8472 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8473 sc->sc_tbi_linkup = 1;
8474 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8475 __func__));
8476 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8477 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8478 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8479 && ((rxcw & RXCW_C) != 0)) {
8480 DPRINTF(WM_DEBUG_LINK, ("/C/"));
8481 } else {
8482 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8483 status));
8484 }
8485
8486 return 0;
8487 }
8488
8489 /*
8490 * wm_tbi_tick:
8491 *
8492 * Check the link on TBI devices.
8493 * This function acts as mii_tick().
8494 */
8495 static void
8496 wm_tbi_tick(struct wm_softc *sc)
8497 {
8498 struct mii_data *mii = &sc->sc_mii;
8499 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8500 uint32_t status;
8501
8502 KASSERT(WM_TX_LOCKED(sc));
8503
8504 status = CSR_READ(sc, WMREG_STATUS);
8505
8506 /* XXX is this needed? */
8507 (void)CSR_READ(sc, WMREG_RXCW);
8508 (void)CSR_READ(sc, WMREG_CTRL);
8509
8510 /* set link status */
8511 if ((status & STATUS_LU) == 0) {
8512 DPRINTF(WM_DEBUG_LINK,
8513 ("%s: LINK: checklink -> down\n",
8514 device_xname(sc->sc_dev)));
8515 sc->sc_tbi_linkup = 0;
8516 } else if (sc->sc_tbi_linkup == 0) {
8517 DPRINTF(WM_DEBUG_LINK,
8518 ("%s: LINK: checklink -> up %s\n",
8519 device_xname(sc->sc_dev),
8520 (status & STATUS_FD) ? "FDX" : "HDX"));
8521 sc->sc_tbi_linkup = 1;
8522 sc->sc_tbi_serdes_ticks = 0;
8523 }
8524
8525 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
8526 goto setled;
8527
8528 if ((status & STATUS_LU) == 0) {
8529 sc->sc_tbi_linkup = 0;
8530 /* If the timer expired, retry autonegotiation */
8531 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8532 && (++sc->sc_tbi_serdes_ticks
8533 >= sc->sc_tbi_serdes_anegticks)) {
8534 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8535 sc->sc_tbi_serdes_ticks = 0;
8536 /*
8537 * Reset the link, and let autonegotiation do
8538 * its thing
8539 */
8540 sc->sc_ctrl |= CTRL_LRST;
8541 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8542 CSR_WRITE_FLUSH(sc);
8543 delay(1000);
8544 sc->sc_ctrl &= ~CTRL_LRST;
8545 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8546 CSR_WRITE_FLUSH(sc);
8547 delay(1000);
8548 CSR_WRITE(sc, WMREG_TXCW,
8549 sc->sc_txcw & ~TXCW_ANE);
8550 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8551 }
8552 }
8553
8554 setled:
8555 wm_tbi_serdes_set_linkled(sc);
8556 }
8557
8558 /* SERDES related */
8559 static void
8560 wm_serdes_power_up_link_82575(struct wm_softc *sc)
8561 {
8562 uint32_t reg;
8563
8564 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
8565 && ((sc->sc_flags & WM_F_SGMII) == 0))
8566 return;
8567
8568 reg = CSR_READ(sc, WMREG_PCS_CFG);
8569 reg |= PCS_CFG_PCS_EN;
8570 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
8571
8572 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8573 reg &= ~CTRL_EXT_SWDPIN(3);
8574 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8575 CSR_WRITE_FLUSH(sc);
8576 }
8577
8578 static int
8579 wm_serdes_mediachange(struct ifnet *ifp)
8580 {
8581 struct wm_softc *sc = ifp->if_softc;
8582 bool pcs_autoneg = true; /* XXX */
8583 uint32_t ctrl_ext, pcs_lctl, reg;
8584
8585 /* XXX Currently, this function is not called on 8257[12] */
8586 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8587 || (sc->sc_type >= WM_T_82575))
8588 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8589
8590 wm_serdes_power_up_link_82575(sc);
8591
8592 sc->sc_ctrl |= CTRL_SLU;
8593
8594 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
8595 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
8596
8597 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8598 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
8599 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
8600 case CTRL_EXT_LINK_MODE_SGMII:
8601 pcs_autoneg = true;
8602 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
8603 break;
8604 case CTRL_EXT_LINK_MODE_1000KX:
8605 pcs_autoneg = false;
8606 /* FALLTHROUGH */
8607 default:
8608 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
8609 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
8610 pcs_autoneg = false;
8611 }
8612 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
8613 | CTRL_FRCFDX;
8614 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
8615 }
8616 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8617
8618 if (pcs_autoneg) {
8619 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
8620 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
8621
8622 reg = CSR_READ(sc, WMREG_PCS_ANADV);
8623 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
8624 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
8625 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
8626 } else
8627 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
8628
8629 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
8630
8631
8632 return 0;
8633 }
8634
8635 static void
8636 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8637 {
8638 struct wm_softc *sc = ifp->if_softc;
8639 struct mii_data *mii = &sc->sc_mii;
8640 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8641 uint32_t pcs_adv, pcs_lpab, reg;
8642
8643 ifmr->ifm_status = IFM_AVALID;
8644 ifmr->ifm_active = IFM_ETHER;
8645
8646 /* Check PCS */
8647 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8648 if ((reg & PCS_LSTS_LINKOK) == 0) {
8649 ifmr->ifm_active |= IFM_NONE;
8650 sc->sc_tbi_linkup = 0;
8651 goto setled;
8652 }
8653
8654 sc->sc_tbi_linkup = 1;
8655 ifmr->ifm_status |= IFM_ACTIVE;
8656 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
8657 if ((reg & PCS_LSTS_FDX) != 0)
8658 ifmr->ifm_active |= IFM_FDX;
8659 else
8660 ifmr->ifm_active |= IFM_HDX;
8661 mii->mii_media_active &= ~IFM_ETH_FMASK;
8662 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
8663 /* Check flow */
8664 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8665 if ((reg & PCS_LSTS_AN_COMP) == 0) {
8666 printf("XXX LINKOK but not ACOMP\n");
8667 goto setled;
8668 }
8669 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
8670 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
8671 printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
8672 if ((pcs_adv & TXCW_SYM_PAUSE)
8673 && (pcs_lpab & TXCW_SYM_PAUSE)) {
8674 mii->mii_media_active |= IFM_FLOW
8675 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
8676 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
8677 && (pcs_adv & TXCW_ASYM_PAUSE)
8678 && (pcs_lpab & TXCW_SYM_PAUSE)
8679 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8680 mii->mii_media_active |= IFM_FLOW
8681 | IFM_ETH_TXPAUSE;
8682 } else if ((pcs_adv & TXCW_SYM_PAUSE)
8683 && (pcs_adv & TXCW_ASYM_PAUSE)
8684 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
8685 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8686 mii->mii_media_active |= IFM_FLOW
8687 | IFM_ETH_RXPAUSE;
8688 } else {
8689 }
8690 }
8691 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8692 | (mii->mii_media_active & IFM_ETH_FMASK);
8693 setled:
8694 wm_tbi_serdes_set_linkled(sc);
8695 }
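
/*
 * The pause resolution above follows IEEE 802.3 Annex 28B: when both
 * sides advertise symmetric pause, flow control runs in both directions;
 * the asymmetric-pause combinations enable TX-only or RX-only pause; any
 * other combination leaves flow control off.
 */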
8696
8697 /*
8698 * wm_serdes_tick:
8699 *
8700 * Check the link on serdes devices.
8701 */
8702 static void
8703 wm_serdes_tick(struct wm_softc *sc)
8704 {
8705 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8706 struct mii_data *mii = &sc->sc_mii;
8707 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8708 uint32_t reg;
8709
8710 KASSERT(WM_TX_LOCKED(sc));
8711
8712 mii->mii_media_status = IFM_AVALID;
8713 mii->mii_media_active = IFM_ETHER;
8714
8715 /* Check PCS */
8716 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8717 if ((reg & PCS_LSTS_LINKOK) != 0) {
8718 mii->mii_media_status |= IFM_ACTIVE;
8719 sc->sc_tbi_linkup = 1;
8720 sc->sc_tbi_serdes_ticks = 0;
8721 mii->mii_media_active |= IFM_1000_SX; /* XXX */
8722 if ((reg & PCS_LSTS_FDX) != 0)
8723 mii->mii_media_active |= IFM_FDX;
8724 else
8725 mii->mii_media_active |= IFM_HDX;
8726 } else {
8727 mii->mii_media_status |= IFM_NONE;
8728 sc->sc_tbi_linkup = 0;
8729 /* If the timer expired, retry autonegotiation */
8730 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8731 && (++sc->sc_tbi_serdes_ticks
8732 >= sc->sc_tbi_serdes_anegticks)) {
8733 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8734 sc->sc_tbi_serdes_ticks = 0;
8735 /* XXX */
8736 wm_serdes_mediachange(ifp);
8737 }
8738 }
8739
8740 wm_tbi_serdes_set_linkled(sc);
8741 }
8742
8743 /* SFP related */
8744
8745 static int
8746 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
8747 {
8748 uint32_t i2ccmd;
8749 int i;
8750
8751 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
8752 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8753
8754 /* Poll the ready bit */
8755 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8756 delay(50);
8757 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8758 if (i2ccmd & I2CCMD_READY)
8759 break;
8760 }
8761 if ((i2ccmd & I2CCMD_READY) == 0)
8762 return -1;
8763 if ((i2ccmd & I2CCMD_ERROR) != 0)
8764 return -1;
8765
8766 *data = i2ccmd & 0x00ff;
8767
8768 return 0;
8769 }
8770
8771 static uint32_t
8772 wm_sfp_get_media_type(struct wm_softc *sc)
8773 {
8774 uint32_t ctrl_ext;
8775 uint8_t val = 0;
8776 int timeout = 3;
8777 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
8778 int rv = -1;
8779
8780 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8781 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
8782 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
8783 CSR_WRITE_FLUSH(sc);
8784
8785 /* Read SFP module data */
8786 while (timeout) {
8787 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
8788 if (rv == 0)
8789 break;
8790 delay(100*1000); /* XXX too big */
8791 timeout--;
8792 }
8793 if (rv != 0)
8794 goto out;
8795 switch (val) {
8796 case SFF_SFP_ID_SFF:
8797 aprint_normal_dev(sc->sc_dev,
8798 "Module/Connector soldered to board\n");
8799 break;
8800 case SFF_SFP_ID_SFP:
8801 aprint_normal_dev(sc->sc_dev, "SFP\n");
8802 break;
8803 case SFF_SFP_ID_UNKNOWN:
8804 goto out;
8805 default:
8806 break;
8807 }
8808
8809 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
8810 if (rv != 0) {
8811 goto out;
8812 }
8813
8814 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
8815 mediatype = WM_MEDIATYPE_SERDES;
8816 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
8817 sc->sc_flags |= WM_F_SGMII;
8818 mediatype = WM_MEDIATYPE_COPPER;
8819 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
8820 sc->sc_flags |= WM_F_SGMII;
8821 mediatype = WM_MEDIATYPE_SERDES;
8822 }
8823
8824 out:
8825 /* Restore I2C interface setting */
8826 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8827
8828 return mediatype;
8829 }
8830 /*
8831 * NVM related.
8832 * Microwire, SPI (w/wo EERD) and Flash.
8833 */
8834
8835 /* Both spi and uwire */
8836
8837 /*
8838 * wm_eeprom_sendbits:
8839 *
8840 * Send a series of bits to the EEPROM.
8841 */
8842 static void
8843 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
8844 {
8845 uint32_t reg;
8846 int x;
8847
8848 reg = CSR_READ(sc, WMREG_EECD);
8849
8850 for (x = nbits; x > 0; x--) {
8851 if (bits & (1U << (x - 1)))
8852 reg |= EECD_DI;
8853 else
8854 reg &= ~EECD_DI;
8855 CSR_WRITE(sc, WMREG_EECD, reg);
8856 CSR_WRITE_FLUSH(sc);
8857 delay(2);
8858 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8859 CSR_WRITE_FLUSH(sc);
8860 delay(2);
8861 CSR_WRITE(sc, WMREG_EECD, reg);
8862 CSR_WRITE_FLUSH(sc);
8863 delay(2);
8864 }
8865 }
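
/*
 * Timing sketch: each bit is presented on EECD_DI and strobed with a
 * low-high-low pulse on EECD_SK at 2us per phase, so clocking out a full
 * 16-bit word takes on the order of 100 microseconds.
 */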
8866
8867 /*
8868 * wm_eeprom_recvbits:
8869 *
8870 * Receive a series of bits from the EEPROM.
8871 */
8872 static void
8873 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
8874 {
8875 uint32_t reg, val;
8876 int x;
8877
8878 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
8879
8880 val = 0;
8881 for (x = nbits; x > 0; x--) {
8882 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8883 CSR_WRITE_FLUSH(sc);
8884 delay(2);
8885 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
8886 val |= (1U << (x - 1));
8887 CSR_WRITE(sc, WMREG_EECD, reg);
8888 CSR_WRITE_FLUSH(sc);
8889 delay(2);
8890 }
8891 *valp = val;
8892 }
8893
8894 /* Microwire */
8895
8896 /*
8897 * wm_nvm_read_uwire:
8898 *
8899  *	Read word(s) from the EEPROM using the Microwire protocol.
8900 */
8901 static int
8902 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8903 {
8904 uint32_t reg, val;
8905 int i;
8906
8907 for (i = 0; i < wordcnt; i++) {
8908 /* Clear SK and DI. */
8909 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
8910 CSR_WRITE(sc, WMREG_EECD, reg);
8911
8912 /*
8913 * XXX: workaround for a bug in qemu-0.12.x and prior
8914 * and Xen.
8915 *
8916 		 * We use this workaround only for the 82540 because
8917 		 * qemu's e1000 acts as an 82540.
8918 */
8919 if (sc->sc_type == WM_T_82540) {
8920 reg |= EECD_SK;
8921 CSR_WRITE(sc, WMREG_EECD, reg);
8922 reg &= ~EECD_SK;
8923 CSR_WRITE(sc, WMREG_EECD, reg);
8924 CSR_WRITE_FLUSH(sc);
8925 delay(2);
8926 }
8927 /* XXX: end of workaround */
8928
8929 /* Set CHIP SELECT. */
8930 reg |= EECD_CS;
8931 CSR_WRITE(sc, WMREG_EECD, reg);
8932 CSR_WRITE_FLUSH(sc);
8933 delay(2);
8934
8935 /* Shift in the READ command. */
8936 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
8937
8938 /* Shift in address. */
8939 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
8940
8941 /* Shift out the data. */
8942 wm_eeprom_recvbits(sc, &val, 16);
8943 data[i] = val & 0xffff;
8944
8945 /* Clear CHIP SELECT. */
8946 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
8947 CSR_WRITE(sc, WMREG_EECD, reg);
8948 CSR_WRITE_FLUSH(sc);
8949 delay(2);
8950 }
8951
8952 return 0;
8953 }
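
/*
 * Illustrative use (sketch): reading the three Ethernet-address words
 * from a Microwire EEPROM would look like:
 *
 *	uint16_t myea[3];
 *	int error = wm_nvm_read_uwire(sc, NVM_OFF_MACADDR, 3, myea);
 *
 * On success myea[0..2] hold the station address.  In practice callers
 * go through wm_nvm_read(), which dispatches to the backend matching the
 * detected NVM type.
 */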
8954
8955 /* SPI */
8956
8957 /*
8958 * Set SPI and FLASH related information from the EECD register.
8959 * For 82541 and 82547, the word size is taken from EEPROM.
8960 */
8961 static int
8962 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
8963 {
8964 int size;
8965 uint32_t reg;
8966 uint16_t data;
8967
8968 reg = CSR_READ(sc, WMREG_EECD);
8969 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
8970
8971 /* Read the size of NVM from EECD by default */
8972 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8973 switch (sc->sc_type) {
8974 case WM_T_82541:
8975 case WM_T_82541_2:
8976 case WM_T_82547:
8977 case WM_T_82547_2:
8978 /* Set dummy value to access EEPROM */
8979 sc->sc_nvm_wordsize = 64;
8980 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
8981 reg = data;
8982 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8983 if (size == 0)
8984 size = 6; /* 64 word size */
8985 else
8986 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
8987 break;
8988 case WM_T_80003:
8989 case WM_T_82571:
8990 case WM_T_82572:
8991 case WM_T_82573: /* SPI case */
8992 case WM_T_82574: /* SPI case */
8993 case WM_T_82583: /* SPI case */
8994 size += NVM_WORD_SIZE_BASE_SHIFT;
8995 if (size > 14)
8996 size = 14;
8997 break;
8998 case WM_T_82575:
8999 case WM_T_82576:
9000 case WM_T_82580:
9001 case WM_T_I350:
9002 case WM_T_I354:
9003 case WM_T_I210:
9004 case WM_T_I211:
9005 size += NVM_WORD_SIZE_BASE_SHIFT;
9006 if (size > 15)
9007 size = 15;
9008 break;
9009 default:
9010 aprint_error_dev(sc->sc_dev,
9011 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
9012 return -1;
9014 }
9015
9016 sc->sc_nvm_wordsize = 1 << size;
9017
9018 return 0;
9019 }
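
/*
 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, matching the
 * 64-word default above): an EECD size field of 2 yields
 * size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256 words, i.e. a
 * 512-byte EEPROM.
 */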
9020
9021 /*
9022 * wm_nvm_ready_spi:
9023 *
9024 * Wait for a SPI EEPROM to be ready for commands.
9025 */
9026 static int
9027 wm_nvm_ready_spi(struct wm_softc *sc)
9028 {
9029 uint32_t val;
9030 int usec;
9031
9032 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
9033 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
9034 wm_eeprom_recvbits(sc, &val, 8);
9035 if ((val & SPI_SR_RDY) == 0)
9036 break;
9037 }
9038 if (usec >= SPI_MAX_RETRIES) {
9039 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
9040 return 1;
9041 }
9042 return 0;
9043 }
9044
9045 /*
9046 * wm_nvm_read_spi:
9047 *
9048  *	Read word(s) from the EEPROM using the SPI protocol.
9049 */
9050 static int
9051 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9052 {
9053 uint32_t reg, val;
9054 int i;
9055 uint8_t opc;
9056
9057 /* Clear SK and CS. */
9058 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
9059 CSR_WRITE(sc, WMREG_EECD, reg);
9060 CSR_WRITE_FLUSH(sc);
9061 delay(2);
9062
9063 if (wm_nvm_ready_spi(sc))
9064 return 1;
9065
9066 /* Toggle CS to flush commands. */
9067 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
9068 CSR_WRITE_FLUSH(sc);
9069 delay(2);
9070 CSR_WRITE(sc, WMREG_EECD, reg);
9071 CSR_WRITE_FLUSH(sc);
9072 delay(2);
9073
9074 opc = SPI_OPC_READ;
9075 if (sc->sc_nvm_addrbits == 8 && word >= 128)
9076 opc |= SPI_OPC_A8;
9077
9078 wm_eeprom_sendbits(sc, opc, 8);
9079 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
9080
9081 for (i = 0; i < wordcnt; i++) {
9082 wm_eeprom_recvbits(sc, &val, 16);
9083 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
9084 }
9085
9086 /* Raise CS and clear SK. */
9087 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
9088 CSR_WRITE(sc, WMREG_EECD, reg);
9089 CSR_WRITE_FLUSH(sc);
9090 delay(2);
9091
9092 return 0;
9093 }
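
/*
 * Byte-order note (sketch): SPI parts stream each 16-bit word low byte
 * first, so the swap above turns wire bytes 0x34, 0x12 into
 * data[i] = 0x1234.  The address is shifted left by one (word << 1)
 * because the device is byte-addressed.
 */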
9094
9095 /* Using with EERD */
9096
9097 static int
9098 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
9099 {
9100 uint32_t attempts = 100000;
9101 uint32_t i, reg = 0;
9102 int32_t done = -1;
9103
9104 for (i = 0; i < attempts; i++) {
9105 reg = CSR_READ(sc, rw);
9106
9107 if (reg & EERD_DONE) {
9108 done = 0;
9109 break;
9110 }
9111 delay(5);
9112 }
9113
9114 return done;
9115 }
9116
9117 static int
9118 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
9119 uint16_t *data)
9120 {
9121 int i, eerd = 0;
9122 int error = 0;
9123
9124 for (i = 0; i < wordcnt; i++) {
9125 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
9126
9127 CSR_WRITE(sc, WMREG_EERD, eerd);
9128 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
9129 if (error != 0)
9130 break;
9131
9132 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
9133 }
9134
9135 return error;
9136 }
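
/*
 * Illustrative use (sketch): on MACs with the EERD interface, a
 * multi-word read is just one register write plus a DONE poll per word,
 * e.g. for two config words at a hypothetical word offset "off":
 *
 *	uint16_t cfg[2];
 *	error = wm_nvm_read_eerd(sc, off, 2, cfg);
 */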
9137
9138 /* Flash */
9139
9140 static int
9141 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
9142 {
9143 uint32_t eecd;
9144 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
9145 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
9146 uint8_t sig_byte = 0;
9147
9148 switch (sc->sc_type) {
9149 case WM_T_ICH8:
9150 case WM_T_ICH9:
9151 eecd = CSR_READ(sc, WMREG_EECD);
9152 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
9153 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
9154 return 0;
9155 }
9156 /* FALLTHROUGH */
9157 default:
9158 /* Default to 0 */
9159 *bank = 0;
9160
9161 /* Check bank 0 */
9162 wm_read_ich8_byte(sc, act_offset, &sig_byte);
9163 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9164 *bank = 0;
9165 return 0;
9166 }
9167
9168 /* Check bank 1 */
9169 wm_read_ich8_byte(sc, act_offset + bank1_offset,
9170 &sig_byte);
9171 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9172 *bank = 1;
9173 return 0;
9174 }
9175 }
9176
9177 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
9178 device_xname(sc->sc_dev)));
9179 return -1;
9180 }
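
/*
 * Detection sketch: each flash bank stores a signature in the high byte
 * of word ICH_NVM_SIG_WORD (hence the * 2 + 1 byte offset above); a bank
 * is valid when (byte & ICH_NVM_VALID_SIG_MASK) equals ICH_NVM_SIG_VALUE.
 * Bank 1 starts one flash-bank size (in bytes) above bank 0, which is
 * the bank1_offset computed above.
 */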
9181
9182 /******************************************************************************
9183 * This function does initial flash setup so that a new read/write/erase cycle
9184 * can be started.
9185 *
9186 * sc - The pointer to the hw structure
9187 ****************************************************************************/
9188 static int32_t
9189 wm_ich8_cycle_init(struct wm_softc *sc)
9190 {
9191 uint16_t hsfsts;
9192 int32_t error = 1;
9193 int32_t i = 0;
9194
9195 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9196
9197 	/* The Flash Descriptor Valid bit in the HW status must be set */
9198 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
9199 return error;
9200 }
9201
9202 	/* Clear FCERR and DAEL in the HW status by writing a 1 to each */
9204 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
9205
9206 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9207
9208 /*
9209 	 * Either we should have a hardware SPI cycle-in-progress bit to
9210 	 * check against in order to start a new cycle, or the FDONE bit
9211 	 * should be changed in the hardware so that it is 1 after hardware
9212 	 * reset, which could then indicate whether a cycle is in progress
9213 	 * or has completed.  We should also have some software semaphore
9214 	 * mechanism to guard FDONE or the cycle-in-progress bit so that
9215 	 * accesses by two threads are serialized, or some way to keep two
9216 	 * threads from starting a cycle at the same time.
9217 */
9218
9219 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
9220 /*
9221 * There is no cycle running at present, so we can start a
9222 * cycle
9223 */
9224
9225 /* Begin by setting Flash Cycle Done. */
9226 hsfsts |= HSFSTS_DONE;
9227 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9228 error = 0;
9229 } else {
9230 /*
9231 		 * Otherwise poll for some time so the current cycle has a
9232 * chance to end before giving up.
9233 */
9234 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
9235 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9236 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
9237 error = 0;
9238 break;
9239 }
9240 delay(1);
9241 }
9242 if (error == 0) {
9243 /*
9244 			 * The previous cycle completed within the timeout;
9245 * now set the Flash Cycle Done.
9246 */
9247 hsfsts |= HSFSTS_DONE;
9248 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9249 }
9250 }
9251 return error;
9252 }
9253
9254 /******************************************************************************
9255 * This function starts a flash cycle and waits for its completion
9256 *
9257 * sc - The pointer to the hw structure
9258 ****************************************************************************/
9259 static int32_t
9260 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
9261 {
9262 uint16_t hsflctl;
9263 uint16_t hsfsts;
9264 int32_t error = 1;
9265 uint32_t i = 0;
9266
9267 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
9268 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
9269 hsflctl |= HSFCTL_GO;
9270 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
9271
9272 /* Wait till FDONE bit is set to 1 */
9273 do {
9274 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9275 if (hsfsts & HSFSTS_DONE)
9276 break;
9277 delay(1);
9278 i++;
9279 } while (i < timeout);
9280 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
9281 error = 0;
9282
9283 return error;
9284 }
9285
9286 /******************************************************************************
9287 * Reads a byte or word from the NVM using the ICH8 flash access registers.
9288 *
9289 * sc - The pointer to the hw structure
9290 * index - The index of the byte or word to read.
9291 * size - Size of data to read, 1=byte 2=word
9292 * data - Pointer to the word to store the value read.
9293 *****************************************************************************/
9294 static int32_t
9295 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
9296 uint32_t size, uint16_t *data)
9297 {
9298 uint16_t hsfsts;
9299 uint16_t hsflctl;
9300 uint32_t flash_linear_address;
9301 uint32_t flash_data = 0;
9302 int32_t error = 1;
9303 int32_t count = 0;
9304
9305 	if (size < 1 || size > 2 || data == NULL ||
9306 index > ICH_FLASH_LINEAR_ADDR_MASK)
9307 return error;
9308
9309 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
9310 sc->sc_ich8_flash_base;
9311
9312 do {
9313 delay(1);
9314 /* Steps */
9315 error = wm_ich8_cycle_init(sc);
9316 if (error)
9317 break;
9318
9319 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
9320 		/* A BCOUNT of 0/1 selects a 1- or 2-byte transfer, respectively. */
9321 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
9322 & HSFCTL_BCOUNT_MASK;
9323 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
9324 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
9325
9326 /*
9327 * Write the last 24 bits of index into Flash Linear address
9328 * field in Flash Address
9329 */
9330 /* TODO: TBD maybe check the index against the size of flash */
9331
9332 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
9333
9334 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
9335
9336 /*
9337 		 * Check if FCERR is set.  If it is, clear it and retry the
9338 		 * whole sequence a few more times; otherwise read the value
9339 		 * out of Flash Data0, least significant byte first.
9341 */
9342 if (error == 0) {
9343 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
9344 if (size == 1)
9345 *data = (uint8_t)(flash_data & 0x000000FF);
9346 else if (size == 2)
9347 *data = (uint16_t)(flash_data & 0x0000FFFF);
9348 break;
9349 } else {
9350 /*
9351 * If we've gotten here, then things are probably
9352 * completely hosed, but if the error condition is
9353 * detected, it won't hurt to give it another try...
9354 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
9355 */
9356 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9357 if (hsfsts & HSFSTS_ERR) {
9358 /* Repeat for some time before giving up. */
9359 continue;
9360 } else if ((hsfsts & HSFSTS_DONE) == 0)
9361 break;
9362 }
9363 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
9364
9365 return error;
9366 }
9367
9368 /******************************************************************************
9369 * Reads a single byte from the NVM using the ICH8 flash access registers.
9370 *
9371 * sc - pointer to wm_hw structure
9372 * index - The index of the byte to read.
9373 * data - Pointer to a byte to store the value read.
9374 *****************************************************************************/
9375 static int32_t
9376 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
9377 {
9378 int32_t status;
9379 uint16_t word = 0;
9380
9381 status = wm_read_ich8_data(sc, index, 1, &word);
9382 if (status == 0)
9383 *data = (uint8_t)word;
9384 else
9385 *data = 0;
9386
9387 return status;
9388 }
9389
9390 /******************************************************************************
9391 * Reads a word from the NVM using the ICH8 flash access registers.
9392 *
9393 * sc - pointer to wm_hw structure
9394 * index - The starting byte index of the word to read.
9395 * data - Pointer to a word to store the value read.
9396 *****************************************************************************/
9397 static int32_t
9398 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
9399 {
9400 int32_t status;
9401
9402 status = wm_read_ich8_data(sc, index, 2, data);
9403 return status;
9404 }
9405
9406 /******************************************************************************
9407 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
9408 * register.
9409 *
9410 * sc - Struct containing variables accessed by shared code
9411 * offset - offset of word in the EEPROM to read
9412 * data - word read from the EEPROM
9413 * words - number of words to read
9414 *****************************************************************************/
9415 static int
9416 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
9417 {
9418 int32_t error = 0;
9419 uint32_t flash_bank = 0;
9420 uint32_t act_offset = 0;
9421 uint32_t bank_offset = 0;
9422 uint16_t word = 0;
9423 uint16_t i = 0;
9424
9425 /*
9426 * We need to know which is the valid flash bank. In the event
9427 * that we didn't allocate eeprom_shadow_ram, we may not be
9428 * managing flash_bank. So it cannot be trusted and needs
9429 * to be updated with each read.
9430 */
9431 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
9432 if (error) {
9433 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
9434 device_xname(sc->sc_dev)));
9435 flash_bank = 0;
9436 }
9437
9438 /*
9439 	 * If we're on bank 1, adjust the offset by the flash-bank size
9440 	 * (in bytes, hence the * 2)
9441 */
9442 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
9443
9444 error = wm_get_swfwhw_semaphore(sc);
9445 if (error) {
9446 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9447 __func__);
9448 return error;
9449 }
9450
9451 for (i = 0; i < words; i++) {
9452 /* The NVM part needs a byte offset, hence * 2 */
9453 act_offset = bank_offset + ((offset + i) * 2);
9454 error = wm_read_ich8_word(sc, act_offset, &word);
9455 if (error) {
9456 aprint_error_dev(sc->sc_dev,
9457 "%s: failed to read NVM\n", __func__);
9458 break;
9459 }
9460 data[i] = word;
9461 }
9462
9463 wm_put_swfwhw_semaphore(sc);
9464 return error;
9465 }
9466
9467 /* iNVM */
9468
9469 static int
9470 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
9471 {
9472 	int32_t rv = -1;	/* assume not found */
9473 uint32_t invm_dword;
9474 uint16_t i;
9475 uint8_t record_type, word_address;
9476
9477 for (i = 0; i < INVM_SIZE; i++) {
9478 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
9479 /* Get record type */
9480 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
9481 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
9482 break;
9483 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
9484 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
9485 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
9486 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
9487 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
9488 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
9489 if (word_address == address) {
9490 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
9491 rv = 0;
9492 break;
9493 }
9494 }
9495 }
9496
9497 return rv;
9498 }
9499
9500 static int
9501 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
9502 {
9503 int rv = 0;
9504 int i;
9505
9506 for (i = 0; i < words; i++) {
9507 switch (offset + i) {
9508 case NVM_OFF_MACADDR:
9509 case NVM_OFF_MACADDR1:
9510 case NVM_OFF_MACADDR2:
9511 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
9512 if (rv != 0) {
9513 data[i] = 0xffff;
9514 rv = -1;
9515 }
9516 break;
9517 case NVM_OFF_CFG2:
9518 rv = wm_nvm_read_word_invm(sc, offset, data);
9519 if (rv != 0) {
9520 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
9521 rv = 0;
9522 }
9523 break;
9524 case NVM_OFF_CFG4:
9525 rv = wm_nvm_read_word_invm(sc, offset, data);
9526 if (rv != 0) {
9527 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
9528 rv = 0;
9529 }
9530 break;
9531 case NVM_OFF_LED_1_CFG:
9532 rv = wm_nvm_read_word_invm(sc, offset, data);
9533 if (rv != 0) {
9534 *data = NVM_LED_1_CFG_DEFAULT_I211;
9535 rv = 0;
9536 }
9537 break;
9538 case NVM_OFF_LED_0_2_CFG:
9539 rv = wm_nvm_read_word_invm(sc, offset, data);
9540 if (rv != 0) {
9541 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
9542 rv = 0;
9543 }
9544 break;
9545 case NVM_OFF_ID_LED_SETTINGS:
9546 rv = wm_nvm_read_word_invm(sc, offset, data);
9547 if (rv != 0) {
9548 *data = ID_LED_RESERVED_FFFF;
9549 rv = 0;
9550 }
9551 break;
9552 default:
9553 			DPRINTF(WM_DEBUG_NVM,
9554 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
9555 *data = NVM_RESERVED_WORD;
9556 break;
9557 }
9558 }
9559
9560 return rv;
9561 }
9562
9563 /* Lock, detecting NVM type, validate checksum, version and read */
9564
9565 /*
9566 * wm_nvm_acquire:
9567 *
9568 * Perform the EEPROM handshake required on some chips.
9569 */
9570 static int
9571 wm_nvm_acquire(struct wm_softc *sc)
9572 {
9573 uint32_t reg;
9574 int x;
9575 int ret = 0;
9576
9577 	/* Flash-type NVM needs no handshake; always succeed */
9578 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9579 return 0;
9580
9581 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
9582 ret = wm_get_swfwhw_semaphore(sc);
9583 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
9584 /* This will also do wm_get_swsm_semaphore() if needed */
9585 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
9586 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
9587 ret = wm_get_swsm_semaphore(sc);
9588 }
9589
9590 if (ret) {
9591 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9592 __func__);
9593 return 1;
9594 }
9595
9596 if (sc->sc_flags & WM_F_LOCK_EECD) {
9597 reg = CSR_READ(sc, WMREG_EECD);
9598
9599 /* Request EEPROM access. */
9600 reg |= EECD_EE_REQ;
9601 CSR_WRITE(sc, WMREG_EECD, reg);
9602
9603 		/* ... and wait for it to be granted. */
9604 for (x = 0; x < 1000; x++) {
9605 reg = CSR_READ(sc, WMREG_EECD);
9606 if (reg & EECD_EE_GNT)
9607 break;
9608 delay(5);
9609 }
9610 if ((reg & EECD_EE_GNT) == 0) {
9611 aprint_error_dev(sc->sc_dev,
9612 "could not acquire EEPROM GNT\n");
9613 reg &= ~EECD_EE_REQ;
9614 CSR_WRITE(sc, WMREG_EECD, reg);
9615 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9616 wm_put_swfwhw_semaphore(sc);
9617 if (sc->sc_flags & WM_F_LOCK_SWFW)
9618 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9619 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9620 wm_put_swsm_semaphore(sc);
9621 return 1;
9622 }
9623 }
9624
9625 return 0;
9626 }
9627
9628 /*
9629 * wm_nvm_release:
9630 *
9631 * Release the EEPROM mutex.
9632 */
9633 static void
9634 wm_nvm_release(struct wm_softc *sc)
9635 {
9636 uint32_t reg;
9637
9638 	/* Flash-type NVM takes no lock; nothing to release */
9639 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9640 return;
9641
9642 if (sc->sc_flags & WM_F_LOCK_EECD) {
9643 reg = CSR_READ(sc, WMREG_EECD);
9644 reg &= ~EECD_EE_REQ;
9645 CSR_WRITE(sc, WMREG_EECD, reg);
9646 }
9647
9648 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9649 wm_put_swfwhw_semaphore(sc);
9650 if (sc->sc_flags & WM_F_LOCK_SWFW)
9651 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9652 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9653 wm_put_swsm_semaphore(sc);
9654 }
9655
9656 static int
9657 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
9658 {
9659 uint32_t eecd = 0;
9660
9661 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
9662 || sc->sc_type == WM_T_82583) {
9663 eecd = CSR_READ(sc, WMREG_EECD);
9664
9665 /* Isolate bits 15 & 16 */
9666 eecd = ((eecd >> 15) & 0x03);
9667
9668 /* If both bits are set, device is Flash type */
9669 if (eecd == 0x03)
9670 return 0;
9671 }
9672 return 1;
9673 }
9674
9675 static int
9676 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
9677 {
9678 uint32_t eec;
9679
9680 eec = CSR_READ(sc, WMREG_EEC);
9681 if ((eec & EEC_FLASH_DETECTED) != 0)
9682 return 1;
9683
9684 return 0;
9685 }
9686
9687 /*
9688  * wm_nvm_validate_checksum
9689  *
9690  * The sum of the first 64 (16 bit) words must equal NVM_CHECKSUM.
9691  */
9692 static int
9693 wm_nvm_validate_checksum(struct wm_softc *sc)
9694 {
9695 uint16_t checksum;
9696 uint16_t eeprom_data;
9697 #ifdef WM_DEBUG
9698 uint16_t csum_wordaddr, valid_checksum;
9699 #endif
9700 int i;
9701
9702 checksum = 0;
9703
9704 /* Don't check for I211 */
9705 if (sc->sc_type == WM_T_I211)
9706 return 0;
9707
9708 #ifdef WM_DEBUG
9709 if (sc->sc_type == WM_T_PCH_LPT) {
9710 csum_wordaddr = NVM_OFF_COMPAT;
9711 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
9712 } else {
9713 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
9714 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
9715 }
9716
9717 /* Dump EEPROM image for debug */
9718 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9719 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9720 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
9721 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
9722 if ((eeprom_data & valid_checksum) == 0) {
9723 DPRINTF(WM_DEBUG_NVM,
9724 ("%s: NVM need to be updated (%04x != %04x)\n",
9725 device_xname(sc->sc_dev), eeprom_data,
9726 valid_checksum));
9727 }
9728 }
9729
9730 if ((wm_debug & WM_DEBUG_NVM) != 0) {
9731 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
9732 for (i = 0; i < NVM_SIZE; i++) {
9733 if (wm_nvm_read(sc, i, 1, &eeprom_data))
9734 printf("XXXX ");
9735 else
9736 printf("%04hx ", eeprom_data);
9737 if (i % 8 == 7)
9738 printf("\n");
9739 }
9740 }
9741
9742 #endif /* WM_DEBUG */
9743
9744 for (i = 0; i < NVM_SIZE; i++) {
9745 if (wm_nvm_read(sc, i, 1, &eeprom_data))
9746 return 1;
9747 checksum += eeprom_data;
9748 }
9749
	/* A mismatch is only logged under WM_DEBUG; it is not treated as fatal */
9750 	if (checksum != (uint16_t) NVM_CHECKSUM) {
9751 #ifdef WM_DEBUG
9752 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
9753 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
9754 #endif
9755 }
9756
9757 return 0;
9758 }
9759
9760 static void
9761 wm_nvm_version_invm(struct wm_softc *sc)
9762 {
9763 uint32_t dword;
9764
9765 /*
9766 	 * Linux's code to decode the version is very strange, so we don't
9767 	 * follow that algorithm and instead just use word 61 as the
9768 	 * documentation describes. This may not be perfect, though...
9769 *
9770 * Example:
9771 *
9772 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
9773 */
9774 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
9775 dword = __SHIFTOUT(dword, INVM_VER_1);
9776 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
9777 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
9778 }
9779
9780 static void
9781 wm_nvm_version(struct wm_softc *sc)
9782 {
9783 uint16_t major, minor, build, patch;
9784 uint16_t uid0, uid1;
9785 uint16_t nvm_data;
9786 uint16_t off;
9787 bool check_version = false;
9788 bool check_optionrom = false;
9789 bool have_build = false;
9790
9791 /*
9792 * Version format:
9793 *
9794 * XYYZ
9795 * X0YZ
9796 * X0YY
9797 *
9798 * Example:
9799 *
9800 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
9801 * 82571 0x50a6 5.10.6?
9802 * 82572 0x506a 5.6.10?
9803 * 82572EI 0x5069 5.6.9?
9804 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
9805 * 0x2013 2.1.3?
9806 	 * 82583	0x10a0	1.10.0? (document says it's the default value)
9807 */
9808 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
9809 switch (sc->sc_type) {
9810 case WM_T_82571:
9811 case WM_T_82572:
9812 case WM_T_82574:
9813 check_version = true;
9814 check_optionrom = true;
9815 have_build = true;
9816 break;
9817 case WM_T_82575:
9818 case WM_T_82576:
9819 case WM_T_82580:
9820 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
9821 check_version = true;
9822 break;
9823 case WM_T_I211:
9824 wm_nvm_version_invm(sc);
9825 goto printver;
9826 case WM_T_I210:
9827 if (!wm_nvm_get_flash_presence_i210(sc)) {
9828 wm_nvm_version_invm(sc);
9829 goto printver;
9830 }
9831 /* FALLTHROUGH */
9832 case WM_T_I350:
9833 case WM_T_I354:
9834 check_version = true;
9835 check_optionrom = true;
9836 break;
9837 default:
9838 return;
9839 }
9840 if (check_version) {
9841 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
9842 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
9843 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
9844 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
9845 build = nvm_data & NVM_BUILD_MASK;
9846 have_build = true;
9847 } else
9848 minor = nvm_data & 0x00ff;
9849
9850 /* Decimal */
9851 minor = (minor / 16) * 10 + (minor % 16);
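		/* The raw minor is hex-coded decimal: 0x10 -> 10, 0x25 -> 25 */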
9852 sc->sc_nvm_ver_major = major;
9853 sc->sc_nvm_ver_minor = minor;
9854
9855 printver:
9856 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
9857 sc->sc_nvm_ver_minor);
9858 if (have_build)
9859 aprint_verbose(".%d", build);
9860 }
9861 if (check_optionrom) {
9862 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
9863 /* Option ROM Version */
9864 if ((off != 0x0000) && (off != 0xffff)) {
9865 off += NVM_COMBO_VER_OFF;
9866 wm_nvm_read(sc, off + 1, 1, &uid1);
9867 wm_nvm_read(sc, off, 1, &uid0);
9868 if ((uid0 != 0) && (uid0 != 0xffff)
9869 && (uid1 != 0) && (uid1 != 0xffff)) {
9870 /* 16bits */
9871 major = uid0 >> 8;
9872 build = (uid0 << 8) | (uid1 >> 8);
9873 patch = uid1 & 0x00ff;
9874 aprint_verbose(", option ROM Version %d.%d.%d",
9875 major, build, patch);
9876 }
9877 }
9878 }
9879
9880 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
9881 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
9882 }
9883
9884 /*
9885 * wm_nvm_read:
9886 *
9887 * Read data from the serial EEPROM.
9888 */
9889 static int
9890 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9891 {
9892 int rv;
9893
9894 if (sc->sc_flags & WM_F_EEPROM_INVALID)
9895 return 1;
9896
9897 if (wm_nvm_acquire(sc))
9898 return 1;
9899
9900 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9901 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9902 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
9903 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
9904 else if (sc->sc_flags & WM_F_EEPROM_INVM)
9905 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
9906 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
9907 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
9908 else if (sc->sc_flags & WM_F_EEPROM_SPI)
9909 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
9910 else
9911 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
9912
9913 wm_nvm_release(sc);
9914 return rv;
9915 }
9916
9917 /*
9918  * Hardware semaphores.
9919  * Very complex...
9920  */
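/*
 * A rough map of the pieces used below (summary, not authoritative):
 * SWSM_SMBI is the software semaphore and SWSM_SWESMBI the
 * software/firmware one; SW_FW_SYNC arbitrates per-resource bits
 * between driver and firmware; EXTCNFCTR_MDIO_SW_OWNERSHIP covers the
 * ICH/PCH variants.
 */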
9921
9922 static int
9923 wm_get_swsm_semaphore(struct wm_softc *sc)
9924 {
9925 int32_t timeout;
9926 uint32_t swsm;
9927
9928 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9929 /* Get the SW semaphore. */
9930 timeout = sc->sc_nvm_wordsize + 1;
9931 while (timeout) {
9932 swsm = CSR_READ(sc, WMREG_SWSM);
9933
9934 if ((swsm & SWSM_SMBI) == 0)
9935 break;
9936
9937 delay(50);
9938 timeout--;
9939 }
9940
9941 if (timeout == 0) {
9942 aprint_error_dev(sc->sc_dev,
9943 "could not acquire SWSM SMBI\n");
9944 return 1;
9945 }
9946 }
9947
9948 /* Get the FW semaphore. */
9949 timeout = sc->sc_nvm_wordsize + 1;
9950 while (timeout) {
9951 swsm = CSR_READ(sc, WMREG_SWSM);
9952 swsm |= SWSM_SWESMBI;
9953 CSR_WRITE(sc, WMREG_SWSM, swsm);
9954 /* If we managed to set the bit we got the semaphore. */
9955 swsm = CSR_READ(sc, WMREG_SWSM);
9956 if (swsm & SWSM_SWESMBI)
9957 break;
9958
9959 delay(50);
9960 timeout--;
9961 }
9962
9963 if (timeout == 0) {
9964 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
9965 /* Release semaphores */
9966 wm_put_swsm_semaphore(sc);
9967 return 1;
9968 }
9969 return 0;
9970 }
9971
9972 static void
9973 wm_put_swsm_semaphore(struct wm_softc *sc)
9974 {
9975 uint32_t swsm;
9976
9977 swsm = CSR_READ(sc, WMREG_SWSM);
9978 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
9979 CSR_WRITE(sc, WMREG_SWSM, swsm);
9980 }
9981
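/*
 * SW_FW_SYNC holds a software-owned bit and a firmware-owned bit per
 * resource; for example (assuming SWFW_SOFT_SHIFT = 0 and
 * SWFW_FIRM_SHIFT = 16), SWFW_EEP_SM would map to bit 0 for the driver
 * and bit 16 for the firmware.
 */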
9982 static int
9983 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
9984 {
9985 uint32_t swfw_sync;
9986 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
9987 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
9988 	int timeout;
9989
9990 for (timeout = 0; timeout < 200; timeout++) {
9991 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9992 if (wm_get_swsm_semaphore(sc)) {
9993 aprint_error_dev(sc->sc_dev,
9994 "%s: failed to get semaphore\n",
9995 __func__);
9996 return 1;
9997 }
9998 }
9999 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10000 if ((swfw_sync & (swmask | fwmask)) == 0) {
10001 swfw_sync |= swmask;
10002 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10003 if (sc->sc_flags & WM_F_LOCK_SWSM)
10004 wm_put_swsm_semaphore(sc);
10005 return 0;
10006 }
10007 if (sc->sc_flags & WM_F_LOCK_SWSM)
10008 wm_put_swsm_semaphore(sc);
10009 delay(5000);
10010 }
10011 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
10012 device_xname(sc->sc_dev), mask, swfw_sync);
10013 return 1;
10014 }
10015
10016 static void
10017 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10018 {
10019 uint32_t swfw_sync;
10020
10021 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10022 while (wm_get_swsm_semaphore(sc) != 0)
10023 continue;
10024 }
10025 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10026 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
10027 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10028 if (sc->sc_flags & WM_F_LOCK_SWSM)
10029 wm_put_swsm_semaphore(sc);
10030 }
10031
10032 static int
10033 wm_get_swfwhw_semaphore(struct wm_softc *sc)
10034 {
10035 uint32_t ext_ctrl;
10036 	int timeout;
10037
10038 for (timeout = 0; timeout < 200; timeout++) {
10039 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10040 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
10041 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10042
10043 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10044 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
10045 return 0;
10046 delay(5000);
10047 }
10048 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
10049 device_xname(sc->sc_dev), ext_ctrl);
10050 return 1;
10051 }
10052
10053 static void
10054 wm_put_swfwhw_semaphore(struct wm_softc *sc)
10055 {
10056 uint32_t ext_ctrl;
10057 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10058 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10059 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10060 }
10061
10062 static int
10063 wm_get_hw_semaphore_82573(struct wm_softc *sc)
10064 {
10065 int i = 0;
10066 uint32_t reg;
10067
10068 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10069 do {
10070 CSR_WRITE(sc, WMREG_EXTCNFCTR,
10071 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
10072 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10073 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
10074 break;
10075 delay(2*1000);
10076 i++;
10077 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
10078
10079 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
10080 wm_put_hw_semaphore_82573(sc);
10081 log(LOG_ERR, "%s: Driver can't access the PHY\n",
10082 device_xname(sc->sc_dev));
10083 return -1;
10084 }
10085
10086 return 0;
10087 }
10088
10089 static void
10090 wm_put_hw_semaphore_82573(struct wm_softc *sc)
10091 {
10092 uint32_t reg;
10093
10094 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10095 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10096 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10097 }
10098
10099 /*
10100 * Management mode and power management related subroutines.
10101 * BMC, AMT, suspend/resume and EEE.
10102 */
10103
10104 static int
10105 wm_check_mng_mode(struct wm_softc *sc)
10106 {
10107 int rv;
10108
10109 switch (sc->sc_type) {
10110 case WM_T_ICH8:
10111 case WM_T_ICH9:
10112 case WM_T_ICH10:
10113 case WM_T_PCH:
10114 case WM_T_PCH2:
10115 case WM_T_PCH_LPT:
10116 rv = wm_check_mng_mode_ich8lan(sc);
10117 break;
10118 case WM_T_82574:
10119 case WM_T_82583:
10120 rv = wm_check_mng_mode_82574(sc);
10121 break;
10122 case WM_T_82571:
10123 case WM_T_82572:
10124 case WM_T_82573:
10125 case WM_T_80003:
10126 rv = wm_check_mng_mode_generic(sc);
10127 break;
10128 default:
10129 		/* nothing to do */
10130 rv = 0;
10131 break;
10132 }
10133
10134 return rv;
10135 }
10136
10137 static int
10138 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
10139 {
10140 uint32_t fwsm;
10141
10142 fwsm = CSR_READ(sc, WMREG_FWSM);
10143
10144 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
10145 return 1;
10146
10147 return 0;
10148 }
10149
10150 static int
10151 wm_check_mng_mode_82574(struct wm_softc *sc)
10152 {
10153 uint16_t data;
10154
10155 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
10156
10157 if ((data & NVM_CFG2_MNGM_MASK) != 0)
10158 return 1;
10159
10160 return 0;
10161 }
10162
10163 static int
10164 wm_check_mng_mode_generic(struct wm_softc *sc)
10165 {
10166 uint32_t fwsm;
10167
10168 fwsm = CSR_READ(sc, WMREG_FWSM);
10169
10170 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
10171 return 1;
10172
10173 return 0;
10174 }
10175
10176 static int
10177 wm_enable_mng_pass_thru(struct wm_softc *sc)
10178 {
10179 uint32_t manc, fwsm, factps;
10180
10181 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
10182 return 0;
10183
10184 manc = CSR_READ(sc, WMREG_MANC);
10185
10186 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
10187 device_xname(sc->sc_dev), manc));
10188 if ((manc & MANC_RECV_TCO_EN) == 0)
10189 return 0;
10190
10191 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
10192 fwsm = CSR_READ(sc, WMREG_FWSM);
10193 factps = CSR_READ(sc, WMREG_FACTPS);
10194 if (((factps & FACTPS_MNGCG) == 0)
10195 && ((fwsm & FWSM_MODE_MASK)
10196 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
10197 return 1;
10198 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
10199 uint16_t data;
10200
10201 factps = CSR_READ(sc, WMREG_FACTPS);
10202 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
10203 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
10204 device_xname(sc->sc_dev), factps, data));
10205 if (((factps & FACTPS_MNGCG) == 0)
10206 && ((data & NVM_CFG2_MNGM_MASK)
10207 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
10208 return 1;
10209 } else if (((manc & MANC_SMBUS_EN) != 0)
10210 && ((manc & MANC_ASF_EN) == 0))
10211 return 1;
10212
10213 return 0;
10214 }
10215
10216 static int
10217 wm_check_reset_block(struct wm_softc *sc)
10218 {
10219 uint32_t reg;
10220
10221 switch (sc->sc_type) {
10222 case WM_T_ICH8:
10223 case WM_T_ICH9:
10224 case WM_T_ICH10:
10225 case WM_T_PCH:
10226 case WM_T_PCH2:
10227 case WM_T_PCH_LPT:
10228 reg = CSR_READ(sc, WMREG_FWSM);
10229 if ((reg & FWSM_RSPCIPHY) != 0)
10230 return 0;
10231 else
10232 return -1;
10233 break;
10234 case WM_T_82571:
10235 case WM_T_82572:
10236 case WM_T_82573:
10237 case WM_T_82574:
10238 case WM_T_82583:
10239 case WM_T_80003:
10240 reg = CSR_READ(sc, WMREG_MANC);
10241 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
10242 return -1;
10243 else
10244 return 0;
10245 break;
10246 default:
10247 /* no problem */
10248 break;
10249 }
10250
10251 return 0;
10252 }
10253
10254 static void
10255 wm_get_hw_control(struct wm_softc *sc)
10256 {
10257 uint32_t reg;
10258
10259 switch (sc->sc_type) {
10260 case WM_T_82573:
10261 reg = CSR_READ(sc, WMREG_SWSM);
10262 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
10263 break;
10264 case WM_T_82571:
10265 case WM_T_82572:
10266 case WM_T_82574:
10267 case WM_T_82583:
10268 case WM_T_80003:
10269 case WM_T_ICH8:
10270 case WM_T_ICH9:
10271 case WM_T_ICH10:
10272 case WM_T_PCH:
10273 case WM_T_PCH2:
10274 case WM_T_PCH_LPT:
10275 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10276 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
10277 break;
10278 default:
10279 break;
10280 }
10281 }
10282
10283 static void
10284 wm_release_hw_control(struct wm_softc *sc)
10285 {
10286 uint32_t reg;
10287
10288 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
10289 return;
10290
10291 if (sc->sc_type == WM_T_82573) {
10292 reg = CSR_READ(sc, WMREG_SWSM);
10293 		reg &= ~SWSM_DRV_LOAD;
10294 		CSR_WRITE(sc, WMREG_SWSM, reg);
10295 } else {
10296 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10297 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
10298 }
10299 }
10300
10301 static void
10302 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
10303 {
10304 uint32_t reg;
10305
10306 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10307
10308 if (on != 0)
10309 reg |= EXTCNFCTR_GATE_PHY_CFG;
10310 else
10311 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
10312
10313 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10314 }
10315
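/*
 * A sketch of what follows (our reading, not authoritative): toggling
 * CTRL_LANPHYPC_OVERRIDE/VALUE power-cycles the PHY so that it comes
 * back up on its normal (non-SMBus) interface and the MAC can reach it
 * again; hence the function name.
 */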
10316 static void
10317 wm_smbustopci(struct wm_softc *sc)
10318 {
10319 uint32_t fwsm;
10320
10321 fwsm = CSR_READ(sc, WMREG_FWSM);
10322 if (((fwsm & FWSM_FW_VALID) == 0)
10323 && ((wm_check_reset_block(sc) == 0))) {
10324 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
10325 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
10326 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10327 CSR_WRITE_FLUSH(sc);
10328 delay(10);
10329 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
10330 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10331 CSR_WRITE_FLUSH(sc);
10332 delay(50*1000);
10333
10334 /*
10335 * Gate automatic PHY configuration by hardware on non-managed
10336 * 82579
10337 */
10338 if (sc->sc_type == WM_T_PCH2)
10339 wm_gate_hw_phy_config_ich8lan(sc, 1);
10340 }
10341 }
10342
10343 static void
10344 wm_init_manageability(struct wm_softc *sc)
10345 {
10346
10347 if (sc->sc_flags & WM_F_HAS_MANAGE) {
10348 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
10349 uint32_t manc = CSR_READ(sc, WMREG_MANC);
10350
10351 /* Disable hardware interception of ARP */
10352 manc &= ~MANC_ARP_EN;
10353
10354 /* Enable receiving management packets to the host */
10355 if (sc->sc_type >= WM_T_82571) {
10356 manc |= MANC_EN_MNG2HOST;
10357 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
10358 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
10359 }
10360
10361 CSR_WRITE(sc, WMREG_MANC, manc);
10362 }
10363 }
10364
10365 static void
10366 wm_release_manageability(struct wm_softc *sc)
10367 {
10368
10369 if (sc->sc_flags & WM_F_HAS_MANAGE) {
10370 uint32_t manc = CSR_READ(sc, WMREG_MANC);
10371
10372 manc |= MANC_ARP_EN;
10373 if (sc->sc_type >= WM_T_82571)
10374 manc &= ~MANC_EN_MNG2HOST;
10375
10376 CSR_WRITE(sc, WMREG_MANC, manc);
10377 }
10378 }
10379
10380 static void
10381 wm_get_wakeup(struct wm_softc *sc)
10382 {
10383
10384 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
10385 switch (sc->sc_type) {
10386 case WM_T_82573:
10387 case WM_T_82583:
10388 sc->sc_flags |= WM_F_HAS_AMT;
10389 /* FALLTHROUGH */
10390 case WM_T_80003:
10391 case WM_T_82541:
10392 case WM_T_82547:
10393 case WM_T_82571:
10394 case WM_T_82572:
10395 case WM_T_82574:
10396 case WM_T_82575:
10397 case WM_T_82576:
10398 case WM_T_82580:
10399 case WM_T_I350:
10400 case WM_T_I354:
10401 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
10402 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
10403 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
10404 break;
10405 case WM_T_ICH8:
10406 case WM_T_ICH9:
10407 case WM_T_ICH10:
10408 case WM_T_PCH:
10409 case WM_T_PCH2:
10410 case WM_T_PCH_LPT:
10411 sc->sc_flags |= WM_F_HAS_AMT;
10412 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
10413 break;
10414 default:
10415 break;
10416 }
10417
10418 /* 1: HAS_MANAGE */
10419 if (wm_enable_mng_pass_thru(sc) != 0)
10420 sc->sc_flags |= WM_F_HAS_MANAGE;
10421
10422 #ifdef WM_DEBUG
10423 printf("\n");
10424 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
10425 printf("HAS_AMT,");
10426 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
10427 printf("ARC_SUBSYS_VALID,");
10428 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
10429 printf("ASF_FIRMWARE_PRES,");
10430 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
10431 printf("HAS_MANAGE,");
10432 printf("\n");
10433 #endif
10434 	/*
10435 	 * Note that the WOL flags are set after the EEPROM-related
10436 	 * resets are done.
10437 	 */
10438 }
10439
10440 #ifdef WM_WOL
10441 /* WOL in the newer chipset interfaces (pchlan) */
10442 static void
10443 wm_enable_phy_wakeup(struct wm_softc *sc)
10444 {
10445 #if 0
10446 uint16_t preg;
10447
10448 /* Copy MAC RARs to PHY RARs */
10449
10450 /* Copy MAC MTA to PHY MTA */
10451
10452 /* Configure PHY Rx Control register */
10453
10454 /* Enable PHY wakeup in MAC register */
10455
10456 /* Configure and enable PHY wakeup in PHY registers */
10457
10458 /* Activate PHY wakeup */
10459
10460 /* XXX */
10461 #endif
10462 }
10463
10464 /* Power down workaround on D3 */
10465 static void
10466 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
10467 {
10468 uint32_t reg;
10469 int i;
10470
10471 for (i = 0; i < 2; i++) {
10472 /* Disable link */
10473 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10474 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
10475 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10476
10477 /*
10478 * Call gig speed drop workaround on Gig disable before
10479 * accessing any PHY registers
10480 */
10481 if (sc->sc_type == WM_T_ICH8)
10482 wm_gig_downshift_workaround_ich8lan(sc);
10483
10484 /* Write VR power-down enable */
10485 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
10486 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
10487 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
10488 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
10489
10490 /* Read it back and test */
10491 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
10492 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
10493 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
10494 break;
10495
10496 /* Issue PHY reset and repeat at most one more time */
10497 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10498 }
10499 }
10500
10501 static void
10502 wm_enable_wakeup(struct wm_softc *sc)
10503 {
10504 uint32_t reg, pmreg;
10505 pcireg_t pmode;
10506
10507 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
10508 &pmreg, NULL) == 0)
10509 return;
10510
10511 /* Advertise the wakeup capability */
10512 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
10513 | CTRL_SWDPIN(3));
10514 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
10515
10516 /* ICH workaround */
10517 switch (sc->sc_type) {
10518 case WM_T_ICH8:
10519 case WM_T_ICH9:
10520 case WM_T_ICH10:
10521 case WM_T_PCH:
10522 case WM_T_PCH2:
10523 case WM_T_PCH_LPT:
10524 /* Disable gig during WOL */
10525 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10526 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
10527 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10528 if (sc->sc_type == WM_T_PCH)
10529 wm_gmii_reset(sc);
10530
10531 /* Power down workaround */
10532 if (sc->sc_phytype == WMPHY_82577) {
10533 struct mii_softc *child;
10534
10535 /* Assume that the PHY is copper */
10536 child = LIST_FIRST(&sc->sc_mii.mii_phys);
10537 if (child->mii_mpd_rev <= 2)
10538 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
10539 (768 << 5) | 25, 0x0444); /* magic num */
10540 }
10541 break;
10542 default:
10543 break;
10544 }
10545
10546 /* Keep the laser running on fiber adapters */
10547 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
10548 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
10549 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10550 reg |= CTRL_EXT_SWDPIN(3);
10551 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10552 }
10553
10554 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
10555 #if 0 /* for the multicast packet */
10556 reg |= WUFC_MC;
10557 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
10558 #endif
10559
10560 if (sc->sc_type == WM_T_PCH) {
10561 wm_enable_phy_wakeup(sc);
10562 } else {
10563 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
10564 CSR_WRITE(sc, WMREG_WUFC, reg);
10565 }
10566
10567 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10568 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10569 || (sc->sc_type == WM_T_PCH2))
10570 && (sc->sc_phytype == WMPHY_IGP_3))
10571 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
10572
10573 /* Request PME */
10574 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
10575 #if 0
10576 /* Disable WOL */
10577 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
10578 #else
10579 /* For WOL */
10580 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
10581 #endif
10582 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
10583 }
10584 #endif /* WM_WOL */
10585
10586 /* EEE */
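/*
 * Energy Efficient Ethernet (IEEE 802.3az) lets a link enter Low Power
 * Idle between packets; the IPCNFG/EEER bits below enable or disable
 * LPI negotiation and signalling per link speed.
 */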
10587
10588 static void
10589 wm_set_eee_i350(struct wm_softc *sc)
10590 {
10591 uint32_t ipcnfg, eeer;
10592
10593 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
10594 eeer = CSR_READ(sc, WMREG_EEER);
10595
10596 if ((sc->sc_flags & WM_F_EEE) != 0) {
10597 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
10598 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
10599 | EEER_LPI_FC);
10600 } else {
10601 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
10602 ipcnfg &= ~IPCNFG_10BASE_TE;
10603 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
10604 | EEER_LPI_FC);
10605 }
10606
10607 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
10608 CSR_WRITE(sc, WMREG_EEER, eeer);
10609 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
10610 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
10611 }
10612
10613 /*
10614 * Workarounds (mainly PHY related).
10615 * Basically, PHY's workarounds are in the PHY drivers.
10616 */
10617
10618 /* Work-around for 82566 Kumeran PCS lock loss */
10619 static void
10620 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
10621 {
10622 int miistatus, active, i;
10623 int reg;
10624
10625 miistatus = sc->sc_mii.mii_media_status;
10626
10627 /* If the link is not up, do nothing */
10628 	if ((miistatus & IFM_ACTIVE) == 0)
10629 return;
10630
10631 active = sc->sc_mii.mii_media_active;
10632
10633 /* Nothing to do if the link is other than 1Gbps */
10634 if (IFM_SUBTYPE(active) != IFM_1000_T)
10635 return;
10636
10637 for (i = 0; i < 10; i++) {
10638 /* read twice */
10639 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
10640 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
10641 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
10642 goto out; /* GOOD! */
10643
10644 /* Reset the PHY */
10645 wm_gmii_reset(sc);
10646 delay(5*1000);
10647 }
10648
10649 /* Disable GigE link negotiation */
10650 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10651 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
10652 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10653
10654 /*
10655 * Call gig speed drop workaround on Gig disable before accessing
10656 * any PHY registers.
10657 */
10658 wm_gig_downshift_workaround_ich8lan(sc);
10659
10660 out:
10661 return;
10662 }
10663
10664 /* WOL from S5 stops working */
10665 static void
10666 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
10667 {
10668 uint16_t kmrn_reg;
10669
10670 /* Only for igp3 */
10671 if (sc->sc_phytype == WMPHY_IGP_3) {
10672 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
10673 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
10674 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
10675 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
10676 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
10677 }
10678 }
10679
10680 /*
10681 * Workaround for pch's PHYs
10682 * XXX should be moved to new PHY driver?
10683 */
10684 static void
10685 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
10686 {
10687 if (sc->sc_phytype == WMPHY_82577)
10688 wm_set_mdio_slow_mode_hv(sc);
10689
10690 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
10691
10692 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
10693
10694 /* 82578 */
10695 if (sc->sc_phytype == WMPHY_82578) {
10696 /* PCH rev. < 3 */
10697 if (sc->sc_rev < 3) {
10698 /* XXX 6 bit shift? Why? Is it page2? */
10699 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
10700 0x66c0);
10701 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
10702 0xffff);
10703 }
10704
10705 /* XXX phy rev. < 2 */
10706 }
10707
10708 /* Select page 0 */
10709
10710 /* XXX acquire semaphore */
10711 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
10712 /* XXX release semaphore */
10713
10714 /*
10715 * Configure the K1 Si workaround during phy reset assuming there is
10716 * link so that it disables K1 if link is in 1Gbps.
10717 */
10718 wm_k1_gig_workaround_hv(sc, 1);
10719 }
10720
10721 static void
10722 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
10723 {
10724
10725 wm_set_mdio_slow_mode_hv(sc);
10726 }
10727
10728 static void
10729 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
10730 {
10731 int k1_enable = sc->sc_nvm_k1_enabled;
10732
10733 /* XXX acquire semaphore */
10734
10735 if (link) {
10736 k1_enable = 0;
10737
10738 /* Link stall fix for link up */
10739 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
10740 } else {
10741 /* Link stall fix for link down */
10742 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
10743 }
10744
10745 wm_configure_k1_ich8lan(sc, k1_enable);
10746
10747 /* XXX release semaphore */
10748 }
10749
10750 static void
10751 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
10752 {
10753 uint32_t reg;
10754
10755 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
10756 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
10757 reg | HV_KMRN_MDIO_SLOW);
10758 }
10759
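/*
 * K1 is, as we understand it, a power-save state of the MAC/PHY
 * interconnect.  The helper below flips KUMCTRLSTA_K1_ENABLE and then
 * briefly forces the MAC speed so that both ends stay in agreement.
 */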
10760 static void
10761 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
10762 {
10763 uint32_t ctrl, ctrl_ext, tmp;
10764 uint16_t kmrn_reg;
10765
10766 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
10767
10768 if (k1_enable)
10769 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
10770 else
10771 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
10772
10773 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
10774
10775 delay(20);
10776
10777 ctrl = CSR_READ(sc, WMREG_CTRL);
10778 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10779
10780 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
10781 tmp |= CTRL_FRCSPD;
10782
10783 CSR_WRITE(sc, WMREG_CTRL, tmp);
10784 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
10785 CSR_WRITE_FLUSH(sc);
10786 delay(20);
10787
10788 CSR_WRITE(sc, WMREG_CTRL, ctrl);
10789 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10790 CSR_WRITE_FLUSH(sc);
10791 delay(20);
10792 }
10793
10794 /* special case - for 82575 - need to do manual init ... */
10795 static void
10796 wm_reset_init_script_82575(struct wm_softc *sc)
10797 {
10798 	/*
10799 	 * Remark: this is untested code - we have no board without EEPROM.
10800 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
10801 	 */
10802
10803 /* SerDes configuration via SERDESCTRL */
10804 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
10805 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
10806 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
10807 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
10808
10809 /* CCM configuration via CCMCTL register */
10810 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
10811 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
10812
10813 /* PCIe lanes configuration */
10814 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
10815 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
10816 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
10817 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
10818
10819 /* PCIe PLL Configuration */
10820 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
10821 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
10822 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
10823 }
10824
10825 static void
10826 wm_reset_mdicnfg_82580(struct wm_softc *sc)
10827 {
10828 uint32_t reg;
10829 uint16_t nvmword;
10830 int rv;
10831
10832 if ((sc->sc_flags & WM_F_SGMII) == 0)
10833 return;
10834
10835 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
10836 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
10837 if (rv != 0) {
10838 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
10839 __func__);
10840 return;
10841 }
10842
10843 reg = CSR_READ(sc, WMREG_MDICNFG);
10844 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
10845 reg |= MDICNFG_DEST;
10846 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
10847 reg |= MDICNFG_COM_MDIO;
10848 CSR_WRITE(sc, WMREG_MDICNFG, reg);
10849 }
10850
10851 /*
10852 * I210 Errata 25 and I211 Errata 10
10853 * Slow System Clock.
10854 */
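/*
 * Flow sketch: read the PHY's PLL-frequency register; while it reads
 * back as unconfigured, reset the internal PHY, rewrite the iNVM
 * autoload word via EEARBC with the workaround value, bounce the
 * function through D3hot and back to D0, then retry (bounded by
 * WM_MAX_PLL_TRIES).
 */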
10855 static void
10856 wm_pll_workaround_i210(struct wm_softc *sc)
10857 {
10858 uint32_t mdicnfg, wuc;
10859 uint32_t reg;
10860 pcireg_t pcireg;
10861 uint32_t pmreg;
10862 uint16_t nvmword, tmp_nvmword;
10863 int phyval;
10864 bool wa_done = false;
10865 int i;
10866
10867 /* Save WUC and MDICNFG registers */
10868 wuc = CSR_READ(sc, WMREG_WUC);
10869 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
10870
10871 reg = mdicnfg & ~MDICNFG_DEST;
10872 CSR_WRITE(sc, WMREG_MDICNFG, reg);
10873
10874 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
10875 nvmword = INVM_DEFAULT_AL;
10876 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
10877
10878 /* Get Power Management cap offset */
10879 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
10880 &pmreg, NULL) == 0)
10881 return;
10882 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
10883 phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
10884 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
10885
10886 if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
10887 break; /* OK */
10888 }
10889
10890 wa_done = true;
10891 /* Directly reset the internal PHY */
10892 reg = CSR_READ(sc, WMREG_CTRL);
10893 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
10894
10895 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10896 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
10897 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10898
10899 CSR_WRITE(sc, WMREG_WUC, 0);
10900 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
10901 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
10902
10903 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
10904 pmreg + PCI_PMCSR);
10905 pcireg |= PCI_PMCSR_STATE_D3;
10906 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
10907 pmreg + PCI_PMCSR, pcireg);
10908 delay(1000);
10909 pcireg &= ~PCI_PMCSR_STATE_D3;
10910 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
10911 pmreg + PCI_PMCSR, pcireg);
10912
10913 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
10914 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
10915
10916 /* Restore WUC register */
10917 CSR_WRITE(sc, WMREG_WUC, wuc);
10918 }
10919
10920 /* Restore MDICNFG setting */
10921 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
10922 if (wa_done)
10923 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
10924 }
10925