if_wm.c revision 1.342 1 /* $NetBSD: if_wm.c,v 1.342 2015/07/22 11:14:13 knakahara Exp $ */
2
3 /*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*******************************************************************************
39
40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved.
42
43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met:
45
46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer.
48
49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution.
52
53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission.
56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE.
68
69 *******************************************************************************/
70 /*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 *
73 * TODO (in order of importance):
74 *
75 * - Check XXX'ed comments
76 * - EEE (Energy Efficiency Ethernet)
77 * - MSI/MSI-X
78 * - Virtual Function
79 * - Set LED correctly (based on contents in EEPROM)
80 * - Rework how parameters are loaded from the EEPROM.
81 */
82
83 #include <sys/cdefs.h>
84 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.342 2015/07/22 11:14:13 knakahara Exp $");
85
86 #ifdef _KERNEL_OPT
87 #include "opt_net_mpsafe.h"
88 #endif
89
90 #include <sys/param.h>
91 #include <sys/systm.h>
92 #include <sys/callout.h>
93 #include <sys/mbuf.h>
94 #include <sys/malloc.h>
95 #include <sys/kernel.h>
96 #include <sys/socket.h>
97 #include <sys/ioctl.h>
98 #include <sys/errno.h>
99 #include <sys/device.h>
100 #include <sys/queue.h>
101 #include <sys/syslog.h>
102
103 #include <sys/rndsource.h>
104
105 #include <net/if.h>
106 #include <net/if_dl.h>
107 #include <net/if_media.h>
108 #include <net/if_ether.h>
109
110 #include <net/bpf.h>
111
112 #include <netinet/in.h> /* XXX for struct ip */
113 #include <netinet/in_systm.h> /* XXX for struct ip */
114 #include <netinet/ip.h> /* XXX for struct ip */
115 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */
116 #include <netinet/tcp.h> /* XXX for struct tcphdr */
117
118 #include <sys/bus.h>
119 #include <sys/intr.h>
120 #include <machine/endian.h>
121
122 #include <dev/mii/mii.h>
123 #include <dev/mii/miivar.h>
124 #include <dev/mii/miidevs.h>
125 #include <dev/mii/mii_bitbang.h>
126 #include <dev/mii/ikphyreg.h>
127 #include <dev/mii/igphyreg.h>
128 #include <dev/mii/igphyvar.h>
129 #include <dev/mii/inbmphyreg.h>
130
131 #include <dev/pci/pcireg.h>
132 #include <dev/pci/pcivar.h>
133 #include <dev/pci/pcidevs.h>
134
135 #include <dev/pci/if_wmreg.h>
136 #include <dev/pci/if_wmvar.h>
137
138 #ifdef WM_DEBUG
139 #define WM_DEBUG_LINK 0x01
140 #define WM_DEBUG_TX 0x02
141 #define WM_DEBUG_RX 0x04
142 #define WM_DEBUG_GMII 0x08
143 #define WM_DEBUG_MANAGE 0x10
144 #define WM_DEBUG_NVM 0x20
145 int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
146 | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
147
148 #define DPRINTF(x, y) if (wm_debug & (x)) printf y
149 #else
150 #define DPRINTF(x, y) /* nothing */
151 #endif /* WM_DEBUG */
152
153 #ifdef NET_MPSAFE
154 #define WM_MPSAFE 1
155 #endif
156
157 #ifdef __HAVE_PCI_MSI_MSIX
158 #define WM_MSI_MSIX 1 /* Enable by default */
159 #endif
160
161 /*
162 * This device driver divides interrupt to TX, RX and link state.
163 * Each MSI-X vector indexes are below.
164 */
165 #define WM_MSIX_NINTR 3
166 #define WM_MSIX_TXINTR_IDX 0
167 #define WM_MSIX_RXINTR_IDX 1
168 #define WM_MSIX_LINKINTR_IDX 2
169 #define WM_MAX_NINTR WM_MSIX_NINTR
170
171 /*
172 * This device driver set affinity to each interrupts like below (round-robin).
173 * If the number CPUs is less than the number of interrupts, this driver usase
174 * the same CPU for multiple interrupts.
175 */
176 #define WM_MSIX_TXINTR_CPUID 0
177 #define WM_MSIX_RXINTR_CPUID 1
178 #define WM_MSIX_LINKINTR_CPUID 2
179
180 /*
181 * Transmit descriptor list size. Due to errata, we can only have
182 * 256 hardware descriptors in the ring on < 82544, but we use 4096
183 * on >= 82544. We tell the upper layers that they can queue a lot
184 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
185 * of them at a time.
186 *
187 * We allow up to 256 (!) DMA segments per packet. Pathological packet
188 * chains containing many small mbufs have been observed in zero-copy
189 * situations with jumbo frames.
190 */
191 #define WM_NTXSEGS 256
192 #define WM_IFQUEUELEN 256
193 #define WM_TXQUEUELEN_MAX 64
194 #define WM_TXQUEUELEN_MAX_82547 16
195 #define WM_TXQUEUELEN(sc) ((sc)->sc_txnum)
196 #define WM_TXQUEUELEN_MASK(sc) (WM_TXQUEUELEN(sc) - 1)
197 #define WM_TXQUEUE_GC(sc) (WM_TXQUEUELEN(sc) / 8)
198 #define WM_NTXDESC_82542 256
199 #define WM_NTXDESC_82544 4096
200 #define WM_NTXDESC(sc) ((sc)->sc_ntxdesc)
201 #define WM_NTXDESC_MASK(sc) (WM_NTXDESC(sc) - 1)
202 #define WM_TXDESCSIZE(sc) (WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
203 #define WM_NEXTTX(sc, x) (((x) + 1) & WM_NTXDESC_MASK(sc))
204 #define WM_NEXTTXS(sc, x) (((x) + 1) & WM_TXQUEUELEN_MASK(sc))
205
206 #define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */
207
208 /*
209 * Receive descriptor list size. We have one Rx buffer for normal
210 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
211 * packet. We allocate 256 receive descriptors, each with a 2k
212 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
213 */
214 #define WM_NRXDESC 256
215 #define WM_NRXDESC_MASK (WM_NRXDESC - 1)
216 #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
217 #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
218
219 /*
220 * Control structures are DMA'd to the i82542 chip. We allocate them in
221 * a single clump that maps to a single DMA segment to make several things
222 * easier.
223 */
224 struct wm_control_data_82544 {
225 /*
226 * The receive descriptors.
227 */
228 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
229
230 /*
231 * The transmit descriptors. Put these at the end, because
232 * we might use a smaller number of them.
233 */
234 union {
235 wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
236 nq_txdesc_t wcdu_nq_txdescs[WM_NTXDESC_82544];
237 } wdc_u;
238 };
239
240 struct wm_control_data_82542 {
241 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
242 wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
243 };
244
245 #define WM_CDOFF(x) offsetof(struct wm_control_data_82544, x)
246 #define WM_CDTXOFF(x) WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
247 #define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)])
248
249 /*
250 * Software state for transmit jobs.
251 */
252 struct wm_txsoft {
253 struct mbuf *txs_mbuf; /* head of our mbuf chain */
254 bus_dmamap_t txs_dmamap; /* our DMA map */
255 int txs_firstdesc; /* first descriptor in packet */
256 int txs_lastdesc; /* last descriptor in packet */
257 int txs_ndesc; /* # of descriptors used */
258 };
259
260 /*
261 * Software state for receive buffers. Each descriptor gets a
262 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
263 * more than one buffer, we chain them together.
264 */
265 struct wm_rxsoft {
266 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
267 bus_dmamap_t rxs_dmamap; /* our DMA map */
268 };
269
270 #define WM_LINKUP_TIMEOUT 50
271
272 static uint16_t swfwphysem[] = {
273 SWFW_PHY0_SM,
274 SWFW_PHY1_SM,
275 SWFW_PHY2_SM,
276 SWFW_PHY3_SM
277 };
278
279 static const uint32_t wm_82580_rxpbs_table[] = {
280 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
281 };
282
283 /*
284 * Software state per device.
285 */
286 struct wm_softc {
287 device_t sc_dev; /* generic device information */
288 bus_space_tag_t sc_st; /* bus space tag */
289 bus_space_handle_t sc_sh; /* bus space handle */
290 bus_size_t sc_ss; /* bus space size */
291 bus_space_tag_t sc_iot; /* I/O space tag */
292 bus_space_handle_t sc_ioh; /* I/O space handle */
293 bus_size_t sc_ios; /* I/O space size */
294 bus_space_tag_t sc_flasht; /* flash registers space tag */
295 bus_space_handle_t sc_flashh; /* flash registers space handle */
296 bus_size_t sc_flashs; /* flash registers space size */
297 bus_dma_tag_t sc_dmat; /* bus DMA tag */
298
299 struct ethercom sc_ethercom; /* ethernet common data */
300 struct mii_data sc_mii; /* MII/media information */
301
302 pci_chipset_tag_t sc_pc;
303 pcitag_t sc_pcitag;
304 int sc_bus_speed; /* PCI/PCIX bus speed */
305 int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */
306
307 uint16_t sc_pcidevid; /* PCI device ID */
308 wm_chip_type sc_type; /* MAC type */
309 int sc_rev; /* MAC revision */
310 wm_phy_type sc_phytype; /* PHY type */
311 uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/
312 #define WM_MEDIATYPE_UNKNOWN 0x00
313 #define WM_MEDIATYPE_FIBER 0x01
314 #define WM_MEDIATYPE_COPPER 0x02
315 #define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */
316 int sc_funcid; /* unit number of the chip (0 to 3) */
317 int sc_flags; /* flags; see below */
318 int sc_if_flags; /* last if_flags */
319 int sc_flowflags; /* 802.3x flow control flags */
320 int sc_align_tweak;
321
322 void *sc_ihs[WM_MAX_NINTR]; /*
323 * interrupt cookie.
324 * legacy and msi use sc_ihs[0].
325 */
326 pci_intr_handle_t *sc_intrs; /* legacy and msi use sc_intrs[0] */
327 int sc_nintrs; /* number of interrupts */
328
329 callout_t sc_tick_ch; /* tick callout */
330 bool sc_stopping;
331
332 int sc_nvm_ver_major;
333 int sc_nvm_ver_minor;
334 int sc_nvm_addrbits; /* NVM address bits */
335 unsigned int sc_nvm_wordsize; /* NVM word size */
336 int sc_ich8_flash_base;
337 int sc_ich8_flash_bank_size;
338 int sc_nvm_k1_enabled;
339
340 /* Software state for the transmit and receive descriptors. */
341 int sc_txnum; /* must be a power of two */
342 struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
343 struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
344
345 /* Control data structures. */
346 int sc_ntxdesc; /* must be a power of two */
347 struct wm_control_data_82544 *sc_control_data;
348 bus_dmamap_t sc_cddmamap; /* control data DMA map */
349 bus_dma_segment_t sc_cd_seg; /* control data segment */
350 int sc_cd_rseg; /* real number of control segment */
351 size_t sc_cd_size; /* control data size */
352 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
353 #define sc_txdescs sc_control_data->wdc_u.wcdu_txdescs
354 #define sc_nq_txdescs sc_control_data->wdc_u.wcdu_nq_txdescs
355 #define sc_rxdescs sc_control_data->wcd_rxdescs
356
357 #ifdef WM_EVENT_COUNTERS
358 /* Event counters. */
359 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
360 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
361 struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
362 struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */
363 struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */
364 struct evcnt sc_ev_rxintr; /* Rx interrupts */
365 struct evcnt sc_ev_linkintr; /* Link interrupts */
366
367 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
368 struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */
369 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
370 struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */
371 struct evcnt sc_ev_txtusum6; /* TCP/UDP v6 cksums comp. out-bound */
372 struct evcnt sc_ev_txtso; /* TCP seg offload out-bound (IPv4) */
373 struct evcnt sc_ev_txtso6; /* TCP seg offload out-bound (IPv6) */
374 struct evcnt sc_ev_txtsopain; /* painful header manip. for TSO */
375
376 struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
377 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */
378
379 struct evcnt sc_ev_tu; /* Tx underrun */
380
381 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */
382 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */
383 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */
384 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */
385 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */
386 #endif /* WM_EVENT_COUNTERS */
387
388 bus_addr_t sc_tdt_reg; /* offset of TDT register */
389
390 int sc_txfree; /* number of free Tx descriptors */
391 int sc_txnext; /* next ready Tx descriptor */
392
393 int sc_txsfree; /* number of free Tx jobs */
394 int sc_txsnext; /* next free Tx job */
395 int sc_txsdirty; /* dirty Tx jobs */
396
397 /* These 5 variables are used only on the 82547. */
398 int sc_txfifo_size; /* Tx FIFO size */
399 int sc_txfifo_head; /* current head of FIFO */
400 uint32_t sc_txfifo_addr; /* internal address of start of FIFO */
401 int sc_txfifo_stall; /* Tx FIFO is stalled */
402 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */
403
404 bus_addr_t sc_rdt_reg; /* offset of RDT register */
405
406 int sc_rxptr; /* next ready Rx descriptor/queue ent */
407 int sc_rxdiscard;
408 int sc_rxlen;
409 struct mbuf *sc_rxhead;
410 struct mbuf *sc_rxtail;
411 struct mbuf **sc_rxtailp;
412
413 uint32_t sc_ctrl; /* prototype CTRL register */
414 #if 0
415 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
416 #endif
417 uint32_t sc_icr; /* prototype interrupt bits */
418 uint32_t sc_itr; /* prototype intr throttling reg */
419 uint32_t sc_tctl; /* prototype TCTL register */
420 uint32_t sc_rctl; /* prototype RCTL register */
421 uint32_t sc_txcw; /* prototype TXCW register */
422 uint32_t sc_tipg; /* prototype TIPG register */
423 uint32_t sc_fcrtl; /* prototype FCRTL register */
424 uint32_t sc_pba; /* prototype PBA register */
425
426 int sc_tbi_linkup; /* TBI link status */
427 int sc_tbi_serdes_anegticks; /* autonegotiation ticks */
428 int sc_tbi_serdes_ticks; /* tbi ticks */
429
430 int sc_mchash_type; /* multicast filter offset */
431
432 krndsource_t rnd_source; /* random source */
433
434 kmutex_t *sc_tx_lock; /* lock for tx operations */
435 kmutex_t *sc_rx_lock; /* lock for rx operations */
436 };
437
438 #define WM_TX_LOCK(_sc) if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
439 #define WM_TX_UNLOCK(_sc) if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
440 #define WM_TX_LOCKED(_sc) (!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
441 #define WM_RX_LOCK(_sc) if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
442 #define WM_RX_UNLOCK(_sc) if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
443 #define WM_RX_LOCKED(_sc) (!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
444 #define WM_BOTH_LOCK(_sc) do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
445 #define WM_BOTH_UNLOCK(_sc) do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
446 #define WM_BOTH_LOCKED(_sc) (WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
447
448 #ifdef WM_MPSAFE
449 #define CALLOUT_FLAGS CALLOUT_MPSAFE
450 #else
451 #define CALLOUT_FLAGS 0
452 #endif
453
454 #define WM_RXCHAIN_RESET(sc) \
455 do { \
456 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
457 *(sc)->sc_rxtailp = NULL; \
458 (sc)->sc_rxlen = 0; \
459 } while (/*CONSTCOND*/0)
460
461 #define WM_RXCHAIN_LINK(sc, m) \
462 do { \
463 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
464 (sc)->sc_rxtailp = &(m)->m_next; \
465 } while (/*CONSTCOND*/0)
466
467 #ifdef WM_EVENT_COUNTERS
468 #define WM_EVCNT_INCR(ev) (ev)->ev_count++
469 #define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val)
470 #else
471 #define WM_EVCNT_INCR(ev) /* nothing */
472 #define WM_EVCNT_ADD(ev, val) /* nothing */
473 #endif
474
475 #define CSR_READ(sc, reg) \
476 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
477 #define CSR_WRITE(sc, reg, val) \
478 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
479 #define CSR_WRITE_FLUSH(sc) \
480 (void) CSR_READ((sc), WMREG_STATUS)
481
482 #define ICH8_FLASH_READ32(sc, reg) \
483 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
484 #define ICH8_FLASH_WRITE32(sc, reg, data) \
485 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
486
487 #define ICH8_FLASH_READ16(sc, reg) \
488 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
489 #define ICH8_FLASH_WRITE16(sc, reg, data) \
490 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
491
492 #define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x)))
493 #define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x)))
494
495 #define WM_CDTXADDR_LO(sc, x) (WM_CDTXADDR((sc), (x)) & 0xffffffffU)
496 #define WM_CDTXADDR_HI(sc, x) \
497 (sizeof(bus_addr_t) == 8 ? \
498 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
499
500 #define WM_CDRXADDR_LO(sc, x) (WM_CDRXADDR((sc), (x)) & 0xffffffffU)
501 #define WM_CDRXADDR_HI(sc, x) \
502 (sizeof(bus_addr_t) == 8 ? \
503 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
504
505 #define WM_CDTXSYNC(sc, x, n, ops) \
506 do { \
507 int __x, __n; \
508 \
509 __x = (x); \
510 __n = (n); \
511 \
512 /* If it will wrap around, sync to the end of the ring. */ \
513 if ((__x + __n) > WM_NTXDESC(sc)) { \
514 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
515 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
516 (WM_NTXDESC(sc) - __x), (ops)); \
517 __n -= (WM_NTXDESC(sc) - __x); \
518 __x = 0; \
519 } \
520 \
521 /* Now sync whatever is left. */ \
522 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
523 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
524 } while (/*CONSTCOND*/0)
525
526 #define WM_CDRXSYNC(sc, x, ops) \
527 do { \
528 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
529 WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
530 } while (/*CONSTCOND*/0)
531
532 #define WM_INIT_RXDESC(sc, x) \
533 do { \
534 struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
535 wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
536 struct mbuf *__m = __rxs->rxs_mbuf; \
537 \
538 /* \
539 * Note: We scoot the packet forward 2 bytes in the buffer \
540 * so that the payload after the Ethernet header is aligned \
541 * to a 4-byte boundary. \
542 * \
543 * XXX BRAINDAMAGE ALERT! \
544 * The stupid chip uses the same size for every buffer, which \
545 * is set in the Receive Control register. We are using the 2K \
546 * size option, but what we REALLY want is (2K - 2)! For this \
547 * reason, we can't "scoot" packets longer than the standard \
548 * Ethernet MTU. On strict-alignment platforms, if the total \
549 * size exceeds (2K - 2) we set align_tweak to 0 and let \
550 * the upper layer copy the headers. \
551 */ \
552 __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
553 \
554 wm_set_dma_addr(&__rxd->wrx_addr, \
555 __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
556 __rxd->wrx_len = 0; \
557 __rxd->wrx_cksum = 0; \
558 __rxd->wrx_status = 0; \
559 __rxd->wrx_errors = 0; \
560 __rxd->wrx_special = 0; \
561 WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
562 \
563 CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
564 } while (/*CONSTCOND*/0)
565
566 /*
567 * Register read/write functions.
568 * Other than CSR_{READ|WRITE}().
569 */
570 #if 0
571 static inline uint32_t wm_io_read(struct wm_softc *, int);
572 #endif
573 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
574 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
575 uint32_t, uint32_t);
576 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
577
578 /*
579 * Device driver interface functions and commonly used functions.
580 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
581 */
582 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
583 static int wm_match(device_t, cfdata_t, void *);
584 static void wm_attach(device_t, device_t, void *);
585 static int wm_detach(device_t, int);
586 static bool wm_suspend(device_t, const pmf_qual_t *);
587 static bool wm_resume(device_t, const pmf_qual_t *);
588 static void wm_watchdog(struct ifnet *);
589 static void wm_tick(void *);
590 static int wm_ifflags_cb(struct ethercom *);
591 static int wm_ioctl(struct ifnet *, u_long, void *);
592 /* MAC address related */
593 static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
594 static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
595 static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
596 static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
597 static void wm_set_filter(struct wm_softc *);
598 /* Reset and init related */
599 static void wm_set_vlan(struct wm_softc *);
600 static void wm_set_pcie_completion_timeout(struct wm_softc *);
601 static void wm_get_auto_rd_done(struct wm_softc *);
602 static void wm_lan_init_done(struct wm_softc *);
603 static void wm_get_cfg_done(struct wm_softc *);
604 static void wm_initialize_hardware_bits(struct wm_softc *);
605 static uint32_t wm_rxpbs_adjust_82580(uint32_t);
606 static void wm_reset(struct wm_softc *);
607 static int wm_add_rxbuf(struct wm_softc *, int);
608 static void wm_rxdrain(struct wm_softc *);
609 static int wm_init(struct ifnet *);
610 static int wm_init_locked(struct ifnet *);
611 static void wm_stop(struct ifnet *, int);
612 static void wm_stop_locked(struct ifnet *, int);
613 static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
614 uint32_t *, uint8_t *);
615 static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
616 static void wm_82547_txfifo_stall(void *);
617 static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
618 /* Start */
619 static void wm_start(struct ifnet *);
620 static void wm_start_locked(struct ifnet *);
621 static int wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
622 uint32_t *, uint32_t *, bool *);
623 static void wm_nq_start(struct ifnet *);
624 static void wm_nq_start_locked(struct ifnet *);
625 /* Interrupt */
626 static int wm_txeof(struct wm_softc *);
627 static void wm_rxeof(struct wm_softc *);
628 static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
629 static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
630 static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
631 static void wm_linkintr(struct wm_softc *, uint32_t);
632 static int wm_intr_legacy(void *);
633 #ifdef WM_MSI_MSIX
634 static int wm_txintr_msix(void *);
635 static int wm_rxintr_msix(void *);
636 static int wm_linkintr_msix(void *);
637 #endif
638
639 /*
640 * Media related.
641 * GMII, SGMII, TBI, SERDES and SFP.
642 */
643 /* Common */
644 static void wm_tbi_serdes_set_linkled(struct wm_softc *);
645 /* GMII related */
646 static void wm_gmii_reset(struct wm_softc *);
647 static int wm_get_phy_id_82575(struct wm_softc *);
648 static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
649 static int wm_gmii_mediachange(struct ifnet *);
650 static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
651 static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
652 static uint32_t wm_i82543_mii_recvbits(struct wm_softc *);
653 static int wm_gmii_i82543_readreg(device_t, int, int);
654 static void wm_gmii_i82543_writereg(device_t, int, int, int);
655 static int wm_gmii_i82544_readreg(device_t, int, int);
656 static void wm_gmii_i82544_writereg(device_t, int, int, int);
657 static int wm_gmii_i80003_readreg(device_t, int, int);
658 static void wm_gmii_i80003_writereg(device_t, int, int, int);
659 static int wm_gmii_bm_readreg(device_t, int, int);
660 static void wm_gmii_bm_writereg(device_t, int, int, int);
661 static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
662 static int wm_gmii_hv_readreg(device_t, int, int);
663 static void wm_gmii_hv_writereg(device_t, int, int, int);
664 static int wm_gmii_82580_readreg(device_t, int, int);
665 static void wm_gmii_82580_writereg(device_t, int, int, int);
666 static int wm_gmii_gs40g_readreg(device_t, int, int);
667 static void wm_gmii_gs40g_writereg(device_t, int, int, int);
668 static void wm_gmii_statchg(struct ifnet *);
669 static int wm_kmrn_readreg(struct wm_softc *, int);
670 static void wm_kmrn_writereg(struct wm_softc *, int, int);
671 /* SGMII */
672 static bool wm_sgmii_uses_mdio(struct wm_softc *);
673 static int wm_sgmii_readreg(device_t, int, int);
674 static void wm_sgmii_writereg(device_t, int, int, int);
675 /* TBI related */
676 static void wm_tbi_mediainit(struct wm_softc *);
677 static int wm_tbi_mediachange(struct ifnet *);
678 static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
679 static int wm_check_for_link(struct wm_softc *);
680 static void wm_tbi_tick(struct wm_softc *);
681 /* SERDES related */
682 static void wm_serdes_power_up_link_82575(struct wm_softc *);
683 static int wm_serdes_mediachange(struct ifnet *);
684 static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
685 static void wm_serdes_tick(struct wm_softc *);
686 /* SFP related */
687 static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
688 static uint32_t wm_sfp_get_media_type(struct wm_softc *);
689
690 /*
691 * NVM related.
692 * Microwire, SPI (w/wo EERD) and Flash.
693 */
694 /* Misc functions */
695 static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
696 static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
697 static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
698 /* Microwire */
699 static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
700 /* SPI */
701 static int wm_nvm_ready_spi(struct wm_softc *);
702 static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
703 /* Using with EERD */
704 static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
705 static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
706 /* Flash */
707 static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
708 unsigned int *);
709 static int32_t wm_ich8_cycle_init(struct wm_softc *);
710 static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
711 static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
712 uint16_t *);
713 static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
714 static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
715 static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
716 /* iNVM */
717 static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
718 static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
719 /* Lock, detecting NVM type, validate checksum and read */
720 static int wm_nvm_acquire(struct wm_softc *);
721 static void wm_nvm_release(struct wm_softc *);
722 static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
723 static int wm_nvm_get_flash_presence_i210(struct wm_softc *);
724 static int wm_nvm_validate_checksum(struct wm_softc *);
725 static void wm_nvm_version(struct wm_softc *);
726 static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
727
728 /*
729 * Hardware semaphores.
730 * Very complexed...
731 */
732 static int wm_get_swsm_semaphore(struct wm_softc *);
733 static void wm_put_swsm_semaphore(struct wm_softc *);
734 static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
735 static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
736 static int wm_get_swfwhw_semaphore(struct wm_softc *);
737 static void wm_put_swfwhw_semaphore(struct wm_softc *);
738 static int wm_get_hw_semaphore_82573(struct wm_softc *);
739 static void wm_put_hw_semaphore_82573(struct wm_softc *);
740
741 /*
742 * Management mode and power management related subroutines.
743 * BMC, AMT, suspend/resume and EEE.
744 */
745 static int wm_check_mng_mode(struct wm_softc *);
746 static int wm_check_mng_mode_ich8lan(struct wm_softc *);
747 static int wm_check_mng_mode_82574(struct wm_softc *);
748 static int wm_check_mng_mode_generic(struct wm_softc *);
749 static int wm_enable_mng_pass_thru(struct wm_softc *);
750 static int wm_check_reset_block(struct wm_softc *);
751 static void wm_get_hw_control(struct wm_softc *);
752 static void wm_release_hw_control(struct wm_softc *);
753 static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
754 static void wm_smbustopci(struct wm_softc *);
755 static void wm_init_manageability(struct wm_softc *);
756 static void wm_release_manageability(struct wm_softc *);
757 static void wm_get_wakeup(struct wm_softc *);
758 #ifdef WM_WOL
759 static void wm_enable_phy_wakeup(struct wm_softc *);
760 static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
761 static void wm_enable_wakeup(struct wm_softc *);
762 #endif
763 /* EEE */
764 static void wm_set_eee_i350(struct wm_softc *);
765
766 /*
767 * Workarounds (mainly PHY related).
768 * Basically, PHY's workarounds are in the PHY drivers.
769 */
770 static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
771 static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
772 static void wm_hv_phy_workaround_ich8lan(struct wm_softc *);
773 static void wm_lv_phy_workaround_ich8lan(struct wm_softc *);
774 static void wm_k1_gig_workaround_hv(struct wm_softc *, int);
775 static void wm_set_mdio_slow_mode_hv(struct wm_softc *);
776 static void wm_configure_k1_ich8lan(struct wm_softc *, int);
777 static void wm_reset_init_script_82575(struct wm_softc *);
778 static void wm_reset_mdicnfg_82580(struct wm_softc *);
779 static void wm_pll_workaround_i210(struct wm_softc *);
780
781 #ifdef WM_MSI_MSIX
782 struct _msix_matrix {
783 const char *intrname;
784 int(*func)(void *);
785 int intridx;
786 int cpuid;
787 } msix_matrix[WM_MSIX_NINTR] = {
788 { "TX", wm_txintr_msix, WM_MSIX_TXINTR_IDX, WM_MSIX_TXINTR_CPUID },
789 { "RX", wm_rxintr_msix, WM_MSIX_RXINTR_IDX, WM_MSIX_RXINTR_CPUID },
790 { "LINK", wm_linkintr_msix, WM_MSIX_LINKINTR_IDX,
791 WM_MSIX_LINKINTR_CPUID },
792 };
793 #endif
794
795 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
796 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
797
798 /*
799 * Devices supported by this driver.
800 */
801 static const struct wm_product {
802 pci_vendor_id_t wmp_vendor;
803 pci_product_id_t wmp_product;
804 const char *wmp_name;
805 wm_chip_type wmp_type;
806 uint32_t wmp_flags;
807 #define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN
808 #define WMP_F_FIBER WM_MEDIATYPE_FIBER
809 #define WMP_F_COPPER WM_MEDIATYPE_COPPER
810 #define WMP_F_SERDES WM_MEDIATYPE_SERDES
811 #define WMP_MEDIATYPE(x) ((x) & 0x03)
812 } wm_products[] = {
813 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
814 "Intel i82542 1000BASE-X Ethernet",
815 WM_T_82542_2_1, WMP_F_FIBER },
816
817 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
818 "Intel i82543GC 1000BASE-X Ethernet",
819 WM_T_82543, WMP_F_FIBER },
820
821 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
822 "Intel i82543GC 1000BASE-T Ethernet",
823 WM_T_82543, WMP_F_COPPER },
824
825 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
826 "Intel i82544EI 1000BASE-T Ethernet",
827 WM_T_82544, WMP_F_COPPER },
828
829 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
830 "Intel i82544EI 1000BASE-X Ethernet",
831 WM_T_82544, WMP_F_FIBER },
832
833 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
834 "Intel i82544GC 1000BASE-T Ethernet",
835 WM_T_82544, WMP_F_COPPER },
836
837 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
838 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
839 WM_T_82544, WMP_F_COPPER },
840
841 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
842 "Intel i82540EM 1000BASE-T Ethernet",
843 WM_T_82540, WMP_F_COPPER },
844
845 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
846 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
847 WM_T_82540, WMP_F_COPPER },
848
849 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
850 "Intel i82540EP 1000BASE-T Ethernet",
851 WM_T_82540, WMP_F_COPPER },
852
853 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
854 "Intel i82540EP 1000BASE-T Ethernet",
855 WM_T_82540, WMP_F_COPPER },
856
857 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
858 "Intel i82540EP 1000BASE-T Ethernet",
859 WM_T_82540, WMP_F_COPPER },
860
861 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
862 "Intel i82545EM 1000BASE-T Ethernet",
863 WM_T_82545, WMP_F_COPPER },
864
865 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
866 "Intel i82545GM 1000BASE-T Ethernet",
867 WM_T_82545_3, WMP_F_COPPER },
868
869 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
870 "Intel i82545GM 1000BASE-X Ethernet",
871 WM_T_82545_3, WMP_F_FIBER },
872
873 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
874 "Intel i82545GM Gigabit Ethernet (SERDES)",
875 WM_T_82545_3, WMP_F_SERDES },
876
877 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
878 "Intel i82546EB 1000BASE-T Ethernet",
879 WM_T_82546, WMP_F_COPPER },
880
881 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
882 "Intel i82546EB 1000BASE-T Ethernet",
883 WM_T_82546, WMP_F_COPPER },
884
885 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
886 "Intel i82545EM 1000BASE-X Ethernet",
887 WM_T_82545, WMP_F_FIBER },
888
889 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
890 "Intel i82546EB 1000BASE-X Ethernet",
891 WM_T_82546, WMP_F_FIBER },
892
893 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
894 "Intel i82546GB 1000BASE-T Ethernet",
895 WM_T_82546_3, WMP_F_COPPER },
896
897 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
898 "Intel i82546GB 1000BASE-X Ethernet",
899 WM_T_82546_3, WMP_F_FIBER },
900
901 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
902 "Intel i82546GB Gigabit Ethernet (SERDES)",
903 WM_T_82546_3, WMP_F_SERDES },
904
905 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
906 "i82546GB quad-port Gigabit Ethernet",
907 WM_T_82546_3, WMP_F_COPPER },
908
909 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
910 "i82546GB quad-port Gigabit Ethernet (KSP3)",
911 WM_T_82546_3, WMP_F_COPPER },
912
913 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
914 "Intel PRO/1000MT (82546GB)",
915 WM_T_82546_3, WMP_F_COPPER },
916
917 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
918 "Intel i82541EI 1000BASE-T Ethernet",
919 WM_T_82541, WMP_F_COPPER },
920
921 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
922 "Intel i82541ER (LOM) 1000BASE-T Ethernet",
923 WM_T_82541, WMP_F_COPPER },
924
925 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
926 "Intel i82541EI Mobile 1000BASE-T Ethernet",
927 WM_T_82541, WMP_F_COPPER },
928
929 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
930 "Intel i82541ER 1000BASE-T Ethernet",
931 WM_T_82541_2, WMP_F_COPPER },
932
933 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
934 "Intel i82541GI 1000BASE-T Ethernet",
935 WM_T_82541_2, WMP_F_COPPER },
936
937 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
938 "Intel i82541GI Mobile 1000BASE-T Ethernet",
939 WM_T_82541_2, WMP_F_COPPER },
940
941 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
942 "Intel i82541PI 1000BASE-T Ethernet",
943 WM_T_82541_2, WMP_F_COPPER },
944
945 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
946 "Intel i82547EI 1000BASE-T Ethernet",
947 WM_T_82547, WMP_F_COPPER },
948
949 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
950 "Intel i82547EI Mobile 1000BASE-T Ethernet",
951 WM_T_82547, WMP_F_COPPER },
952
953 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
954 "Intel i82547GI 1000BASE-T Ethernet",
955 WM_T_82547_2, WMP_F_COPPER },
956
957 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
958 "Intel PRO/1000 PT (82571EB)",
959 WM_T_82571, WMP_F_COPPER },
960
961 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
962 "Intel PRO/1000 PF (82571EB)",
963 WM_T_82571, WMP_F_FIBER },
964
965 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
966 "Intel PRO/1000 PB (82571EB)",
967 WM_T_82571, WMP_F_SERDES },
968
969 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
970 "Intel PRO/1000 QT (82571EB)",
971 WM_T_82571, WMP_F_COPPER },
972
973 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
974 "Intel PRO/1000 PT Quad Port Server Adapter",
975 WM_T_82571, WMP_F_COPPER, },
976
977 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
978 "Intel Gigabit PT Quad Port Server ExpressModule",
979 WM_T_82571, WMP_F_COPPER, },
980
981 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
982 "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
983 WM_T_82571, WMP_F_SERDES, },
984
985 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
986 "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
987 WM_T_82571, WMP_F_SERDES, },
988
989 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
990 "Intel 82571EB Quad 1000baseX Ethernet",
991 WM_T_82571, WMP_F_FIBER, },
992
993 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
994 "Intel i82572EI 1000baseT Ethernet",
995 WM_T_82572, WMP_F_COPPER },
996
997 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
998 "Intel i82572EI 1000baseX Ethernet",
999 WM_T_82572, WMP_F_FIBER },
1000
1001 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
1002 "Intel i82572EI Gigabit Ethernet (SERDES)",
1003 WM_T_82572, WMP_F_SERDES },
1004
1005 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
1006 "Intel i82572EI 1000baseT Ethernet",
1007 WM_T_82572, WMP_F_COPPER },
1008
1009 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
1010 "Intel i82573E",
1011 WM_T_82573, WMP_F_COPPER },
1012
1013 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
1014 "Intel i82573E IAMT",
1015 WM_T_82573, WMP_F_COPPER },
1016
1017 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
1018 "Intel i82573L Gigabit Ethernet",
1019 WM_T_82573, WMP_F_COPPER },
1020
1021 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
1022 "Intel i82574L",
1023 WM_T_82574, WMP_F_COPPER },
1024
1025 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA,
1026 "Intel i82574L",
1027 WM_T_82574, WMP_F_COPPER },
1028
1029 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
1030 "Intel i82583V",
1031 WM_T_82583, WMP_F_COPPER },
1032
1033 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1034 "i80003 dual 1000baseT Ethernet",
1035 WM_T_80003, WMP_F_COPPER },
1036
1037 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1038 "i80003 dual 1000baseX Ethernet",
1039 WM_T_80003, WMP_F_COPPER },
1040
1041 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1042 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1043 WM_T_80003, WMP_F_SERDES },
1044
1045 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1046 "Intel i80003 1000baseT Ethernet",
1047 WM_T_80003, WMP_F_COPPER },
1048
1049 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1050 "Intel i80003 Gigabit Ethernet (SERDES)",
1051 WM_T_80003, WMP_F_SERDES },
1052
1053 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
1054 "Intel i82801H (M_AMT) LAN Controller",
1055 WM_T_ICH8, WMP_F_COPPER },
1056 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
1057 "Intel i82801H (AMT) LAN Controller",
1058 WM_T_ICH8, WMP_F_COPPER },
1059 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
1060 "Intel i82801H LAN Controller",
1061 WM_T_ICH8, WMP_F_COPPER },
1062 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1063 "Intel i82801H (IFE) LAN Controller",
1064 WM_T_ICH8, WMP_F_COPPER },
1065 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
1066 "Intel i82801H (M) LAN Controller",
1067 WM_T_ICH8, WMP_F_COPPER },
1068 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
1069 "Intel i82801H IFE (GT) LAN Controller",
1070 WM_T_ICH8, WMP_F_COPPER },
1071 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
1072 "Intel i82801H IFE (G) LAN Controller",
1073 WM_T_ICH8, WMP_F_COPPER },
1074 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1075 "82801I (AMT) LAN Controller",
1076 WM_T_ICH9, WMP_F_COPPER },
1077 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
1078 "82801I LAN Controller",
1079 WM_T_ICH9, WMP_F_COPPER },
1080 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
1081 "82801I (G) LAN Controller",
1082 WM_T_ICH9, WMP_F_COPPER },
1083 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
1084 "82801I (GT) LAN Controller",
1085 WM_T_ICH9, WMP_F_COPPER },
1086 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
1087 "82801I (C) LAN Controller",
1088 WM_T_ICH9, WMP_F_COPPER },
1089 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
1090 "82801I mobile LAN Controller",
1091 WM_T_ICH9, WMP_F_COPPER },
1092 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
1093 "82801I mobile (V) LAN Controller",
1094 WM_T_ICH9, WMP_F_COPPER },
1095 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1096 "82801I mobile (AMT) LAN Controller",
1097 WM_T_ICH9, WMP_F_COPPER },
1098 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
1099 "82567LM-4 LAN Controller",
1100 WM_T_ICH9, WMP_F_COPPER },
1101 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
1102 "82567V-3 LAN Controller",
1103 WM_T_ICH9, WMP_F_COPPER },
1104 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1105 "82567LM-2 LAN Controller",
1106 WM_T_ICH10, WMP_F_COPPER },
1107 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1108 "82567LF-2 LAN Controller",
1109 WM_T_ICH10, WMP_F_COPPER },
1110 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1111 "82567LM-3 LAN Controller",
1112 WM_T_ICH10, WMP_F_COPPER },
1113 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1114 "82567LF-3 LAN Controller",
1115 WM_T_ICH10, WMP_F_COPPER },
1116 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
1117 "82567V-2 LAN Controller",
1118 WM_T_ICH10, WMP_F_COPPER },
1119 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
1120 "82567V-3? LAN Controller",
1121 WM_T_ICH10, WMP_F_COPPER },
1122 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
1123 "HANKSVILLE LAN Controller",
1124 WM_T_ICH10, WMP_F_COPPER },
1125 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
1126 "PCH LAN (82577LM) Controller",
1127 WM_T_PCH, WMP_F_COPPER },
1128 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
1129 "PCH LAN (82577LC) Controller",
1130 WM_T_PCH, WMP_F_COPPER },
1131 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
1132 "PCH LAN (82578DM) Controller",
1133 WM_T_PCH, WMP_F_COPPER },
1134 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
1135 "PCH LAN (82578DC) Controller",
1136 WM_T_PCH, WMP_F_COPPER },
1137 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
1138 "PCH2 LAN (82579LM) Controller",
1139 WM_T_PCH2, WMP_F_COPPER },
1140 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
1141 "PCH2 LAN (82579V) Controller",
1142 WM_T_PCH2, WMP_F_COPPER },
1143 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
1144 "82575EB dual-1000baseT Ethernet",
1145 WM_T_82575, WMP_F_COPPER },
1146 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1147 "82575EB dual-1000baseX Ethernet (SERDES)",
1148 WM_T_82575, WMP_F_SERDES },
1149 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1150 "82575GB quad-1000baseT Ethernet",
1151 WM_T_82575, WMP_F_COPPER },
1152 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1153 "82575GB quad-1000baseT Ethernet (PM)",
1154 WM_T_82575, WMP_F_COPPER },
1155 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
1156 "82576 1000BaseT Ethernet",
1157 WM_T_82576, WMP_F_COPPER },
1158 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
1159 "82576 1000BaseX Ethernet",
1160 WM_T_82576, WMP_F_FIBER },
1161
1162 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
1163 "82576 gigabit Ethernet (SERDES)",
1164 WM_T_82576, WMP_F_SERDES },
1165
1166 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1167 "82576 quad-1000BaseT Ethernet",
1168 WM_T_82576, WMP_F_COPPER },
1169
1170 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1171 "82576 Gigabit ET2 Quad Port Server Adapter",
1172 WM_T_82576, WMP_F_COPPER },
1173
1174 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
1175 "82576 gigabit Ethernet",
1176 WM_T_82576, WMP_F_COPPER },
1177
1178 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
1179 "82576 gigabit Ethernet (SERDES)",
1180 WM_T_82576, WMP_F_SERDES },
1181 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1182 "82576 quad-gigabit Ethernet (SERDES)",
1183 WM_T_82576, WMP_F_SERDES },
1184
1185 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
1186 "82580 1000BaseT Ethernet",
1187 WM_T_82580, WMP_F_COPPER },
1188 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
1189 "82580 1000BaseX Ethernet",
1190 WM_T_82580, WMP_F_FIBER },
1191
1192 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
1193 "82580 1000BaseT Ethernet (SERDES)",
1194 WM_T_82580, WMP_F_SERDES },
1195
1196 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
1197 "82580 gigabit Ethernet (SGMII)",
1198 WM_T_82580, WMP_F_COPPER },
1199 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1200 "82580 dual-1000BaseT Ethernet",
1201 WM_T_82580, WMP_F_COPPER },
1202
1203 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1204 "82580 quad-1000BaseX Ethernet",
1205 WM_T_82580, WMP_F_FIBER },
1206
1207 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1208 "DH89XXCC Gigabit Ethernet (SGMII)",
1209 WM_T_82580, WMP_F_COPPER },
1210
1211 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1212 "DH89XXCC Gigabit Ethernet (SERDES)",
1213 WM_T_82580, WMP_F_SERDES },
1214
1215 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1216 "DH89XXCC 1000BASE-KX Ethernet",
1217 WM_T_82580, WMP_F_SERDES },
1218
1219 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1220 "DH89XXCC Gigabit Ethernet (SFP)",
1221 WM_T_82580, WMP_F_SERDES },
1222
1223 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1224 "I350 Gigabit Network Connection",
1225 WM_T_I350, WMP_F_COPPER },
1226
1227 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1228 "I350 Gigabit Fiber Network Connection",
1229 WM_T_I350, WMP_F_FIBER },
1230
1231 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1232 "I350 Gigabit Backplane Connection",
1233 WM_T_I350, WMP_F_SERDES },
1234
1235 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
1236 "I350 Quad Port Gigabit Ethernet",
1237 WM_T_I350, WMP_F_SERDES },
1238
1239 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1240 "I350 Gigabit Connection",
1241 WM_T_I350, WMP_F_COPPER },
1242
1243 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
1244 "I354 Gigabit Ethernet (KX)",
1245 WM_T_I354, WMP_F_SERDES },
1246
1247 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1248 "I354 Gigabit Ethernet (SGMII)",
1249 WM_T_I354, WMP_F_COPPER },
1250
1251 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
1252 "I354 Gigabit Ethernet (2.5G)",
1253 WM_T_I354, WMP_F_COPPER },
1254
1255 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1256 "I210-T1 Ethernet Server Adapter",
1257 WM_T_I210, WMP_F_COPPER },
1258
1259 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1260 "I210 Ethernet (Copper OEM)",
1261 WM_T_I210, WMP_F_COPPER },
1262
1263 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1264 "I210 Ethernet (Copper IT)",
1265 WM_T_I210, WMP_F_COPPER },
1266
1267 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1268 "I210 Ethernet (FLASH less)",
1269 WM_T_I210, WMP_F_COPPER },
1270
1271 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1272 "I210 Gigabit Ethernet (Fiber)",
1273 WM_T_I210, WMP_F_FIBER },
1274
1275 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1276 "I210 Gigabit Ethernet (SERDES)",
1277 WM_T_I210, WMP_F_SERDES },
1278
1279 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1280 "I210 Gigabit Ethernet (FLASH less)",
1281 WM_T_I210, WMP_F_SERDES },
1282
1283 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1284 "I210 Gigabit Ethernet (SGMII)",
1285 WM_T_I210, WMP_F_COPPER },
1286
1287 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1288 "I211 Ethernet (COPPER)",
1289 WM_T_I211, WMP_F_COPPER },
1290 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1291 "I217 V Ethernet Connection",
1292 WM_T_PCH_LPT, WMP_F_COPPER },
1293 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1294 "I217 LM Ethernet Connection",
1295 WM_T_PCH_LPT, WMP_F_COPPER },
1296 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1297 "I218 V Ethernet Connection",
1298 WM_T_PCH_LPT, WMP_F_COPPER },
1299 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
1300 "I218 V Ethernet Connection",
1301 WM_T_PCH_LPT, WMP_F_COPPER },
1302 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
1303 "I218 V Ethernet Connection",
1304 WM_T_PCH_LPT, WMP_F_COPPER },
1305 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1306 "I218 LM Ethernet Connection",
1307 WM_T_PCH_LPT, WMP_F_COPPER },
1308 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
1309 "I218 LM Ethernet Connection",
1310 WM_T_PCH_LPT, WMP_F_COPPER },
1311 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
1312 "I218 LM Ethernet Connection",
1313 WM_T_PCH_LPT, WMP_F_COPPER },
1314 { 0, 0,
1315 NULL,
1316 0, 0 },
1317 };
1318
1319 #ifdef WM_EVENT_COUNTERS
1320 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
1321 #endif /* WM_EVENT_COUNTERS */
1322
1323
1324 /*
1325 * Register read/write functions.
1326 * Other than CSR_{READ|WRITE}().
1327 */
1328
1329 #if 0 /* Not currently used */
1330 static inline uint32_t
1331 wm_io_read(struct wm_softc *sc, int reg)
1332 {
1333
1334 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1335 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1336 }
1337 #endif
1338
1339 static inline void
1340 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1341 {
1342
1343 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1344 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1345 }
1346
1347 static inline void
1348 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1349 uint32_t data)
1350 {
1351 uint32_t regval;
1352 int i;
1353
1354 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1355
1356 CSR_WRITE(sc, reg, regval);
1357
1358 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1359 delay(5);
1360 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1361 break;
1362 }
1363 if (i == SCTL_CTL_POLL_TIMEOUT) {
1364 aprint_error("%s: WARNING:"
1365 " i82575 reg 0x%08x setup did not indicate ready\n",
1366 device_xname(sc->sc_dev), reg);
1367 }
1368 }
1369
1370 static inline void
1371 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1372 {
1373 wa->wa_low = htole32(v & 0xffffffffU);
1374 if (sizeof(bus_addr_t) == 8)
1375 wa->wa_high = htole32((uint64_t) v >> 32);
1376 else
1377 wa->wa_high = 0;
1378 }
1379
1380 /*
1381 * Device driver interface functions and commonly used functions.
1382 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1383 */
1384
1385 /* Lookup supported device table */
1386 static const struct wm_product *
1387 wm_lookup(const struct pci_attach_args *pa)
1388 {
1389 const struct wm_product *wmp;
1390
1391 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1392 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1393 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1394 return wmp;
1395 }
1396 return NULL;
1397 }
1398
1399 /* The match function (ca_match) */
1400 static int
1401 wm_match(device_t parent, cfdata_t cf, void *aux)
1402 {
1403 struct pci_attach_args *pa = aux;
1404
1405 if (wm_lookup(pa) != NULL)
1406 return 1;
1407
1408 return 0;
1409 }
1410
1411 /* The attach function (ca_attach) */
1412 static void
1413 wm_attach(device_t parent, device_t self, void *aux)
1414 {
1415 struct wm_softc *sc = device_private(self);
1416 struct pci_attach_args *pa = aux;
1417 prop_dictionary_t dict;
1418 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1419 pci_chipset_tag_t pc = pa->pa_pc;
1420 #ifndef WM_MSI_MSIX
1421 pci_intr_handle_t ih;
1422 #else
1423 int counts[PCI_INTR_TYPE_SIZE];
1424 pci_intr_type_t max_type;
1425 #endif
1426 const char *intrstr = NULL;
1427 const char *eetype, *xname;
1428 bus_space_tag_t memt;
1429 bus_space_handle_t memh;
1430 bus_size_t memsize;
1431 int memh_valid;
1432 int i, error;
1433 const struct wm_product *wmp;
1434 prop_data_t ea;
1435 prop_number_t pn;
1436 uint8_t enaddr[ETHER_ADDR_LEN];
1437 uint16_t cfg1, cfg2, swdpin, nvmword;
1438 pcireg_t preg, memtype;
1439 uint16_t eeprom_data, apme_mask;
1440 bool force_clear_smbi;
1441 uint32_t link_mode;
1442 uint32_t reg;
1443 char intrbuf[PCI_INTRSTR_LEN];
1444
1445 sc->sc_dev = self;
1446 callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1447 sc->sc_stopping = false;
1448
1449 wmp = wm_lookup(pa);
1450 #ifdef DIAGNOSTIC
1451 if (wmp == NULL) {
1452 printf("\n");
1453 panic("wm_attach: impossible");
1454 }
1455 #endif
1456 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1457
1458 sc->sc_pc = pa->pa_pc;
1459 sc->sc_pcitag = pa->pa_tag;
1460
1461 if (pci_dma64_available(pa))
1462 sc->sc_dmat = pa->pa_dmat64;
1463 else
1464 sc->sc_dmat = pa->pa_dmat;
1465
1466 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1467 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1468 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1469
1470 sc->sc_type = wmp->wmp_type;
1471 if (sc->sc_type < WM_T_82543) {
1472 if (sc->sc_rev < 2) {
1473 aprint_error_dev(sc->sc_dev,
1474 "i82542 must be at least rev. 2\n");
1475 return;
1476 }
1477 if (sc->sc_rev < 3)
1478 sc->sc_type = WM_T_82542_2_0;
1479 }
1480
1481 /*
1482 * Disable MSI for Errata:
1483 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1484 *
1485 * 82544: Errata 25
1486 * 82540: Errata 6 (easy to reproduce device timeout)
1487 * 82545: Errata 4 (easy to reproduce device timeout)
1488 * 82546: Errata 26 (easy to reproduce device timeout)
1489 * 82541: Errata 7 (easy to reproduce device timeout)
1490 *
1491 * "Byte Enables 2 and 3 are not set on MSI writes"
1492 *
1493 * 82571 & 82572: Errata 63
1494 */
1495 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1496 || (sc->sc_type == WM_T_82572))
1497 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1498
1499 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1500 || (sc->sc_type == WM_T_82580)
1501 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1502 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1503 sc->sc_flags |= WM_F_NEWQUEUE;
1504
1505 /* Set device properties (mactype) */
1506 dict = device_properties(sc->sc_dev);
1507 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1508
1509 /*
1510 * Map the device. All devices support memory-mapped acccess,
1511 * and it is really required for normal operation.
1512 */
1513 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1514 switch (memtype) {
1515 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1516 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1517 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1518 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1519 break;
1520 default:
1521 memh_valid = 0;
1522 break;
1523 }
1524
1525 if (memh_valid) {
1526 sc->sc_st = memt;
1527 sc->sc_sh = memh;
1528 sc->sc_ss = memsize;
1529 } else {
1530 aprint_error_dev(sc->sc_dev,
1531 "unable to map device registers\n");
1532 return;
1533 }
1534
1535 /*
1536 * In addition, i82544 and later support I/O mapped indirect
1537 * register access. It is not desirable (nor supported in
1538 * this driver) to use it for normal operation, though it is
1539 * required to work around bugs in some chip versions.
1540 */
1541 if (sc->sc_type >= WM_T_82544) {
1542 /* First we have to find the I/O BAR. */
1543 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1544 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1545 if (memtype == PCI_MAPREG_TYPE_IO)
1546 break;
1547 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1548 PCI_MAPREG_MEM_TYPE_64BIT)
1549 i += 4; /* skip high bits, too */
1550 }
1551 if (i < PCI_MAPREG_END) {
1552 /*
1553 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1554 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO BAR.
1555 			 * That's not a problem, because those newer chips
1556 			 * don't have this bug.
1557 			 *
1558 			 * The i8254x apparently doesn't respond when the
1559 			 * I/O BAR is 0, which looks as if it hasn't been
1560 			 * configured.
1561 */
1562 preg = pci_conf_read(pc, pa->pa_tag, i);
1563 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1564 aprint_error_dev(sc->sc_dev,
1565 "WARNING: I/O BAR at zero.\n");
1566 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1567 0, &sc->sc_iot, &sc->sc_ioh,
1568 NULL, &sc->sc_ios) == 0) {
1569 sc->sc_flags |= WM_F_IOH_VALID;
1570 } else {
1571 aprint_error_dev(sc->sc_dev,
1572 "WARNING: unable to map I/O space\n");
1573 }
1574 }
1575
1576 }
1577
1578 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1579 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1580 preg |= PCI_COMMAND_MASTER_ENABLE;
1581 if (sc->sc_type < WM_T_82542_2_1)
1582 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1583 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1584
1585 /* power up chip */
1586 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1587 NULL)) && error != EOPNOTSUPP) {
1588 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1589 return;
1590 }
1591
1592 #ifndef WM_MSI_MSIX
1593 /*
1594 * Map and establish our interrupt.
1595 */
1596 if (pci_intr_map(pa, &ih)) {
1597 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1598 return;
1599 }
1600 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
1601 #ifdef WM_MPSAFE
1602 pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
1603 #endif
1604 	sc->sc_ihs[0] = pci_intr_establish(pc, ih, IPL_NET, wm_intr_legacy, sc);
1605 if (sc->sc_ihs[0] == NULL) {
1606 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1607 if (intrstr != NULL)
1608 aprint_error(" at %s", intrstr);
1609 aprint_error("\n");
1610 return;
1611 }
1612 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1613 sc->sc_nintrs = 1;
1614 #else /* WM_MSI_MSIX */
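	/*
	 * Interrupt allocation strategy: ask for MSI-X first; if
	 * establishing a handler fails, release the vectors and retry
	 * with the next weaker type (MSI-X -> MSI -> INTx) via the
	 * alloc_retry label below.
	 */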
1615 /* Allocation settings */
1616 max_type = PCI_INTR_TYPE_MSIX;
1617 counts[PCI_INTR_TYPE_MSIX] = WM_MAX_NINTR;
1618 counts[PCI_INTR_TYPE_MSI] = 1;
1619 counts[PCI_INTR_TYPE_INTX] = 1;
1620
1621 alloc_retry:
1622 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1623 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1624 return;
1625 }
1626
1627 if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1628 void *vih;
1629 kcpuset_t *affinity;
1630
1631 kcpuset_create(&affinity, false);
1632
1633 for (i = 0; i < WM_MSIX_NINTR; i++) {
1634 intrstr = pci_intr_string(pc,
1635 sc->sc_intrs[msix_matrix[i].intridx], intrbuf,
1636 sizeof(intrbuf));
1637 #ifdef WM_MPSAFE
1638 pci_intr_setattr(pc,
1639 &sc->sc_intrs[msix_matrix[i].intridx],
1640 PCI_INTR_MPSAFE, true);
1641 #endif
1642 vih = pci_intr_establish(pc,
1643 sc->sc_intrs[msix_matrix[i].intridx], IPL_NET,
1644 msix_matrix[i].func, sc);
1645 if (vih == NULL) {
1646 aprint_error_dev(sc->sc_dev,
1647 				    "unable to establish MSI-X (for %s)%s%s\n",
1648 msix_matrix[i].intrname,
1649 intrstr ? " at " : "",
1650 intrstr ? intrstr : "");
1651 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1652 WM_MSIX_NINTR);
1653 kcpuset_destroy(affinity);
1654
1655 /* Setup for MSI: Disable MSI-X */
1656 max_type = PCI_INTR_TYPE_MSI;
1657 counts[PCI_INTR_TYPE_MSI] = 1;
1658 counts[PCI_INTR_TYPE_INTX] = 1;
1659 goto alloc_retry;
1660 }
1661 kcpuset_zero(affinity);
1662 /* Round-robin affinity */
1663 kcpuset_set(affinity, msix_matrix[i].cpuid % ncpu);
1664 error = pci_intr_distribute(vih, affinity, NULL);
1665 if (error == 0) {
1666 aprint_normal_dev(sc->sc_dev,
1667 				    "for %s interrupting at %s affinity to %u\n",
1668 				    msix_matrix[i].intrname, intrstr, msix_matrix[i].cpuid % ncpu);
1669 } else {
1670 aprint_normal_dev(sc->sc_dev,
1671 				    "for %s interrupting at %s\n", msix_matrix[i].intrname, intrstr);
1672 }
1673 sc->sc_ihs[msix_matrix[i].intridx] = vih;
1674 }
1675
1676 sc->sc_nintrs = WM_MSIX_NINTR;
1677 kcpuset_destroy(affinity);
1678 } else {
1679 /* MSI or INTx */
1680 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
1681 sizeof(intrbuf));
1682 #ifdef WM_MPSAFE
1683 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
1684 #endif
1685 sc->sc_ihs[0] = pci_intr_establish(pc, sc->sc_intrs[0],
1686 IPL_NET, wm_intr_legacy, sc);
1687 if (sc->sc_ihs[0] == NULL) {
1688 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
1689 (pci_intr_type(sc->sc_intrs[0])
1690 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
1691 pci_intr_release(sc->sc_pc, sc->sc_intrs, 1);
1692 switch (pci_intr_type(sc->sc_intrs[0])) {
1693 case PCI_INTR_TYPE_MSI:
1694 /* The next try is for INTx: Disable MSI */
1695 max_type = PCI_INTR_TYPE_INTX;
1696 counts[PCI_INTR_TYPE_INTX] = 1;
1697 goto alloc_retry;
1698 case PCI_INTR_TYPE_INTX:
1699 default:
1700 return;
1701 }
1702 }
1703 aprint_normal_dev(sc->sc_dev, "%s at %s\n",
1704 (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI)
1705 ? "MSI" : "interrupting", intrstr);
1706
1707 sc->sc_nintrs = 1;
1708 }
1709 #endif /* WM_MSI_MSIX */
1710
1711 /*
1712 * Check the function ID (unit number of the chip).
1713 */
1714 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1715 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1716 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1717 || (sc->sc_type == WM_T_82580)
1718 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1719 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1720 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1721 else
1722 sc->sc_funcid = 0;
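	/*
	 * On multi-port devices the function ID tells us which LAN port
	 * this instance is; it is used below to select per-port NVM words
	 * such as NVM_OFF_CFG3_PORTA/PORTB.
	 */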
1723
1724 /*
1725 * Determine a few things about the bus we're connected to.
1726 */
1727 if (sc->sc_type < WM_T_82543) {
1728 /* We don't really know the bus characteristics here. */
1729 sc->sc_bus_speed = 33;
1730 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1731 /*
1732 		 * CSA (Communication Streaming Architecture) is about as fast
1733 		 * as a 32-bit, 66MHz PCI bus.
1734 */
1735 sc->sc_flags |= WM_F_CSA;
1736 sc->sc_bus_speed = 66;
1737 aprint_verbose_dev(sc->sc_dev,
1738 "Communication Streaming Architecture\n");
1739 if (sc->sc_type == WM_T_82547) {
1740 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1741 callout_setfunc(&sc->sc_txfifo_ch,
1742 wm_82547_txfifo_stall, sc);
1743 aprint_verbose_dev(sc->sc_dev,
1744 "using 82547 Tx FIFO stall work-around\n");
1745 }
1746 } else if (sc->sc_type >= WM_T_82571) {
1747 sc->sc_flags |= WM_F_PCIE;
1748 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1749 && (sc->sc_type != WM_T_ICH10)
1750 && (sc->sc_type != WM_T_PCH)
1751 && (sc->sc_type != WM_T_PCH2)
1752 && (sc->sc_type != WM_T_PCH_LPT)) {
1753 /* ICH* and PCH* have no PCIe capability registers */
1754 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1755 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1756 NULL) == 0)
1757 aprint_error_dev(sc->sc_dev,
1758 "unable to find PCIe capability\n");
1759 }
1760 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1761 } else {
1762 reg = CSR_READ(sc, WMREG_STATUS);
1763 if (reg & STATUS_BUS64)
1764 sc->sc_flags |= WM_F_BUS64;
1765 if ((reg & STATUS_PCIX_MODE) != 0) {
1766 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1767
1768 sc->sc_flags |= WM_F_PCIX;
1769 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1770 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1771 aprint_error_dev(sc->sc_dev,
1772 "unable to find PCIX capability\n");
1773 else if (sc->sc_type != WM_T_82545_3 &&
1774 sc->sc_type != WM_T_82546_3) {
1775 /*
1776 * Work around a problem caused by the BIOS
1777 * setting the max memory read byte count
1778 * incorrectly.
1779 */
1780 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1781 sc->sc_pcixe_capoff + PCIX_CMD);
1782 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1783 sc->sc_pcixe_capoff + PCIX_STATUS);
1784
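				/*
				 * MMRBC is encoded as a power of two:
				 * 0 -> 512, 1 -> 1024, 2 -> 2048 and
				 * 3 -> 4096 bytes, hence the "512 << n"
				 * in the message below.
				 */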
1785 bytecnt =
1786 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1787 PCIX_CMD_BYTECNT_SHIFT;
1788 maxb =
1789 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1790 PCIX_STATUS_MAXB_SHIFT;
1791 if (bytecnt > maxb) {
1792 aprint_verbose_dev(sc->sc_dev,
1793 "resetting PCI-X MMRBC: %d -> %d\n",
1794 512 << bytecnt, 512 << maxb);
1795 pcix_cmd = (pcix_cmd &
1796 ~PCIX_CMD_BYTECNT_MASK) |
1797 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1798 pci_conf_write(pa->pa_pc, pa->pa_tag,
1799 sc->sc_pcixe_capoff + PCIX_CMD,
1800 pcix_cmd);
1801 }
1802 }
1803 }
1804 /*
1805 * The quad port adapter is special; it has a PCIX-PCIX
1806 * bridge on the board, and can run the secondary bus at
1807 * a higher speed.
1808 */
1809 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1810 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1811 : 66;
1812 } else if (sc->sc_flags & WM_F_PCIX) {
1813 switch (reg & STATUS_PCIXSPD_MASK) {
1814 case STATUS_PCIXSPD_50_66:
1815 sc->sc_bus_speed = 66;
1816 break;
1817 case STATUS_PCIXSPD_66_100:
1818 sc->sc_bus_speed = 100;
1819 break;
1820 case STATUS_PCIXSPD_100_133:
1821 sc->sc_bus_speed = 133;
1822 break;
1823 default:
1824 aprint_error_dev(sc->sc_dev,
1825 "unknown PCIXSPD %d; assuming 66MHz\n",
1826 reg & STATUS_PCIXSPD_MASK);
1827 sc->sc_bus_speed = 66;
1828 break;
1829 }
1830 } else
1831 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1832 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1833 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1834 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1835 }
1836
1837 /*
1838 * Allocate the control data structures, and create and load the
1839 * DMA map for it.
1840 *
1841 * NOTE: All Tx descriptors must be in the same 4G segment of
1842 * memory. So must Rx descriptors. We simplify by allocating
1843 * both sets within the same 4G segment.
1844 */
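	/*
	 * The 4G constraint is enforced by the 0x100000000 boundary
	 * argument to bus_dmamem_alloc() below: the allocated segment
	 * may never cross a 4G line.
	 */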
1845 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1846 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1847 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1848 sizeof(struct wm_control_data_82542) :
1849 sizeof(struct wm_control_data_82544);
1850 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1851 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1852 &sc->sc_cd_rseg, 0)) != 0) {
1853 aprint_error_dev(sc->sc_dev,
1854 "unable to allocate control data, error = %d\n",
1855 error);
1856 goto fail_0;
1857 }
1858
1859 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1860 sc->sc_cd_rseg, sc->sc_cd_size,
1861 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1862 aprint_error_dev(sc->sc_dev,
1863 "unable to map control data, error = %d\n", error);
1864 goto fail_1;
1865 }
1866
1867 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1868 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1869 aprint_error_dev(sc->sc_dev,
1870 "unable to create control data DMA map, error = %d\n",
1871 error);
1872 goto fail_2;
1873 }
1874
1875 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1876 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1877 aprint_error_dev(sc->sc_dev,
1878 "unable to load control data DMA map, error = %d\n",
1879 error);
1880 goto fail_3;
1881 }
1882
1883 /* Create the transmit buffer DMA maps. */
1884 WM_TXQUEUELEN(sc) =
1885 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1886 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1887 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1888 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1889 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1890 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1891 aprint_error_dev(sc->sc_dev,
1892 "unable to create Tx DMA map %d, error = %d\n",
1893 i, error);
1894 goto fail_4;
1895 }
1896 }
1897
1898 /* Create the receive buffer DMA maps. */
1899 for (i = 0; i < WM_NRXDESC; i++) {
1900 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1901 MCLBYTES, 0, 0,
1902 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1903 aprint_error_dev(sc->sc_dev,
1904 "unable to create Rx DMA map %d error = %d\n",
1905 i, error);
1906 goto fail_5;
1907 }
1908 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1909 }
1910
1911 /* clear interesting stat counters */
1912 CSR_READ(sc, WMREG_COLC);
1913 CSR_READ(sc, WMREG_RXERRC);
1914
1915 /* get PHY control from SMBus to PCIe */
1916 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1917 || (sc->sc_type == WM_T_PCH_LPT))
1918 wm_smbustopci(sc);
1919
1920 /* Reset the chip to a known state. */
1921 wm_reset(sc);
1922
1923 /* Get some information about the EEPROM. */
1924 switch (sc->sc_type) {
1925 case WM_T_82542_2_0:
1926 case WM_T_82542_2_1:
1927 case WM_T_82543:
1928 case WM_T_82544:
1929 /* Microwire */
1930 sc->sc_nvm_wordsize = 64;
1931 sc->sc_nvm_addrbits = 6;
1932 break;
1933 case WM_T_82540:
1934 case WM_T_82545:
1935 case WM_T_82545_3:
1936 case WM_T_82546:
1937 case WM_T_82546_3:
1938 /* Microwire */
1939 reg = CSR_READ(sc, WMREG_EECD);
1940 if (reg & EECD_EE_SIZE) {
1941 sc->sc_nvm_wordsize = 256;
1942 sc->sc_nvm_addrbits = 8;
1943 } else {
1944 sc->sc_nvm_wordsize = 64;
1945 sc->sc_nvm_addrbits = 6;
1946 }
1947 sc->sc_flags |= WM_F_LOCK_EECD;
1948 break;
1949 case WM_T_82541:
1950 case WM_T_82541_2:
1951 case WM_T_82547:
1952 case WM_T_82547_2:
1953 sc->sc_flags |= WM_F_LOCK_EECD;
1954 reg = CSR_READ(sc, WMREG_EECD);
1955 if (reg & EECD_EE_TYPE) {
1956 /* SPI */
1957 sc->sc_flags |= WM_F_EEPROM_SPI;
1958 wm_nvm_set_addrbits_size_eecd(sc);
1959 } else {
1960 /* Microwire */
1961 if ((reg & EECD_EE_ABITS) != 0) {
1962 sc->sc_nvm_wordsize = 256;
1963 sc->sc_nvm_addrbits = 8;
1964 } else {
1965 sc->sc_nvm_wordsize = 64;
1966 sc->sc_nvm_addrbits = 6;
1967 }
1968 }
1969 break;
1970 case WM_T_82571:
1971 case WM_T_82572:
1972 /* SPI */
1973 sc->sc_flags |= WM_F_EEPROM_SPI;
1974 wm_nvm_set_addrbits_size_eecd(sc);
1975 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1976 break;
1977 case WM_T_82573:
1978 sc->sc_flags |= WM_F_LOCK_SWSM;
1979 /* FALLTHROUGH */
1980 case WM_T_82574:
1981 case WM_T_82583:
1982 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1983 sc->sc_flags |= WM_F_EEPROM_FLASH;
1984 sc->sc_nvm_wordsize = 2048;
1985 } else {
1986 /* SPI */
1987 sc->sc_flags |= WM_F_EEPROM_SPI;
1988 wm_nvm_set_addrbits_size_eecd(sc);
1989 }
1990 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1991 break;
1992 case WM_T_82575:
1993 case WM_T_82576:
1994 case WM_T_82580:
1995 case WM_T_I350:
1996 case WM_T_I354:
1997 case WM_T_80003:
1998 /* SPI */
1999 sc->sc_flags |= WM_F_EEPROM_SPI;
2000 wm_nvm_set_addrbits_size_eecd(sc);
2001 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
2002 | WM_F_LOCK_SWSM;
2003 break;
2004 case WM_T_ICH8:
2005 case WM_T_ICH9:
2006 case WM_T_ICH10:
2007 case WM_T_PCH:
2008 case WM_T_PCH2:
2009 case WM_T_PCH_LPT:
2010 /* FLASH */
2011 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2012 sc->sc_nvm_wordsize = 2048;
2013 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
2014 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2015 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2016 aprint_error_dev(sc->sc_dev,
2017 "can't map FLASH registers\n");
2018 goto fail_5;
2019 }
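		/*
		 * GFPREG describes the NVM region of the flash in sector
		 * units: the low half holds the first sector, the upper
		 * half the last one.  Convert that to bytes, then to the
		 * size of one of the two NVM banks in 16-bit words.
		 */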
2020 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2021 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2022 ICH_FLASH_SECTOR_SIZE;
2023 sc->sc_ich8_flash_bank_size =
2024 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2025 sc->sc_ich8_flash_bank_size -=
2026 (reg & ICH_GFPREG_BASE_MASK);
2027 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2028 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2029 break;
2030 case WM_T_I210:
2031 case WM_T_I211:
2032 if (wm_nvm_get_flash_presence_i210(sc)) {
2033 wm_nvm_set_addrbits_size_eecd(sc);
2034 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2035 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
2036 } else {
2037 sc->sc_nvm_wordsize = INVM_SIZE;
2038 sc->sc_flags |= WM_F_EEPROM_INVM;
2039 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
2040 }
2041 break;
2042 default:
2043 break;
2044 }
2045
2046 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2047 switch (sc->sc_type) {
2048 case WM_T_82571:
2049 case WM_T_82572:
2050 reg = CSR_READ(sc, WMREG_SWSM2);
2051 if ((reg & SWSM2_LOCK) == 0) {
2052 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2053 force_clear_smbi = true;
2054 } else
2055 force_clear_smbi = false;
2056 break;
2057 case WM_T_82573:
2058 case WM_T_82574:
2059 case WM_T_82583:
2060 force_clear_smbi = true;
2061 break;
2062 default:
2063 force_clear_smbi = false;
2064 break;
2065 }
2066 if (force_clear_smbi) {
2067 reg = CSR_READ(sc, WMREG_SWSM);
2068 if ((reg & SWSM_SMBI) != 0)
2069 aprint_error_dev(sc->sc_dev,
2070 "Please update the Bootagent\n");
2071 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2072 }
2073
2074 /*
2075 	 * Defer printing the EEPROM type until after verifying the checksum.
2076 * This allows the EEPROM type to be printed correctly in the case
2077 * that no EEPROM is attached.
2078 */
2079 /*
2080 * Validate the EEPROM checksum. If the checksum fails, flag
2081 * this for later, so we can fail future reads from the EEPROM.
2082 */
2083 if (wm_nvm_validate_checksum(sc)) {
2084 /*
2085 		 * Read it again, because some PCI-e parts fail the
2086 		 * first check due to the link being in a sleep state.
2087 */
2088 if (wm_nvm_validate_checksum(sc))
2089 sc->sc_flags |= WM_F_EEPROM_INVALID;
2090 }
2091
2092 /* Set device properties (macflags) */
2093 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2094
2095 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2096 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2097 else {
2098 aprint_verbose_dev(sc->sc_dev, "%u words ",
2099 sc->sc_nvm_wordsize);
2100 if (sc->sc_flags & WM_F_EEPROM_INVM)
2101 aprint_verbose("iNVM");
2102 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2103 aprint_verbose("FLASH(HW)");
2104 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2105 aprint_verbose("FLASH");
2106 else {
2107 if (sc->sc_flags & WM_F_EEPROM_SPI)
2108 eetype = "SPI";
2109 else
2110 eetype = "MicroWire";
2111 aprint_verbose("(%d address bits) %s EEPROM",
2112 sc->sc_nvm_addrbits, eetype);
2113 }
2114 }
2115 wm_nvm_version(sc);
2116 aprint_verbose("\n");
2117
2118 /* Check for I21[01] PLL workaround */
2119 if (sc->sc_type == WM_T_I210)
2120 sc->sc_flags |= WM_F_PLL_WA_I210;
2121 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2122 		/* NVM image release 3.25 includes the fix; skip the workaround */
2123 if ((sc->sc_nvm_ver_major > 3)
2124 || ((sc->sc_nvm_ver_major == 3)
2125 && (sc->sc_nvm_ver_minor >= 25)))
2126 			sc->sc_flags &= ~WM_F_PLL_WA_I210;
2127 else {
2128 aprint_verbose_dev(sc->sc_dev,
2129 "ROM image version %d.%d is older than 3.25\n",
2130 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2131 sc->sc_flags |= WM_F_PLL_WA_I210;
2132 }
2133 }
2134 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2135 wm_pll_workaround_i210(sc);
2136
2137 switch (sc->sc_type) {
2138 case WM_T_82571:
2139 case WM_T_82572:
2140 case WM_T_82573:
2141 case WM_T_82574:
2142 case WM_T_82583:
2143 case WM_T_80003:
2144 case WM_T_ICH8:
2145 case WM_T_ICH9:
2146 case WM_T_ICH10:
2147 case WM_T_PCH:
2148 case WM_T_PCH2:
2149 case WM_T_PCH_LPT:
2150 if (wm_check_mng_mode(sc) != 0)
2151 wm_get_hw_control(sc);
2152 break;
2153 default:
2154 break;
2155 }
2156 wm_get_wakeup(sc);
2157 /*
2158 	 * Read the Ethernet address from the EEPROM, unless it was
2159 	 * already found in the device properties.
2160 */
2161 ea = prop_dictionary_get(dict, "mac-address");
2162 if (ea != NULL) {
2163 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2164 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2165 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2166 } else {
2167 if (wm_read_mac_addr(sc, enaddr) != 0) {
2168 aprint_error_dev(sc->sc_dev,
2169 "unable to read Ethernet address\n");
2170 goto fail_5;
2171 }
2172 }
2173
2174 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2175 ether_sprintf(enaddr));
2176
2177 /*
2178 * Read the config info from the EEPROM, and set up various
2179 * bits in the control registers based on their contents.
2180 */
2181 pn = prop_dictionary_get(dict, "i82543-cfg1");
2182 if (pn != NULL) {
2183 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2184 cfg1 = (uint16_t) prop_number_integer_value(pn);
2185 } else {
2186 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2187 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2188 goto fail_5;
2189 }
2190 }
2191
2192 pn = prop_dictionary_get(dict, "i82543-cfg2");
2193 if (pn != NULL) {
2194 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2195 cfg2 = (uint16_t) prop_number_integer_value(pn);
2196 } else {
2197 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2198 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2199 goto fail_5;
2200 }
2201 }
2202
2203 /* check for WM_F_WOL */
2204 switch (sc->sc_type) {
2205 case WM_T_82542_2_0:
2206 case WM_T_82542_2_1:
2207 case WM_T_82543:
2208 /* dummy? */
2209 eeprom_data = 0;
2210 apme_mask = NVM_CFG3_APME;
2211 break;
2212 case WM_T_82544:
2213 apme_mask = NVM_CFG2_82544_APM_EN;
2214 eeprom_data = cfg2;
2215 break;
2216 case WM_T_82546:
2217 case WM_T_82546_3:
2218 case WM_T_82571:
2219 case WM_T_82572:
2220 case WM_T_82573:
2221 case WM_T_82574:
2222 case WM_T_82583:
2223 case WM_T_80003:
2224 default:
2225 apme_mask = NVM_CFG3_APME;
2226 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2227 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2228 break;
2229 case WM_T_82575:
2230 case WM_T_82576:
2231 case WM_T_82580:
2232 case WM_T_I350:
2233 case WM_T_I354: /* XXX ok? */
2234 case WM_T_ICH8:
2235 case WM_T_ICH9:
2236 case WM_T_ICH10:
2237 case WM_T_PCH:
2238 case WM_T_PCH2:
2239 case WM_T_PCH_LPT:
2240 /* XXX The funcid should be checked on some devices */
2241 apme_mask = WUC_APME;
2242 eeprom_data = CSR_READ(sc, WMREG_WUC);
2243 break;
2244 }
2245
2246 	/* Set WM_F_WOL if the APM wakeup bit was found set above */
2247 if ((eeprom_data & apme_mask) != 0)
2248 sc->sc_flags |= WM_F_WOL;
2249 #ifdef WM_DEBUG
2250 if ((sc->sc_flags & WM_F_WOL) != 0)
2251 printf("WOL\n");
2252 #endif
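	/*
	 * WM_F_WOL only records that APM wakeup is enabled in the NVM (or
	 * the WUC register); the wakeup machinery itself is armed by
	 * wm_enable_wakeup() at suspend time (under options WM_WOL).
	 */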
2253
2254 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2255 /* Check NVM for autonegotiation */
2256 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2257 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2258 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2259 }
2260 }
2261
2262 /*
2263 	 * XXX need special handling for some multi-port cards
2264 	 * to disable a particular port.
2265 */
2266
2267 if (sc->sc_type >= WM_T_82544) {
2268 pn = prop_dictionary_get(dict, "i82543-swdpin");
2269 if (pn != NULL) {
2270 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2271 swdpin = (uint16_t) prop_number_integer_value(pn);
2272 } else {
2273 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2274 aprint_error_dev(sc->sc_dev,
2275 "unable to read SWDPIN\n");
2276 goto fail_5;
2277 }
2278 }
2279 }
2280
2281 if (cfg1 & NVM_CFG1_ILOS)
2282 sc->sc_ctrl |= CTRL_ILOS;
2283
2284 /*
2285 * XXX
2286 	 * This code isn't correct, because pins 2 and 3 are located
2287 	 * at different positions on newer chips. Check all datasheets.
2288 	 *
2289 	 * Until this is resolved, apply it only to chips up to the 82580.
2290 */
2291 if (sc->sc_type <= WM_T_82580) {
2292 if (sc->sc_type >= WM_T_82544) {
2293 sc->sc_ctrl |=
2294 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2295 CTRL_SWDPIO_SHIFT;
2296 sc->sc_ctrl |=
2297 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2298 CTRL_SWDPINS_SHIFT;
2299 } else {
2300 sc->sc_ctrl |=
2301 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2302 CTRL_SWDPIO_SHIFT;
2303 }
2304 }
2305
2306 /* XXX For other than 82580? */
2307 if (sc->sc_type == WM_T_82580) {
2308 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2309 printf("CFG3 = %08x\n", (uint32_t)nvmword);
2310 if (nvmword & __BIT(13)) {
2311 printf("SET ILOS\n");
2312 sc->sc_ctrl |= CTRL_ILOS;
2313 }
2314 }
2315
2316 #if 0
2317 if (sc->sc_type >= WM_T_82544) {
2318 if (cfg1 & NVM_CFG1_IPS0)
2319 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2320 if (cfg1 & NVM_CFG1_IPS1)
2321 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2322 sc->sc_ctrl_ext |=
2323 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2324 CTRL_EXT_SWDPIO_SHIFT;
2325 sc->sc_ctrl_ext |=
2326 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2327 CTRL_EXT_SWDPINS_SHIFT;
2328 } else {
2329 sc->sc_ctrl_ext |=
2330 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2331 CTRL_EXT_SWDPIO_SHIFT;
2332 }
2333 #endif
2334
2335 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2336 #if 0
2337 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2338 #endif
2339
2340 /*
2341 * Set up some register offsets that are different between
2342 * the i82542 and the i82543 and later chips.
2343 */
2344 if (sc->sc_type < WM_T_82543) {
2345 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2346 sc->sc_tdt_reg = WMREG_OLD_TDT;
2347 } else {
2348 sc->sc_rdt_reg = WMREG_RDT;
2349 sc->sc_tdt_reg = WMREG_TDT;
2350 }
2351
2352 if (sc->sc_type == WM_T_PCH) {
2353 uint16_t val;
2354
2355 /* Save the NVM K1 bit setting */
2356 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2357
2358 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2359 sc->sc_nvm_k1_enabled = 1;
2360 else
2361 sc->sc_nvm_k1_enabled = 0;
2362 }
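	/*
	 * K1 is a power-saving state of the PHY interconnect on PCH
	 * devices; the NVM default saved above is consulted when the K1
	 * workarounds are applied later.
	 */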
2363
2364 /*
2365 * Determine if we're TBI,GMII or SGMII mode, and initialize the
2366 * media structures accordingly.
2367 */
2368 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2369 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2370 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2371 || sc->sc_type == WM_T_82573
2372 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2373 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2374 wm_gmii_mediainit(sc, wmp->wmp_product);
2375 } else if (sc->sc_type < WM_T_82543 ||
2376 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2377 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2378 aprint_error_dev(sc->sc_dev,
2379 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2380 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2381 }
2382 wm_tbi_mediainit(sc);
2383 } else {
2384 switch (sc->sc_type) {
2385 case WM_T_82575:
2386 case WM_T_82576:
2387 case WM_T_82580:
2388 case WM_T_I350:
2389 case WM_T_I354:
2390 case WM_T_I210:
2391 case WM_T_I211:
2392 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2393 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2394 switch (link_mode) {
2395 case CTRL_EXT_LINK_MODE_1000KX:
2396 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2397 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2398 break;
2399 case CTRL_EXT_LINK_MODE_SGMII:
2400 if (wm_sgmii_uses_mdio(sc)) {
2401 aprint_verbose_dev(sc->sc_dev,
2402 "SGMII(MDIO)\n");
2403 sc->sc_flags |= WM_F_SGMII;
2404 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2405 break;
2406 }
2407 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2408 /*FALLTHROUGH*/
2409 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2410 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2411 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2412 if (link_mode
2413 == CTRL_EXT_LINK_MODE_SGMII) {
2414 sc->sc_mediatype
2415 = WM_MEDIATYPE_COPPER;
2416 sc->sc_flags |= WM_F_SGMII;
2417 } else {
2418 sc->sc_mediatype
2419 = WM_MEDIATYPE_SERDES;
2420 aprint_verbose_dev(sc->sc_dev,
2421 "SERDES\n");
2422 }
2423 break;
2424 }
2425 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2426 aprint_verbose_dev(sc->sc_dev,
2427 "SERDES\n");
2428
2429 /* Change current link mode setting */
2430 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2431 switch (sc->sc_mediatype) {
2432 case WM_MEDIATYPE_COPPER:
2433 reg |= CTRL_EXT_LINK_MODE_SGMII;
2434 break;
2435 case WM_MEDIATYPE_SERDES:
2436 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2437 break;
2438 default:
2439 break;
2440 }
2441 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2442 break;
2443 case CTRL_EXT_LINK_MODE_GMII:
2444 default:
2445 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2446 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2447 break;
2448 }
2449
2450 			/* The I2C interface is only used in SGMII mode */
2451 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2452 				reg |= CTRL_EXT_I2C_ENA;
2453 			else
2454 				reg &= ~CTRL_EXT_I2C_ENA;
2455 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2456
2457 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2458 wm_gmii_mediainit(sc, wmp->wmp_product);
2459 else
2460 wm_tbi_mediainit(sc);
2461 break;
2462 default:
2463 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2464 aprint_error_dev(sc->sc_dev,
2465 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2466 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2467 wm_gmii_mediainit(sc, wmp->wmp_product);
2468 }
2469 }
2470
2471 ifp = &sc->sc_ethercom.ec_if;
2472 xname = device_xname(sc->sc_dev);
2473 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2474 ifp->if_softc = sc;
2475 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2476 ifp->if_ioctl = wm_ioctl;
2477 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2478 ifp->if_start = wm_nq_start;
2479 else
2480 ifp->if_start = wm_start;
2481 ifp->if_watchdog = wm_watchdog;
2482 ifp->if_init = wm_init;
2483 ifp->if_stop = wm_stop;
2484 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2485 IFQ_SET_READY(&ifp->if_snd);
2486
2487 /* Check for jumbo frame */
2488 switch (sc->sc_type) {
2489 case WM_T_82573:
2490 /* XXX limited to 9234 if ASPM is disabled */
2491 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2492 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2493 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2494 break;
2495 case WM_T_82571:
2496 case WM_T_82572:
2497 case WM_T_82574:
2498 case WM_T_82575:
2499 case WM_T_82576:
2500 case WM_T_82580:
2501 case WM_T_I350:
2502 case WM_T_I354: /* XXXX ok? */
2503 case WM_T_I210:
2504 case WM_T_I211:
2505 case WM_T_80003:
2506 case WM_T_ICH9:
2507 case WM_T_ICH10:
2508 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2509 case WM_T_PCH_LPT:
2510 /* XXX limited to 9234 */
2511 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2512 break;
2513 case WM_T_PCH:
2514 /* XXX limited to 4096 */
2515 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2516 break;
2517 case WM_T_82542_2_0:
2518 case WM_T_82542_2_1:
2519 case WM_T_82583:
2520 case WM_T_ICH8:
2521 /* No support for jumbo frame */
2522 break;
2523 default:
2524 /* ETHER_MAX_LEN_JUMBO */
2525 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2526 break;
2527 }
2528
2529 	/* If we're an i82543 or greater, we can support VLANs. */
2530 if (sc->sc_type >= WM_T_82543)
2531 sc->sc_ethercom.ec_capabilities |=
2532 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2533
2534 /*
2535 	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2536 * on i82543 and later.
2537 */
2538 if (sc->sc_type >= WM_T_82543) {
2539 ifp->if_capabilities |=
2540 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2541 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2542 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2543 IFCAP_CSUM_TCPv6_Tx |
2544 IFCAP_CSUM_UDPv6_Tx;
2545 }
2546
2547 /*
2548 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2549 *
2550 * 82541GI (8086:1076) ... no
2551 * 82572EI (8086:10b9) ... yes
2552 */
2553 if (sc->sc_type >= WM_T_82571) {
2554 ifp->if_capabilities |=
2555 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2556 }
2557
2558 /*
2559 	 * If we're an i82544 or greater (except the i82547), we can do
2560 * TCP segmentation offload.
2561 */
2562 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2563 ifp->if_capabilities |= IFCAP_TSOv4;
2564 }
2565
2566 if (sc->sc_type >= WM_T_82571) {
2567 ifp->if_capabilities |= IFCAP_TSOv6;
2568 }
2569
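	/*
	 * With WM_MPSAFE the Tx and Rx paths are guarded by their own
	 * mutexes; without it the driver relies on splnet() and the lock
	 * pointers stay NULL.
	 */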
2570 #ifdef WM_MPSAFE
2571 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2572 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2573 #else
2574 sc->sc_tx_lock = NULL;
2575 sc->sc_rx_lock = NULL;
2576 #endif
2577
2578 /* Attach the interface. */
2579 if_attach(ifp);
2580 ether_ifattach(ifp, enaddr);
2581 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2582 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2583 RND_FLAG_DEFAULT);
2584
2585 #ifdef WM_EVENT_COUNTERS
2586 /* Attach event counters. */
2587 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2588 NULL, xname, "txsstall");
2589 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2590 NULL, xname, "txdstall");
2591 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2592 NULL, xname, "txfifo_stall");
2593 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2594 NULL, xname, "txdw");
2595 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2596 NULL, xname, "txqe");
2597 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2598 NULL, xname, "rxintr");
2599 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2600 NULL, xname, "linkintr");
2601
2602 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2603 NULL, xname, "rxipsum");
2604 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2605 NULL, xname, "rxtusum");
2606 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2607 NULL, xname, "txipsum");
2608 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2609 NULL, xname, "txtusum");
2610 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2611 NULL, xname, "txtusum6");
2612
2613 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2614 NULL, xname, "txtso");
2615 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2616 NULL, xname, "txtso6");
2617 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2618 NULL, xname, "txtsopain");
2619
2620 for (i = 0; i < WM_NTXSEGS; i++) {
2621 snprintf(wm_txseg_evcnt_names[i],
2622 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2623 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2624 NULL, xname, wm_txseg_evcnt_names[i]);
2625 }
2626
2627 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2628 NULL, xname, "txdrop");
2629
2630 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2631 NULL, xname, "tu");
2632
2633 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2634 NULL, xname, "tx_xoff");
2635 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2636 NULL, xname, "tx_xon");
2637 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2638 NULL, xname, "rx_xoff");
2639 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2640 NULL, xname, "rx_xon");
2641 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2642 NULL, xname, "rx_macctl");
2643 #endif /* WM_EVENT_COUNTERS */
2644
2645 if (pmf_device_register(self, wm_suspend, wm_resume))
2646 pmf_class_network_register(self, ifp);
2647 else
2648 aprint_error_dev(self, "couldn't establish power handler\n");
2649
2650 sc->sc_flags |= WM_F_ATTACHED;
2651 return;
2652
2653 /*
2654 * Free any resources we've allocated during the failed attach
2655 * attempt. Do this in reverse order and fall through.
2656 */
2657 fail_5:
2658 for (i = 0; i < WM_NRXDESC; i++) {
2659 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2660 bus_dmamap_destroy(sc->sc_dmat,
2661 sc->sc_rxsoft[i].rxs_dmamap);
2662 }
2663 fail_4:
2664 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2665 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2666 bus_dmamap_destroy(sc->sc_dmat,
2667 sc->sc_txsoft[i].txs_dmamap);
2668 }
2669 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2670 fail_3:
2671 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2672 fail_2:
2673 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2674 sc->sc_cd_size);
2675 fail_1:
2676 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2677 fail_0:
2678 return;
2679 }
2680
2681 /* The detach function (ca_detach) */
2682 static int
2683 wm_detach(device_t self, int flags __unused)
2684 {
2685 struct wm_softc *sc = device_private(self);
2686 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2687 int i;
2688 #ifndef WM_MPSAFE
2689 int s;
2690 #endif
2691
2692 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2693 return 0;
2694
2695 #ifndef WM_MPSAFE
2696 s = splnet();
2697 #endif
2698 	/* Stop the interface.  The callouts are stopped inside wm_stop(). */
2699 wm_stop(ifp, 1);
2700
2701 #ifndef WM_MPSAFE
2702 splx(s);
2703 #endif
2704
2705 pmf_device_deregister(self);
2706
2707 /* Tell the firmware about the release */
2708 WM_BOTH_LOCK(sc);
2709 wm_release_manageability(sc);
2710 wm_release_hw_control(sc);
2711 WM_BOTH_UNLOCK(sc);
2712
2713 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2714
2715 /* Delete all remaining media. */
2716 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2717
2718 ether_ifdetach(ifp);
2719 if_detach(ifp);
2720
2721
2722 /* Unload RX dmamaps and free mbufs */
2723 WM_RX_LOCK(sc);
2724 wm_rxdrain(sc);
2725 WM_RX_UNLOCK(sc);
2726 /* Must unlock here */
2727
2728 	/* Free the DMA maps; this mirrors the failure path in wm_attach() */
2729 for (i = 0; i < WM_NRXDESC; i++) {
2730 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2731 bus_dmamap_destroy(sc->sc_dmat,
2732 sc->sc_rxsoft[i].rxs_dmamap);
2733 }
2734 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2735 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2736 bus_dmamap_destroy(sc->sc_dmat,
2737 sc->sc_txsoft[i].txs_dmamap);
2738 }
2739 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2740 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2741 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2742 sc->sc_cd_size);
2743 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2744
2745 /* Disestablish the interrupt handler */
2746 for (i = 0; i < sc->sc_nintrs; i++) {
2747 if (sc->sc_ihs[i] != NULL) {
2748 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2749 sc->sc_ihs[i] = NULL;
2750 }
2751 }
2752 #ifdef WM_MSI_MSIX
2753 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2754 #endif /* WM_MSI_MSIX */
2755
2756 /* Unmap the registers */
2757 if (sc->sc_ss) {
2758 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2759 sc->sc_ss = 0;
2760 }
2761 if (sc->sc_ios) {
2762 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2763 sc->sc_ios = 0;
2764 }
2765 if (sc->sc_flashs) {
2766 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2767 sc->sc_flashs = 0;
2768 }
2769
2770 if (sc->sc_tx_lock)
2771 mutex_obj_free(sc->sc_tx_lock);
2772 if (sc->sc_rx_lock)
2773 mutex_obj_free(sc->sc_rx_lock);
2774
2775 return 0;
2776 }
2777
2778 static bool
2779 wm_suspend(device_t self, const pmf_qual_t *qual)
2780 {
2781 struct wm_softc *sc = device_private(self);
2782
2783 wm_release_manageability(sc);
2784 wm_release_hw_control(sc);
2785 #ifdef WM_WOL
2786 wm_enable_wakeup(sc);
2787 #endif
2788
2789 return true;
2790 }
2791
2792 static bool
2793 wm_resume(device_t self, const pmf_qual_t *qual)
2794 {
2795 struct wm_softc *sc = device_private(self);
2796
2797 wm_init_manageability(sc);
2798
2799 return true;
2800 }
2801
2802 /*
2803 * wm_watchdog: [ifnet interface function]
2804 *
2805 * Watchdog timer handler.
2806 */
2807 static void
2808 wm_watchdog(struct ifnet *ifp)
2809 {
2810 struct wm_softc *sc = ifp->if_softc;
2811
2812 /*
2813 * Since we're using delayed interrupts, sweep up
2814 * before we report an error.
2815 */
2816 WM_TX_LOCK(sc);
2817 wm_txeof(sc);
2818 WM_TX_UNLOCK(sc);
2819
2820 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2821 #ifdef WM_DEBUG
2822 int i, j;
2823 struct wm_txsoft *txs;
2824 #endif
2825 log(LOG_ERR,
2826 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2827 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2828 sc->sc_txnext);
2829 ifp->if_oerrors++;
2830 #ifdef WM_DEBUG
2831 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2832 i = WM_NEXTTXS(sc, i)) {
2833 txs = &sc->sc_txsoft[i];
2834 printf("txs %d tx %d -> %d\n",
2835 i, txs->txs_firstdesc, txs->txs_lastdesc);
2836 for (j = txs->txs_firstdesc; ;
2837 j = WM_NEXTTX(sc, j)) {
2838 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2839 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2840 printf("\t %#08x%08x\n",
2841 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2842 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2843 if (j == txs->txs_lastdesc)
2844 break;
2845 }
2846 }
2847 #endif
2848 /* Reset the interface. */
2849 (void) wm_init(ifp);
2850 }
2851
2852 /* Try to get more packets going. */
2853 ifp->if_start(ifp);
2854 }
2855
2856 /*
2857 * wm_tick:
2858 *
2859 * One second timer, used to check link status, sweep up
2860 * completed transmit jobs, etc.
2861 */
2862 static void
2863 wm_tick(void *arg)
2864 {
2865 struct wm_softc *sc = arg;
2866 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2867 #ifndef WM_MPSAFE
2868 int s;
2869
2870 s = splnet();
2871 #endif
2872
2873 WM_TX_LOCK(sc);
2874
2875 if (sc->sc_stopping)
2876 goto out;
2877
2878 if (sc->sc_type >= WM_T_82542_2_1) {
2879 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2880 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2881 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2882 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2883 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2884 }
2885
2886 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2887 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2888 + CSR_READ(sc, WMREG_CRCERRS)
2889 + CSR_READ(sc, WMREG_ALGNERRC)
2890 + CSR_READ(sc, WMREG_SYMERRC)
2891 + CSR_READ(sc, WMREG_RXERRC)
2892 + CSR_READ(sc, WMREG_SEC)
2893 + CSR_READ(sc, WMREG_CEXTERR)
2894 + CSR_READ(sc, WMREG_RLEC);
2895 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2896
2897 if (sc->sc_flags & WM_F_HAS_MII)
2898 mii_tick(&sc->sc_mii);
2899 else if ((sc->sc_type >= WM_T_82575)
2900 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2901 wm_serdes_tick(sc);
2902 else
2903 wm_tbi_tick(sc);
2904
2905 out:
2906 WM_TX_UNLOCK(sc);
2907 #ifndef WM_MPSAFE
2908 splx(s);
2909 #endif
2910
2911 if (!sc->sc_stopping)
2912 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2913 }
2914
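/*
 * wm_ifflags_cb:
 *
 *	Callback for interface flag changes.  Returning ENETRESET asks
 *	the caller to reinitialize the interface; promiscuous/allmulti
 *	and VLAN changes are applied in place instead.
 */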
2915 static int
2916 wm_ifflags_cb(struct ethercom *ec)
2917 {
2918 struct ifnet *ifp = &ec->ec_if;
2919 struct wm_softc *sc = ifp->if_softc;
2920 int change = ifp->if_flags ^ sc->sc_if_flags;
2921 int rc = 0;
2922
2923 WM_BOTH_LOCK(sc);
2924
2925 if (change != 0)
2926 sc->sc_if_flags = ifp->if_flags;
2927
2928 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2929 rc = ENETRESET;
2930 goto out;
2931 }
2932
2933 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2934 wm_set_filter(sc);
2935
2936 wm_set_vlan(sc);
2937
2938 out:
2939 WM_BOTH_UNLOCK(sc);
2940
2941 return rc;
2942 }
2943
2944 /*
2945 * wm_ioctl: [ifnet interface function]
2946 *
2947 * Handle control requests from the operator.
2948 */
2949 static int
2950 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2951 {
2952 struct wm_softc *sc = ifp->if_softc;
2953 struct ifreq *ifr = (struct ifreq *) data;
2954 struct ifaddr *ifa = (struct ifaddr *)data;
2955 struct sockaddr_dl *sdl;
2956 int s, error;
2957
2958 #ifndef WM_MPSAFE
2959 s = splnet();
2960 #endif
2961 switch (cmd) {
2962 case SIOCSIFMEDIA:
2963 case SIOCGIFMEDIA:
2964 WM_BOTH_LOCK(sc);
2965 /* Flow control requires full-duplex mode. */
2966 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2967 (ifr->ifr_media & IFM_FDX) == 0)
2968 ifr->ifr_media &= ~IFM_ETH_FMASK;
2969 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2970 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2971 /* We can do both TXPAUSE and RXPAUSE. */
2972 ifr->ifr_media |=
2973 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2974 }
2975 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2976 }
2977 WM_BOTH_UNLOCK(sc);
2978 #ifdef WM_MPSAFE
2979 s = splnet();
2980 #endif
2981 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2982 #ifdef WM_MPSAFE
2983 splx(s);
2984 #endif
2985 break;
2986 case SIOCINITIFADDR:
2987 WM_BOTH_LOCK(sc);
2988 if (ifa->ifa_addr->sa_family == AF_LINK) {
2989 sdl = satosdl(ifp->if_dl->ifa_addr);
2990 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2991 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2992 /* unicast address is first multicast entry */
2993 wm_set_filter(sc);
2994 error = 0;
2995 WM_BOTH_UNLOCK(sc);
2996 break;
2997 }
2998 WM_BOTH_UNLOCK(sc);
2999 /*FALLTHROUGH*/
3000 default:
3001 #ifdef WM_MPSAFE
3002 s = splnet();
3003 #endif
3004 /* It may call wm_start, so unlock here */
3005 error = ether_ioctl(ifp, cmd, data);
3006 #ifdef WM_MPSAFE
3007 splx(s);
3008 #endif
3009 if (error != ENETRESET)
3010 break;
3011
3012 error = 0;
3013
3014 if (cmd == SIOCSIFCAP) {
3015 error = (*ifp->if_init)(ifp);
3016 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3017 ;
3018 else if (ifp->if_flags & IFF_RUNNING) {
3019 /*
3020 * Multicast list has changed; set the hardware filter
3021 * accordingly.
3022 */
3023 WM_BOTH_LOCK(sc);
3024 wm_set_filter(sc);
3025 WM_BOTH_UNLOCK(sc);
3026 }
3027 break;
3028 }
3029
3030 #ifndef WM_MPSAFE
3031 splx(s);
3032 #endif
3033 return error;
3034 }
3035
3036 /* MAC address related */
3037
3038 /*
3039  * Get the offset of the MAC address and return it.
3040  * If an error occurs, return offset 0.
3041 */
3042 static uint16_t
3043 wm_check_alt_mac_addr(struct wm_softc *sc)
3044 {
3045 uint16_t myea[ETHER_ADDR_LEN / 2];
3046 uint16_t offset = NVM_OFF_MACADDR;
3047
3048 /* Try to read alternative MAC address pointer */
3049 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3050 return 0;
3051
3052 	/* Check whether the pointer is valid. */
3053 if ((offset == 0x0000) || (offset == 0xffff))
3054 return 0;
3055
3056 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3057 /*
3058 	 * Check whether the alternative MAC address is valid. Some
3059 	 * cards have a non-0xffff pointer but don't actually use an
3060 	 * alternative MAC address.
3061 	 *
3062 	 * Check the multicast (group) bit; it must be clear.
3063 */
3064 if (wm_nvm_read(sc, offset, 1, myea) == 0)
3065 if (((myea[0] & 0xff) & 0x01) == 0)
3066 return offset; /* Found */
3067
3068 /* Not found */
3069 return 0;
3070 }
3071
3072 static int
3073 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3074 {
3075 uint16_t myea[ETHER_ADDR_LEN / 2];
3076 uint16_t offset = NVM_OFF_MACADDR;
3077 int do_invert = 0;
3078
3079 switch (sc->sc_type) {
3080 case WM_T_82580:
3081 case WM_T_I350:
3082 case WM_T_I354:
3083 /* EEPROM Top Level Partitioning */
3084 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3085 break;
3086 case WM_T_82571:
3087 case WM_T_82575:
3088 case WM_T_82576:
3089 case WM_T_80003:
3090 case WM_T_I210:
3091 case WM_T_I211:
3092 offset = wm_check_alt_mac_addr(sc);
3093 if (offset == 0)
3094 if ((sc->sc_funcid & 0x01) == 1)
3095 do_invert = 1;
3096 break;
3097 default:
3098 if ((sc->sc_funcid & 0x01) == 1)
3099 do_invert = 1;
3100 break;
3101 }
3102
3103 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
3104 myea) != 0)
3105 goto bad;
3106
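	/* Each 16-bit NVM word holds two address bytes, low byte first. */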
3107 enaddr[0] = myea[0] & 0xff;
3108 enaddr[1] = myea[0] >> 8;
3109 enaddr[2] = myea[1] & 0xff;
3110 enaddr[3] = myea[1] >> 8;
3111 enaddr[4] = myea[2] & 0xff;
3112 enaddr[5] = myea[2] >> 8;
3113
3114 /*
3115 * Toggle the LSB of the MAC address on the second port
3116 * of some dual port cards.
3117 */
3118 if (do_invert != 0)
3119 enaddr[5] ^= 1;
3120
3121 return 0;
3122
3123 bad:
3124 return -1;
3125 }
3126
3127 /*
3128 * wm_set_ral:
3129 *
3130  *	Set an entry in the receive address list.
3131 */
3132 static void
3133 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3134 {
3135 uint32_t ral_lo, ral_hi;
3136
3137 if (enaddr != NULL) {
3138 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3139 (enaddr[3] << 24);
3140 ral_hi = enaddr[4] | (enaddr[5] << 8);
3141 ral_hi |= RAL_AV;
3142 } else {
3143 ral_lo = 0;
3144 ral_hi = 0;
3145 }
3146
3147 if (sc->sc_type >= WM_T_82544) {
3148 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3149 ral_lo);
3150 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3151 ral_hi);
3152 } else {
3153 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3154 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3155 }
3156 }
3157
3158 /*
3159 * wm_mchash:
3160 *
3161  *	Compute the hash of the multicast address for the 4096-bit
3162  *	(1024-bit on ICH/PCH) multicast filter.
3163 */
3164 static uint32_t
3165 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3166 {
3167 static const int lo_shift[4] = { 4, 3, 2, 0 };
3168 static const int hi_shift[4] = { 4, 5, 6, 8 };
3169 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3170 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3171 uint32_t hash;
3172
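	/*
	 * The hash is formed from the top two bytes of the address; the
	 * shift tables above pick which 12 bits (10 on ICH/PCH, which
	 * have a smaller filter table) end up in the result, depending
	 * on sc_mchash_type.
	 */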
3173 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3174 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3175 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3176 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3177 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3178 return (hash & 0x3ff);
3179 }
3180 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3181 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3182
3183 return (hash & 0xfff);
3184 }
3185
3186 /*
3187 * wm_set_filter:
3188 *
3189 * Set up the receive filter.
3190 */
3191 static void
3192 wm_set_filter(struct wm_softc *sc)
3193 {
3194 struct ethercom *ec = &sc->sc_ethercom;
3195 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3196 struct ether_multi *enm;
3197 struct ether_multistep step;
3198 bus_addr_t mta_reg;
3199 uint32_t hash, reg, bit;
3200 int i, size;
3201
3202 if (sc->sc_type >= WM_T_82544)
3203 mta_reg = WMREG_CORDOVA_MTA;
3204 else
3205 mta_reg = WMREG_MTA;
3206
3207 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3208
3209 if (ifp->if_flags & IFF_BROADCAST)
3210 sc->sc_rctl |= RCTL_BAM;
3211 if (ifp->if_flags & IFF_PROMISC) {
3212 sc->sc_rctl |= RCTL_UPE;
3213 goto allmulti;
3214 }
3215
3216 /*
3217 * Set the station address in the first RAL slot, and
3218 * clear the remaining slots.
3219 */
3220 if (sc->sc_type == WM_T_ICH8)
3221 		size = WM_RAL_TABSIZE_ICH8 - 1;
3222 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3223 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3224 || (sc->sc_type == WM_T_PCH_LPT))
3225 size = WM_RAL_TABSIZE_ICH8;
3226 else if (sc->sc_type == WM_T_82575)
3227 size = WM_RAL_TABSIZE_82575;
3228 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3229 size = WM_RAL_TABSIZE_82576;
3230 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3231 size = WM_RAL_TABSIZE_I350;
3232 else
3233 size = WM_RAL_TABSIZE;
3234 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3235 for (i = 1; i < size; i++)
3236 wm_set_ral(sc, NULL, i);
3237
3238 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3239 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3240 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3241 size = WM_ICH8_MC_TABSIZE;
3242 else
3243 size = WM_MC_TABSIZE;
3244 /* Clear out the multicast table. */
3245 for (i = 0; i < size; i++)
3246 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3247
3248 ETHER_FIRST_MULTI(step, ec, enm);
3249 while (enm != NULL) {
3250 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3251 /*
3252 * We must listen to a range of multicast addresses.
3253 * For now, just accept all multicasts, rather than
3254 * trying to set only those filter bits needed to match
3255 * the range. (At this time, the only use of address
3256 * ranges is for IP multicast routing, for which the
3257 * range is big enough to require all bits set.)
3258 */
3259 goto allmulti;
3260 }
3261
3262 hash = wm_mchash(sc, enm->enm_addrlo);
3263
3264 reg = (hash >> 5);
3265 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3266 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3267 || (sc->sc_type == WM_T_PCH2)
3268 || (sc->sc_type == WM_T_PCH_LPT))
3269 reg &= 0x1f;
3270 else
3271 reg &= 0x7f;
3272 bit = hash & 0x1f;
3273
3274 hash = CSR_READ(sc, mta_reg + (reg << 2));
3275 hash |= 1U << bit;
3276
3277 		/* XXX Hardware bug?? 82544 seems to need the previous MTA dword rewritten */
3278 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3279 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3280 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3281 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3282 } else
3283 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3284
3285 ETHER_NEXT_MULTI(step, enm);
3286 }
3287
3288 ifp->if_flags &= ~IFF_ALLMULTI;
3289 goto setit;
3290
3291 allmulti:
3292 ifp->if_flags |= IFF_ALLMULTI;
3293 sc->sc_rctl |= RCTL_MPE;
3294
3295 setit:
3296 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3297 }
3298
3299 /* Reset and init related */
3300
3301 static void
3302 wm_set_vlan(struct wm_softc *sc)
3303 {
3304 /* Deal with VLAN enables. */
3305 if (VLAN_ATTACHED(&sc->sc_ethercom))
3306 sc->sc_ctrl |= CTRL_VME;
3307 else
3308 sc->sc_ctrl &= ~CTRL_VME;
3309
3310 /* Write the control registers. */
3311 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3312 }
3313
3314 static void
3315 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3316 {
3317 uint32_t gcr;
3318 pcireg_t ctrl2;
3319
3320 gcr = CSR_READ(sc, WMREG_GCR);
3321
3322 /* Only take action if timeout value is defaulted to 0 */
3323 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3324 goto out;
3325
3326 if ((gcr & GCR_CAP_VER2) == 0) {
3327 gcr |= GCR_CMPL_TMOUT_10MS;
3328 goto out;
3329 }
3330
3331 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3332 sc->sc_pcixe_capoff + PCIE_DCSR2);
3333 ctrl2 |= WM_PCIE_DCSR2_16MS;
3334 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3335 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3336
3337 out:
3338 /* Disable completion timeout resend */
3339 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3340
3341 CSR_WRITE(sc, WMREG_GCR, gcr);
3342 }
3343
3344 void
3345 wm_get_auto_rd_done(struct wm_softc *sc)
3346 {
3347 int i;
3348
3349 /* wait for eeprom to reload */
3350 switch (sc->sc_type) {
3351 case WM_T_82571:
3352 case WM_T_82572:
3353 case WM_T_82573:
3354 case WM_T_82574:
3355 case WM_T_82583:
3356 case WM_T_82575:
3357 case WM_T_82576:
3358 case WM_T_82580:
3359 case WM_T_I350:
3360 case WM_T_I354:
3361 case WM_T_I210:
3362 case WM_T_I211:
3363 case WM_T_80003:
3364 case WM_T_ICH8:
3365 case WM_T_ICH9:
3366 for (i = 0; i < 10; i++) {
3367 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3368 break;
3369 delay(1000);
3370 }
3371 if (i == 10) {
3372 log(LOG_ERR, "%s: auto read from eeprom failed to "
3373 "complete\n", device_xname(sc->sc_dev));
3374 }
3375 break;
3376 default:
3377 break;
3378 }
3379 }
3380
3381 void
3382 wm_lan_init_done(struct wm_softc *sc)
3383 {
3384 uint32_t reg = 0;
3385 int i;
3386
3387 /* wait for eeprom to reload */
3388 switch (sc->sc_type) {
3389 case WM_T_ICH10:
3390 case WM_T_PCH:
3391 case WM_T_PCH2:
3392 case WM_T_PCH_LPT:
3393 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3394 reg = CSR_READ(sc, WMREG_STATUS);
3395 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3396 break;
3397 delay(100);
3398 }
3399 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3400 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3401 "complete\n", device_xname(sc->sc_dev), __func__);
3402 }
3403 break;
3404 default:
3405 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3406 __func__);
3407 break;
3408 }
3409
3410 reg &= ~STATUS_LAN_INIT_DONE;
3411 CSR_WRITE(sc, WMREG_STATUS, reg);
3412 }
3413
3414 void
3415 wm_get_cfg_done(struct wm_softc *sc)
3416 {
3417 int mask;
3418 uint32_t reg;
3419 int i;
3420
3421 /* wait for eeprom to reload */
3422 switch (sc->sc_type) {
3423 case WM_T_82542_2_0:
3424 case WM_T_82542_2_1:
3425 /* null */
3426 break;
3427 case WM_T_82543:
3428 case WM_T_82544:
3429 case WM_T_82540:
3430 case WM_T_82545:
3431 case WM_T_82545_3:
3432 case WM_T_82546:
3433 case WM_T_82546_3:
3434 case WM_T_82541:
3435 case WM_T_82541_2:
3436 case WM_T_82547:
3437 case WM_T_82547_2:
3438 case WM_T_82573:
3439 case WM_T_82574:
3440 case WM_T_82583:
3441 /* generic */
3442 delay(10*1000);
3443 break;
3444 case WM_T_80003:
3445 case WM_T_82571:
3446 case WM_T_82572:
3447 case WM_T_82575:
3448 case WM_T_82576:
3449 case WM_T_82580:
3450 case WM_T_I350:
3451 case WM_T_I354:
3452 case WM_T_I210:
3453 case WM_T_I211:
3454 if (sc->sc_type == WM_T_82571) {
3455 /* Only 82571 shares port 0 */
3456 mask = EEMNGCTL_CFGDONE_0;
3457 } else
3458 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3459 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3460 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3461 break;
3462 delay(1000);
3463 }
3464 if (i >= WM_PHY_CFG_TIMEOUT) {
3465 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3466 device_xname(sc->sc_dev), __func__));
3467 }
3468 break;
3469 case WM_T_ICH8:
3470 case WM_T_ICH9:
3471 case WM_T_ICH10:
3472 case WM_T_PCH:
3473 case WM_T_PCH2:
3474 case WM_T_PCH_LPT:
3475 delay(10*1000);
3476 if (sc->sc_type >= WM_T_ICH10)
3477 wm_lan_init_done(sc);
3478 else
3479 wm_get_auto_rd_done(sc);
3480
3481 reg = CSR_READ(sc, WMREG_STATUS);
3482 if ((reg & STATUS_PHYRA) != 0)
3483 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3484 break;
3485 default:
3486 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3487 __func__);
3488 break;
3489 }
3490 }
3491
3492 /* Init hardware bits */
3493 void
3494 wm_initialize_hardware_bits(struct wm_softc *sc)
3495 {
3496 uint32_t tarc0, tarc1, reg;
3497
3498 /* For 82571 variant, 80003 and ICHs */
3499 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3500 || (sc->sc_type >= WM_T_80003)) {
3501
3502 /* Transmit Descriptor Control 0 */
3503 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3504 reg |= TXDCTL_COUNT_DESC;
3505 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3506
3507 /* Transmit Descriptor Control 1 */
3508 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3509 reg |= TXDCTL_COUNT_DESC;
3510 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3511
3512 /* TARC0 */
3513 tarc0 = CSR_READ(sc, WMREG_TARC0);
3514 switch (sc->sc_type) {
3515 case WM_T_82571:
3516 case WM_T_82572:
3517 case WM_T_82573:
3518 case WM_T_82574:
3519 case WM_T_82583:
3520 case WM_T_80003:
3521 /* Clear bits 30..27 */
3522 tarc0 &= ~__BITS(30, 27);
3523 break;
3524 default:
3525 break;
3526 }
3527
3528 switch (sc->sc_type) {
3529 case WM_T_82571:
3530 case WM_T_82572:
3531 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3532
3533 tarc1 = CSR_READ(sc, WMREG_TARC1);
3534 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3535 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3536 /* 8257[12] Errata No.7 */
3537 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
3538
3539 /* TARC1 bit 28 */
3540 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3541 tarc1 &= ~__BIT(28);
3542 else
3543 tarc1 |= __BIT(28);
3544 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3545
3546 /*
3547 * 8257[12] Errata No.13
3548 			 * Disable Dynamic Clock Gating.
3549 */
3550 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3551 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3552 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3553 break;
3554 case WM_T_82573:
3555 case WM_T_82574:
3556 case WM_T_82583:
3557 if ((sc->sc_type == WM_T_82574)
3558 || (sc->sc_type == WM_T_82583))
3559 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3560
3561 /* Extended Device Control */
3562 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3563 reg &= ~__BIT(23); /* Clear bit 23 */
3564 reg |= __BIT(22); /* Set bit 22 */
3565 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3566
3567 /* Device Control */
3568 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3569 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3570
3571 /* PCIe Control Register */
3572 if ((sc->sc_type == WM_T_82574)
3573 || (sc->sc_type == WM_T_82583)) {
3574 /*
3575 * Document says this bit must be set for
3576 * proper operation.
3577 */
3578 reg = CSR_READ(sc, WMREG_GCR);
3579 reg |= __BIT(22);
3580 CSR_WRITE(sc, WMREG_GCR, reg);
3581
3582 /*
3583 				 * Apply a workaround for a hardware erratum
3584 				 * documented in the errata sheets. It fixes an
3585 				 * issue where error-prone or unreliable PCIe
3586 				 * completions occur, particularly with ASPM
3587 				 * enabled. Without the fix, the issue can
3588 				 * cause Tx timeouts.
3589 */
3590 reg = CSR_READ(sc, WMREG_GCR2);
3591 reg |= __BIT(0);
3592 CSR_WRITE(sc, WMREG_GCR2, reg);
3593 }
3594 break;
3595 case WM_T_80003:
3596 /* TARC0 */
3597 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3598 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3599 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3600
3601 /* TARC1 bit 28 */
3602 tarc1 = CSR_READ(sc, WMREG_TARC1);
3603 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3604 tarc1 &= ~__BIT(28);
3605 else
3606 tarc1 |= __BIT(28);
3607 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3608 break;
3609 case WM_T_ICH8:
3610 case WM_T_ICH9:
3611 case WM_T_ICH10:
3612 case WM_T_PCH:
3613 case WM_T_PCH2:
3614 case WM_T_PCH_LPT:
3615 /* TARC 0 */
3616 if (sc->sc_type == WM_T_ICH8) {
3617 /* Set TARC0 bits 29 and 28 */
3618 tarc0 |= __BITS(29, 28);
3619 }
3620 /* Set TARC0 bits 23,24,26,27 */
3621 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3622
3623 /* CTRL_EXT */
3624 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3625 reg |= __BIT(22); /* Set bit 22 */
3626 /*
3627 * Enable PHY low-power state when MAC is at D3
3628 * w/o WoL
3629 */
3630 if (sc->sc_type >= WM_T_PCH)
3631 reg |= CTRL_EXT_PHYPDEN;
3632 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3633
3634 /* TARC1 */
3635 tarc1 = CSR_READ(sc, WMREG_TARC1);
3636 /* bit 28 */
3637 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3638 tarc1 &= ~__BIT(28);
3639 else
3640 tarc1 |= __BIT(28);
3641 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3642 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3643
3644 /* Device Status */
3645 if (sc->sc_type == WM_T_ICH8) {
3646 reg = CSR_READ(sc, WMREG_STATUS);
3647 reg &= ~__BIT(31);
3648 CSR_WRITE(sc, WMREG_STATUS, reg);
3649
3650 }
3651
3652 /*
3653 			 * To work around a descriptor data corruption issue
3654 			 * seen during NFS v2 UDP traffic, simply disable the
3655 			 * NFS filtering capability.
3656 */
3657 reg = CSR_READ(sc, WMREG_RFCTL);
3658 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3659 CSR_WRITE(sc, WMREG_RFCTL, reg);
3660 break;
3661 default:
3662 break;
3663 }
3664 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3665
3666 /*
3667 * 8257[12] Errata No.52 and some others.
3668 * Avoid RSS Hash Value bug.
3669 */
3670 switch (sc->sc_type) {
3671 case WM_T_82571:
3672 case WM_T_82572:
3673 case WM_T_82573:
3674 case WM_T_80003:
3675 case WM_T_ICH8:
3676 reg = CSR_READ(sc, WMREG_RFCTL);
3677 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3678 CSR_WRITE(sc, WMREG_RFCTL, reg);
3679 break;
3680 default:
3681 break;
3682 }
3683 }
3684 }
3685
3686 static uint32_t
3687 wm_rxpbs_adjust_82580(uint32_t val)
3688 {
3689 uint32_t rv = 0;
3690
3691 if (val < __arraycount(wm_82580_rxpbs_table))
3692 rv = wm_82580_rxpbs_table[val];
3693
3694 return rv;
3695 }
3696
3697 /*
3698 * wm_reset:
3699 *
3700 * Reset the i82542 chip.
3701 */
3702 static void
3703 wm_reset(struct wm_softc *sc)
3704 {
3705 int phy_reset = 0;
3706 int error = 0;
3707 uint32_t reg, mask;
3708
3709 /*
3710 * Allocate on-chip memory according to the MTU size.
3711 * The Packet Buffer Allocation register must be written
3712 * before the chip is reset.
3713 */
3714 switch (sc->sc_type) {
3715 case WM_T_82547:
3716 case WM_T_82547_2:
3717 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3718 PBA_22K : PBA_30K;
3719 sc->sc_txfifo_head = 0;
3720 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3721 sc->sc_txfifo_size =
3722 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3723 sc->sc_txfifo_stall = 0;
3724 break;
3725 case WM_T_82571:
3726 case WM_T_82572:
3727 case WM_T_82575: /* XXX need special handing for jumbo frames */
3728 case WM_T_80003:
3729 sc->sc_pba = PBA_32K;
3730 break;
3731 case WM_T_82573:
3732 sc->sc_pba = PBA_12K;
3733 break;
3734 case WM_T_82574:
3735 case WM_T_82583:
3736 sc->sc_pba = PBA_20K;
3737 break;
3738 case WM_T_82576:
3739 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3740 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3741 break;
3742 case WM_T_82580:
3743 case WM_T_I350:
3744 case WM_T_I354:
3745 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3746 break;
3747 case WM_T_I210:
3748 case WM_T_I211:
3749 sc->sc_pba = PBA_34K;
3750 break;
3751 case WM_T_ICH8:
3752 /* Workaround for a bit corruption issue in FIFO memory */
3753 sc->sc_pba = PBA_8K;
3754 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3755 break;
3756 case WM_T_ICH9:
3757 case WM_T_ICH10:
3758 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3759 PBA_14K : PBA_10K;
3760 break;
3761 case WM_T_PCH:
3762 case WM_T_PCH2:
3763 case WM_T_PCH_LPT:
3764 sc->sc_pba = PBA_26K;
3765 break;
3766 default:
3767 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3768 PBA_40K : PBA_48K;
3769 break;
3770 }
3771 /*
3772 	 * Only old or non-multiqueue devices have the PBA register.
3773 * XXX Need special handling for 82575.
3774 */
3775 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3776 || (sc->sc_type == WM_T_82575))
3777 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3778
3779 /* Prevent the PCI-E bus from sticking */
3780 if (sc->sc_flags & WM_F_PCIE) {
3781 int timeout = 800;
3782
3783 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3784 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3785
3786 while (timeout--) {
3787 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3788 == 0)
3789 break;
3790 delay(100);
3791 }
3792 }
3793
3794 /* Set the completion timeout for interface */
3795 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3796 || (sc->sc_type == WM_T_82580)
3797 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3798 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3799 wm_set_pcie_completion_timeout(sc);
3800
3801 /* Clear interrupt */
3802 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3803 if (sc->sc_nintrs > 1) {
3804 if (sc->sc_type != WM_T_82574) {
3805 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3806 CSR_WRITE(sc, WMREG_EIAC, 0);
3807 } else {
3808 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3809 }
3810 }
3811
3812 /* Stop the transmit and receive processes. */
3813 CSR_WRITE(sc, WMREG_RCTL, 0);
3814 sc->sc_rctl &= ~RCTL_EN;
3815 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3816 CSR_WRITE_FLUSH(sc);
3817
3818 /* XXX set_tbi_sbp_82543() */
3819
3820 delay(10*1000);
3821
3822 /* Must acquire the MDIO ownership before MAC reset */
3823 switch (sc->sc_type) {
3824 case WM_T_82573:
3825 case WM_T_82574:
3826 case WM_T_82583:
3827 error = wm_get_hw_semaphore_82573(sc);
3828 break;
3829 default:
3830 break;
3831 }
3832
3833 /*
3834 * 82541 Errata 29? & 82547 Errata 28?
3835 * See also the description about PHY_RST bit in CTRL register
3836 * in 8254x_GBe_SDM.pdf.
3837 */
3838 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3839 CSR_WRITE(sc, WMREG_CTRL,
3840 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3841 CSR_WRITE_FLUSH(sc);
3842 delay(5000);
3843 }
3844
3845 switch (sc->sc_type) {
3846 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3847 case WM_T_82541:
3848 case WM_T_82541_2:
3849 case WM_T_82547:
3850 case WM_T_82547_2:
3851 /*
3852 * On some chipsets, a reset through a memory-mapped write
3853 * cycle can cause the chip to reset before completing the
3854 * write cycle. This causes major headache that can be
3855 * avoided by issuing the reset via indirect register writes
3856 * through I/O space.
3857 *
3858 * So, if we successfully mapped the I/O BAR at attach time,
3859 * use that. Otherwise, try our luck with a memory-mapped
3860 * reset.
3861 */
3862 if (sc->sc_flags & WM_F_IOH_VALID)
3863 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3864 else
3865 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3866 break;
3867 case WM_T_82545_3:
3868 case WM_T_82546_3:
3869 /* Use the shadow control register on these chips. */
3870 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3871 break;
3872 case WM_T_80003:
3873 mask = swfwphysem[sc->sc_funcid];
3874 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3875 wm_get_swfw_semaphore(sc, mask);
3876 CSR_WRITE(sc, WMREG_CTRL, reg);
3877 wm_put_swfw_semaphore(sc, mask);
3878 break;
3879 case WM_T_ICH8:
3880 case WM_T_ICH9:
3881 case WM_T_ICH10:
3882 case WM_T_PCH:
3883 case WM_T_PCH2:
3884 case WM_T_PCH_LPT:
3885 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3886 if (wm_check_reset_block(sc) == 0) {
3887 /*
3888 * Gate automatic PHY configuration by hardware on
3889 * non-managed 82579
3890 */
3891 if ((sc->sc_type == WM_T_PCH2)
3892 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3893 != 0))
3894 wm_gate_hw_phy_config_ich8lan(sc, 1);
3895
3896
3897 reg |= CTRL_PHY_RESET;
3898 phy_reset = 1;
3899 }
3900 wm_get_swfwhw_semaphore(sc);
3901 CSR_WRITE(sc, WMREG_CTRL, reg);
3902 		/* Don't insert a completion barrier during reset */
3903 delay(20*1000);
3904 wm_put_swfwhw_semaphore(sc);
3905 break;
3906 case WM_T_82580:
3907 case WM_T_I350:
3908 case WM_T_I354:
3909 case WM_T_I210:
3910 case WM_T_I211:
3911 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3912 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3913 CSR_WRITE_FLUSH(sc);
3914 delay(5000);
3915 break;
3916 case WM_T_82542_2_0:
3917 case WM_T_82542_2_1:
3918 case WM_T_82543:
3919 case WM_T_82540:
3920 case WM_T_82545:
3921 case WM_T_82546:
3922 case WM_T_82571:
3923 case WM_T_82572:
3924 case WM_T_82573:
3925 case WM_T_82574:
3926 case WM_T_82575:
3927 case WM_T_82576:
3928 case WM_T_82583:
3929 default:
3930 /* Everything else can safely use the documented method. */
3931 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3932 break;
3933 }
3934
3935 /* Must release the MDIO ownership after MAC reset */
3936 switch (sc->sc_type) {
3937 case WM_T_82573:
3938 case WM_T_82574:
3939 case WM_T_82583:
3940 if (error == 0)
3941 wm_put_hw_semaphore_82573(sc);
3942 break;
3943 default:
3944 break;
3945 }
3946
3947 if (phy_reset != 0)
3948 wm_get_cfg_done(sc);
3949
3950 /* reload EEPROM */
3951 switch (sc->sc_type) {
3952 case WM_T_82542_2_0:
3953 case WM_T_82542_2_1:
3954 case WM_T_82543:
3955 case WM_T_82544:
3956 delay(10);
3957 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3958 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3959 CSR_WRITE_FLUSH(sc);
3960 delay(2000);
3961 break;
3962 case WM_T_82540:
3963 case WM_T_82545:
3964 case WM_T_82545_3:
3965 case WM_T_82546:
3966 case WM_T_82546_3:
3967 delay(5*1000);
3968 /* XXX Disable HW ARPs on ASF enabled adapters */
3969 break;
3970 case WM_T_82541:
3971 case WM_T_82541_2:
3972 case WM_T_82547:
3973 case WM_T_82547_2:
3974 delay(20000);
3975 /* XXX Disable HW ARPs on ASF enabled adapters */
3976 break;
3977 case WM_T_82571:
3978 case WM_T_82572:
3979 case WM_T_82573:
3980 case WM_T_82574:
3981 case WM_T_82583:
3982 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3983 delay(10);
3984 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3985 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3986 CSR_WRITE_FLUSH(sc);
3987 }
3988 /* check EECD_EE_AUTORD */
3989 wm_get_auto_rd_done(sc);
3990 /*
3991 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
3992 * is set.
3993 */
3994 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3995 || (sc->sc_type == WM_T_82583))
3996 delay(25*1000);
3997 break;
3998 case WM_T_82575:
3999 case WM_T_82576:
4000 case WM_T_82580:
4001 case WM_T_I350:
4002 case WM_T_I354:
4003 case WM_T_I210:
4004 case WM_T_I211:
4005 case WM_T_80003:
4006 /* check EECD_EE_AUTORD */
4007 wm_get_auto_rd_done(sc);
4008 break;
4009 case WM_T_ICH8:
4010 case WM_T_ICH9:
4011 case WM_T_ICH10:
4012 case WM_T_PCH:
4013 case WM_T_PCH2:
4014 case WM_T_PCH_LPT:
4015 break;
4016 default:
4017 panic("%s: unknown type\n", __func__);
4018 }
4019
4020 /* Check whether EEPROM is present or not */
4021 switch (sc->sc_type) {
4022 case WM_T_82575:
4023 case WM_T_82576:
4024 case WM_T_82580:
4025 case WM_T_I350:
4026 case WM_T_I354:
4027 case WM_T_ICH8:
4028 case WM_T_ICH9:
4029 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4030 /* Not found */
4031 sc->sc_flags |= WM_F_EEPROM_INVALID;
4032 if (sc->sc_type == WM_T_82575)
4033 wm_reset_init_script_82575(sc);
4034 }
4035 break;
4036 default:
4037 break;
4038 }
4039
4040 if ((sc->sc_type == WM_T_82580)
4041 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4042 /* clear global device reset status bit */
4043 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4044 }
4045
4046 /* Clear any pending interrupt events. */
4047 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4048 reg = CSR_READ(sc, WMREG_ICR);
4049 if (sc->sc_nintrs > 1) {
4050 if (sc->sc_type != WM_T_82574) {
4051 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4052 CSR_WRITE(sc, WMREG_EIAC, 0);
4053 } else
4054 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4055 }
4056
4057 /* reload sc_ctrl */
4058 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4059
4060 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4061 wm_set_eee_i350(sc);
4062
4063 /* dummy read from WUC */
4064 if (sc->sc_type == WM_T_PCH)
4065 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4066 /*
4067 * For PCH, this write will make sure that any noise will be detected
4068 * as a CRC error and be dropped rather than show up as a bad packet
4069 	 * to the DMA engine.
4070 */
4071 if (sc->sc_type == WM_T_PCH)
4072 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4073
4074 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4075 CSR_WRITE(sc, WMREG_WUC, 0);
4076
4077 wm_reset_mdicnfg_82580(sc);
4078
4079 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4080 wm_pll_workaround_i210(sc);
4081 }
4082
4083 /*
4084 * wm_add_rxbuf:
4085 *
4086  * Add a receive buffer to the indicated descriptor.
4087 */
4088 static int
4089 wm_add_rxbuf(struct wm_softc *sc, int idx)
4090 {
4091 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4092 struct mbuf *m;
4093 int error;
4094
4095 KASSERT(WM_RX_LOCKED(sc));
4096
4097 MGETHDR(m, M_DONTWAIT, MT_DATA);
4098 if (m == NULL)
4099 return ENOBUFS;
4100
4101 MCLGET(m, M_DONTWAIT);
4102 if ((m->m_flags & M_EXT) == 0) {
4103 m_freem(m);
4104 return ENOBUFS;
4105 }
4106
4107 if (rxs->rxs_mbuf != NULL)
4108 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4109
4110 rxs->rxs_mbuf = m;
4111
4112 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4113 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4114 BUS_DMA_READ|BUS_DMA_NOWAIT);
4115 if (error) {
4116 /* XXX XXX XXX */
4117 aprint_error_dev(sc->sc_dev,
4118 "unable to load rx DMA map %d, error = %d\n",
4119 idx, error);
4120 panic("wm_add_rxbuf");
4121 }
4122
4123 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4124 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4125
4126 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4127 if ((sc->sc_rctl & RCTL_EN) != 0)
4128 WM_INIT_RXDESC(sc, idx);
4129 } else
4130 WM_INIT_RXDESC(sc, idx);
4131
4132 return 0;
4133 }
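/*
 * On NEWQUEUE (82575 and later) devices the descriptor is pushed to the
 * hardware here only if the receiver is already enabled; during
 * initialization the descriptors are instead written in a final pass after
 * RCTL.EN is set (see wm_init_locked()).
 */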
4134
4135 /*
4136 * wm_rxdrain:
4137 *
4138 * Drain the receive queue.
4139 */
4140 static void
4141 wm_rxdrain(struct wm_softc *sc)
4142 {
4143 struct wm_rxsoft *rxs;
4144 int i;
4145
4146 KASSERT(WM_RX_LOCKED(sc));
4147
4148 for (i = 0; i < WM_NRXDESC; i++) {
4149 rxs = &sc->sc_rxsoft[i];
4150 if (rxs->rxs_mbuf != NULL) {
4151 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4152 m_freem(rxs->rxs_mbuf);
4153 rxs->rxs_mbuf = NULL;
4154 }
4155 }
4156 }
4157
4158 /*
4159 * wm_init: [ifnet interface function]
4160 *
4161 * Initialize the interface.
4162 */
4163 static int
4164 wm_init(struct ifnet *ifp)
4165 {
4166 struct wm_softc *sc = ifp->if_softc;
4167 int ret;
4168
4169 WM_BOTH_LOCK(sc);
4170 ret = wm_init_locked(ifp);
4171 WM_BOTH_UNLOCK(sc);
4172
4173 return ret;
4174 }
4175
4176 static int
4177 wm_init_locked(struct ifnet *ifp)
4178 {
4179 struct wm_softc *sc = ifp->if_softc;
4180 struct wm_rxsoft *rxs;
4181 int i, j, trynum, error = 0;
4182 uint32_t reg;
4183
4184 KASSERT(WM_BOTH_LOCKED(sc));
4185 /*
4186 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4187 	 * There is a small but measurable benefit to avoiding the adjustment
4188 * of the descriptor so that the headers are aligned, for normal mtu,
4189 * on such platforms. One possibility is that the DMA itself is
4190 * slightly more efficient if the front of the entire packet (instead
4191 * of the front of the headers) is aligned.
4192 *
4193 * Note we must always set align_tweak to 0 if we are using
4194 * jumbo frames.
4195 */
4196 #ifdef __NO_STRICT_ALIGNMENT
4197 sc->sc_align_tweak = 0;
4198 #else
4199 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4200 sc->sc_align_tweak = 0;
4201 else
4202 sc->sc_align_tweak = 2;
4203 #endif /* __NO_STRICT_ALIGNMENT */
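	/*
	 * Why a tweak of 2 helps: the Ethernet header is 14 bytes, so
	 * receiving into a buffer offset by 2 bytes places the IP header
	 * that follows it on a 4-byte boundary, which strict-alignment
	 * platforms require. With jumbo frames every byte of the cluster
	 * is needed for the frame itself, so no offset can be spared;
	 * hence the (MCLBYTES - 2) check above.
	 */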
4204
4205 /* Cancel any pending I/O. */
4206 wm_stop_locked(ifp, 0);
4207
4208 /* update statistics before reset */
4209 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4210 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4211
4212 /* Reset the chip to a known state. */
4213 wm_reset(sc);
4214
4215 switch (sc->sc_type) {
4216 case WM_T_82571:
4217 case WM_T_82572:
4218 case WM_T_82573:
4219 case WM_T_82574:
4220 case WM_T_82583:
4221 case WM_T_80003:
4222 case WM_T_ICH8:
4223 case WM_T_ICH9:
4224 case WM_T_ICH10:
4225 case WM_T_PCH:
4226 case WM_T_PCH2:
4227 case WM_T_PCH_LPT:
4228 if (wm_check_mng_mode(sc) != 0)
4229 wm_get_hw_control(sc);
4230 break;
4231 default:
4232 break;
4233 }
4234
4235 /* Init hardware bits */
4236 wm_initialize_hardware_bits(sc);
4237
4238 /* Reset the PHY. */
4239 if (sc->sc_flags & WM_F_HAS_MII)
4240 wm_gmii_reset(sc);
4241
4242 /* Calculate (E)ITR value */
4243 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4244 sc->sc_itr = 450; /* For EITR */
4245 } else if (sc->sc_type >= WM_T_82543) {
4246 /*
4247 * Set up the interrupt throttling register (units of 256ns)
4248 * Note that a footnote in Intel's documentation says this
4249 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4250 * or 10Mbit mode. Empirically, it appears to be the case
4251 * that that is also true for the 1024ns units of the other
4252 * interrupt-related timer registers -- so, really, we ought
4253 * to divide this value by 4 when the link speed is low.
4254 *
4255 * XXX implement this division at link speed change!
4256 */
4257
4258 /*
4259 * For N interrupts/sec, set this value to:
4260 * 1000000000 / (N * 256). Note that we set the
4261 * absolute and packet timer values to this value
4262 * divided by 4 to get "simple timer" behavior.
4263 */
4264
4265 sc->sc_itr = 1500; /* 2604 ints/sec */
4266 }
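	/*
	 * Worked example of the formula above: sc_itr = 1500 gives
	 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, matching the
	 * note beside the assignment. The "simple timer" registers written
	 * later (TIDV/TADV and RDTR/RADV) therefore get sc_itr / 4 = 375.
	 */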
4267
4268 /* Initialize the transmit descriptor ring. */
4269 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4270 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4271 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4272 sc->sc_txfree = WM_NTXDESC(sc);
4273 sc->sc_txnext = 0;
4274
4275 if (sc->sc_type < WM_T_82543) {
4276 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4277 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4278 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4279 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4280 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4281 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4282 } else {
4283 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4284 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4285 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4286 CSR_WRITE(sc, WMREG_TDH, 0);
4287
4288 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4289 /*
4290 * Don't write TDT before TCTL.EN is set.
4291 			 * See the documentation.
4292 */
4293 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
4294 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4295 | TXDCTL_WTHRESH(0));
4296 else {
4297 /* ITR / 4 */
4298 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
4299 if (sc->sc_type >= WM_T_82540) {
4300 /* should be same */
4301 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
4302 }
4303
4304 CSR_WRITE(sc, WMREG_TDT, 0);
4305 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
4306 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4307 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4308 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4309 }
4310 }
4311
4312 /* Initialize the transmit job descriptors. */
4313 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4314 sc->sc_txsoft[i].txs_mbuf = NULL;
4315 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4316 sc->sc_txsnext = 0;
4317 sc->sc_txsdirty = 0;
4318
4319 /*
4320 * Initialize the receive descriptor and receive job
4321 * descriptor rings.
4322 */
4323 if (sc->sc_type < WM_T_82543) {
4324 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4325 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4326 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4327 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4328 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4329 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4330
4331 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4332 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4333 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4334 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4335 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4336 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4337 } else {
4338 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4339 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4340 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4341
4342 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4343 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4344 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4345 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4346 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4347 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4348 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4349 | RXDCTL_WTHRESH(1));
4350 } else {
4351 CSR_WRITE(sc, WMREG_RDH, 0);
4352 CSR_WRITE(sc, WMREG_RDT, 0);
4353 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4354 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4355 }
4356 }
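	/*
	 * A note on the SRRCTL write above: the receive buffer size is
	 * programmed in units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, which
	 * is why MCLBYTES must be a multiple of that unit (the panic
	 * above). Assuming the usual 1KB granularity, MCLBYTES of 2048
	 * programs a buffer size value of 2.
	 */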
4357 for (i = 0; i < WM_NRXDESC; i++) {
4358 rxs = &sc->sc_rxsoft[i];
4359 if (rxs->rxs_mbuf == NULL) {
4360 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4361 log(LOG_ERR, "%s: unable to allocate or map "
4362 "rx buffer %d, error = %d\n",
4363 device_xname(sc->sc_dev), i, error);
4364 /*
4365 * XXX Should attempt to run with fewer receive
4366 * XXX buffers instead of just failing.
4367 */
4368 wm_rxdrain(sc);
4369 goto out;
4370 }
4371 } else {
4372 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4373 WM_INIT_RXDESC(sc, i);
4374 /*
4375 			 * For 82575 and newer devices, the RX descriptors
4376 			 * must be initialized after RCTL.EN is set in
4377 			 * wm_set_filter().
4378 */
4379 }
4380 }
4381 sc->sc_rxptr = 0;
4382 sc->sc_rxdiscard = 0;
4383 WM_RXCHAIN_RESET(sc);
4384
4385 /*
4386 * Clear out the VLAN table -- we don't use it (yet).
4387 */
4388 CSR_WRITE(sc, WMREG_VET, 0);
4389 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4390 trynum = 10; /* Due to hw errata */
4391 else
4392 trynum = 1;
4393 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4394 for (j = 0; j < trynum; j++)
4395 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4396
4397 /*
4398 * Set up flow-control parameters.
4399 *
4400 * XXX Values could probably stand some tuning.
4401 */
4402 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4403 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4404 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4405 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4406 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4407 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4408 }
4409
4410 sc->sc_fcrtl = FCRTL_DFLT;
4411 if (sc->sc_type < WM_T_82543) {
4412 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4413 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4414 } else {
4415 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4416 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4417 }
4418
4419 if (sc->sc_type == WM_T_80003)
4420 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4421 else
4422 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4423
4424 /* Writes the control register. */
4425 wm_set_vlan(sc);
4426
4427 if (sc->sc_flags & WM_F_HAS_MII) {
4428 int val;
4429
4430 switch (sc->sc_type) {
4431 case WM_T_80003:
4432 case WM_T_ICH8:
4433 case WM_T_ICH9:
4434 case WM_T_ICH10:
4435 case WM_T_PCH:
4436 case WM_T_PCH2:
4437 case WM_T_PCH_LPT:
4438 /*
4439 * Set the mac to wait the maximum time between each
4440 * iteration and increase the max iterations when
4441 * polling the phy; this fixes erroneous timeouts at
4442 * 10Mbps.
4443 */
4444 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4445 0xFFFF);
4446 val = wm_kmrn_readreg(sc,
4447 KUMCTRLSTA_OFFSET_INB_PARAM);
4448 val |= 0x3F;
4449 wm_kmrn_writereg(sc,
4450 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4451 break;
4452 default:
4453 break;
4454 }
4455
4456 if (sc->sc_type == WM_T_80003) {
4457 val = CSR_READ(sc, WMREG_CTRL_EXT);
4458 val &= ~CTRL_EXT_LINK_MODE_MASK;
4459 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4460
4461 /* Bypass RX and TX FIFO's */
4462 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4463 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4464 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4465 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4466 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4467 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4468 }
4469 }
4470 #if 0
4471 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4472 #endif
4473
4474 /* Set up checksum offload parameters. */
4475 reg = CSR_READ(sc, WMREG_RXCSUM);
4476 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4477 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4478 reg |= RXCSUM_IPOFL;
4479 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4480 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4481 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4482 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4483 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4484
4485 /* Set up MSI-X */
4486 if (sc->sc_nintrs > 1) {
4487 uint32_t ivar;
4488
4489 if (sc->sc_type == WM_T_82575) {
4490 /* Interrupt control */
4491 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4492 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4493 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4494
4495 /* TX */
4496 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_TXINTR_IDX),
4497 EITR_TX_QUEUE0);
4498 /* RX */
4499 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_RXINTR_IDX),
4500 EITR_RX_QUEUE0);
4501 /* Link status */
4502 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_LINKINTR_IDX),
4503 EITR_OTHER);
4504 } else if (sc->sc_type == WM_T_82574) {
4505 /* Interrupt control */
4506 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4507 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4508 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4509
4510 /* TX, RX and Link status */
4511 ivar = __SHIFTIN((IVAR_VALID_82574|WM_MSIX_TXINTR_IDX),
4512 IVAR_TX_MASK_Q_82574(0));
4513 ivar |= __SHIFTIN((IVAR_VALID_82574
4514 | WM_MSIX_RXINTR_IDX),
4515 IVAR_RX_MASK_Q_82574(0));
4516 			ivar |= __SHIFTIN((IVAR_VALID_82574|WM_MSIX_LINKINTR_IDX),
4517 IVAR_OTHER_MASK);
4518 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4519 } else {
4520 /* Interrupt control */
4521 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
4522 | GPIE_MULTI_MSIX | GPIE_EIAME
4523 | GPIE_PBA);
4524
4525 switch (sc->sc_type) {
4526 case WM_T_82580:
4527 case WM_T_I350:
4528 case WM_T_I354:
4529 case WM_T_I210:
4530 case WM_T_I211:
4531 /* TX */
4532 ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
4533 ivar &= ~IVAR_TX_MASK_Q(0);
4534 ivar |= __SHIFTIN(
4535 (WM_MSIX_TXINTR_IDX | IVAR_VALID),
4536 IVAR_TX_MASK_Q(0));
4537 CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
4538
4539 /* RX */
4540 ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
4541 ivar &= ~IVAR_RX_MASK_Q(0);
4542 ivar |= __SHIFTIN(
4543 (WM_MSIX_RXINTR_IDX | IVAR_VALID),
4544 IVAR_RX_MASK_Q(0));
4545 CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
4546 break;
4547 case WM_T_82576:
4548 /* TX */
4549 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
4550 ivar &= ~IVAR_TX_MASK_Q_82576(0);
4551 ivar |= __SHIFTIN(
4552 (WM_MSIX_TXINTR_IDX | IVAR_VALID),
4553 IVAR_TX_MASK_Q_82576(0));
4554 CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
4555
4556 /* RX */
4557 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
4558 ivar &= ~IVAR_RX_MASK_Q_82576(0);
4559 ivar |= __SHIFTIN(
4560 (WM_MSIX_RXINTR_IDX | IVAR_VALID),
4561 IVAR_RX_MASK_Q_82576(0));
4562 CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
4563 break;
4564 default:
4565 break;
4566 }
4567
4568 /* Link status */
4569 ivar = __SHIFTIN((WM_MSIX_LINKINTR_IDX | IVAR_VALID),
4570 IVAR_MISC_OTHER);
4571 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4572 }
4573 }
4574
4575 /* Set up the interrupt registers. */
4576 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4577 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4578 ICR_RXO | ICR_RXT0;
4579 if (sc->sc_nintrs > 1) {
4580 uint32_t mask;
4581 switch (sc->sc_type) {
4582 case WM_T_82574:
4583 CSR_WRITE(sc, WMREG_EIAC_82574,
4584 WMREG_EIAC_82574_MSIX_MASK);
4585 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4586 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4587 break;
4588 default:
4589 if (sc->sc_type == WM_T_82575)
4590 				mask = EITR_RX_QUEUE0 | EITR_TX_QUEUE0
4591 | EITR_OTHER;
4592 else
4593 mask = (1 << WM_MSIX_RXINTR_IDX)
4594 | (1 << WM_MSIX_TXINTR_IDX)
4595 | (1 << WM_MSIX_LINKINTR_IDX);
4596 CSR_WRITE(sc, WMREG_EIAC, mask);
4597 CSR_WRITE(sc, WMREG_EIAM, mask);
4598 CSR_WRITE(sc, WMREG_EIMS, mask);
4599 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4600 break;
4601 }
4602 } else
4603 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4604
4605 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4606 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4607 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4608 reg = CSR_READ(sc, WMREG_KABGTXD);
4609 reg |= KABGTXD_BGSQLBIAS;
4610 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4611 }
4612
4613 /* Set up the inter-packet gap. */
4614 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4615
4616 if (sc->sc_type >= WM_T_82543) {
4617 /*
4618 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
4619 * the multi queue function with MSI-X.
4620 */
4621 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4622 CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
4623 else
4624 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4625 }
4626
4627 /* Set the VLAN ethernetype. */
4628 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4629
4630 /*
4631 * Set up the transmit control register; we start out with
4632 	 * a collision distance suitable for FDX, but update it when
4633 * we resolve the media type.
4634 */
4635 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4636 | TCTL_CT(TX_COLLISION_THRESHOLD)
4637 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4638 if (sc->sc_type >= WM_T_82571)
4639 sc->sc_tctl |= TCTL_MULR;
4640 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4641
4642 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4643 		/* Write TDT after TCTL.EN is set. See the documentation. */
4644 CSR_WRITE(sc, WMREG_TDT, 0);
4645 }
4646
4647 if (sc->sc_type == WM_T_80003) {
4648 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4649 reg &= ~TCTL_EXT_GCEX_MASK;
4650 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4651 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4652 }
4653
4654 /* Set the media. */
4655 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4656 goto out;
4657
4658 /* Configure for OS presence */
4659 wm_init_manageability(sc);
4660
4661 /*
4662 * Set up the receive control register; we actually program
4663 * the register when we set the receive filter. Use multicast
4664 * address offset type 0.
4665 *
4666 * Only the i82544 has the ability to strip the incoming
4667 * CRC, so we don't enable that feature.
4668 */
4669 sc->sc_mchash_type = 0;
4670 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4671 | RCTL_MO(sc->sc_mchash_type);
4672
4673 /*
4674 * The I350 has a bug where it always strips the CRC whether
4675 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
4676 */
4677 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4678 || (sc->sc_type == WM_T_I210))
4679 sc->sc_rctl |= RCTL_SECRC;
4680
4681 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4682 && (ifp->if_mtu > ETHERMTU)) {
4683 sc->sc_rctl |= RCTL_LPE;
4684 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4685 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4686 }
4687
4688 if (MCLBYTES == 2048) {
4689 sc->sc_rctl |= RCTL_2k;
4690 } else {
4691 if (sc->sc_type >= WM_T_82543) {
4692 switch (MCLBYTES) {
4693 case 4096:
4694 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4695 break;
4696 case 8192:
4697 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4698 break;
4699 case 16384:
4700 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4701 break;
4702 default:
4703 panic("wm_init: MCLBYTES %d unsupported",
4704 MCLBYTES);
4705 break;
4706 }
4707 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4708 }
4709
4710 /* Set the receive filter. */
4711 wm_set_filter(sc);
4712
4713 /* Enable ECC */
4714 switch (sc->sc_type) {
4715 case WM_T_82571:
4716 reg = CSR_READ(sc, WMREG_PBA_ECC);
4717 reg |= PBA_ECC_CORR_EN;
4718 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4719 break;
4720 case WM_T_PCH_LPT:
4721 reg = CSR_READ(sc, WMREG_PBECCSTS);
4722 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4723 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4724
4725 reg = CSR_READ(sc, WMREG_CTRL);
4726 reg |= CTRL_MEHE;
4727 CSR_WRITE(sc, WMREG_CTRL, reg);
4728 break;
4729 default:
4730 break;
4731 }
4732
4733 /* On 575 and later set RDT only if RX enabled */
4734 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4735 for (i = 0; i < WM_NRXDESC; i++)
4736 WM_INIT_RXDESC(sc, i);
4737
4738 sc->sc_stopping = false;
4739
4740 /* Start the one second link check clock. */
4741 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4742
4743 /* ...all done! */
4744 ifp->if_flags |= IFF_RUNNING;
4745 ifp->if_flags &= ~IFF_OACTIVE;
4746
4747 out:
4748 sc->sc_if_flags = ifp->if_flags;
4749 if (error)
4750 log(LOG_ERR, "%s: interface not running\n",
4751 device_xname(sc->sc_dev));
4752 return error;
4753 }
4754
4755 /*
4756 * wm_stop: [ifnet interface function]
4757 *
4758 * Stop transmission on the interface.
4759 */
4760 static void
4761 wm_stop(struct ifnet *ifp, int disable)
4762 {
4763 struct wm_softc *sc = ifp->if_softc;
4764
4765 WM_BOTH_LOCK(sc);
4766 wm_stop_locked(ifp, disable);
4767 WM_BOTH_UNLOCK(sc);
4768 }
4769
4770 static void
4771 wm_stop_locked(struct ifnet *ifp, int disable)
4772 {
4773 struct wm_softc *sc = ifp->if_softc;
4774 struct wm_txsoft *txs;
4775 int i;
4776
4777 KASSERT(WM_BOTH_LOCKED(sc));
4778
4779 sc->sc_stopping = true;
4780
4781 /* Stop the one second clock. */
4782 callout_stop(&sc->sc_tick_ch);
4783
4784 /* Stop the 82547 Tx FIFO stall check timer. */
4785 if (sc->sc_type == WM_T_82547)
4786 callout_stop(&sc->sc_txfifo_ch);
4787
4788 if (sc->sc_flags & WM_F_HAS_MII) {
4789 /* Down the MII. */
4790 mii_down(&sc->sc_mii);
4791 } else {
4792 #if 0
4793 /* Should we clear PHY's status properly? */
4794 wm_reset(sc);
4795 #endif
4796 }
4797
4798 /* Stop the transmit and receive processes. */
4799 CSR_WRITE(sc, WMREG_TCTL, 0);
4800 CSR_WRITE(sc, WMREG_RCTL, 0);
4801 sc->sc_rctl &= ~RCTL_EN;
4802
4803 /*
4804 * Clear the interrupt mask to ensure the device cannot assert its
4805 * interrupt line.
4806 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
4807 * service any currently pending or shared interrupt.
4808 */
4809 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4810 sc->sc_icr = 0;
4811 if (sc->sc_nintrs > 1) {
4812 if (sc->sc_type != WM_T_82574) {
4813 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4814 CSR_WRITE(sc, WMREG_EIAC, 0);
4815 } else
4816 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4817 }
4818
4819 /* Release any queued transmit buffers. */
4820 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4821 txs = &sc->sc_txsoft[i];
4822 if (txs->txs_mbuf != NULL) {
4823 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4824 m_freem(txs->txs_mbuf);
4825 txs->txs_mbuf = NULL;
4826 }
4827 }
4828
4829 /* Mark the interface as down and cancel the watchdog timer. */
4830 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4831 ifp->if_timer = 0;
4832
4833 if (disable)
4834 wm_rxdrain(sc);
4835
4836 #if 0 /* notyet */
4837 if (sc->sc_type >= WM_T_82544)
4838 CSR_WRITE(sc, WMREG_WUC, 0);
4839 #endif
4840 }
4841
4842 /*
4843 * wm_tx_offload:
4844 *
4845 * Set up TCP/IP checksumming parameters for the
4846 * specified packet.
4847 */
4848 static int
4849 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4850 uint8_t *fieldsp)
4851 {
4852 struct mbuf *m0 = txs->txs_mbuf;
4853 struct livengood_tcpip_ctxdesc *t;
4854 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4855 uint32_t ipcse;
4856 struct ether_header *eh;
4857 int offset, iphl;
4858 uint8_t fields;
4859
4860 /*
4861 * XXX It would be nice if the mbuf pkthdr had offset
4862 * fields for the protocol headers.
4863 */
4864
4865 eh = mtod(m0, struct ether_header *);
4866 switch (htons(eh->ether_type)) {
4867 case ETHERTYPE_IP:
4868 case ETHERTYPE_IPV6:
4869 offset = ETHER_HDR_LEN;
4870 break;
4871
4872 case ETHERTYPE_VLAN:
4873 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4874 break;
4875
4876 default:
4877 /*
4878 * Don't support this protocol or encapsulation.
4879 */
4880 *fieldsp = 0;
4881 *cmdp = 0;
4882 return 0;
4883 }
4884
4885 if ((m0->m_pkthdr.csum_flags &
4886 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4887 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4888 } else {
4889 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4890 }
4891 ipcse = offset + iphl - 1;
4892
4893 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4894 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4895 seg = 0;
4896 fields = 0;
4897
4898 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4899 int hlen = offset + iphl;
4900 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4901
4902 if (__predict_false(m0->m_len <
4903 (hlen + sizeof(struct tcphdr)))) {
4904 /*
4905 * TCP/IP headers are not in the first mbuf; we need
4906 * to do this the slow and painful way. Let's just
4907 * hope this doesn't happen very often.
4908 */
4909 struct tcphdr th;
4910
4911 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4912
4913 m_copydata(m0, hlen, sizeof(th), &th);
4914 if (v4) {
4915 struct ip ip;
4916
4917 m_copydata(m0, offset, sizeof(ip), &ip);
4918 ip.ip_len = 0;
4919 m_copyback(m0,
4920 offset + offsetof(struct ip, ip_len),
4921 sizeof(ip.ip_len), &ip.ip_len);
4922 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4923 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4924 } else {
4925 struct ip6_hdr ip6;
4926
4927 m_copydata(m0, offset, sizeof(ip6), &ip6);
4928 ip6.ip6_plen = 0;
4929 m_copyback(m0,
4930 offset + offsetof(struct ip6_hdr, ip6_plen),
4931 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4932 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4933 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4934 }
4935 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4936 sizeof(th.th_sum), &th.th_sum);
4937
4938 hlen += th.th_off << 2;
4939 } else {
4940 /*
4941 * TCP/IP headers are in the first mbuf; we can do
4942 * this the easy way.
4943 */
4944 struct tcphdr *th;
4945
4946 if (v4) {
4947 struct ip *ip =
4948 (void *)(mtod(m0, char *) + offset);
4949 th = (void *)(mtod(m0, char *) + hlen);
4950
4951 ip->ip_len = 0;
4952 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4953 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4954 } else {
4955 struct ip6_hdr *ip6 =
4956 (void *)(mtod(m0, char *) + offset);
4957 th = (void *)(mtod(m0, char *) + hlen);
4958
4959 ip6->ip6_plen = 0;
4960 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4961 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4962 }
4963 hlen += th->th_off << 2;
4964 }
4965
4966 if (v4) {
4967 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4968 cmdlen |= WTX_TCPIP_CMD_IP;
4969 } else {
4970 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4971 ipcse = 0;
4972 }
4973 cmd |= WTX_TCPIP_CMD_TSE;
4974 cmdlen |= WTX_TCPIP_CMD_TSE |
4975 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4976 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4977 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4978 }
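	/*
	 * In the TSO setup above, ip_len/ip6_plen are zeroed and th_sum is
	 * seeded with a pseudo-header checksum that deliberately excludes
	 * the length: the hardware rewrites the length fields and folds
	 * the per-segment payload length into the checksum for every
	 * segment it emits.
	 */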
4979
4980 /*
4981 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4982 * offload feature, if we load the context descriptor, we
4983 * MUST provide valid values for IPCSS and TUCSS fields.
4984 */
4985
4986 ipcs = WTX_TCPIP_IPCSS(offset) |
4987 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4988 WTX_TCPIP_IPCSE(ipcse);
4989 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4990 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4991 fields |= WTX_IXSM;
4992 }
4993
4994 offset += iphl;
4995
4996 if (m0->m_pkthdr.csum_flags &
4997 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4998 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4999 fields |= WTX_TXSM;
5000 tucs = WTX_TCPIP_TUCSS(offset) |
5001 WTX_TCPIP_TUCSO(offset +
5002 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
5003 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5004 } else if ((m0->m_pkthdr.csum_flags &
5005 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
5006 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5007 fields |= WTX_TXSM;
5008 tucs = WTX_TCPIP_TUCSS(offset) |
5009 WTX_TCPIP_TUCSO(offset +
5010 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
5011 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5012 } else {
5013 /* Just initialize it to a valid TCP context. */
5014 tucs = WTX_TCPIP_TUCSS(offset) |
5015 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
5016 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5017 }
5018
5019 /* Fill in the context descriptor. */
5020 t = (struct livengood_tcpip_ctxdesc *)
5021 &sc->sc_txdescs[sc->sc_txnext];
5022 t->tcpip_ipcs = htole32(ipcs);
5023 t->tcpip_tucs = htole32(tucs);
5024 t->tcpip_cmdlen = htole32(cmdlen);
5025 t->tcpip_seg = htole32(seg);
5026 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5027
5028 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5029 txs->txs_ndesc++;
5030
5031 *cmdp = cmd;
5032 *fieldsp = fields;
5033
5034 return 0;
5035 }
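/*
 * Note that the context descriptor filled in above consumes one slot in the
 * Tx ring (sc_txnext is advanced and txs_ndesc incremented); this is the
 * "one more" descriptor that wm_start_locked() reserves beyond the DMA
 * segment count before committing to a packet.
 */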
5036
5037 static void
5038 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5039 {
5040 struct mbuf *m;
5041 int i;
5042
5043 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5044 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5045 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5046 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5047 m->m_data, m->m_len, m->m_flags);
5048 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5049 i, i == 1 ? "" : "s");
5050 }
5051
5052 /*
5053 * wm_82547_txfifo_stall:
5054 *
5055 * Callout used to wait for the 82547 Tx FIFO to drain,
5056 * reset the FIFO pointers, and restart packet transmission.
5057 */
5058 static void
5059 wm_82547_txfifo_stall(void *arg)
5060 {
5061 struct wm_softc *sc = arg;
5062 #ifndef WM_MPSAFE
5063 int s;
5064
5065 s = splnet();
5066 #endif
5067 WM_TX_LOCK(sc);
5068
5069 if (sc->sc_stopping)
5070 goto out;
5071
5072 if (sc->sc_txfifo_stall) {
5073 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
5074 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5075 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5076 /*
5077 * Packets have drained. Stop transmitter, reset
5078 * FIFO pointers, restart transmitter, and kick
5079 * the packet queue.
5080 */
5081 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5082 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5083 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
5084 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
5085 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
5086 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
5087 CSR_WRITE(sc, WMREG_TCTL, tctl);
5088 CSR_WRITE_FLUSH(sc);
5089
5090 sc->sc_txfifo_head = 0;
5091 sc->sc_txfifo_stall = 0;
5092 wm_start_locked(&sc->sc_ethercom.ec_if);
5093 } else {
5094 /*
5095 * Still waiting for packets to drain; try again in
5096 * another tick.
5097 */
5098 callout_schedule(&sc->sc_txfifo_ch, 1);
5099 }
5100 }
5101
5102 out:
5103 WM_TX_UNLOCK(sc);
5104 #ifndef WM_MPSAFE
5105 splx(s);
5106 #endif
5107 }
5108
5109 /*
5110 * wm_82547_txfifo_bugchk:
5111 *
5112 * Check for bug condition in the 82547 Tx FIFO. We need to
5113 * prevent enqueueing a packet that would wrap around the end
5114  * of the Tx FIFO ring buffer, otherwise the chip will croak.
5115 *
5116 * We do this by checking the amount of space before the end
5117 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5118 * the Tx FIFO, wait for all remaining packets to drain, reset
5119 * the internal FIFO pointers to the beginning, and restart
5120 * transmission on the interface.
5121 */
5122 #define WM_FIFO_HDR 0x10
5123 #define WM_82547_PAD_LEN 0x3e0
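/*
 * Worked example of the check below, with WM_FIFO_HDR = 0x10 and
 * WM_82547_PAD_LEN = 0x3e0: a 1514-byte frame occupies
 * len = roundup(1514 + 0x10, 0x10) = 0x600 bytes of FIFO space. With only
 * space = 0x200 bytes left before the end of the FIFO, len >= 0x3e0 + 0x200
 * holds (0x600 >= 0x5e0), so transmission is stalled until the FIFO drains;
 * with space = 0x400 it does not (0x600 < 0x7e0) and the packet is sent
 * normally.
 */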
5124 static int
5125 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5126 {
5127 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
5128 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5129
5130 /* Just return if already stalled. */
5131 if (sc->sc_txfifo_stall)
5132 return 1;
5133
5134 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5135 /* Stall only occurs in half-duplex mode. */
5136 goto send_packet;
5137 }
5138
5139 if (len >= WM_82547_PAD_LEN + space) {
5140 sc->sc_txfifo_stall = 1;
5141 callout_schedule(&sc->sc_txfifo_ch, 1);
5142 return 1;
5143 }
5144
5145 send_packet:
5146 sc->sc_txfifo_head += len;
5147 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
5148 sc->sc_txfifo_head -= sc->sc_txfifo_size;
5149
5150 return 0;
5151 }
5152
5153 /*
5154 * wm_start: [ifnet interface function]
5155 *
5156 * Start packet transmission on the interface.
5157 */
5158 static void
5159 wm_start(struct ifnet *ifp)
5160 {
5161 struct wm_softc *sc = ifp->if_softc;
5162
5163 WM_TX_LOCK(sc);
5164 if (!sc->sc_stopping)
5165 wm_start_locked(ifp);
5166 WM_TX_UNLOCK(sc);
5167 }
5168
5169 static void
5170 wm_start_locked(struct ifnet *ifp)
5171 {
5172 struct wm_softc *sc = ifp->if_softc;
5173 struct mbuf *m0;
5174 struct m_tag *mtag;
5175 struct wm_txsoft *txs;
5176 bus_dmamap_t dmamap;
5177 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
5178 bus_addr_t curaddr;
5179 bus_size_t seglen, curlen;
5180 uint32_t cksumcmd;
5181 uint8_t cksumfields;
5182
5183 KASSERT(WM_TX_LOCKED(sc));
5184
5185 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5186 return;
5187
5188 /* Remember the previous number of free descriptors. */
5189 ofree = sc->sc_txfree;
5190
5191 /*
5192 * Loop through the send queue, setting up transmit descriptors
5193 * until we drain the queue, or use up all available transmit
5194 * descriptors.
5195 */
5196 for (;;) {
5197 m0 = NULL;
5198
5199 /* Get a work queue entry. */
5200 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5201 wm_txeof(sc);
5202 if (sc->sc_txsfree == 0) {
5203 DPRINTF(WM_DEBUG_TX,
5204 ("%s: TX: no free job descriptors\n",
5205 device_xname(sc->sc_dev)));
5206 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5207 break;
5208 }
5209 }
5210
5211 /* Grab a packet off the queue. */
5212 IFQ_DEQUEUE(&ifp->if_snd, m0);
5213 if (m0 == NULL)
5214 break;
5215
5216 DPRINTF(WM_DEBUG_TX,
5217 ("%s: TX: have packet to transmit: %p\n",
5218 device_xname(sc->sc_dev), m0));
5219
5220 txs = &sc->sc_txsoft[sc->sc_txsnext];
5221 dmamap = txs->txs_dmamap;
5222
5223 use_tso = (m0->m_pkthdr.csum_flags &
5224 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
5225
5226 /*
5227 * So says the Linux driver:
5228 * The controller does a simple calculation to make sure
5229 * there is enough room in the FIFO before initiating the
5230 * DMA for each buffer. The calc is:
5231 * 4 = ceil(buffer len / MSS)
5232 * To make sure we don't overrun the FIFO, adjust the max
5233 * buffer len if the MSS drops.
5234 */
5235 dmamap->dm_maxsegsz =
5236 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
5237 ? m0->m_pkthdr.segsz << 2
5238 : WTX_MAX_LEN;
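		/*
		 * Example of the clamp above: with a typical TSO MSS of
		 * 1460, each DMA segment is limited to 1460 << 2 = 5840
		 * bytes, satisfying the controller's "4 buffers per MSS"
		 * FIFO estimate described in the preceding comment.
		 */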
5239
5240 /*
5241 * Load the DMA map. If this fails, the packet either
5242 * didn't fit in the allotted number of segments, or we
5243 * were short on resources. For the too-many-segments
5244 * case, we simply report an error and drop the packet,
5245 * since we can't sanely copy a jumbo packet to a single
5246 * buffer.
5247 */
5248 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5249 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5250 if (error) {
5251 if (error == EFBIG) {
5252 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5253 log(LOG_ERR, "%s: Tx packet consumes too many "
5254 "DMA segments, dropping...\n",
5255 device_xname(sc->sc_dev));
5256 wm_dump_mbuf_chain(sc, m0);
5257 m_freem(m0);
5258 continue;
5259 }
5260 /* Short on resources, just stop for now. */
5261 DPRINTF(WM_DEBUG_TX,
5262 ("%s: TX: dmamap load failed: %d\n",
5263 device_xname(sc->sc_dev), error));
5264 break;
5265 }
5266
5267 segs_needed = dmamap->dm_nsegs;
5268 if (use_tso) {
5269 /* For sentinel descriptor; see below. */
5270 segs_needed++;
5271 }
5272
5273 /*
5274 * Ensure we have enough descriptors free to describe
5275 * the packet. Note, we always reserve one descriptor
5276 * at the end of the ring due to the semantics of the
5277 * TDT register, plus one more in the event we need
5278 * to load offload context.
5279 */
5280 if (segs_needed > sc->sc_txfree - 2) {
5281 /*
5282 * Not enough free descriptors to transmit this
5283 * packet. We haven't committed anything yet,
5284 * so just unload the DMA map, put the packet
5285 * pack on the queue, and punt. Notify the upper
5286 * layer that there are no more slots left.
5287 */
5288 DPRINTF(WM_DEBUG_TX,
5289 ("%s: TX: need %d (%d) descriptors, have %d\n",
5290 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5291 segs_needed, sc->sc_txfree - 1));
5292 ifp->if_flags |= IFF_OACTIVE;
5293 bus_dmamap_unload(sc->sc_dmat, dmamap);
5294 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5295 break;
5296 }
5297
5298 /*
5299 * Check for 82547 Tx FIFO bug. We need to do this
5300 * once we know we can transmit the packet, since we
5301 * do some internal FIFO space accounting here.
5302 */
5303 if (sc->sc_type == WM_T_82547 &&
5304 wm_82547_txfifo_bugchk(sc, m0)) {
5305 DPRINTF(WM_DEBUG_TX,
5306 ("%s: TX: 82547 Tx FIFO bug detected\n",
5307 device_xname(sc->sc_dev)));
5308 ifp->if_flags |= IFF_OACTIVE;
5309 bus_dmamap_unload(sc->sc_dmat, dmamap);
5310 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
5311 break;
5312 }
5313
5314 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5315
5316 DPRINTF(WM_DEBUG_TX,
5317 ("%s: TX: packet has %d (%d) DMA segments\n",
5318 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5319
5320 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5321
5322 /*
5323 * Store a pointer to the packet so that we can free it
5324 * later.
5325 *
5326 * Initially, we consider the number of descriptors the
5327 		 * packet uses to be the number of DMA segments. This may be
5328 * incremented by 1 if we do checksum offload (a descriptor
5329 * is used to set the checksum context).
5330 */
5331 txs->txs_mbuf = m0;
5332 txs->txs_firstdesc = sc->sc_txnext;
5333 txs->txs_ndesc = segs_needed;
5334
5335 /* Set up offload parameters for this packet. */
5336 if (m0->m_pkthdr.csum_flags &
5337 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5338 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5339 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5340 if (wm_tx_offload(sc, txs, &cksumcmd,
5341 &cksumfields) != 0) {
5342 /* Error message already displayed. */
5343 bus_dmamap_unload(sc->sc_dmat, dmamap);
5344 continue;
5345 }
5346 } else {
5347 cksumcmd = 0;
5348 cksumfields = 0;
5349 }
5350
5351 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
5352
5353 /* Sync the DMA map. */
5354 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5355 BUS_DMASYNC_PREWRITE);
5356
5357 /* Initialize the transmit descriptor. */
5358 for (nexttx = sc->sc_txnext, seg = 0;
5359 seg < dmamap->dm_nsegs; seg++) {
5360 for (seglen = dmamap->dm_segs[seg].ds_len,
5361 curaddr = dmamap->dm_segs[seg].ds_addr;
5362 seglen != 0;
5363 curaddr += curlen, seglen -= curlen,
5364 nexttx = WM_NEXTTX(sc, nexttx)) {
5365 curlen = seglen;
5366
5367 /*
5368 * So says the Linux driver:
5369 * Work around for premature descriptor
5370 * write-backs in TSO mode. Append a
5371 * 4-byte sentinel descriptor.
5372 */
5373 if (use_tso &&
5374 seg == dmamap->dm_nsegs - 1 &&
5375 curlen > 8)
5376 curlen -= 4;
5377
5378 wm_set_dma_addr(
5379 &sc->sc_txdescs[nexttx].wtx_addr,
5380 curaddr);
5381 sc->sc_txdescs[nexttx].wtx_cmdlen =
5382 htole32(cksumcmd | curlen);
5383 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
5384 0;
5385 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
5386 cksumfields;
5387 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5388 lasttx = nexttx;
5389
5390 DPRINTF(WM_DEBUG_TX,
5391 ("%s: TX: desc %d: low %#" PRIx64 ", "
5392 "len %#04zx\n",
5393 device_xname(sc->sc_dev), nexttx,
5394 (uint64_t)curaddr, curlen));
5395 }
5396 }
5397
5398 KASSERT(lasttx != -1);
5399
5400 /*
5401 * Set up the command byte on the last descriptor of
5402 * the packet. If we're in the interrupt delay window,
5403 * delay the interrupt.
5404 */
5405 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5406 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5407
5408 /*
5409 * If VLANs are enabled and the packet has a VLAN tag, set
5410 * up the descriptor to encapsulate the packet for us.
5411 *
5412 * This is only valid on the last descriptor of the packet.
5413 */
5414 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5415 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5416 htole32(WTX_CMD_VLE);
5417 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
5418 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5419 }
5420
5421 txs->txs_lastdesc = lasttx;
5422
5423 DPRINTF(WM_DEBUG_TX,
5424 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5425 device_xname(sc->sc_dev),
5426 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5427
5428 /* Sync the descriptors we're using. */
5429 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5430 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5431
5432 /* Give the packet to the chip. */
5433 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5434
5435 DPRINTF(WM_DEBUG_TX,
5436 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5437
5438 DPRINTF(WM_DEBUG_TX,
5439 ("%s: TX: finished transmitting packet, job %d\n",
5440 device_xname(sc->sc_dev), sc->sc_txsnext));
5441
5442 /* Advance the tx pointer. */
5443 sc->sc_txfree -= txs->txs_ndesc;
5444 sc->sc_txnext = nexttx;
5445
5446 sc->sc_txsfree--;
5447 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5448
5449 /* Pass the packet to any BPF listeners. */
5450 bpf_mtap(ifp, m0);
5451 }
5452
5453 if (m0 != NULL) {
5454 ifp->if_flags |= IFF_OACTIVE;
5455 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5456 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5457 m_freem(m0);
5458 }
5459
5460 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5461 /* No more slots; notify upper layer. */
5462 ifp->if_flags |= IFF_OACTIVE;
5463 }
5464
5465 if (sc->sc_txfree != ofree) {
5466 /* Set a watchdog timer in case the chip flakes out. */
5467 ifp->if_timer = 5;
5468 }
5469 }
5470
5471 /*
5472 * wm_nq_tx_offload:
5473 *
5474 * Set up TCP/IP checksumming parameters for the
5475 * specified packet, for NEWQUEUE devices
5476 */
5477 static int
5478 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
5479 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
5480 {
5481 struct mbuf *m0 = txs->txs_mbuf;
5482 struct m_tag *mtag;
5483 uint32_t vl_len, mssidx, cmdc;
5484 struct ether_header *eh;
5485 int offset, iphl;
5486
5487 /*
5488 * XXX It would be nice if the mbuf pkthdr had offset
5489 * fields for the protocol headers.
5490 */
5491 *cmdlenp = 0;
5492 *fieldsp = 0;
5493
5494 eh = mtod(m0, struct ether_header *);
5495 switch (htons(eh->ether_type)) {
5496 case ETHERTYPE_IP:
5497 case ETHERTYPE_IPV6:
5498 offset = ETHER_HDR_LEN;
5499 break;
5500
5501 case ETHERTYPE_VLAN:
5502 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5503 break;
5504
5505 default:
5506 /* Don't support this protocol or encapsulation. */
5507 *do_csum = false;
5508 return 0;
5509 }
5510 *do_csum = true;
5511 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
5512 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
5513
5514 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
5515 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
5516
5517 if ((m0->m_pkthdr.csum_flags &
5518 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
5519 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5520 } else {
5521 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5522 }
5523 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
5524 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
5525
5526 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5527 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
5528 << NQTXC_VLLEN_VLAN_SHIFT);
5529 *cmdlenp |= NQTX_CMD_VLE;
5530 }
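/*
 * For example, an untagged IPv4 packet at this point has
 * MACLEN = 14 (the Ethernet header) and IPLEN = 20 (an
 * IPv4 header without options) in the shifted fields of
 * vl_len.
 */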
5531
5532 mssidx = 0;
5533
5534 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5535 int hlen = offset + iphl;
5536 int tcp_hlen;
5537 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5538
5539 if (__predict_false(m0->m_len <
5540 (hlen + sizeof(struct tcphdr)))) {
5541 /*
5542 * TCP/IP headers are not in the first mbuf; we need
5543 * to do this the slow and painful way. Let's just
5544 * hope this doesn't happen very often.
5545 */
5546 struct tcphdr th;
5547
5548 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5549
5550 m_copydata(m0, hlen, sizeof(th), &th);
5551 if (v4) {
5552 struct ip ip;
5553
5554 m_copydata(m0, offset, sizeof(ip), &ip);
5555 ip.ip_len = 0;
5556 m_copyback(m0,
5557 offset + offsetof(struct ip, ip_len),
5558 sizeof(ip.ip_len), &ip.ip_len);
5559 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5560 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5561 } else {
5562 struct ip6_hdr ip6;
5563
5564 m_copydata(m0, offset, sizeof(ip6), &ip6);
5565 ip6.ip6_plen = 0;
5566 m_copyback(m0,
5567 offset + offsetof(struct ip6_hdr, ip6_plen),
5568 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5569 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5570 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5571 }
5572 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5573 sizeof(th.th_sum), &th.th_sum);
5574
5575 tcp_hlen = th.th_off << 2;
5576 } else {
5577 /*
5578 * TCP/IP headers are in the first mbuf; we can do
5579 * this the easy way.
5580 */
5581 struct tcphdr *th;
5582
5583 if (v4) {
5584 struct ip *ip =
5585 (void *)(mtod(m0, char *) + offset);
5586 th = (void *)(mtod(m0, char *) + hlen);
5587
5588 ip->ip_len = 0;
5589 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5590 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5591 } else {
5592 struct ip6_hdr *ip6 =
5593 (void *)(mtod(m0, char *) + offset);
5594 th = (void *)(mtod(m0, char *) + hlen);
5595
5596 ip6->ip6_plen = 0;
5597 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5598 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5599 }
5600 tcp_hlen = th->th_off << 2;
5601 }
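/*
 * th_off counts 32-bit words, so "th_off << 2" yields
 * the TCP header length in bytes (e.g. 5 -> 20 for a
 * header without options).
 */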
5602 hlen += tcp_hlen;
5603 *cmdlenp |= NQTX_CMD_TSE;
5604
5605 if (v4) {
5606 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5607 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5608 } else {
5609 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5610 *fieldsp |= NQTXD_FIELDS_TUXSM;
5611 }
5612 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5613 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5614 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5615 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5616 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5617 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
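/*
 * E.g. a standard TSO over Ethernet/IPv4 ends up with an
 * MSS of 1460 in the MSS field and 20 (the TCP header
 * length) in the L4LEN field.
 */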
5618 } else {
5619 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5620 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5621 }
5622
5623 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5624 *fieldsp |= NQTXD_FIELDS_IXSM;
5625 cmdc |= NQTXC_CMD_IP4;
5626 }
5627
5628 if (m0->m_pkthdr.csum_flags &
5629 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5630 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5631 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5632 cmdc |= NQTXC_CMD_TCP;
5633 } else {
5634 cmdc |= NQTXC_CMD_UDP;
5635 }
5636 cmdc |= NQTXC_CMD_IP4;
5637 *fieldsp |= NQTXD_FIELDS_TUXSM;
5638 }
5639 if (m0->m_pkthdr.csum_flags &
5640 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5641 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5642 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5643 cmdc |= NQTXC_CMD_TCP;
5644 } else {
5645 cmdc |= NQTXC_CMD_UDP;
5646 }
5647 cmdc |= NQTXC_CMD_IP6;
5648 *fieldsp |= NQTXD_FIELDS_TUXSM;
5649 }
5650
5651 /* Fill in the context descriptor. */
5652 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5653 htole32(vl_len);
5654 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5655 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5656 htole32(cmdc);
5657 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5658 htole32(mssidx);
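/*
 * The context descriptor occupies a ring slot of its own,
 * which is why sc_txnext advances and txs_ndesc is bumped
 * below.
 */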
5659 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5660 DPRINTF(WM_DEBUG_TX,
5661 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5662 sc->sc_txnext, 0, vl_len));
5663 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5664 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5665 txs->txs_ndesc++;
5666 return 0;
5667 }
5668
5669 /*
5670 * wm_nq_start: [ifnet interface function]
5671 *
5672 * Start packet transmission on the interface for NEWQUEUE devices
5673 */
5674 static void
5675 wm_nq_start(struct ifnet *ifp)
5676 {
5677 struct wm_softc *sc = ifp->if_softc;
5678
5679 WM_TX_LOCK(sc);
5680 if (!sc->sc_stopping)
5681 wm_nq_start_locked(ifp);
5682 WM_TX_UNLOCK(sc);
5683 }
5684
5685 static void
5686 wm_nq_start_locked(struct ifnet *ifp)
5687 {
5688 struct wm_softc *sc = ifp->if_softc;
5689 struct mbuf *m0;
5690 struct m_tag *mtag;
5691 struct wm_txsoft *txs;
5692 bus_dmamap_t dmamap;
5693 int error, nexttx, lasttx = -1, seg, segs_needed;
5694 bool do_csum, sent;
5695
5696 KASSERT(WM_TX_LOCKED(sc));
5697
5698 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5699 return;
5700
5701 sent = false;
5702
5703 /*
5704 * Loop through the send queue, setting up transmit descriptors
5705 * until we drain the queue, or use up all available transmit
5706 * descriptors.
5707 */
5708 for (;;) {
5709 m0 = NULL;
5710
5711 /* Get a work queue entry. */
5712 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5713 wm_txeof(sc);
5714 if (sc->sc_txsfree == 0) {
5715 DPRINTF(WM_DEBUG_TX,
5716 ("%s: TX: no free job descriptors\n",
5717 device_xname(sc->sc_dev)));
5718 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5719 break;
5720 }
5721 }
5722
5723 /* Grab a packet off the queue. */
5724 IFQ_DEQUEUE(&ifp->if_snd, m0);
5725 if (m0 == NULL)
5726 break;
5727
5728 DPRINTF(WM_DEBUG_TX,
5729 ("%s: TX: have packet to transmit: %p\n",
5730 device_xname(sc->sc_dev), m0));
5731
5732 txs = &sc->sc_txsoft[sc->sc_txsnext];
5733 dmamap = txs->txs_dmamap;
5734
5735 /*
5736 * Load the DMA map. If this fails, the packet either
5737 * didn't fit in the allotted number of segments, or we
5738 * were short on resources. For the too-many-segments
5739 * case, we simply report an error and drop the packet,
5740 * since we can't sanely copy a jumbo packet to a single
5741 * buffer.
5742 */
5743 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5744 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5745 if (error) {
5746 if (error == EFBIG) {
5747 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5748 log(LOG_ERR, "%s: Tx packet consumes too many "
5749 "DMA segments, dropping...\n",
5750 device_xname(sc->sc_dev));
5751 wm_dump_mbuf_chain(sc, m0);
5752 m_freem(m0);
5753 continue;
5754 }
5755 /* Short on resources, just stop for now. */
5756 DPRINTF(WM_DEBUG_TX,
5757 ("%s: TX: dmamap load failed: %d\n",
5758 device_xname(sc->sc_dev), error));
5759 break;
5760 }
5761
5762 segs_needed = dmamap->dm_nsegs;
5763
5764 /*
5765 * Ensure we have enough descriptors free to describe
5766 * the packet. Note, we always reserve one descriptor
5767 * at the end of the ring due to the semantics of the
5768 * TDT register, plus one more in the event we need
5769 * to load offload context.
5770 */
5771 if (segs_needed > sc->sc_txfree - 2) {
5772 /*
5773 * Not enough free descriptors to transmit this
5774 * packet. We haven't committed anything yet,
5775 * so just unload the DMA map and punt; the
5776 * packet is dropped after the loop. Notify the
5777 * upper layer that there are no more slots left.
5778 */
5779 DPRINTF(WM_DEBUG_TX,
5780 ("%s: TX: need %d (%d) descriptors, have %d\n",
5781 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5782 segs_needed, sc->sc_txfree - 2));
5783 ifp->if_flags |= IFF_OACTIVE;
5784 bus_dmamap_unload(sc->sc_dmat, dmamap);
5785 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5786 break;
5787 }
5788
5789 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5790
5791 DPRINTF(WM_DEBUG_TX,
5792 ("%s: TX: packet has %d (%d) DMA segments\n",
5793 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5794
5795 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5796
5797 /*
5798 * Store a pointer to the packet so that we can free it
5799 * later.
5800 *
5801 * Initially, we take the number of descriptors the
5802 * packet uses to be the number of DMA segments. This may be
5803 * incremented by 1 if we do checksum offload (a descriptor
5804 * is used to set the checksum context).
5805 */
5806 txs->txs_mbuf = m0;
5807 txs->txs_firstdesc = sc->sc_txnext;
5808 txs->txs_ndesc = segs_needed;
5809
5810 /* Set up offload parameters for this packet. */
5811 uint32_t cmdlen, fields, dcmdlen;
5812 if (m0->m_pkthdr.csum_flags &
5813 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5814 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5815 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5816 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5817 &do_csum) != 0) {
5818 /* Error message already displayed. */
5819 bus_dmamap_unload(sc->sc_dmat, dmamap);
5820 continue;
5821 }
5822 } else {
5823 do_csum = false;
5824 cmdlen = 0;
5825 fields = 0;
5826 }
5827
5828 /* Sync the DMA map. */
5829 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5830 BUS_DMASYNC_PREWRITE);
5831
5832 /* Initialize the first transmit descriptor. */
5833 nexttx = sc->sc_txnext;
5834 if (!do_csum) {
5835 /* setup a legacy descriptor */
5836 wm_set_dma_addr(
5837 &sc->sc_txdescs[nexttx].wtx_addr,
5838 dmamap->dm_segs[0].ds_addr);
5839 sc->sc_txdescs[nexttx].wtx_cmdlen =
5840 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5841 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5842 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5843 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5844 NULL) {
5845 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5846 htole32(WTX_CMD_VLE);
5847 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5848 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5849 } else {
5850 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5851 }
5852 dcmdlen = 0;
5853 } else {
5854 /* setup an advanced data descriptor */
5855 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5856 htole64(dmamap->dm_segs[0].ds_addr);
5857 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5858 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5859 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
5860 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5861 htole32(fields);
5862 DPRINTF(WM_DEBUG_TX,
5863 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5864 device_xname(sc->sc_dev), nexttx,
5865 (uint64_t)dmamap->dm_segs[0].ds_addr));
5866 DPRINTF(WM_DEBUG_TX,
5867 ("\t 0x%08x%08x\n", fields,
5868 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5869 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5870 }
5871
5872 lasttx = nexttx;
5873 nexttx = WM_NEXTTX(sc, nexttx);
5874 /*
5875 * Fill in the remaining descriptors. The legacy and
5876 * advanced formats are identical from here on.
5877 */
5878 for (seg = 1; seg < dmamap->dm_nsegs;
5879 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5880 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5881 htole64(dmamap->dm_segs[seg].ds_addr);
5882 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5883 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5884 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5885 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5886 lasttx = nexttx;
5887
5888 DPRINTF(WM_DEBUG_TX,
5889 ("%s: TX: desc %d: %#" PRIx64 ", "
5890 "len %#04zx\n",
5891 device_xname(sc->sc_dev), nexttx,
5892 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5893 dmamap->dm_segs[seg].ds_len));
5894 }
5895
5896 KASSERT(lasttx != -1);
5897
5898 /*
5899 * Set up the command byte on the last descriptor of
5900 * the packet. If we're in the interrupt delay window,
5901 * delay the interrupt.
5902 */
5903 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5904 (NQTX_CMD_EOP | NQTX_CMD_RS));
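/*
 * The EOP and RS bits occupy the same positions in the
 * legacy and advanced formats (hence the KASSERT above),
 * so setting them through the legacy overlay is safe for
 * either descriptor type.
 */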
5905 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5906 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5907
5908 txs->txs_lastdesc = lasttx;
5909
5910 DPRINTF(WM_DEBUG_TX,
5911 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5912 device_xname(sc->sc_dev),
5913 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5914
5915 /* Sync the descriptors we're using. */
5916 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5917 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5918
5919 /* Give the packet to the chip. */
5920 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5921 sent = true;
5922
5923 DPRINTF(WM_DEBUG_TX,
5924 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5925
5926 DPRINTF(WM_DEBUG_TX,
5927 ("%s: TX: finished transmitting packet, job %d\n",
5928 device_xname(sc->sc_dev), sc->sc_txsnext));
5929
5930 /* Advance the tx pointer. */
5931 sc->sc_txfree -= txs->txs_ndesc;
5932 sc->sc_txnext = nexttx;
5933
5934 sc->sc_txsfree--;
5935 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5936
5937 /* Pass the packet to any BPF listeners. */
5938 bpf_mtap(ifp, m0);
5939 }
5940
5941 if (m0 != NULL) {
5942 ifp->if_flags |= IFF_OACTIVE;
5943 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5944 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5945 m_freem(m0);
5946 }
5947
5948 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5949 /* No more slots; notify upper layer. */
5950 ifp->if_flags |= IFF_OACTIVE;
5951 }
5952
5953 if (sent) {
5954 /* Set a watchdog timer in case the chip flakes out. */
5955 ifp->if_timer = 5;
5956 }
5957 }
5958
5959 /* Interrupt */
5960
5961 /*
5962 * wm_txeof:
5963 *
5964 * Helper; handle transmit interrupts.
5965 */
5966 static int
5967 wm_txeof(struct wm_softc *sc)
5968 {
5969 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5970 struct wm_txsoft *txs;
5971 bool processed = false;
5972 int count = 0;
5973 int i;
5974 uint8_t status;
5975
5976 if (sc->sc_stopping)
5977 return 0;
5978
5979 ifp->if_flags &= ~IFF_OACTIVE;
5980
5981 /*
5982 * Go through the Tx list and free mbufs for those
5983 * frames which have been transmitted.
5984 */
5985 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5986 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5987 txs = &sc->sc_txsoft[i];
5988
5989 DPRINTF(WM_DEBUG_TX,
5990 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5991
5992 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5993 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5994
5995 status =
5996 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
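/*
 * If the chip has not yet set the DD (descriptor done)
 * bit, the job is still in flight; re-sync the descriptor
 * for reading and stop here, since jobs complete in order.
 */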
5997 if ((status & WTX_ST_DD) == 0) {
5998 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5999 BUS_DMASYNC_PREREAD);
6000 break;
6001 }
6002
6003 processed = true;
6004 count++;
6005 DPRINTF(WM_DEBUG_TX,
6006 ("%s: TX: job %d done: descs %d..%d\n",
6007 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
6008 txs->txs_lastdesc));
6009
6010 /*
6011 * XXX We should probably be using the statistics
6012 * XXX registers, but I don't know if they exist
6013 * XXX on chips before the i82544.
6014 */
6015
6016 #ifdef WM_EVENT_COUNTERS
6017 if (status & WTX_ST_TU)
6018 WM_EVCNT_INCR(&sc->sc_ev_tu);
6019 #endif /* WM_EVENT_COUNTERS */
6020
6021 if (status & (WTX_ST_EC|WTX_ST_LC)) {
6022 ifp->if_oerrors++;
6023 if (status & WTX_ST_LC)
6024 log(LOG_WARNING, "%s: late collision\n",
6025 device_xname(sc->sc_dev));
6026 else if (status & WTX_ST_EC) {
6027 ifp->if_collisions += 16;
6028 log(LOG_WARNING, "%s: excessive collisions\n",
6029 device_xname(sc->sc_dev));
6030 }
6031 } else
6032 ifp->if_opackets++;
6033
6034 sc->sc_txfree += txs->txs_ndesc;
6035 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
6036 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6037 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
6038 m_freem(txs->txs_mbuf);
6039 txs->txs_mbuf = NULL;
6040 }
6041
6042 /* Update the dirty transmit buffer pointer. */
6043 sc->sc_txsdirty = i;
6044 DPRINTF(WM_DEBUG_TX,
6045 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
6046
6047 if (count != 0)
6048 rnd_add_uint32(&sc->rnd_source, count);
6049
6050 /*
6051 * If there are no more pending transmissions, cancel the watchdog
6052 * timer.
6053 */
6054 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
6055 ifp->if_timer = 0;
6056
6057 return processed;
6058 }
6059
6060 /*
6061 * wm_rxeof:
6062 *
6063 * Helper; handle receive interrupts.
6064 */
6065 static void
6066 wm_rxeof(struct wm_softc *sc)
6067 {
6068 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6069 struct wm_rxsoft *rxs;
6070 struct mbuf *m;
6071 int i, len;
6072 int count = 0;
6073 uint8_t status, errors;
6074 uint16_t vlantag;
6075
6076 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
6077 rxs = &sc->sc_rxsoft[i];
6078
6079 DPRINTF(WM_DEBUG_RX,
6080 ("%s: RX: checking descriptor %d\n",
6081 device_xname(sc->sc_dev), i));
6082
6083 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6084
6085 status = sc->sc_rxdescs[i].wrx_status;
6086 errors = sc->sc_rxdescs[i].wrx_errors;
6087 len = le16toh(sc->sc_rxdescs[i].wrx_len);
6088 vlantag = sc->sc_rxdescs[i].wrx_special;
6089
6090 if ((status & WRX_ST_DD) == 0) {
6091 /* We have processed all of the receive descriptors. */
6092 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
6093 break;
6094 }
6095
6096 count++;
6097 if (__predict_false(sc->sc_rxdiscard)) {
6098 DPRINTF(WM_DEBUG_RX,
6099 ("%s: RX: discarding contents of descriptor %d\n",
6100 device_xname(sc->sc_dev), i));
6101 WM_INIT_RXDESC(sc, i);
6102 if (status & WRX_ST_EOP) {
6103 /* Reset our state. */
6104 DPRINTF(WM_DEBUG_RX,
6105 ("%s: RX: resetting rxdiscard -> 0\n",
6106 device_xname(sc->sc_dev)));
6107 sc->sc_rxdiscard = 0;
6108 }
6109 continue;
6110 }
6111
6112 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6113 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
6114
6115 m = rxs->rxs_mbuf;
6116
6117 /*
6118 * Add a new receive buffer to the ring, unless of
6119 * course the length is zero. Treat the latter as a
6120 * failed mapping.
6121 */
6122 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
6123 /*
6124 * Failed, throw away what we've done so
6125 * far, and discard the rest of the packet.
6126 */
6127 ifp->if_ierrors++;
6128 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6129 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
6130 WM_INIT_RXDESC(sc, i);
6131 if ((status & WRX_ST_EOP) == 0)
6132 sc->sc_rxdiscard = 1;
6133 if (sc->sc_rxhead != NULL)
6134 m_freem(sc->sc_rxhead);
6135 WM_RXCHAIN_RESET(sc);
6136 DPRINTF(WM_DEBUG_RX,
6137 ("%s: RX: Rx buffer allocation failed, "
6138 "dropping packet%s\n", device_xname(sc->sc_dev),
6139 sc->sc_rxdiscard ? " (discard)" : ""));
6140 continue;
6141 }
6142
6143 m->m_len = len;
6144 sc->sc_rxlen += len;
6145 DPRINTF(WM_DEBUG_RX,
6146 ("%s: RX: buffer at %p len %d\n",
6147 device_xname(sc->sc_dev), m->m_data, len));
6148
6149 /* If this is not the end of the packet, keep looking. */
6150 if ((status & WRX_ST_EOP) == 0) {
6151 WM_RXCHAIN_LINK(sc, m);
6152 DPRINTF(WM_DEBUG_RX,
6153 ("%s: RX: not yet EOP, rxlen -> %d\n",
6154 device_xname(sc->sc_dev), sc->sc_rxlen));
6155 continue;
6156 }
6157
6158 /*
6159 * Okay, we have the entire packet now. The chip is
6160 * configured to include the FCS except on the I350, I354
6161 * and I21[01] (not all chips can be configured to strip
6162 * it), so we normally need to trim it. We may also need
6163 * to shorten the previous mbuf in the chain if the
6164 * current mbuf is too short to hold the whole FCS. Due
6165 * to an erratum, the RCTL_SECRC bit in RCTL is always
6166 * set on the I350, so the FCS is already stripped there.
6167 */
6168 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
6169 && (sc->sc_type != WM_T_I210)
6170 && (sc->sc_type != WM_T_I211)) {
6171 if (m->m_len < ETHER_CRC_LEN) {
6172 sc->sc_rxtail->m_len
6173 -= (ETHER_CRC_LEN - m->m_len);
6174 m->m_len = 0;
6175 } else
6176 m->m_len -= ETHER_CRC_LEN;
6177 len = sc->sc_rxlen - ETHER_CRC_LEN;
6178 } else
6179 len = sc->sc_rxlen;
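/*
 * E.g. if the 4-byte FCS was split 2/2 across the last
 * two mbufs, m->m_len was 2 above, so the previous mbuf
 * (sc_rxtail) is shortened by the remaining 2 bytes and
 * m is emptied.
 */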
6180
6181 WM_RXCHAIN_LINK(sc, m);
6182
6183 *sc->sc_rxtailp = NULL;
6184 m = sc->sc_rxhead;
6185
6186 WM_RXCHAIN_RESET(sc);
6187
6188 DPRINTF(WM_DEBUG_RX,
6189 ("%s: RX: have entire packet, len -> %d\n",
6190 device_xname(sc->sc_dev), len));
6191
6192 /* If an error occurred, update stats and drop the packet. */
6193 if (errors &
6194 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
6195 if (errors & WRX_ER_SE)
6196 log(LOG_WARNING, "%s: symbol error\n",
6197 device_xname(sc->sc_dev));
6198 else if (errors & WRX_ER_SEQ)
6199 log(LOG_WARNING, "%s: receive sequence error\n",
6200 device_xname(sc->sc_dev));
6201 else if (errors & WRX_ER_CE)
6202 log(LOG_WARNING, "%s: CRC error\n",
6203 device_xname(sc->sc_dev));
6204 m_freem(m);
6205 continue;
6206 }
6207
6208 /* No errors. Receive the packet. */
6209 m->m_pkthdr.rcvif = ifp;
6210 m->m_pkthdr.len = len;
6211
6212 /*
6213 * If VLANs are enabled, VLAN packets have been unwrapped
6214 * for us. Associate the tag with the packet.
6215 */
6216 /* XXX Should check for I350 and I354 */
6217 if ((status & WRX_ST_VP) != 0) {
6218 VLAN_INPUT_TAG(ifp, m,
6219 le16toh(vlantag),
6220 continue);
6221 }
6222
6223 /* Set up checksum info for this packet. */
6224 if ((status & WRX_ST_IXSM) == 0) {
6225 if (status & WRX_ST_IPCS) {
6226 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
6227 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
6228 if (errors & WRX_ER_IPE)
6229 m->m_pkthdr.csum_flags |=
6230 M_CSUM_IPv4_BAD;
6231 }
6232 if (status & WRX_ST_TCPCS) {
6233 /*
6234 * Note: we don't know if this was TCP or UDP,
6235 * so we just set both bits, and expect the
6236 * upper layers to deal.
6237 */
6238 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
6239 m->m_pkthdr.csum_flags |=
6240 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6241 M_CSUM_TCPv6 | M_CSUM_UDPv6;
6242 if (errors & WRX_ER_TCPE)
6243 m->m_pkthdr.csum_flags |=
6244 M_CSUM_TCP_UDP_BAD;
6245 }
6246 }
6247
6248 ifp->if_ipackets++;
6249
6250 WM_RX_UNLOCK(sc);
6251
6252 /* Pass this up to any BPF listeners. */
6253 bpf_mtap(ifp, m);
6254
6255 /* Pass it on. */
6256 (*ifp->if_input)(ifp, m);
6257
6258 WM_RX_LOCK(sc);
6259
6260 if (sc->sc_stopping)
6261 break;
6262 }
6263
6264 /* Update the receive pointer. */
6265 sc->sc_rxptr = i;
6266 if (count != 0)
6267 rnd_add_uint32(&sc->rnd_source, count);
6268
6269 DPRINTF(WM_DEBUG_RX,
6270 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
6271 }
6272
6273 /*
6274 * wm_linkintr_gmii:
6275 *
6276 * Helper; handle link interrupts for GMII.
6277 */
6278 static void
6279 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
6280 {
6281
6282 KASSERT(WM_TX_LOCKED(sc));
6283
6284 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6285 __func__));
6286
6287 if (icr & ICR_LSC) {
6288 DPRINTF(WM_DEBUG_LINK,
6289 ("%s: LINK: LSC -> mii_pollstat\n",
6290 device_xname(sc->sc_dev)));
6291 mii_pollstat(&sc->sc_mii);
6292 if (sc->sc_type == WM_T_82543) {
6293 int miistatus, active;
6294
6295 /*
6296 * With 82543, we need to force speed and
6297 * duplex on the MAC equal to what the PHY
6298 * speed and duplex configuration is.
6299 */
6300 miistatus = sc->sc_mii.mii_media_status;
6301
6302 if (miistatus & IFM_ACTIVE) {
6303 active = sc->sc_mii.mii_media_active;
6304 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6305 switch (IFM_SUBTYPE(active)) {
6306 case IFM_10_T:
6307 sc->sc_ctrl |= CTRL_SPEED_10;
6308 break;
6309 case IFM_100_TX:
6310 sc->sc_ctrl |= CTRL_SPEED_100;
6311 break;
6312 case IFM_1000_T:
6313 sc->sc_ctrl |= CTRL_SPEED_1000;
6314 break;
6315 default:
6316 /*
6317 * Fiber?
6318 * Should never get here.
6319 */
6320 printf("unknown media (%x)\n",
6321 active);
6322 break;
6323 }
6324 if (active & IFM_FDX)
6325 sc->sc_ctrl |= CTRL_FD;
6326 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6327 }
6328 } else if ((sc->sc_type == WM_T_ICH8)
6329 && (sc->sc_phytype == WMPHY_IGP_3)) {
6330 wm_kmrn_lock_loss_workaround_ich8lan(sc);
6331 } else if (sc->sc_type == WM_T_PCH) {
6332 wm_k1_gig_workaround_hv(sc,
6333 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
6334 }
6335
6336 if ((sc->sc_phytype == WMPHY_82578)
6337 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
6338 == IFM_1000_T)) {
6339
6340 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
6341 delay(200*1000); /* XXX too big */
6342
6343 /* Link stall fix for link up */
6344 wm_gmii_hv_writereg(sc->sc_dev, 1,
6345 HV_MUX_DATA_CTRL,
6346 HV_MUX_DATA_CTRL_GEN_TO_MAC
6347 | HV_MUX_DATA_CTRL_FORCE_SPEED);
6348 wm_gmii_hv_writereg(sc->sc_dev, 1,
6349 HV_MUX_DATA_CTRL,
6350 HV_MUX_DATA_CTRL_GEN_TO_MAC);
6351 }
6352 }
6353 } else if (icr & ICR_RXSEQ) {
6354 DPRINTF(WM_DEBUG_LINK,
6355 ("%s: LINK Receive sequence error\n",
6356 device_xname(sc->sc_dev)));
6357 }
6358 }
6359
6360 /*
6361 * wm_linkintr_tbi:
6362 *
6363 * Helper; handle link interrupts for TBI mode.
6364 */
6365 static void
6366 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
6367 {
6368 uint32_t status;
6369
6370 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6371 __func__));
6372
6373 status = CSR_READ(sc, WMREG_STATUS);
6374 if (icr & ICR_LSC) {
6375 if (status & STATUS_LU) {
6376 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
6377 device_xname(sc->sc_dev),
6378 (status & STATUS_FD) ? "FDX" : "HDX"));
6379 /*
6380 * NOTE: CTRL will update TFCE and RFCE automatically,
6381 * so we should update sc->sc_ctrl
6382 */
6383
6384 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6385 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6386 sc->sc_fcrtl &= ~FCRTL_XONE;
6387 if (status & STATUS_FD)
6388 sc->sc_tctl |=
6389 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6390 else
6391 sc->sc_tctl |=
6392 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6393 if (sc->sc_ctrl & CTRL_TFCE)
6394 sc->sc_fcrtl |= FCRTL_XONE;
6395 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6396 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6397 WMREG_OLD_FCRTL : WMREG_FCRTL,
6398 sc->sc_fcrtl);
6399 sc->sc_tbi_linkup = 1;
6400 } else {
6401 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
6402 device_xname(sc->sc_dev)));
6403 sc->sc_tbi_linkup = 0;
6404 }
6405 /* Update LED */
6406 wm_tbi_serdes_set_linkled(sc);
6407 } else if (icr & ICR_RXSEQ) {
6408 DPRINTF(WM_DEBUG_LINK,
6409 ("%s: LINK: Receive sequence error\n",
6410 device_xname(sc->sc_dev)));
6411 }
6412 }
6413
6414 /*
6415 * wm_linkintr_serdes:
6416 *
6417 * Helper; handle link interrupts for SERDES devices.
6418 */
6419 static void
6420 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
6421 {
6422 struct mii_data *mii = &sc->sc_mii;
6423 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6424 uint32_t pcs_adv, pcs_lpab, reg;
6425
6426 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6427 __func__));
6428
6429 if (icr & ICR_LSC) {
6430 /* Check PCS */
6431 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6432 if ((reg & PCS_LSTS_LINKOK) != 0) {
6433 mii->mii_media_status |= IFM_ACTIVE;
6434 sc->sc_tbi_linkup = 1;
6435 } else {
6436 mii->mii_media_active |= IFM_NONE;
6437 sc->sc_tbi_linkup = 0;
6438 wm_tbi_serdes_set_linkled(sc);
6439 return;
6440 }
6441 mii->mii_media_active |= IFM_1000_SX;
6442 if ((reg & PCS_LSTS_FDX) != 0)
6443 mii->mii_media_active |= IFM_FDX;
6444 else
6445 mii->mii_media_active |= IFM_HDX;
6446 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6447 /* Check flow */
6448 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6449 if ((reg & PCS_LSTS_AN_COMP) == 0) {
6450 DPRINTF(WM_DEBUG_LINK,
6451 ("XXX LINKOK but not ACOMP\n"));
6452 return;
6453 }
6454 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
6455 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
6456 DPRINTF(WM_DEBUG_LINK,
6457 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
6458 if ((pcs_adv & TXCW_SYM_PAUSE)
6459 && (pcs_lpab & TXCW_SYM_PAUSE)) {
6460 mii->mii_media_active |= IFM_FLOW
6461 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
6462 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
6463 && (pcs_adv & TXCW_ASYM_PAUSE)
6464 && (pcs_lpab & TXCW_SYM_PAUSE)
6465 && (pcs_lpab & TXCW_ASYM_PAUSE))
6466 mii->mii_media_active |= IFM_FLOW
6467 | IFM_ETH_TXPAUSE;
6468 else if ((pcs_adv & TXCW_SYM_PAUSE)
6469 && (pcs_adv & TXCW_ASYM_PAUSE)
6470 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
6471 && (pcs_lpab & TXCW_ASYM_PAUSE))
6472 mii->mii_media_active |= IFM_FLOW
6473 | IFM_ETH_RXPAUSE;
6474 }
6475 /* Update LED */
6476 wm_tbi_serdes_set_linkled(sc);
6477 } else {
6478 DPRINTF(WM_DEBUG_LINK,
6479 ("%s: LINK: Receive sequence error\n",
6480 device_xname(sc->sc_dev)));
6481 }
6482 }
6483
6484 /*
6485 * wm_linkintr:
6486 *
6487 * Helper; handle link interrupts.
6488 */
6489 static void
6490 wm_linkintr(struct wm_softc *sc, uint32_t icr)
6491 {
6492
6493 if (sc->sc_flags & WM_F_HAS_MII)
6494 wm_linkintr_gmii(sc, icr);
6495 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
6496 && (sc->sc_type >= WM_T_82575))
6497 wm_linkintr_serdes(sc, icr);
6498 else
6499 wm_linkintr_tbi(sc, icr);
6500 }
6501
6502 /*
6503 * wm_intr_legacy:
6504 *
6505 * Interrupt service routine for INTx and MSI.
6506 */
6507 static int
6508 wm_intr_legacy(void *arg)
6509 {
6510 struct wm_softc *sc = arg;
6511 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6512 uint32_t icr, rndval = 0;
6513 int handled = 0;
6514
6515 DPRINTF(WM_DEBUG_TX,
6516 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
6517 while (1 /* CONSTCOND */) {
6518 icr = CSR_READ(sc, WMREG_ICR);
6519 if ((icr & sc->sc_icr) == 0)
6520 break;
6521 if (rndval == 0)
6522 rndval = icr;
6523
6524 WM_RX_LOCK(sc);
6525
6526 if (sc->sc_stopping) {
6527 WM_RX_UNLOCK(sc);
6528 break;
6529 }
6530
6531 handled = 1;
6532
6533 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6534 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
6535 DPRINTF(WM_DEBUG_RX,
6536 ("%s: RX: got Rx intr 0x%08x\n",
6537 device_xname(sc->sc_dev),
6538 icr & (ICR_RXDMT0|ICR_RXT0)));
6539 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6540 }
6541 #endif
6542 wm_rxeof(sc);
6543
6544 WM_RX_UNLOCK(sc);
6545 WM_TX_LOCK(sc);
6546
6547 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6548 if (icr & ICR_TXDW) {
6549 DPRINTF(WM_DEBUG_TX,
6550 ("%s: TX: got TXDW interrupt\n",
6551 device_xname(sc->sc_dev)));
6552 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6553 }
6554 #endif
6555 wm_txeof(sc);
6556
6557 if (icr & (ICR_LSC|ICR_RXSEQ)) {
6558 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6559 wm_linkintr(sc, icr);
6560 }
6561
6562 WM_TX_UNLOCK(sc);
6563
6564 if (icr & ICR_RXO) {
6565 #if defined(WM_DEBUG)
6566 log(LOG_WARNING, "%s: Receive overrun\n",
6567 device_xname(sc->sc_dev));
6568 #endif /* defined(WM_DEBUG) */
6569 }
6570 }
6571
6572 rnd_add_uint32(&sc->rnd_source, rndval);
6573
6574 if (handled) {
6575 /* Try to get more packets going. */
6576 ifp->if_start(ifp);
6577 }
6578
6579 return handled;
6580 }
6581
6582 #ifdef WM_MSI_MSIX
6583 /*
6584 * wm_txintr_msix:
6585 *
6586 * Interrupt service routine for TX complete interrupt for MSI-X.
6587 */
6588 static int
6589 wm_txintr_msix(void *arg)
6590 {
6591 struct wm_softc *sc = arg;
6592 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6593 int handled = 0;
6594
6595 DPRINTF(WM_DEBUG_TX,
6596 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
6597
6598 if (sc->sc_type == WM_T_82574)
6599 CSR_WRITE(sc, WMREG_IMC, ICR_TXQ0); /* 82574 only */
6600 else if (sc->sc_type == WM_T_82575)
6601 CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE0);
6602 else
6603 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_TXINTR_IDX);
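/*
 * The writes above mask this vector's interrupt while it
 * is being serviced; the matching IMS/EIMS writes below
 * re-enable it once wm_txeof() has run.
 */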
6604
6605 WM_TX_LOCK(sc);
6606
6607 if (sc->sc_stopping)
6608 goto out;
6609
6610 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6611 handled = wm_txeof(sc);
6612
6613 out:
6614 WM_TX_UNLOCK(sc);
6615
6616 if (sc->sc_type == WM_T_82574)
6617 CSR_WRITE(sc, WMREG_IMS, ICR_TXQ0); /* 82574 only */
6618 else if (sc->sc_type == WM_T_82575)
6619 CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE0);
6620 else
6621 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_TXINTR_IDX);
6622
6623 if (handled) {
6624 /* Try to get more packets going. */
6625 ifp->if_start(ifp);
6626 }
6627
6628 return handled;
6629 }
6630
6631 /*
6632 * wm_rxintr_msix:
6633 *
6634 * Interrupt service routine for RX interrupt for MSI-X.
6635 */
6636 static int
6637 wm_rxintr_msix(void *arg)
6638 {
6639 struct wm_softc *sc = arg;
6640
6641 DPRINTF(WM_DEBUG_RX,
6642 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
6643
6644 if (sc->sc_type == WM_T_82574)
6645 CSR_WRITE(sc, WMREG_IMC, ICR_RXQ0); /* 82574 only */
6646 else if (sc->sc_type == WM_T_82575)
6647 CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE0);
6648 else
6649 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_RXINTR_IDX);
6650
6651 WM_RX_LOCK(sc);
6652
6653 if (sc->sc_stopping)
6654 goto out;
6655
6656 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6657 wm_rxeof(sc);
6658
6659 out:
6660 WM_RX_UNLOCK(sc);
6661
6662 if (sc->sc_type == WM_T_82574)
6663 CSR_WRITE(sc, WMREG_IMS, ICR_RXQ0);
6664 else if (sc->sc_type == WM_T_82575)
6665 CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE0);
6666 else
6667 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_RXINTR_IDX);
6668
6669 return 1;
6670 }
6671
6672 /*
6673 * wm_linkintr_msix:
6674 *
6675 * Interrupt service routine for link status change for MSI-X.
6676 */
6677 static int
6678 wm_linkintr_msix(void *arg)
6679 {
6680 struct wm_softc *sc = arg;
6681
6682 DPRINTF(WM_DEBUG_LINK,
6683 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
6684
6685 if (sc->sc_type == WM_T_82574)
6686 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER); /* 82574 only */
6687 else if (sc->sc_type == WM_T_82575)
6688 CSR_WRITE(sc, WMREG_EIMC, EITR_OTHER);
6689 else
6690 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_LINKINTR_IDX);
6691 WM_TX_LOCK(sc);
6692 if (sc->sc_stopping)
6693 goto out;
6694
6695 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6696 wm_linkintr(sc, ICR_LSC);
6697
6698 out:
6699 WM_TX_UNLOCK(sc);
6700
6701 if (sc->sc_type == WM_T_82574)
6702 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
6703 else if (sc->sc_type == WM_T_82575)
6704 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
6705 else
6706 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_LINKINTR_IDX);
6707
6708 return 1;
6709 }
6710 #endif /* WM_MSI_MSIX */
6711
6712 /*
6713 * Media related.
6714 * GMII, SGMII, TBI (and SERDES)
6715 */
6716
6717 /* Common */
6718
6719 /*
6720 * wm_tbi_serdes_set_linkled:
6721 *
6722 * Update the link LED on TBI and SERDES devices.
6723 */
6724 static void
6725 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
6726 {
6727
6728 if (sc->sc_tbi_linkup)
6729 sc->sc_ctrl |= CTRL_SWDPIN(0);
6730 else
6731 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6732
6733 /* 82540 or newer devices are active low */
6734 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
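/*
 * I.e. on those devices the bit set above for link-up is
 * inverted here, so the pin is driven low to indicate an
 * active link.
 */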
6735
6736 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6737 }
6738
6739 /* GMII related */
6740
6741 /*
6742 * wm_gmii_reset:
6743 *
6744 * Reset the PHY.
6745 */
6746 static void
6747 wm_gmii_reset(struct wm_softc *sc)
6748 {
6749 uint32_t reg;
6750 int rv;
6751
6752 /* get phy semaphore */
6753 switch (sc->sc_type) {
6754 case WM_T_82571:
6755 case WM_T_82572:
6756 case WM_T_82573:
6757 case WM_T_82574:
6758 case WM_T_82583:
6759 /* XXX should get sw semaphore, too */
6760 rv = wm_get_swsm_semaphore(sc);
6761 break;
6762 case WM_T_82575:
6763 case WM_T_82576:
6764 case WM_T_82580:
6765 case WM_T_I350:
6766 case WM_T_I354:
6767 case WM_T_I210:
6768 case WM_T_I211:
6769 case WM_T_80003:
6770 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6771 break;
6772 case WM_T_ICH8:
6773 case WM_T_ICH9:
6774 case WM_T_ICH10:
6775 case WM_T_PCH:
6776 case WM_T_PCH2:
6777 case WM_T_PCH_LPT:
6778 rv = wm_get_swfwhw_semaphore(sc);
6779 break;
6780 default:
6781 /* Nothing to do */
6782 rv = 0;
6783 break;
6784 }
6785 if (rv != 0) {
6786 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6787 __func__);
6788 return;
6789 }
6790
6791 switch (sc->sc_type) {
6792 case WM_T_82542_2_0:
6793 case WM_T_82542_2_1:
6794 /* null */
6795 break;
6796 case WM_T_82543:
6797 /*
6798 * With 82543, we need to force speed and duplex on the MAC
6799 * equal to what the PHY speed and duplex configuration is.
6800 * In addition, we need to perform a hardware reset on the PHY
6801 * to take it out of reset.
6802 */
6803 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6804 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6805
6806 /* The PHY reset pin is active-low. */
6807 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6808 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6809 CTRL_EXT_SWDPIN(4));
6810 reg |= CTRL_EXT_SWDPIO(4);
6811
6812 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6813 CSR_WRITE_FLUSH(sc);
6814 delay(10*1000);
6815
6816 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6817 CSR_WRITE_FLUSH(sc);
6818 delay(150);
6819 #if 0
6820 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6821 #endif
6822 delay(20*1000); /* XXX extra delay to get PHY ID? */
6823 break;
6824 case WM_T_82544: /* reset 10000us */
6825 case WM_T_82540:
6826 case WM_T_82545:
6827 case WM_T_82545_3:
6828 case WM_T_82546:
6829 case WM_T_82546_3:
6830 case WM_T_82541:
6831 case WM_T_82541_2:
6832 case WM_T_82547:
6833 case WM_T_82547_2:
6834 case WM_T_82571: /* reset 100us */
6835 case WM_T_82572:
6836 case WM_T_82573:
6837 case WM_T_82574:
6838 case WM_T_82575:
6839 case WM_T_82576:
6840 case WM_T_82580:
6841 case WM_T_I350:
6842 case WM_T_I354:
6843 case WM_T_I210:
6844 case WM_T_I211:
6845 case WM_T_82583:
6846 case WM_T_80003:
6847 /* generic reset */
6848 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6849 CSR_WRITE_FLUSH(sc);
6850 delay(20000);
6851 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6852 CSR_WRITE_FLUSH(sc);
6853 delay(20000);
6854
6855 if ((sc->sc_type == WM_T_82541)
6856 || (sc->sc_type == WM_T_82541_2)
6857 || (sc->sc_type == WM_T_82547)
6858 || (sc->sc_type == WM_T_82547_2)) {
6859 /* Workarounds for IGP are done in igp_reset() */
6860 /* XXX add code to set LED after phy reset */
6861 }
6862 break;
6863 case WM_T_ICH8:
6864 case WM_T_ICH9:
6865 case WM_T_ICH10:
6866 case WM_T_PCH:
6867 case WM_T_PCH2:
6868 case WM_T_PCH_LPT:
6869 /* generic reset */
6870 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6871 CSR_WRITE_FLUSH(sc);
6872 delay(100);
6873 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6874 CSR_WRITE_FLUSH(sc);
6875 delay(150);
6876 break;
6877 default:
6878 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6879 __func__);
6880 break;
6881 }
6882
6883 /* release PHY semaphore */
6884 switch (sc->sc_type) {
6885 case WM_T_82571:
6886 case WM_T_82572:
6887 case WM_T_82573:
6888 case WM_T_82574:
6889 case WM_T_82583:
6890 /* XXX should put sw semaphore, too */
6891 wm_put_swsm_semaphore(sc);
6892 break;
6893 case WM_T_82575:
6894 case WM_T_82576:
6895 case WM_T_82580:
6896 case WM_T_I350:
6897 case WM_T_I354:
6898 case WM_T_I210:
6899 case WM_T_I211:
6900 case WM_T_80003:
6901 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6902 break;
6903 case WM_T_ICH8:
6904 case WM_T_ICH9:
6905 case WM_T_ICH10:
6906 case WM_T_PCH:
6907 case WM_T_PCH2:
6908 case WM_T_PCH_LPT:
6909 wm_put_swfwhw_semaphore(sc);
6910 break;
6911 default:
6912 /* Nothing to do */
6914 break;
6915 }
6916
6917 /* get_cfg_done */
6918 wm_get_cfg_done(sc);
6919
6920 /* extra setup */
6921 switch (sc->sc_type) {
6922 case WM_T_82542_2_0:
6923 case WM_T_82542_2_1:
6924 case WM_T_82543:
6925 case WM_T_82544:
6926 case WM_T_82540:
6927 case WM_T_82545:
6928 case WM_T_82545_3:
6929 case WM_T_82546:
6930 case WM_T_82546_3:
6931 case WM_T_82541_2:
6932 case WM_T_82547_2:
6933 case WM_T_82571:
6934 case WM_T_82572:
6935 case WM_T_82573:
6936 case WM_T_82574:
6937 case WM_T_82575:
6938 case WM_T_82576:
6939 case WM_T_82580:
6940 case WM_T_I350:
6941 case WM_T_I354:
6942 case WM_T_I210:
6943 case WM_T_I211:
6944 case WM_T_82583:
6945 case WM_T_80003:
6946 /* null */
6947 break;
6948 case WM_T_82541:
6949 case WM_T_82547:
6950 /* XXX Configure actively LED after PHY reset */
6951 break;
6952 case WM_T_ICH8:
6953 case WM_T_ICH9:
6954 case WM_T_ICH10:
6955 case WM_T_PCH:
6956 case WM_T_PCH2:
6957 case WM_T_PCH_LPT:
6958 /* Allow time for h/w to get to a quiescent state after reset */
6959 delay(10*1000);
6960
6961 if (sc->sc_type == WM_T_PCH)
6962 wm_hv_phy_workaround_ich8lan(sc);
6963
6964 if (sc->sc_type == WM_T_PCH2)
6965 wm_lv_phy_workaround_ich8lan(sc);
6966
6967 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6968 /*
6969 * Dummy read to clear the PHY wakeup bit after LCD
6970 * reset
6971 */
6972 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6973 }
6974
6975 /*
6976 * XXX Configure the LCD with the extended configuration region
6977 * in NVM
6978 */
6979
6980 /* Configure the LCD with the OEM bits in NVM */
6981 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6982 || (sc->sc_type == WM_T_PCH_LPT)) {
6983 /*
6984 * Disable LPLU.
6985 * XXX It seems that 82567 has LPLU, too.
6986 */
6987 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6988 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6989 reg |= HV_OEM_BITS_ANEGNOW;
6990 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6991 }
6992 break;
6993 default:
6994 panic("%s: unknown type\n", __func__);
6995 break;
6996 }
6997 }
6998
6999 /*
7000 * wm_get_phy_id_82575:
7001 *
7002 * Return PHY ID. Return -1 if it failed.
7003 */
7004 static int
7005 wm_get_phy_id_82575(struct wm_softc *sc)
7006 {
7007 uint32_t reg;
7008 int phyid = -1;
7009
7010 /* XXX */
7011 if ((sc->sc_flags & WM_F_SGMII) == 0)
7012 return -1;
7013
7014 if (wm_sgmii_uses_mdio(sc)) {
7015 switch (sc->sc_type) {
7016 case WM_T_82575:
7017 case WM_T_82576:
7018 reg = CSR_READ(sc, WMREG_MDIC);
7019 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
7020 break;
7021 case WM_T_82580:
7022 case WM_T_I350:
7023 case WM_T_I354:
7024 case WM_T_I210:
7025 case WM_T_I211:
7026 reg = CSR_READ(sc, WMREG_MDICNFG);
7027 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
7028 break;
7029 default:
7030 return -1;
7031 }
7032 }
7033
7034 return phyid;
7035 }
7036
7038 /*
7039 * wm_gmii_mediainit:
7040 *
7041 * Initialize media for use on 1000BASE-T devices.
7042 */
7043 static void
7044 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
7045 {
7046 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7047 struct mii_data *mii = &sc->sc_mii;
7048 uint32_t reg;
7049
7050 /* We have GMII. */
7051 sc->sc_flags |= WM_F_HAS_MII;
7052
7053 if (sc->sc_type == WM_T_80003)
7054 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7055 else
7056 sc->sc_tipg = TIPG_1000T_DFLT;
7057
7058 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
7059 if ((sc->sc_type == WM_T_82580)
7060 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
7061 || (sc->sc_type == WM_T_I211)) {
7062 reg = CSR_READ(sc, WMREG_PHPM);
7063 reg &= ~PHPM_GO_LINK_D;
7064 CSR_WRITE(sc, WMREG_PHPM, reg);
7065 }
7066
7067 /*
7068 * Let the chip set speed/duplex on its own based on
7069 * signals from the PHY.
7070 * XXXbouyer - I'm not sure this is right for the 80003,
7071 * the em driver only sets CTRL_SLU here - but it seems to work.
7072 */
7073 sc->sc_ctrl |= CTRL_SLU;
7074 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7075
7076 /* Initialize our media structures and probe the GMII. */
7077 mii->mii_ifp = ifp;
7078
7079 /*
7080 * Determine the PHY access method.
7081 *
7082 * For SGMII, use SGMII specific method.
7083 *
7084 * For some devices, we can determine the PHY access method
7085 * from sc_type.
7086 *
7087 * For ICH and PCH variants, it's difficult to determine the PHY
7088 * access method by sc_type, so use the PCI product ID for some
7089 * devices.
7090 * For other ICH8 variants, try igp's method first. If the PHY
7091 * can't be detected that way, fall back to bm's method.
7092 */
7093 switch (prodid) {
7094 case PCI_PRODUCT_INTEL_PCH_M_LM:
7095 case PCI_PRODUCT_INTEL_PCH_M_LC:
7096 /* 82577 */
7097 sc->sc_phytype = WMPHY_82577;
7098 break;
7099 case PCI_PRODUCT_INTEL_PCH_D_DM:
7100 case PCI_PRODUCT_INTEL_PCH_D_DC:
7101 /* 82578 */
7102 sc->sc_phytype = WMPHY_82578;
7103 break;
7104 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
7105 case PCI_PRODUCT_INTEL_PCH2_LV_V:
7106 /* 82579 */
7107 sc->sc_phytype = WMPHY_82579;
7108 break;
7109 case PCI_PRODUCT_INTEL_82801I_BM:
7110 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
7111 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
7112 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
7113 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
7114 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
7115 /* 82567 */
7116 sc->sc_phytype = WMPHY_BM;
7117 mii->mii_readreg = wm_gmii_bm_readreg;
7118 mii->mii_writereg = wm_gmii_bm_writereg;
7119 break;
7120 default:
7121 if (((sc->sc_flags & WM_F_SGMII) != 0)
7122 && !wm_sgmii_uses_mdio(sc)){
7123 /* SGMII */
7124 mii->mii_readreg = wm_sgmii_readreg;
7125 mii->mii_writereg = wm_sgmii_writereg;
7126 } else if (sc->sc_type >= WM_T_80003) {
7127 /* 80003 */
7128 mii->mii_readreg = wm_gmii_i80003_readreg;
7129 mii->mii_writereg = wm_gmii_i80003_writereg;
7130 } else if (sc->sc_type >= WM_T_I210) {
7131 /* I210 and I211 */
7132 mii->mii_readreg = wm_gmii_gs40g_readreg;
7133 mii->mii_writereg = wm_gmii_gs40g_writereg;
7134 } else if (sc->sc_type >= WM_T_82580) {
7135 /* 82580, I350 and I354 */
7136 sc->sc_phytype = WMPHY_82580;
7137 mii->mii_readreg = wm_gmii_82580_readreg;
7138 mii->mii_writereg = wm_gmii_82580_writereg;
7139 } else if (sc->sc_type >= WM_T_82544) {
7140 /* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
7141 mii->mii_readreg = wm_gmii_i82544_readreg;
7142 mii->mii_writereg = wm_gmii_i82544_writereg;
7143 } else {
7144 mii->mii_readreg = wm_gmii_i82543_readreg;
7145 mii->mii_writereg = wm_gmii_i82543_writereg;
7146 }
7147 break;
7148 }
7149 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
7150 /* All PCH* use _hv_ */
7151 mii->mii_readreg = wm_gmii_hv_readreg;
7152 mii->mii_writereg = wm_gmii_hv_writereg;
7153 }
7154 mii->mii_statchg = wm_gmii_statchg;
7155
7156 wm_gmii_reset(sc);
7157
7158 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7159 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
7160 wm_gmii_mediastatus);
7161
7162 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
7163 || (sc->sc_type == WM_T_82580)
7164 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
7165 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
7166 if ((sc->sc_flags & WM_F_SGMII) == 0) {
7167 /* Attach only one port */
7168 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
7169 MII_OFFSET_ANY, MIIF_DOPAUSE);
7170 } else {
7171 int i, id;
7172 uint32_t ctrl_ext;
7173
7174 id = wm_get_phy_id_82575(sc);
7175 if (id != -1) {
7176 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
7177 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
7178 }
7179 if ((id == -1)
7180 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
7181 /* Power on sgmii phy if it is disabled */
7182 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7183 CSR_WRITE(sc, WMREG_CTRL_EXT,
7184 ctrl_ext & ~CTRL_EXT_SWDPIN(3));
7185 CSR_WRITE_FLUSH(sc);
7186 delay(300*1000); /* XXX too long */
7187
7188 /* from 1 to 8 */
7189 for (i = 1; i < 8; i++)
7190 mii_attach(sc->sc_dev, &sc->sc_mii,
7191 0xffffffff, i, MII_OFFSET_ANY,
7192 MIIF_DOPAUSE);
7193
7194 /* restore previous sfp cage power state */
7195 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7196 }
7197 }
7198 } else {
7199 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7200 MII_OFFSET_ANY, MIIF_DOPAUSE);
7201 }
7202
7203 /*
7204 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
7205 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
7206 */
7207 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
7208 (LIST_FIRST(&mii->mii_phys) == NULL)) {
7209 wm_set_mdio_slow_mode_hv(sc);
7210 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7211 MII_OFFSET_ANY, MIIF_DOPAUSE);
7212 }
7213
7214 /*
7215 * (For ICH8 variants)
7216 * If PHY detection failed, use BM's r/w function and retry.
7217 */
7218 if (LIST_FIRST(&mii->mii_phys) == NULL) {
7219 /* if failed, retry with *_bm_* */
7220 mii->mii_readreg = wm_gmii_bm_readreg;
7221 mii->mii_writereg = wm_gmii_bm_writereg;
7222
7223 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7224 MII_OFFSET_ANY, MIIF_DOPAUSE);
7225 }
7226
7227 if (LIST_FIRST(&mii->mii_phys) == NULL) {
7228 /* No PHY was found */
7229 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
7230 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
7231 sc->sc_phytype = WMPHY_NONE;
7232 } else {
7233 /*
7234 * PHY Found!
7235 * Check PHY type.
7236 */
7237 uint32_t model;
7238 struct mii_softc *child;
7239
7240 child = LIST_FIRST(&mii->mii_phys);
7241 if (device_is_a(child->mii_dev, "igphy")) {
7242 struct igphy_softc *isc = (struct igphy_softc *)child;
7243
7244 model = isc->sc_mii.mii_mpd_model;
7245 if (model == MII_MODEL_yyINTEL_I82566)
7246 sc->sc_phytype = WMPHY_IGP_3;
7247 }
7248
7249 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
7250 }
7251 }
7252
7253 /*
7254 * wm_gmii_mediachange: [ifmedia interface function]
7255 *
7256 * Set hardware to newly-selected media on a 1000BASE-T device.
7257 */
7258 static int
7259 wm_gmii_mediachange(struct ifnet *ifp)
7260 {
7261 struct wm_softc *sc = ifp->if_softc;
7262 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7263 int rc;
7264
7265 if ((ifp->if_flags & IFF_UP) == 0)
7266 return 0;
7267
7268 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7269 sc->sc_ctrl |= CTRL_SLU;
7270 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7271 || (sc->sc_type > WM_T_82543)) {
7272 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
7273 } else {
7274 sc->sc_ctrl &= ~CTRL_ASDE;
7275 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7276 if (ife->ifm_media & IFM_FDX)
7277 sc->sc_ctrl |= CTRL_FD;
7278 switch (IFM_SUBTYPE(ife->ifm_media)) {
7279 case IFM_10_T:
7280 sc->sc_ctrl |= CTRL_SPEED_10;
7281 break;
7282 case IFM_100_TX:
7283 sc->sc_ctrl |= CTRL_SPEED_100;
7284 break;
7285 case IFM_1000_T:
7286 sc->sc_ctrl |= CTRL_SPEED_1000;
7287 break;
7288 default:
7289 panic("wm_gmii_mediachange: bad media 0x%x",
7290 ife->ifm_media);
7291 }
7292 }
7293 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7294 if (sc->sc_type <= WM_T_82543)
7295 wm_gmii_reset(sc);
7296
7297 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
7298 return 0;
7299 return rc;
7300 }
7301
7302 /*
7303 * wm_gmii_mediastatus: [ifmedia interface function]
7304 *
7305 * Get the current interface media status on a 1000BASE-T device.
7306 */
7307 static void
7308 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7309 {
7310 struct wm_softc *sc = ifp->if_softc;
7311
7312 ether_mediastatus(ifp, ifmr);
7313 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
7314 | sc->sc_flowflags;
7315 }
7316
7317 #define MDI_IO CTRL_SWDPIN(2)
7318 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
7319 #define MDI_CLK CTRL_SWDPIN(3)
7320
7321 static void
7322 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
7323 {
7324 uint32_t i, v;
7325
7326 v = CSR_READ(sc, WMREG_CTRL);
7327 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
7328 v |= MDI_DIR | CTRL_SWDPIO(3);
7329
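/*
 * Clock each bit out MSB first: present the bit on MDI_IO
 * with MDI_CLK low, raise MDI_CLK, then lower it again,
 * with 10us between transitions.
 */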
7330 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
7331 if (data & i)
7332 v |= MDI_IO;
7333 else
7334 v &= ~MDI_IO;
7335 CSR_WRITE(sc, WMREG_CTRL, v);
7336 CSR_WRITE_FLUSH(sc);
7337 delay(10);
7338 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7339 CSR_WRITE_FLUSH(sc);
7340 delay(10);
7341 CSR_WRITE(sc, WMREG_CTRL, v);
7342 CSR_WRITE_FLUSH(sc);
7343 delay(10);
7344 }
7345 }
7346
7347 static uint32_t
7348 wm_i82543_mii_recvbits(struct wm_softc *sc)
7349 {
7350 uint32_t v, i, data = 0;
7351
7352 v = CSR_READ(sc, WMREG_CTRL);
7353 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
7354 v |= CTRL_SWDPIO(3);
7355
7356 CSR_WRITE(sc, WMREG_CTRL, v);
7357 CSR_WRITE_FLUSH(sc);
7358 delay(10);
7359 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7360 CSR_WRITE_FLUSH(sc);
7361 delay(10);
7362 CSR_WRITE(sc, WMREG_CTRL, v);
7363 CSR_WRITE_FLUSH(sc);
7364 delay(10);
7365
7366 for (i = 0; i < 16; i++) {
7367 data <<= 1;
7368 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7369 CSR_WRITE_FLUSH(sc);
7370 delay(10);
7371 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
7372 data |= 1;
7373 CSR_WRITE(sc, WMREG_CTRL, v);
7374 CSR_WRITE_FLUSH(sc);
7375 delay(10);
7376 }
7377
7378 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7379 CSR_WRITE_FLUSH(sc);
7380 delay(10);
7381 CSR_WRITE(sc, WMREG_CTRL, v);
7382 CSR_WRITE_FLUSH(sc);
7383 delay(10);
7384
7385 return data;
7386 }
7387
7388 #undef MDI_IO
7389 #undef MDI_DIR
7390 #undef MDI_CLK
7391
7392 /*
7393 * wm_gmii_i82543_readreg: [mii interface function]
7394 *
7395 * Read a PHY register on the GMII (i82543 version).
7396 */
7397 static int
7398 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
7399 {
7400 struct wm_softc *sc = device_private(self);
7401 int rv;
7402
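/*
 * Send a 32-bit preamble of ones, then the 14-bit read
 * frame: start (2 bits), read opcode (2), PHY address (5)
 * and register address (5), MSB first; the 16-bit result
 * is then clocked back in.
 */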
7403 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
7404 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
7405 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
7406 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
7407
7408 DPRINTF(WM_DEBUG_GMII,
7409 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
7410 device_xname(sc->sc_dev), phy, reg, rv));
7411
7412 return rv;
7413 }
7414
7415 /*
7416 * wm_gmii_i82543_writereg: [mii interface function]
7417 *
7418 * Write a PHY register on the GMII (i82543 version).
7419 */
7420 static void
7421 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
7422 {
7423 struct wm_softc *sc = device_private(self);
7424
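/*
 * Send a 32-bit preamble of ones, then the 32-bit write
 * frame: start (2 bits), write opcode (2), PHY address (5),
 * register address (5), turnaround (2) and the 16 data
 * bits, MSB first.
 */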
7425 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
7426 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
7427 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
7428 (MII_COMMAND_START << 30), 32);
7429 }
7430
7431 /*
7432 * wm_gmii_i82544_readreg: [mii interface function]
7433 *
7434 * Read a PHY register on the GMII.
7435 */
7436 static int
7437 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
7438 {
7439 struct wm_softc *sc = device_private(self);
7440 uint32_t mdic = 0;
7441 int i, rv;
7442
7443 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
7444 MDIC_REGADD(reg));
7445
7446 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7447 mdic = CSR_READ(sc, WMREG_MDIC);
7448 if (mdic & MDIC_READY)
7449 break;
7450 delay(50);
7451 }
7452
7453 if ((mdic & MDIC_READY) == 0) {
7454 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
7455 device_xname(sc->sc_dev), phy, reg);
7456 rv = 0;
7457 } else if (mdic & MDIC_E) {
7458 #if 0 /* This is normal if no PHY is present. */
7459 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
7460 device_xname(sc->sc_dev), phy, reg);
7461 #endif
7462 rv = 0;
7463 } else {
7464 rv = MDIC_DATA(mdic);
7465 if (rv == 0xffff)
7466 rv = 0;
7467 }
7468
7469 return rv;
7470 }
7471
7472 /*
7473 * wm_gmii_i82544_writereg: [mii interface function]
7474 *
7475 * Write a PHY register on the GMII.
7476 */
7477 static void
7478 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
7479 {
7480 struct wm_softc *sc = device_private(self);
7481 uint32_t mdic = 0;
7482 int i;
7483
7484 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
7485 MDIC_REGADD(reg) | MDIC_DATA(val));
7486
7487 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7488 mdic = CSR_READ(sc, WMREG_MDIC);
7489 if (mdic & MDIC_READY)
7490 break;
7491 delay(50);
7492 }
7493
7494 if ((mdic & MDIC_READY) == 0)
7495 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
7496 device_xname(sc->sc_dev), phy, reg);
7497 else if (mdic & MDIC_E)
7498 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
7499 device_xname(sc->sc_dev), phy, reg);
7500 }
7501
7502 /*
7503 * wm_gmii_i80003_readreg: [mii interface function]
7504 *
7505 * Read a PHY register on the Kumeran interface.
7506 * This could be handled by the PHY layer if we didn't have to lock the
7507 * resource ...
7508 */
7509 static int
7510 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
7511 {
7512 struct wm_softc *sc = device_private(self);
7513 int sem;
7514 int rv;
7515
7516 if (phy != 1) /* only one PHY on kumeran bus */
7517 return 0;
7518
7519 sem = swfwphysem[sc->sc_funcid];
7520 if (wm_get_swfw_semaphore(sc, sem)) {
7521 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7522 __func__);
7523 return 0;
7524 }
7525
7526 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7527 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7528 reg >> GG82563_PAGE_SHIFT);
7529 } else {
7530 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7531 reg >> GG82563_PAGE_SHIFT);
7532 }
7533 	/* Wait another 200us to work around a bug in the MDIC ready bit */
7534 delay(200);
7535 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7536 delay(200);
7537
7538 wm_put_swfw_semaphore(sc, sem);
7539 return rv;
7540 }
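
/*
 * Illustrative only (not compiled): GG82563 register numbers passed to
 * the functions above and below encode both a page and an in-page
 * offset.  The literal values here are example assumptions.
 */
#if 0
	int reg  = (2 << GG82563_PAGE_SHIFT) | 0x1a; /* page 2, offset 0x1a */
	int page = reg >> GG82563_PAGE_SHIFT;	  /* 2, for PAGE_SELECT */
	int off  = reg & GG82563_MAX_REG_ADDRESS; /* 0x1a, the MDIC address */
#endif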
7541
7542 /*
7543 * wm_gmii_i80003_writereg: [mii interface function]
7544 *
7545 * Write a PHY register on the Kumeran interface (80003).
7546 * This could be handled by the PHY layer if we didn't have to lock the
7547 * resource ...
7548 */
7549 static void
7550 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
7551 {
7552 struct wm_softc *sc = device_private(self);
7553 int sem;
7554
7555 if (phy != 1) /* only one PHY on kumeran bus */
7556 return;
7557
7558 sem = swfwphysem[sc->sc_funcid];
7559 if (wm_get_swfw_semaphore(sc, sem)) {
7560 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7561 __func__);
7562 return;
7563 }
7564
7565 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7566 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7567 reg >> GG82563_PAGE_SHIFT);
7568 } else {
7569 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7570 reg >> GG82563_PAGE_SHIFT);
7571 }
7572 	/* Wait another 200us to work around a bug in the MDIC ready bit */
7573 delay(200);
7574 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7575 delay(200);
7576
7577 wm_put_swfw_semaphore(sc, sem);
7578 }
7579
7580 /*
7581 * wm_gmii_bm_readreg: [mii interface function]
7582 *
7583 * Read a PHY register on the BM PHY.
7584 * This could be handled by the PHY layer if we didn't have to lock the
7585 * resource ...
7586 */
7587 static int
7588 wm_gmii_bm_readreg(device_t self, int phy, int reg)
7589 {
7590 struct wm_softc *sc = device_private(self);
7591 int sem;
7592 int rv;
7593
7594 sem = swfwphysem[sc->sc_funcid];
7595 if (wm_get_swfw_semaphore(sc, sem)) {
7596 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7597 __func__);
7598 return 0;
7599 }
7600
7601 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7602 if (phy == 1)
7603 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7604 reg);
7605 else
7606 wm_gmii_i82544_writereg(self, phy,
7607 GG82563_PHY_PAGE_SELECT,
7608 reg >> GG82563_PAGE_SHIFT);
7609 }
7610
7611 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7612 wm_put_swfw_semaphore(sc, sem);
7613 return rv;
7614 }
7615
7616 /*
7617 * wm_gmii_bm_writereg: [mii interface function]
7618 *
7619 * Write a PHY register on the BM PHY.
7620 * This could be handled by the PHY layer if we didn't have to lock the
7621 * resource ...
7622 */
7623 static void
7624 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7625 {
7626 struct wm_softc *sc = device_private(self);
7627 int sem;
7628
7629 sem = swfwphysem[sc->sc_funcid];
7630 if (wm_get_swfw_semaphore(sc, sem)) {
7631 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7632 __func__);
7633 return;
7634 }
7635
7636 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7637 if (phy == 1)
7638 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7639 reg);
7640 else
7641 wm_gmii_i82544_writereg(self, phy,
7642 GG82563_PHY_PAGE_SELECT,
7643 reg >> GG82563_PAGE_SHIFT);
7644 }
7645
7646 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7647 wm_put_swfw_semaphore(sc, sem);
7648 }
7649
7650 static void
7651 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7652 {
7653 struct wm_softc *sc = device_private(self);
7654 uint16_t regnum = BM_PHY_REG_NUM(offset);
7655 uint16_t wuce;
7656
7657 /* XXX Gig must be disabled for MDIO accesses to page 800 */
7658 if (sc->sc_type == WM_T_PCH) {
7659 		/* XXX The e1000 driver does nothing here... why? */
7660 }
7661
7662 /* Set page 769 */
7663 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7664 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7665
7666 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7667
7668 wuce &= ~BM_WUC_HOST_WU_BIT;
7669 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7670 wuce | BM_WUC_ENABLE_BIT);
7671
7672 /* Select page 800 */
7673 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7674 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7675
7676 /* Write page 800 */
7677 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7678
7679 if (rd)
7680 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7681 else
7682 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7683
7684 /* Set page 769 */
7685 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7686 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7687
7688 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7689 }
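
/*
 * Usage sketch (not compiled): reading one wakeup register through the
 * helper above.  BM_PHY_REG() is a hypothetical constructor packing a
 * page and register number the way BM_PHY_REG_PAGE()/BM_PHY_REG_NUM()
 * unpack them; the caller must already hold the semaphore that guards
 * MDIC access.
 */
#if 0
	int16_t wuc;

	/* Register 1 on the wakeup page (page 800) */
	wm_access_phy_wakeup_reg_bm(self, BM_PHY_REG(BM_WUC_PAGE, 1),
	    &wuc, 1);
#endif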
7690
7691 /*
7692 * wm_gmii_hv_readreg: [mii interface function]
7693 *
7694 * Read a PHY register on the HV (PCH) PHY.
7695 * This could be handled by the PHY layer if we didn't have to lock the
7696 * resource ...
7697 */
7698 static int
7699 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7700 {
7701 struct wm_softc *sc = device_private(self);
7702 uint16_t page = BM_PHY_REG_PAGE(reg);
7703 uint16_t regnum = BM_PHY_REG_NUM(reg);
7704 uint16_t val;
7705 int rv;
7706
7707 if (wm_get_swfwhw_semaphore(sc)) {
7708 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7709 __func__);
7710 return 0;
7711 }
7712
7713 	/* XXX Work around MDIO access failures while the cable is disconnected */
7714 if (sc->sc_phytype == WMPHY_82577) {
7715 /* XXX must write */
7716 }
7717
7718 	/* Page 800 works differently from the rest, so it has its own function */
7719 	if (page == BM_WUC_PAGE) {
7720 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7721 		return val;
7722 	}
7723 
7724 	/*
7725 	 * Pages lower than 768 work differently from the rest, so they have
7726 	 * their own function.
7727 	 */
7728 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7729 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7730 		return 0;
7731 	}
7732
7733 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7734 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7735 page << BME1000_PAGE_SHIFT);
7736 }
7737
7738 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7739 wm_put_swfwhw_semaphore(sc);
7740 return rv;
7741 }
7742
7743 /*
7744 * wm_gmii_hv_writereg: [mii interface function]
7745 *
7746 * Write a PHY register on the HV (PCH) PHY.
7747 * This could be handled by the PHY layer if we didn't have to lock the
7748 * resource ...
7749 */
7750 static void
7751 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7752 {
7753 struct wm_softc *sc = device_private(self);
7754 uint16_t page = BM_PHY_REG_PAGE(reg);
7755 uint16_t regnum = BM_PHY_REG_NUM(reg);
7756
7757 if (wm_get_swfwhw_semaphore(sc)) {
7758 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7759 __func__);
7760 return;
7761 }
7762
7763 	/* XXX Work around MDIO access failures while the cable is disconnected */
7764
7765 	/* Page 800 works differently from the rest, so it has its own function */
7766 	if (page == BM_WUC_PAGE) {
7767 		uint16_t tmp;
7768 
7769 		tmp = val;
7770 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7771 		return;
7772 	}
7773 
7774 	/*
7775 	 * Pages lower than 768 work differently from the rest, so they have
7776 	 * their own function.
7777 	 */
7778 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7779 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
7780 		return;
7781 	}
7782
7783 /*
7784 	 * XXX Work around MDIO accesses being disabled after entering IEEE
7785 * Power Down (whenever bit 11 of the PHY control register is set)
7786 */
7787
7788 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7789 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7790 page << BME1000_PAGE_SHIFT);
7791 }
7792
7793 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7794 wm_put_swfwhw_semaphore(sc);
7795 }
7796
7797 /*
7798 * wm_gmii_82580_readreg: [mii interface function]
7799 *
7800 * Read a PHY register on the 82580 and I350.
7801 * This could be handled by the PHY layer if we didn't have to lock the
7802 * resource ...
7803 */
7804 static int
7805 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7806 {
7807 struct wm_softc *sc = device_private(self);
7808 int sem;
7809 int rv;
7810
7811 sem = swfwphysem[sc->sc_funcid];
7812 if (wm_get_swfw_semaphore(sc, sem)) {
7813 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7814 __func__);
7815 return 0;
7816 }
7817
7818 rv = wm_gmii_i82544_readreg(self, phy, reg);
7819
7820 wm_put_swfw_semaphore(sc, sem);
7821 return rv;
7822 }
7823
7824 /*
7825 * wm_gmii_82580_writereg: [mii interface function]
7826 *
7827 * Write a PHY register on the 82580 and I350.
7828 * This could be handled by the PHY layer if we didn't have to lock the
7829 * resource ...
7830 */
7831 static void
7832 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7833 {
7834 struct wm_softc *sc = device_private(self);
7835 int sem;
7836
7837 sem = swfwphysem[sc->sc_funcid];
7838 if (wm_get_swfw_semaphore(sc, sem)) {
7839 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7840 __func__);
7841 return;
7842 }
7843
7844 wm_gmii_i82544_writereg(self, phy, reg, val);
7845
7846 wm_put_swfw_semaphore(sc, sem);
7847 }
7848
7849 /*
7850 * wm_gmii_gs40g_readreg: [mii interface function]
7851 *
7852 * Read a PHY register on the I210 and I211.
7853 * This could be handled by the PHY layer if we didn't have to lock the
7854 * resource ...
7855 */
7856 static int
7857 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
7858 {
7859 struct wm_softc *sc = device_private(self);
7860 int sem;
7861 int page, offset;
7862 int rv;
7863
7864 /* Acquire semaphore */
7865 sem = swfwphysem[sc->sc_funcid];
7866 if (wm_get_swfw_semaphore(sc, sem)) {
7867 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7868 __func__);
7869 return 0;
7870 }
7871
7872 /* Page select */
7873 page = reg >> GS40G_PAGE_SHIFT;
7874 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
7875
7876 /* Read reg */
7877 offset = reg & GS40G_OFFSET_MASK;
7878 rv = wm_gmii_i82544_readreg(self, phy, offset);
7879
7880 wm_put_swfw_semaphore(sc, sem);
7881 return rv;
7882 }
7883
7884 /*
7885 * wm_gmii_gs40g_writereg: [mii interface function]
7886 *
7887 * Write a PHY register on the I210 and I211.
7888 * This could be handled by the PHY layer if we didn't have to lock the
7889 * resource ...
7890 */
7891 static void
7892 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
7893 {
7894 struct wm_softc *sc = device_private(self);
7895 int sem;
7896 int page, offset;
7897
7898 /* Acquire semaphore */
7899 sem = swfwphysem[sc->sc_funcid];
7900 if (wm_get_swfw_semaphore(sc, sem)) {
7901 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7902 __func__);
7903 return;
7904 }
7905
7906 /* Page select */
7907 page = reg >> GS40G_PAGE_SHIFT;
7908 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
7909
7910 /* Write reg */
7911 offset = reg & GS40G_OFFSET_MASK;
7912 wm_gmii_i82544_writereg(self, phy, offset, val);
7913
7914 /* Release semaphore */
7915 wm_put_swfw_semaphore(sc, sem);
7916 }
7917
7918 /*
7919 * wm_gmii_statchg: [mii interface function]
7920 *
7921 * Callback from MII layer when media changes.
7922 */
7923 static void
7924 wm_gmii_statchg(struct ifnet *ifp)
7925 {
7926 struct wm_softc *sc = ifp->if_softc;
7927 struct mii_data *mii = &sc->sc_mii;
7928
7929 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7930 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7931 sc->sc_fcrtl &= ~FCRTL_XONE;
7932
7933 /*
7934 * Get flow control negotiation result.
7935 */
7936 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7937 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7938 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7939 mii->mii_media_active &= ~IFM_ETH_FMASK;
7940 }
7941
7942 if (sc->sc_flowflags & IFM_FLOW) {
7943 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7944 sc->sc_ctrl |= CTRL_TFCE;
7945 sc->sc_fcrtl |= FCRTL_XONE;
7946 }
7947 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7948 sc->sc_ctrl |= CTRL_RFCE;
7949 }
7950
7951 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7952 DPRINTF(WM_DEBUG_LINK,
7953 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7954 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7955 } else {
7956 DPRINTF(WM_DEBUG_LINK,
7957 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7958 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7959 }
7960
7961 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7962 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7963 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7964 : WMREG_FCRTL, sc->sc_fcrtl);
7965 if (sc->sc_type == WM_T_80003) {
7966 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7967 case IFM_1000_T:
7968 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7969 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7970 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7971 break;
7972 default:
7973 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7974 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7975 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7976 break;
7977 }
7978 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7979 }
7980 }
7981
7982 /*
7983 * wm_kmrn_readreg:
7984 *
7985 * Read a Kumeran register.
7986 */
7987 static int
7988 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7989 {
7990 int rv;
7991
7992 if (sc->sc_flags & WM_F_LOCK_SWFW) {
7993 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7994 aprint_error_dev(sc->sc_dev,
7995 "%s: failed to get semaphore\n", __func__);
7996 return 0;
7997 }
7998 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
7999 if (wm_get_swfwhw_semaphore(sc)) {
8000 aprint_error_dev(sc->sc_dev,
8001 "%s: failed to get semaphore\n", __func__);
8002 return 0;
8003 }
8004 }
8005
8006 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8007 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8008 KUMCTRLSTA_REN);
8009 CSR_WRITE_FLUSH(sc);
8010 delay(2);
8011
8012 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
8013
8014 if (sc->sc_flags & WM_F_LOCK_SWFW)
8015 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8016 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8017 wm_put_swfwhw_semaphore(sc);
8018
8019 return rv;
8020 }
8021
8022 /*
8023 * wm_kmrn_writereg:
8024 *
8025 * Write a Kumeran register.
8026 */
8027 static void
8028 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
8029 {
8030
8031 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8032 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8033 aprint_error_dev(sc->sc_dev,
8034 "%s: failed to get semaphore\n", __func__);
8035 return;
8036 }
8037 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8038 if (wm_get_swfwhw_semaphore(sc)) {
8039 aprint_error_dev(sc->sc_dev,
8040 "%s: failed to get semaphore\n", __func__);
8041 return;
8042 }
8043 }
8044
8045 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8046 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8047 (val & KUMCTRLSTA_MASK));
8048
8049 if (sc->sc_flags & WM_F_LOCK_SWFW)
8050 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8051 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8052 wm_put_swfwhw_semaphore(sc);
8053 }
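
/*
 * Usage sketch (not compiled): a Kumeran access pair through the two
 * helpers above, using an offset this driver already programs in
 * wm_gmii_statchg().
 */
#if 0
	(void)wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);
	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
#endif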
8054
8055 /* SGMII related */
8056
8057 /*
8058 * wm_sgmii_uses_mdio
8059 *
8060 * Check whether the transaction is to the internal PHY or the external
8061 * MDIO interface. Return true if it's MDIO.
8062 */
8063 static bool
8064 wm_sgmii_uses_mdio(struct wm_softc *sc)
8065 {
8066 uint32_t reg;
8067 bool ismdio = false;
8068
8069 switch (sc->sc_type) {
8070 case WM_T_82575:
8071 case WM_T_82576:
8072 reg = CSR_READ(sc, WMREG_MDIC);
8073 ismdio = ((reg & MDIC_DEST) != 0);
8074 break;
8075 case WM_T_82580:
8076 case WM_T_I350:
8077 case WM_T_I354:
8078 case WM_T_I210:
8079 case WM_T_I211:
8080 reg = CSR_READ(sc, WMREG_MDICNFG);
8081 ismdio = ((reg & MDICNFG_DEST) != 0);
8082 break;
8083 default:
8084 break;
8085 }
8086
8087 return ismdio;
8088 }
8089
8090 /*
8091 * wm_sgmii_readreg: [mii interface function]
8092 *
8093 * Read a PHY register on the SGMII
8094 * This could be handled by the PHY layer if we didn't have to lock the
8095 * resource ...
8096 */
8097 static int
8098 wm_sgmii_readreg(device_t self, int phy, int reg)
8099 {
8100 struct wm_softc *sc = device_private(self);
8101 uint32_t i2ccmd;
8102 int i, rv;
8103
8104 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8105 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8106 __func__);
8107 return 0;
8108 }
8109
8110 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8111 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8112 | I2CCMD_OPCODE_READ;
8113 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8114
8115 /* Poll the ready bit */
8116 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8117 delay(50);
8118 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8119 if (i2ccmd & I2CCMD_READY)
8120 break;
8121 }
8122 if ((i2ccmd & I2CCMD_READY) == 0)
8123 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
8124 if ((i2ccmd & I2CCMD_ERROR) != 0)
8125 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8126
8127 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
8128
8129 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8130 return rv;
8131 }
8132
8133 /*
8134 * wm_sgmii_writereg: [mii interface function]
8135 *
8136 * Write a PHY register on the SGMII.
8137 * This could be handled by the PHY layer if we didn't have to lock the
8138 * resource ...
8139 */
8140 static void
8141 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
8142 {
8143 struct wm_softc *sc = device_private(self);
8144 uint32_t i2ccmd;
8145 int i;
8146 int val_swapped;
8147
8148 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8149 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8150 __func__);
8151 return;
8152 }
8153 /* Swap the data bytes for the I2C interface */
8154 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
8155 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8156 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8157 | I2CCMD_OPCODE_WRITE | val_swapped;
8158 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8159
8160 /* Poll the ready bit */
8161 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8162 delay(50);
8163 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8164 if (i2ccmd & I2CCMD_READY)
8165 break;
8166 }
8167 if ((i2ccmd & I2CCMD_READY) == 0)
8168 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
8169 if ((i2ccmd & I2CCMD_ERROR) != 0)
8170 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8171
8172 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8173 }
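
/*
 * Note on the byte swaps above (illustrative, not compiled): the 16-bit
 * PHY register value crosses the I2C interface byte-swapped, so both
 * the read and write paths swap around the hardware access.
 */
#if 0
	uint16_t val = 0x1234;
	uint16_t wire = ((val >> 8) & 0x00ff) | ((val << 8) & 0xff00);
	/* wire == 0x3412, the layout seen in the I2CCMD data field */
#endif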
8174
8175 /* TBI related */
8176
8177 /*
8178 * wm_tbi_mediainit:
8179 *
8180 * Initialize media for use on 1000BASE-X devices.
8181 */
8182 static void
8183 wm_tbi_mediainit(struct wm_softc *sc)
8184 {
8185 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8186 const char *sep = "";
8187
8188 if (sc->sc_type < WM_T_82543)
8189 sc->sc_tipg = TIPG_WM_DFLT;
8190 else
8191 sc->sc_tipg = TIPG_LG_DFLT;
8192
8193 sc->sc_tbi_serdes_anegticks = 5;
8194
8195 /* Initialize our media structures */
8196 sc->sc_mii.mii_ifp = ifp;
8197 sc->sc_ethercom.ec_mii = &sc->sc_mii;
8198
8199 if ((sc->sc_type >= WM_T_82575)
8200 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
8201 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
8202 wm_serdes_mediachange, wm_serdes_mediastatus);
8203 else
8204 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
8205 wm_tbi_mediachange, wm_tbi_mediastatus);
8206
8207 /*
8208 * SWD Pins:
8209 *
8210 * 0 = Link LED (output)
8211 * 1 = Loss Of Signal (input)
8212 */
8213 sc->sc_ctrl |= CTRL_SWDPIO(0);
8214
8215 /* XXX Perhaps this is only for TBI */
8216 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
8217 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
8218
8219 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
8220 sc->sc_ctrl &= ~CTRL_LRST;
8221
8222 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8223
8224 #define ADD(ss, mm, dd) \
8225 do { \
8226 aprint_normal("%s%s", sep, ss); \
8227 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
8228 sep = ", "; \
8229 } while (/*CONSTCOND*/0)
8230
8231 aprint_normal_dev(sc->sc_dev, "");
8232
8233 /* Only 82545 is LX */
8234 if (sc->sc_type == WM_T_82545) {
8235 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
8236 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
8237 } else {
8238 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
8239 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
8240 }
8241 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
8242 aprint_normal("\n");
8243
8244 #undef ADD
8245
8246 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
8247 }
8248
8249 /*
8250 * wm_tbi_mediachange: [ifmedia interface function]
8251 *
8252 * Set hardware to newly-selected media on a 1000BASE-X device.
8253 */
8254 static int
8255 wm_tbi_mediachange(struct ifnet *ifp)
8256 {
8257 struct wm_softc *sc = ifp->if_softc;
8258 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8259 uint32_t status;
8260 int i;
8261
8262 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
8263 /* XXX need some work for >= 82571 and < 82575 */
8264 if (sc->sc_type < WM_T_82575)
8265 return 0;
8266 }
8267
8268 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8269 || (sc->sc_type >= WM_T_82575))
8270 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8271
8272 sc->sc_ctrl &= ~CTRL_LRST;
8273 sc->sc_txcw = TXCW_ANE;
8274 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8275 sc->sc_txcw |= TXCW_FD | TXCW_HD;
8276 else if (ife->ifm_media & IFM_FDX)
8277 sc->sc_txcw |= TXCW_FD;
8278 else
8279 sc->sc_txcw |= TXCW_HD;
8280
8281 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
8282 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
8283
8284 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
8285 device_xname(sc->sc_dev), sc->sc_txcw));
8286 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8287 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8288 CSR_WRITE_FLUSH(sc);
8289 delay(1000);
8290
8291 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
8292 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
8293
8294 /*
8295 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
8296 * optics detect a signal, 0 if they don't.
8297 */
8298 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
8299 /* Have signal; wait for the link to come up. */
8300 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
8301 delay(10000);
8302 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
8303 break;
8304 }
8305
8306 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
8307 device_xname(sc->sc_dev),i));
8308
8309 status = CSR_READ(sc, WMREG_STATUS);
8310 DPRINTF(WM_DEBUG_LINK,
8311 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
8312 device_xname(sc->sc_dev),status, STATUS_LU));
8313 if (status & STATUS_LU) {
8314 /* Link is up. */
8315 DPRINTF(WM_DEBUG_LINK,
8316 ("%s: LINK: set media -> link up %s\n",
8317 device_xname(sc->sc_dev),
8318 (status & STATUS_FD) ? "FDX" : "HDX"));
8319
8320 /*
8321 * NOTE: CTRL will update TFCE and RFCE automatically,
8322 * so we should update sc->sc_ctrl
8323 */
8324 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
8325 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8326 sc->sc_fcrtl &= ~FCRTL_XONE;
8327 if (status & STATUS_FD)
8328 sc->sc_tctl |=
8329 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8330 else
8331 sc->sc_tctl |=
8332 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8333 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
8334 sc->sc_fcrtl |= FCRTL_XONE;
8335 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8336 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
8337 WMREG_OLD_FCRTL : WMREG_FCRTL,
8338 sc->sc_fcrtl);
8339 sc->sc_tbi_linkup = 1;
8340 } else {
8341 if (i == WM_LINKUP_TIMEOUT)
8342 wm_check_for_link(sc);
8343 /* Link is down. */
8344 DPRINTF(WM_DEBUG_LINK,
8345 ("%s: LINK: set media -> link down\n",
8346 device_xname(sc->sc_dev)));
8347 sc->sc_tbi_linkup = 0;
8348 }
8349 } else {
8350 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
8351 device_xname(sc->sc_dev)));
8352 sc->sc_tbi_linkup = 0;
8353 }
8354
8355 wm_tbi_serdes_set_linkled(sc);
8356
8357 return 0;
8358 }
8359
8360 /*
8361 * wm_tbi_mediastatus: [ifmedia interface function]
8362 *
8363 * Get the current interface media status on a 1000BASE-X device.
8364 */
8365 static void
8366 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8367 {
8368 struct wm_softc *sc = ifp->if_softc;
8369 uint32_t ctrl, status;
8370
8371 ifmr->ifm_status = IFM_AVALID;
8372 ifmr->ifm_active = IFM_ETHER;
8373
8374 status = CSR_READ(sc, WMREG_STATUS);
8375 if ((status & STATUS_LU) == 0) {
8376 ifmr->ifm_active |= IFM_NONE;
8377 return;
8378 }
8379
8380 ifmr->ifm_status |= IFM_ACTIVE;
8381 /* Only 82545 is LX */
8382 if (sc->sc_type == WM_T_82545)
8383 ifmr->ifm_active |= IFM_1000_LX;
8384 else
8385 ifmr->ifm_active |= IFM_1000_SX;
8386 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
8387 ifmr->ifm_active |= IFM_FDX;
8388 else
8389 ifmr->ifm_active |= IFM_HDX;
8390 ctrl = CSR_READ(sc, WMREG_CTRL);
8391 if (ctrl & CTRL_RFCE)
8392 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
8393 if (ctrl & CTRL_TFCE)
8394 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
8395 }
8396
8397 /* XXX TBI only */
8398 static int
8399 wm_check_for_link(struct wm_softc *sc)
8400 {
8401 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8402 uint32_t rxcw;
8403 uint32_t ctrl;
8404 uint32_t status;
8405 uint32_t sig;
8406
8407 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
8408 /* XXX need some work for >= 82571 */
8409 if (sc->sc_type >= WM_T_82571) {
8410 sc->sc_tbi_linkup = 1;
8411 return 0;
8412 }
8413 }
8414
8415 rxcw = CSR_READ(sc, WMREG_RXCW);
8416 ctrl = CSR_READ(sc, WMREG_CTRL);
8417 status = CSR_READ(sc, WMREG_STATUS);
8418
8419 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8420
8421 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8422 device_xname(sc->sc_dev), __func__,
8423 ((ctrl & CTRL_SWDPIN(1)) == sig),
8424 ((status & STATUS_LU) != 0),
8425 ((rxcw & RXCW_C) != 0)
8426 ));
8427
8428 /*
8429 * SWDPIN LU RXCW
8430 * 0 0 0
8431 * 0 0 1 (should not happen)
8432 * 0 1 0 (should not happen)
8433 * 0 1 1 (should not happen)
8434 * 1 0 0 Disable autonego and force linkup
8435 * 1 0 1 got /C/ but not linkup yet
8436 * 1 1 0 (linkup)
8437 * 1 1 1 If IFM_AUTO, back to autonego
8438 *
8439 */
8440 if (((ctrl & CTRL_SWDPIN(1)) == sig)
8441 && ((status & STATUS_LU) == 0)
8442 && ((rxcw & RXCW_C) == 0)) {
8443 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8444 __func__));
8445 sc->sc_tbi_linkup = 0;
8446 /* Disable auto-negotiation in the TXCW register */
8447 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8448
8449 /*
8450 * Force link-up and also force full-duplex.
8451 *
8452 			 * NOTE: CTRL will update TFCE and RFCE automatically,
8453 * so we should update sc->sc_ctrl
8454 */
8455 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8456 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8457 } else if (((status & STATUS_LU) != 0)
8458 && ((rxcw & RXCW_C) != 0)
8459 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8460 sc->sc_tbi_linkup = 1;
8461 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8462 __func__));
8463 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8464 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8465 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8466 && ((rxcw & RXCW_C) != 0)) {
8467 DPRINTF(WM_DEBUG_LINK, ("/C/"));
8468 } else {
8469 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8470 status));
8471 }
8472
8473 return 0;
8474 }
8475
8476 /*
8477 * wm_tbi_tick:
8478 *
8479 * Check the link on TBI devices.
8480 * This function acts as mii_tick().
8481 */
8482 static void
8483 wm_tbi_tick(struct wm_softc *sc)
8484 {
8485 struct mii_data *mii = &sc->sc_mii;
8486 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8487 uint32_t status;
8488
8489 KASSERT(WM_TX_LOCKED(sc));
8490
8491 status = CSR_READ(sc, WMREG_STATUS);
8492
8493 /* XXX is this needed? */
8494 (void)CSR_READ(sc, WMREG_RXCW);
8495 (void)CSR_READ(sc, WMREG_CTRL);
8496
8497 /* set link status */
8498 if ((status & STATUS_LU) == 0) {
8499 DPRINTF(WM_DEBUG_LINK,
8500 ("%s: LINK: checklink -> down\n",
8501 device_xname(sc->sc_dev)));
8502 sc->sc_tbi_linkup = 0;
8503 } else if (sc->sc_tbi_linkup == 0) {
8504 DPRINTF(WM_DEBUG_LINK,
8505 ("%s: LINK: checklink -> up %s\n",
8506 device_xname(sc->sc_dev),
8507 (status & STATUS_FD) ? "FDX" : "HDX"));
8508 sc->sc_tbi_linkup = 1;
8509 sc->sc_tbi_serdes_ticks = 0;
8510 }
8511
8512 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
8513 goto setled;
8514
8515 if ((status & STATUS_LU) == 0) {
8516 sc->sc_tbi_linkup = 0;
8517 /* If the timer expired, retry autonegotiation */
8518 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8519 && (++sc->sc_tbi_serdes_ticks
8520 >= sc->sc_tbi_serdes_anegticks)) {
8521 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8522 sc->sc_tbi_serdes_ticks = 0;
8523 /*
8524 * Reset the link, and let autonegotiation do
8525 * its thing
8526 */
8527 sc->sc_ctrl |= CTRL_LRST;
8528 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8529 CSR_WRITE_FLUSH(sc);
8530 delay(1000);
8531 sc->sc_ctrl &= ~CTRL_LRST;
8532 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8533 CSR_WRITE_FLUSH(sc);
8534 delay(1000);
8535 CSR_WRITE(sc, WMREG_TXCW,
8536 sc->sc_txcw & ~TXCW_ANE);
8537 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8538 }
8539 }
8540
8541 setled:
8542 wm_tbi_serdes_set_linkled(sc);
8543 }
8544
8545 /* SERDES related */
8546 static void
8547 wm_serdes_power_up_link_82575(struct wm_softc *sc)
8548 {
8549 uint32_t reg;
8550
8551 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
8552 && ((sc->sc_flags & WM_F_SGMII) == 0))
8553 return;
8554
8555 reg = CSR_READ(sc, WMREG_PCS_CFG);
8556 reg |= PCS_CFG_PCS_EN;
8557 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
8558
8559 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8560 reg &= ~CTRL_EXT_SWDPIN(3);
8561 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8562 CSR_WRITE_FLUSH(sc);
8563 }
8564
8565 static int
8566 wm_serdes_mediachange(struct ifnet *ifp)
8567 {
8568 struct wm_softc *sc = ifp->if_softc;
8569 bool pcs_autoneg = true; /* XXX */
8570 uint32_t ctrl_ext, pcs_lctl, reg;
8571
8572 /* XXX Currently, this function is not called on 8257[12] */
8573 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8574 || (sc->sc_type >= WM_T_82575))
8575 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8576
8577 wm_serdes_power_up_link_82575(sc);
8578
8579 sc->sc_ctrl |= CTRL_SLU;
8580
8581 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
8582 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
8583
8584 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8585 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
8586 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
8587 case CTRL_EXT_LINK_MODE_SGMII:
8588 pcs_autoneg = true;
8589 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
8590 break;
8591 case CTRL_EXT_LINK_MODE_1000KX:
8592 pcs_autoneg = false;
8593 /* FALLTHROUGH */
8594 default:
8595 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
8596 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
8597 pcs_autoneg = false;
8598 }
8599 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
8600 | CTRL_FRCFDX;
8601 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
8602 }
8603 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8604
8605 if (pcs_autoneg) {
8606 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
8607 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
8608
8609 reg = CSR_READ(sc, WMREG_PCS_ANADV);
8610 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
8611 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
8612 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
8613 } else
8614 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
8615
8616 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
8617
8619 return 0;
8620 }
8621
8622 static void
8623 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8624 {
8625 struct wm_softc *sc = ifp->if_softc;
8626 struct mii_data *mii = &sc->sc_mii;
8627 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8628 uint32_t pcs_adv, pcs_lpab, reg;
8629
8630 ifmr->ifm_status = IFM_AVALID;
8631 ifmr->ifm_active = IFM_ETHER;
8632
8633 /* Check PCS */
8634 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8635 if ((reg & PCS_LSTS_LINKOK) == 0) {
8636 ifmr->ifm_active |= IFM_NONE;
8637 sc->sc_tbi_linkup = 0;
8638 goto setled;
8639 }
8640
8641 sc->sc_tbi_linkup = 1;
8642 ifmr->ifm_status |= IFM_ACTIVE;
8643 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
8644 if ((reg & PCS_LSTS_FDX) != 0)
8645 ifmr->ifm_active |= IFM_FDX;
8646 else
8647 ifmr->ifm_active |= IFM_HDX;
8648 mii->mii_media_active &= ~IFM_ETH_FMASK;
8649 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
8650 /* Check flow */
8651 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8652 if ((reg & PCS_LSTS_AN_COMP) == 0) {
8653 printf("XXX LINKOK but not ACOMP\n");
8654 goto setled;
8655 }
8656 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
8657 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
8658 printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
8659 if ((pcs_adv & TXCW_SYM_PAUSE)
8660 && (pcs_lpab & TXCW_SYM_PAUSE)) {
8661 mii->mii_media_active |= IFM_FLOW
8662 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
8663 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
8664 && (pcs_adv & TXCW_ASYM_PAUSE)
8665 && (pcs_lpab & TXCW_SYM_PAUSE)
8666 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8667 mii->mii_media_active |= IFM_FLOW
8668 | IFM_ETH_TXPAUSE;
8669 } else if ((pcs_adv & TXCW_SYM_PAUSE)
8670 && (pcs_adv & TXCW_ASYM_PAUSE)
8671 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
8672 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8673 mii->mii_media_active |= IFM_FLOW
8674 | IFM_ETH_RXPAUSE;
8675 } else {
8676 }
8677 }
8678 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8679 | (mii->mii_media_active & IFM_ETH_FMASK);
8680 setled:
8681 wm_tbi_serdes_set_linkled(sc);
8682 }
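
/*
 * The pause resolution above follows IEEE 802.3 Annex 28B.  A compact
 * restatement as a hypothetical helper (not compiled); the argument
 * names stand for our and the partner's SYM/ASYM pause advertisements.
 */
#if 0
static int
wm_resolve_pause(bool lsym, bool lasym, bool psym, bool pasym)
{

	if (lsym && psym)			/* both symmetric */
		return IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
	if (!lsym && lasym && psym && pasym)	/* we may send PAUSE only */
		return IFM_FLOW | IFM_ETH_TXPAUSE;
	if (lsym && lasym && !psym && pasym)	/* we only honor PAUSE */
		return IFM_FLOW | IFM_ETH_RXPAUSE;
	return 0;				/* no pause either way */
}
#endif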
8683
8684 /*
8685 * wm_serdes_tick:
8686 *
8687 * Check the link on serdes devices.
8688 */
8689 static void
8690 wm_serdes_tick(struct wm_softc *sc)
8691 {
8692 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8693 struct mii_data *mii = &sc->sc_mii;
8694 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8695 uint32_t reg;
8696
8697 KASSERT(WM_TX_LOCKED(sc));
8698
8699 mii->mii_media_status = IFM_AVALID;
8700 mii->mii_media_active = IFM_ETHER;
8701
8702 /* Check PCS */
8703 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8704 if ((reg & PCS_LSTS_LINKOK) != 0) {
8705 mii->mii_media_status |= IFM_ACTIVE;
8706 sc->sc_tbi_linkup = 1;
8707 sc->sc_tbi_serdes_ticks = 0;
8708 mii->mii_media_active |= IFM_1000_SX; /* XXX */
8709 if ((reg & PCS_LSTS_FDX) != 0)
8710 mii->mii_media_active |= IFM_FDX;
8711 else
8712 mii->mii_media_active |= IFM_HDX;
8713 } else {
8714 		mii->mii_media_active |= IFM_NONE;
8715 sc->sc_tbi_linkup = 0;
8716 /* If the timer expired, retry autonegotiation */
8717 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8718 && (++sc->sc_tbi_serdes_ticks
8719 >= sc->sc_tbi_serdes_anegticks)) {
8720 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8721 sc->sc_tbi_serdes_ticks = 0;
8722 /* XXX */
8723 wm_serdes_mediachange(ifp);
8724 }
8725 }
8726
8727 wm_tbi_serdes_set_linkled(sc);
8728 }
8729
8730 /* SFP related */
8731
8732 static int
8733 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
8734 {
8735 uint32_t i2ccmd;
8736 int i;
8737
8738 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
8739 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8740
8741 /* Poll the ready bit */
8742 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8743 delay(50);
8744 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8745 if (i2ccmd & I2CCMD_READY)
8746 break;
8747 }
8748 if ((i2ccmd & I2CCMD_READY) == 0)
8749 return -1;
8750 if ((i2ccmd & I2CCMD_ERROR) != 0)
8751 return -1;
8752
8753 *data = i2ccmd & 0x00ff;
8754
8755 return 0;
8756 }
8757
8758 static uint32_t
8759 wm_sfp_get_media_type(struct wm_softc *sc)
8760 {
8761 uint32_t ctrl_ext;
8762 uint8_t val = 0;
8763 int timeout = 3;
8764 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
8765 int rv = -1;
8766
8767 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8768 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
8769 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
8770 CSR_WRITE_FLUSH(sc);
8771
8772 /* Read SFP module data */
8773 while (timeout) {
8774 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
8775 if (rv == 0)
8776 break;
8777 delay(100*1000); /* XXX too big */
8778 timeout--;
8779 }
8780 if (rv != 0)
8781 goto out;
8782 switch (val) {
8783 case SFF_SFP_ID_SFF:
8784 aprint_normal_dev(sc->sc_dev,
8785 "Module/Connector soldered to board\n");
8786 break;
8787 case SFF_SFP_ID_SFP:
8788 aprint_normal_dev(sc->sc_dev, "SFP\n");
8789 break;
8790 case SFF_SFP_ID_UNKNOWN:
8791 goto out;
8792 default:
8793 break;
8794 }
8795
8796 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
8797 if (rv != 0) {
8798 goto out;
8799 }
8800
8801 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
8802 mediatype = WM_MEDIATYPE_SERDES;
8803 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
8804 sc->sc_flags |= WM_F_SGMII;
8805 mediatype = WM_MEDIATYPE_COPPER;
8806 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
8807 sc->sc_flags |= WM_F_SGMII;
8808 mediatype = WM_MEDIATYPE_SERDES;
8809 }
8810
8811 out:
8812 /* Restore I2C interface setting */
8813 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8814
8815 return mediatype;
8816 }
8817 /*
8818 * NVM related.
8819 * Microwire, SPI (w/wo EERD) and Flash.
8820 */
8821
8822 /* Both spi and uwire */
8823
8824 /*
8825 * wm_eeprom_sendbits:
8826 *
8827 * Send a series of bits to the EEPROM.
8828 */
8829 static void
8830 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
8831 {
8832 uint32_t reg;
8833 int x;
8834
8835 reg = CSR_READ(sc, WMREG_EECD);
8836
8837 for (x = nbits; x > 0; x--) {
8838 if (bits & (1U << (x - 1)))
8839 reg |= EECD_DI;
8840 else
8841 reg &= ~EECD_DI;
8842 CSR_WRITE(sc, WMREG_EECD, reg);
8843 CSR_WRITE_FLUSH(sc);
8844 delay(2);
8845 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8846 CSR_WRITE_FLUSH(sc);
8847 delay(2);
8848 CSR_WRITE(sc, WMREG_EECD, reg);
8849 CSR_WRITE_FLUSH(sc);
8850 delay(2);
8851 }
8852 }
8853
8854 /*
8855 * wm_eeprom_recvbits:
8856 *
8857 * Receive a series of bits from the EEPROM.
8858 */
8859 static void
8860 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
8861 {
8862 uint32_t reg, val;
8863 int x;
8864
8865 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
8866
8867 val = 0;
8868 for (x = nbits; x > 0; x--) {
8869 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8870 CSR_WRITE_FLUSH(sc);
8871 delay(2);
8872 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
8873 val |= (1U << (x - 1));
8874 CSR_WRITE(sc, WMREG_EECD, reg);
8875 CSR_WRITE_FLUSH(sc);
8876 delay(2);
8877 }
8878 *valp = val;
8879 }
8880
8881 /* Microwire */
8882
8883 /*
8884 * wm_nvm_read_uwire:
8885 *
8886 * Read a word from the EEPROM using the MicroWire protocol.
8887 */
8888 static int
8889 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8890 {
8891 uint32_t reg, val;
8892 int i;
8893
8894 for (i = 0; i < wordcnt; i++) {
8895 /* Clear SK and DI. */
8896 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
8897 CSR_WRITE(sc, WMREG_EECD, reg);
8898
8899 /*
8900 * XXX: workaround for a bug in qemu-0.12.x and prior
8901 * and Xen.
8902 *
8903 * We use this workaround only for 82540 because qemu's
8904 * e1000 acts as an 82540.
8905 */
8906 if (sc->sc_type == WM_T_82540) {
8907 reg |= EECD_SK;
8908 CSR_WRITE(sc, WMREG_EECD, reg);
8909 reg &= ~EECD_SK;
8910 CSR_WRITE(sc, WMREG_EECD, reg);
8911 CSR_WRITE_FLUSH(sc);
8912 delay(2);
8913 }
8914 /* XXX: end of workaround */
8915
8916 /* Set CHIP SELECT. */
8917 reg |= EECD_CS;
8918 CSR_WRITE(sc, WMREG_EECD, reg);
8919 CSR_WRITE_FLUSH(sc);
8920 delay(2);
8921
8922 /* Shift in the READ command. */
8923 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
8924
8925 /* Shift in address. */
8926 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
8927
8928 /* Shift out the data. */
8929 wm_eeprom_recvbits(sc, &val, 16);
8930 data[i] = val & 0xffff;
8931
8932 /* Clear CHIP SELECT. */
8933 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
8934 CSR_WRITE(sc, WMREG_EECD, reg);
8935 CSR_WRITE_FLUSH(sc);
8936 delay(2);
8937 }
8938
8939 return 0;
8940 }
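
/*
 * Usage sketch (not compiled): pulling three words, e.g. the station
 * address at the offset this driver uses elsewhere, through the
 * MicroWire reader above.  The NVM lock must already be held.
 */
#if 0
	uint16_t myea[3];

	if (wm_nvm_read_uwire(sc, NVM_OFF_MACADDR, 3, myea) == 0) {
		/* myea[0..2] now hold the address as little-endian words */
	}
#endif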
8941
8942 /* SPI */
8943
8944 /*
8945 * Set SPI and FLASH related information from the EECD register.
8946 * For 82541 and 82547, the word size is taken from EEPROM.
8947 */
8948 static int
8949 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
8950 {
8951 int size;
8952 uint32_t reg;
8953 uint16_t data;
8954
8955 reg = CSR_READ(sc, WMREG_EECD);
8956 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
8957
8958 /* Read the size of NVM from EECD by default */
8959 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8960 switch (sc->sc_type) {
8961 case WM_T_82541:
8962 case WM_T_82541_2:
8963 case WM_T_82547:
8964 case WM_T_82547_2:
8965 /* Set dummy value to access EEPROM */
8966 sc->sc_nvm_wordsize = 64;
8967 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
8968 reg = data;
8969 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8970 if (size == 0)
8971 size = 6; /* 64 word size */
8972 else
8973 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
8974 break;
8975 case WM_T_80003:
8976 case WM_T_82571:
8977 case WM_T_82572:
8978 case WM_T_82573: /* SPI case */
8979 case WM_T_82574: /* SPI case */
8980 case WM_T_82583: /* SPI case */
8981 size += NVM_WORD_SIZE_BASE_SHIFT;
8982 if (size > 14)
8983 size = 14;
8984 break;
8985 case WM_T_82575:
8986 case WM_T_82576:
8987 case WM_T_82580:
8988 case WM_T_I350:
8989 case WM_T_I354:
8990 case WM_T_I210:
8991 case WM_T_I211:
8992 size += NVM_WORD_SIZE_BASE_SHIFT;
8993 if (size > 15)
8994 size = 15;
8995 break;
8996 default:
8997 aprint_error_dev(sc->sc_dev,
8998 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
8999 		return -1;
9001 }
9002
9003 sc->sc_nvm_wordsize = 1 << size;
9004
9005 return 0;
9006 }
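
/*
 * Worked example for the size math above (illustrative, not compiled),
 * assuming NVM_WORD_SIZE_BASE_SHIFT is 6 as in Intel's e1000 code: an
 * EECD size-extension field of 2 on, say, an 82571 gives 256 words.
 */
#if 0
	int size = 2 + NVM_WORD_SIZE_BASE_SHIFT;	/* 2 + 6 = 8 */
	int wordsize = 1 << size;			/* 1 << 8 = 256 */
#endif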
9007
9008 /*
9009 * wm_nvm_ready_spi:
9010 *
9011 * Wait for a SPI EEPROM to be ready for commands.
9012 */
9013 static int
9014 wm_nvm_ready_spi(struct wm_softc *sc)
9015 {
9016 uint32_t val;
9017 int usec;
9018
9019 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
9020 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
9021 wm_eeprom_recvbits(sc, &val, 8);
9022 if ((val & SPI_SR_RDY) == 0)
9023 break;
9024 }
9025 if (usec >= SPI_MAX_RETRIES) {
9026 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
9027 return 1;
9028 }
9029 return 0;
9030 }
9031
9032 /*
9033 * wm_nvm_read_spi:
9034 *
9035 * Read a word from the EEPROM using the SPI protocol.
9036 */
9037 static int
9038 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9039 {
9040 uint32_t reg, val;
9041 int i;
9042 uint8_t opc;
9043
9044 /* Clear SK and CS. */
9045 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
9046 CSR_WRITE(sc, WMREG_EECD, reg);
9047 CSR_WRITE_FLUSH(sc);
9048 delay(2);
9049
9050 if (wm_nvm_ready_spi(sc))
9051 return 1;
9052
9053 /* Toggle CS to flush commands. */
9054 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
9055 CSR_WRITE_FLUSH(sc);
9056 delay(2);
9057 CSR_WRITE(sc, WMREG_EECD, reg);
9058 CSR_WRITE_FLUSH(sc);
9059 delay(2);
9060
9061 opc = SPI_OPC_READ;
9062 if (sc->sc_nvm_addrbits == 8 && word >= 128)
9063 opc |= SPI_OPC_A8;
9064
9065 wm_eeprom_sendbits(sc, opc, 8);
9066 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
9067
9068 for (i = 0; i < wordcnt; i++) {
9069 wm_eeprom_recvbits(sc, &val, 16);
9070 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
9071 }
9072
9073 /* Raise CS and clear SK. */
9074 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
9075 CSR_WRITE(sc, WMREG_EECD, reg);
9076 CSR_WRITE_FLUSH(sc);
9077 delay(2);
9078
9079 return 0;
9080 }
9081
9082 /* Reading with the EERD register */
9083
9084 static int
9085 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
9086 {
9087 uint32_t attempts = 100000;
9088 uint32_t i, reg = 0;
9089 int32_t done = -1;
9090
9091 for (i = 0; i < attempts; i++) {
9092 reg = CSR_READ(sc, rw);
9093
9094 if (reg & EERD_DONE) {
9095 done = 0;
9096 break;
9097 }
9098 delay(5);
9099 }
9100
9101 return done;
9102 }
9103
9104 static int
9105 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
9106 uint16_t *data)
9107 {
9108 int i, eerd = 0;
9109 int error = 0;
9110
9111 for (i = 0; i < wordcnt; i++) {
9112 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
9113
9114 CSR_WRITE(sc, WMREG_EERD, eerd);
9115 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
9116 if (error != 0)
9117 break;
9118
9119 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
9120 }
9121
9122 return error;
9123 }
9124
9125 /* Flash */
9126
9127 static int
9128 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
9129 {
9130 uint32_t eecd;
9131 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
9132 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
9133 uint8_t sig_byte = 0;
9134
9135 switch (sc->sc_type) {
9136 case WM_T_ICH8:
9137 case WM_T_ICH9:
9138 eecd = CSR_READ(sc, WMREG_EECD);
9139 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
9140 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
9141 return 0;
9142 }
9143 /* FALLTHROUGH */
9144 default:
9145 /* Default to 0 */
9146 *bank = 0;
9147
9148 /* Check bank 0 */
9149 wm_read_ich8_byte(sc, act_offset, &sig_byte);
9150 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9151 *bank = 0;
9152 return 0;
9153 }
9154
9155 /* Check bank 1 */
9156 wm_read_ich8_byte(sc, act_offset + bank1_offset,
9157 &sig_byte);
9158 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9159 *bank = 1;
9160 return 0;
9161 }
9162 }
9163
9164 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
9165 device_xname(sc->sc_dev)));
9166 return -1;
9167 }
9168
9169 /******************************************************************************
9170 * This function does initial flash setup so that a new read/write/erase cycle
9171 * can be started.
9172 *
9173 * sc - The pointer to the hw structure
9174 ****************************************************************************/
9175 static int32_t
9176 wm_ich8_cycle_init(struct wm_softc *sc)
9177 {
9178 uint16_t hsfsts;
9179 int32_t error = 1;
9180 int32_t i = 0;
9181
9182 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9183
9184 	/* Maybe we should check the Flash Descriptor Valid bit in HW status */
9185 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
9186 return error;
9187 }
9188
9189 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
9191 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
9192
9193 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9194
9195 	/*
9196 	 * Either we should have a hardware SPI cycle-in-progress bit to
9197 	 * check against in order to start a new cycle, or the FDONE bit
9198 	 * should be changed in the hardware so that it reads as 1 after a
9199 	 * hardware reset, which could then be used to tell whether a cycle
9200 	 * is in progress or has been completed.  We should also have some
9201 	 * software semaphore mechanism guarding FDONE or the cycle-in-
9202 	 * progress bit so that accesses to them from two threads are
9203 	 * serialized, or some way to keep two threads from starting a
9204 	 * cycle at the same time.
	 */
9205
9206 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
9207 /*
9208 * There is no cycle running at present, so we can start a
9209 * cycle
9210 */
9211
9212 /* Begin by setting Flash Cycle Done. */
9213 hsfsts |= HSFSTS_DONE;
9214 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9215 error = 0;
9216 } else {
9217 /*
9218 		 * Otherwise poll for some time so the current cycle has a
9219 * chance to end before giving up.
9220 */
9221 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
9222 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9223 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
9224 error = 0;
9225 break;
9226 }
9227 delay(1);
9228 }
9229 if (error == 0) {
9230 /*
9231 			 * The previous cycle ended before the timeout; now
9232 			 * set the Flash Cycle Done bit.
9233 */
9234 hsfsts |= HSFSTS_DONE;
9235 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9236 }
9237 }
9238 return error;
9239 }
9240
9241 /******************************************************************************
9242 * This function starts a flash cycle and waits for its completion
9243 *
9244 * sc - The pointer to the hw structure
9245 ****************************************************************************/
9246 static int32_t
9247 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
9248 {
9249 uint16_t hsflctl;
9250 uint16_t hsfsts;
9251 int32_t error = 1;
9252 uint32_t i = 0;
9253
9254 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
9255 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
9256 hsflctl |= HSFCTL_GO;
9257 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
9258
9259 /* Wait till FDONE bit is set to 1 */
9260 do {
9261 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9262 if (hsfsts & HSFSTS_DONE)
9263 break;
9264 delay(1);
9265 i++;
9266 } while (i < timeout);
9267 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
9268 error = 0;
9269
9270 return error;
9271 }
9272
9273 /******************************************************************************
9274 * Reads a byte or word from the NVM using the ICH8 flash access registers.
9275 *
9276 * sc - The pointer to the hw structure
9277 * index - The index of the byte or word to read.
9278 * size - Size of data to read, 1=byte 2=word
9279 * data - Pointer to the word to store the value read.
9280 *****************************************************************************/
9281 static int32_t
9282 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
9283 uint32_t size, uint16_t *data)
9284 {
9285 uint16_t hsfsts;
9286 uint16_t hsflctl;
9287 uint32_t flash_linear_address;
9288 uint32_t flash_data = 0;
9289 int32_t error = 1;
9290 int32_t count = 0;
9291
9292 	if (size < 1 || size > 2 || data == NULL ||
9293 index > ICH_FLASH_LINEAR_ADDR_MASK)
9294 return error;
9295
9296 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
9297 sc->sc_ich8_flash_base;
9298
9299 do {
9300 delay(1);
9301 /* Steps */
9302 error = wm_ich8_cycle_init(sc);
9303 if (error)
9304 break;
9305
9306 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
9307 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
9308 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
9309 & HSFCTL_BCOUNT_MASK;
9310 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
9311 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
9312
9313 /*
9314 * Write the last 24 bits of index into Flash Linear address
9315 * field in Flash Address
9316 */
9317 /* TODO: TBD maybe check the index against the size of flash */
9318
9319 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
9320
9321 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
9322
9323 /*
9324 		 * If FCERR is set, clear it and retry the whole sequence a
9325 		 * few more times; otherwise read the value out of Flash
9326 		 * Data0, least significant byte first.
9328 */
9329 if (error == 0) {
9330 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
9331 if (size == 1)
9332 *data = (uint8_t)(flash_data & 0x000000FF);
9333 else if (size == 2)
9334 *data = (uint16_t)(flash_data & 0x0000FFFF);
9335 break;
9336 } else {
9337 /*
9338 * If we've gotten here, then things are probably
9339 * completely hosed, but if the error condition is
9340 * detected, it won't hurt to give it another try...
9341 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
9342 */
9343 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9344 if (hsfsts & HSFSTS_ERR) {
9345 /* Repeat for some time before giving up. */
9346 continue;
9347 } else if ((hsfsts & HSFSTS_DONE) == 0)
9348 break;
9349 }
9350 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
9351
9352 return error;
9353 }
9354
9355 /******************************************************************************
9356 * Reads a single byte from the NVM using the ICH8 flash access registers.
9357 *
9358 * sc - pointer to wm_hw structure
9359 * index - The index of the byte to read.
9360 * data - Pointer to a byte to store the value read.
9361 *****************************************************************************/
9362 static int32_t
9363 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
9364 {
9365 int32_t status;
9366 uint16_t word = 0;
9367
9368 status = wm_read_ich8_data(sc, index, 1, &word);
9369 if (status == 0)
9370 *data = (uint8_t)word;
9371 else
9372 *data = 0;
9373
9374 return status;
9375 }
9376
9377 /******************************************************************************
9378 * Reads a word from the NVM using the ICH8 flash access registers.
9379 *
9380 * sc - pointer to wm_hw structure
9381 * index - The starting byte index of the word to read.
9382 * data - Pointer to a word to store the value read.
9383 *****************************************************************************/
9384 static int32_t
9385 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
9386 {
9387 int32_t status;
9388
9389 status = wm_read_ich8_data(sc, index, 2, data);
9390 return status;
9391 }
9392
9393 /******************************************************************************
9394 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
9395 * register.
9396 *
9397 * sc - Struct containing variables accessed by shared code
9398 * offset - offset of word in the EEPROM to read
9399 * data - word read from the EEPROM
9400 * words - number of words to read
9401 *****************************************************************************/
9402 static int
9403 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
9404 {
9405 int32_t error = 0;
9406 uint32_t flash_bank = 0;
9407 uint32_t act_offset = 0;
9408 uint32_t bank_offset = 0;
9409 uint16_t word = 0;
9410 uint16_t i = 0;
9411
9412 /*
9413 * We need to know which is the valid flash bank. In the event
9414 * that we didn't allocate eeprom_shadow_ram, we may not be
9415 * managing flash_bank. So it cannot be trusted and needs
9416 * to be updated with each read.
9417 */
9418 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
9419 if (error) {
9420 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
9421 device_xname(sc->sc_dev)));
9422 flash_bank = 0;
9423 }
9424
9425 	/*
9426 	 * Adjust the offset if we're on bank 1; the bank size is counted
9427 	 * in words, so convert it to a byte offset.
9428 	 */
9429 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
9430
9431 error = wm_get_swfwhw_semaphore(sc);
9432 if (error) {
9433 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9434 __func__);
9435 return error;
9436 }
9437
9438 for (i = 0; i < words; i++) {
9439 /* The NVM part needs a byte offset, hence * 2 */
9440 act_offset = bank_offset + ((offset + i) * 2);
9441 error = wm_read_ich8_word(sc, act_offset, &word);
9442 if (error) {
9443 aprint_error_dev(sc->sc_dev,
9444 "%s: failed to read NVM\n", __func__);
9445 break;
9446 }
9447 data[i] = word;
9448 }
9449
9450 wm_put_swfwhw_semaphore(sc);
9451 return error;
9452 }
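
/*
 * Worked example for the offset math above (illustrative, not
 * compiled): word 0x10 in flash bank 1, with an assumed bank size of
 * 0x800 words, lands at byte 0x1020 of the flash region.
 */
#if 0
	uint32_t bank_offset = 1 * (0x800 * 2);		/* 0x1000 bytes */
	uint32_t act_offset = bank_offset + (0x10 * 2);	/* 0x1020 bytes */
#endif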
9453
9454 /* iNVM */
9455
9456 static int
9457 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
9458 {
9459 	int32_t rv = -1;	/* "not found" until the word turns up */
9460 uint32_t invm_dword;
9461 uint16_t i;
9462 uint8_t record_type, word_address;
9463
9464 for (i = 0; i < INVM_SIZE; i++) {
9465 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
9466 /* Get record type */
9467 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
9468 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
9469 break;
9470 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
9471 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
9472 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
9473 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
9474 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
9475 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
9476 if (word_address == address) {
9477 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
9478 rv = 0;
9479 break;
9480 }
9481 }
9482 }
9483
9484 return rv;
9485 }
9486
9487 static int
9488 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
9489 {
9490 int rv = 0;
9491 int i;
9492
9493 for (i = 0; i < words; i++) {
9494 switch (offset + i) {
9495 case NVM_OFF_MACADDR:
9496 case NVM_OFF_MACADDR1:
9497 case NVM_OFF_MACADDR2:
9498 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
9499 if (rv != 0) {
9500 data[i] = 0xffff;
9501 rv = -1;
9502 }
9503 break;
9504 case NVM_OFF_CFG2:
9505 rv = wm_nvm_read_word_invm(sc, offset, data);
9506 if (rv != 0) {
9507 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
9508 rv = 0;
9509 }
9510 break;
9511 case NVM_OFF_CFG4:
9512 rv = wm_nvm_read_word_invm(sc, offset, data);
9513 if (rv != 0) {
9514 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
9515 rv = 0;
9516 }
9517 break;
9518 case NVM_OFF_LED_1_CFG:
9519 rv = wm_nvm_read_word_invm(sc, offset, data);
9520 if (rv != 0) {
9521 *data = NVM_LED_1_CFG_DEFAULT_I211;
9522 rv = 0;
9523 }
9524 break;
9525 case NVM_OFF_LED_0_2_CFG:
9526 rv = wm_nvm_read_word_invm(sc, offset, data);
9527 if (rv != 0) {
9528 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
9529 rv = 0;
9530 }
9531 break;
9532 case NVM_OFF_ID_LED_SETTINGS:
9533 rv = wm_nvm_read_word_invm(sc, offset, data);
9534 if (rv != 0) {
9535 *data = ID_LED_RESERVED_FFFF;
9536 rv = 0;
9537 }
9538 break;
9539 default:
9540 DPRINTF(WM_DEBUG_NVM,
9541 ("NVM word 0x%02x is not mapped.\n", offset));
9542 *data = NVM_RESERVED_WORD;
9543 break;
9544 }
9545 }
9546
9547 return rv;
9548 }
9549
9550 /* Lock, detect NVM type, validate checksum, get version and read */
9551
9552 /*
9553 * wm_nvm_acquire:
9554 *
9555 * Perform the EEPROM handshake required on some chips.
9556 */
9557 static int
9558 wm_nvm_acquire(struct wm_softc *sc)
9559 {
9560 uint32_t reg;
9561 int x;
9562 int ret = 0;
9563
9564 /* Always succeeds */
9565 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9566 return 0;
9567
9568 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
9569 ret = wm_get_swfwhw_semaphore(sc);
9570 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
9571 /* This will also do wm_get_swsm_semaphore() if needed */
9572 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
9573 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
9574 ret = wm_get_swsm_semaphore(sc);
9575 }
9576
9577 if (ret) {
9578 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9579 __func__);
9580 return 1;
9581 }
9582
9583 if (sc->sc_flags & WM_F_LOCK_EECD) {
9584 reg = CSR_READ(sc, WMREG_EECD);
9585
9586 /* Request EEPROM access. */
9587 reg |= EECD_EE_REQ;
9588 CSR_WRITE(sc, WMREG_EECD, reg);
9589
9590 /* ..and wait for it to be granted. */
9591 for (x = 0; x < 1000; x++) {
9592 reg = CSR_READ(sc, WMREG_EECD);
9593 if (reg & EECD_EE_GNT)
9594 break;
9595 delay(5);
9596 }
9597 if ((reg & EECD_EE_GNT) == 0) {
9598 aprint_error_dev(sc->sc_dev,
9599 "could not acquire EEPROM GNT\n");
9600 reg &= ~EECD_EE_REQ;
9601 CSR_WRITE(sc, WMREG_EECD, reg);
9602 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9603 wm_put_swfwhw_semaphore(sc);
9604 if (sc->sc_flags & WM_F_LOCK_SWFW)
9605 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9606 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9607 wm_put_swsm_semaphore(sc);
9608 return 1;
9609 }
9610 }
9611
9612 return 0;
9613 }
9614
9615 /*
9616 * wm_nvm_release:
9617 *
9618 * Release the EEPROM mutex.
9619 */
9620 static void
9621 wm_nvm_release(struct wm_softc *sc)
9622 {
9623 uint32_t reg;
9624
9625 /* Always succeeds */
9626 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9627 return;
9628
9629 if (sc->sc_flags & WM_F_LOCK_EECD) {
9630 reg = CSR_READ(sc, WMREG_EECD);
9631 reg &= ~EECD_EE_REQ;
9632 CSR_WRITE(sc, WMREG_EECD, reg);
9633 }
9634
9635 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9636 wm_put_swfwhw_semaphore(sc);
9637 if (sc->sc_flags & WM_F_LOCK_SWFW)
9638 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9639 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9640 wm_put_swsm_semaphore(sc);
9641 }
9642
9643 static int
9644 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
9645 {
9646 uint32_t eecd = 0;
9647
9648 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
9649 || sc->sc_type == WM_T_82583) {
9650 eecd = CSR_READ(sc, WMREG_EECD);
9651
9652 /* Isolate bits 15 & 16 */
9653 eecd = ((eecd >> 15) & 0x03);
9654
9655 /* If both bits are set, device is Flash type */
9656 if (eecd == 0x03)
9657 return 0;
9658 }
9659 return 1;
9660 }
9661
9662 static int
9663 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
9664 {
9665 uint32_t eec;
9666
9667 eec = CSR_READ(sc, WMREG_EEC);
9668 if ((eec & EEC_FLASH_DETECTED) != 0)
9669 return 1;
9670
9671 return 0;
9672 }
9673
9674 /*
9675 * wm_nvm_validate_checksum
9676 *
9677 * The checksum is defined as the sum of the first 64 (16 bit) words.
9678 */
9679 static int
9680 wm_nvm_validate_checksum(struct wm_softc *sc)
9681 {
9682 uint16_t checksum;
9683 uint16_t eeprom_data;
9684 #ifdef WM_DEBUG
9685 uint16_t csum_wordaddr, valid_checksum;
9686 #endif
9687 int i;
9688
9689 checksum = 0;
9690
9691 /* Don't check for I211 */
9692 if (sc->sc_type == WM_T_I211)
9693 return 0;
9694
9695 #ifdef WM_DEBUG
9696 if (sc->sc_type == WM_T_PCH_LPT) {
9697 csum_wordaddr = NVM_OFF_COMPAT;
9698 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
9699 } else {
9700 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
9701 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
9702 }
9703
9704 /* Dump EEPROM image for debug */
9705 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9706 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9707 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
9708 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
9709 if ((eeprom_data & valid_checksum) == 0) {
9710 DPRINTF(WM_DEBUG_NVM,
9711 ("%s: NVM need to be updated (%04x != %04x)\n",
9712 device_xname(sc->sc_dev), eeprom_data,
9713 valid_checksum));
9714 }
9715 }
9716
9717 if ((wm_debug & WM_DEBUG_NVM) != 0) {
9718 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
9719 for (i = 0; i < NVM_SIZE; i++) {
9720 if (wm_nvm_read(sc, i, 1, &eeprom_data))
9721 printf("XXXX ");
9722 else
9723 printf("%04hx ", eeprom_data);
9724 if (i % 8 == 7)
9725 printf("\n");
9726 }
9727 }
9728
9729 #endif /* WM_DEBUG */
9730
9731 for (i = 0; i < NVM_SIZE; i++) {
9732 if (wm_nvm_read(sc, i, 1, &eeprom_data))
9733 return 1;
9734 checksum += eeprom_data;
9735 }
9736
9737 if (checksum != (uint16_t) NVM_CHECKSUM) {
9738 #ifdef WM_DEBUG
9739 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
9740 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
9741 #endif
9742 }
9743
9744 return 0;
9745 }
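
/*
 * Editorial sketch, not part of the driver: the checksum rule
 * enforced by wm_nvm_validate_checksum() above.  All NVM_SIZE words,
 * including the stored checksum word, must sum to NVM_CHECKSUM
 * modulo 2^16.  The function name is hypothetical.
 */
static inline int
wm_nvm_sum_ok_sketch(const uint16_t *words, int nwords)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < nwords; i++)
		sum += words[i];	/* 16 bit wrap-around is intended */

	return sum == (uint16_t)NVM_CHECKSUM;
}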
9746
9747 static void
9748 wm_nvm_version(struct wm_softc *sc)
9749 {
9750 uint16_t major, minor, build, patch;
9751 uint16_t uid0, uid1;
9752 uint16_t nvm_data;
9753 uint16_t off;
9754 bool check_version = false;
9755 bool check_optionrom = false;
9756 bool have_build = false;
9757
9758 /*
9759 * Version format:
9760 *
9761 * XYYZ
9762 * X0YZ
9763 * X0YY
9764 *
9765 * Example:
9766 *
9767 * 82571 0x50a2 5.10.2? (the spec update mentions 5.6-5.10)
9768 * 82571 0x50a6 5.10.6?
9769 * 82572 0x506a 5.6.10?
9770 * 82572EI 0x5069 5.6.9?
9771 * 82574L 0x1080 1.8.0? (the spec update mentions 2.1.4)
9772 * 0x2013 2.1.3?
9773 * 82583 0x10a0 1.10.0? (the document says it's the default value)
9774 */
9775 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
9776 switch (sc->sc_type) {
9777 case WM_T_82571:
9778 case WM_T_82572:
9779 case WM_T_82574:
9780 check_version = true;
9781 check_optionrom = true;
9782 have_build = true;
9783 break;
9784 case WM_T_82575:
9785 case WM_T_82576:
9786 case WM_T_82580:
9787 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
9788 check_version = true;
9789 break;
9790 case WM_T_I211:
9791 /* XXX wm_nvm_version_invm(sc); */
9792 return;
9793 case WM_T_I210:
9794 if (!wm_nvm_get_flash_presence_i210(sc)) {
9795 /* XXX wm_nvm_version_invm(sc); */
9796 return;
9797 }
9798 /* FALLTHROUGH */
9799 case WM_T_I350:
9800 case WM_T_I354:
9801 check_version = true;
9802 check_optionrom = true;
9803 break;
9804 default:
9805 return;
9806 }
9807 if (check_version) {
9808 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
9809 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
9810 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
9811 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
9812 build = nvm_data & NVM_BUILD_MASK;
9813 have_build = true;
9814 } else
9815 minor = nvm_data & 0x00ff;
9816
9817 /* Decimal */
9818 minor = (minor / 16) * 10 + (minor % 16);
9819
9820 aprint_verbose(", version %d.%d", major, minor);
9821 if (have_build)
9822 aprint_verbose(".%d", build);
9823 sc->sc_nvm_ver_major = major;
9824 sc->sc_nvm_ver_minor = minor;
9825 }
9826 if (check_optionrom) {
9827 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
9828 /* Option ROM Version */
9829 if ((off != 0x0000) && (off != 0xffff)) {
9830 off += NVM_COMBO_VER_OFF;
9831 wm_nvm_read(sc, off + 1, 1, &uid1);
9832 wm_nvm_read(sc, off, 1, &uid0);
9833 if ((uid0 != 0) && (uid0 != 0xffff)
9834 && (uid1 != 0) && (uid1 != 0xffff)) {
9835 /* 16bits */
9836 major = uid0 >> 8;
9837 build = (uid0 << 8) | (uid1 >> 8);
9838 patch = uid1 & 0x00ff;
9839 aprint_verbose(", option ROM Version %d.%d.%d",
9840 major, build, patch);
9841 }
9842 }
9843 }
9844
9845 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
9846 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
9847 }
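
/*
 * Editorial sketch, not part of the driver: decoding the XYYZ style
 * version word as wm_nvm_version() does above.  For instance, 0x50a2
 * gives major 5, raw minor 0x0a (10 after the decimal conversion)
 * and build 2, i.e. "5.10.2".  The function name is hypothetical;
 * the masks are the driver's own.
 */
static inline void
wm_nvm_version_decode_sketch(uint16_t nvm_data, uint16_t *major,
    uint16_t *minor, uint16_t *build)
{
	*major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
	*minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
	*build = nvm_data & NVM_BUILD_MASK;

	/* Convert the hex coded minor to decimal, e.g. 0x10 -> 10. */
	*minor = (*minor / 16) * 10 + (*minor % 16);
}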
9848
9849 /*
9850 * wm_nvm_read:
9851 *
9852 * Read data from the serial EEPROM.
9853 */
9854 static int
9855 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9856 {
9857 int rv;
9858
9859 if (sc->sc_flags & WM_F_EEPROM_INVALID)
9860 return 1;
9861
9862 if (wm_nvm_acquire(sc))
9863 return 1;
9864
9865 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9866 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9867 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
9868 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
9869 else if (sc->sc_flags & WM_F_EEPROM_INVM)
9870 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
9871 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
9872 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
9873 else if (sc->sc_flags & WM_F_EEPROM_SPI)
9874 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
9875 else
9876 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
9877
9878 wm_nvm_release(sc);
9879 return rv;
9880 }
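
/*
 * Editorial sketch, not part of the driver: a typical wm_nvm_read()
 * caller, fetching the three MAC address words.  The function name
 * is hypothetical, and the low-byte-first unpacking is an assumption
 * about the NVM layout; the driver's real MAC address handling lives
 * elsewhere in this file.
 */
static int
wm_nvm_read_macaddr_sketch(struct wm_softc *sc, uint8_t enaddr[6])
{
	uint16_t myea[3];
	int i;

	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) != 0)
		return 1;	/* nonzero means the read failed */

	/* Assume each word holds two octets, low byte first. */
	for (i = 0; i < 3; i++) {
		enaddr[i * 2] = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}
	return 0;
}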
9881
9882 /*
9883 * Hardware semaphores.
9884 * Very complex...
9885 */
9886
9887 static int
9888 wm_get_swsm_semaphore(struct wm_softc *sc)
9889 {
9890 int32_t timeout;
9891 uint32_t swsm;
9892
9893 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9894 /* Get the SW semaphore. */
9895 timeout = sc->sc_nvm_wordsize + 1;
9896 while (timeout) {
9897 swsm = CSR_READ(sc, WMREG_SWSM);
9898
9899 if ((swsm & SWSM_SMBI) == 0)
9900 break;
9901
9902 delay(50);
9903 timeout--;
9904 }
9905
9906 if (timeout == 0) {
9907 aprint_error_dev(sc->sc_dev,
9908 "could not acquire SWSM SMBI\n");
9909 return 1;
9910 }
9911 }
9912
9913 /* Get the FW semaphore. */
9914 timeout = sc->sc_nvm_wordsize + 1;
9915 while (timeout) {
9916 swsm = CSR_READ(sc, WMREG_SWSM);
9917 swsm |= SWSM_SWESMBI;
9918 CSR_WRITE(sc, WMREG_SWSM, swsm);
9919 /* If we managed to set the bit we got the semaphore. */
9920 swsm = CSR_READ(sc, WMREG_SWSM);
9921 if (swsm & SWSM_SWESMBI)
9922 break;
9923
9924 delay(50);
9925 timeout--;
9926 }
9927
9928 if (timeout == 0) {
9929 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
9930 /* Release semaphores */
9931 wm_put_swsm_semaphore(sc);
9932 return 1;
9933 }
9934 return 0;
9935 }
9936
9937 static void
9938 wm_put_swsm_semaphore(struct wm_softc *sc)
9939 {
9940 uint32_t swsm;
9941
9942 swsm = CSR_READ(sc, WMREG_SWSM);
9943 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
9944 CSR_WRITE(sc, WMREG_SWSM, swsm);
9945 }
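
/*
 * Editorial sketch, not part of the driver: the "set and read back"
 * idiom wm_get_swsm_semaphore() uses for SWSM_SWESMBI above.  The
 * hardware lets only one agent set the bit at a time, so ownership
 * is decided by whether the bit sticks.  The function name is
 * hypothetical.
 */
static inline int
wm_try_set_bit_sketch(struct wm_softc *sc, uint32_t regoff, uint32_t bit)
{
	uint32_t val;

	val = CSR_READ(sc, regoff);
	CSR_WRITE(sc, regoff, val | bit);

	/* If the bit reads back as set, this agent owns the semaphore. */
	return (CSR_READ(sc, regoff) & bit) != 0;
}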
9946
9947 static int
9948 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
9949 {
9950 uint32_t swfw_sync;
9951 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
9952 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
9953 int timeout;
9954
9955 for (timeout = 0; timeout < 200; timeout++) {
9956 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9957 if (wm_get_swsm_semaphore(sc)) {
9958 aprint_error_dev(sc->sc_dev,
9959 "%s: failed to get semaphore\n",
9960 __func__);
9961 return 1;
9962 }
9963 }
9964 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
9965 if ((swfw_sync & (swmask | fwmask)) == 0) {
9966 swfw_sync |= swmask;
9967 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
9968 if (sc->sc_flags & WM_F_LOCK_SWSM)
9969 wm_put_swsm_semaphore(sc);
9970 return 0;
9971 }
9972 if (sc->sc_flags & WM_F_LOCK_SWSM)
9973 wm_put_swsm_semaphore(sc);
9974 delay(5000);
9975 }
9976 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
9977 device_xname(sc->sc_dev), mask, swfw_sync);
9978 return 1;
9979 }
9980
9981 static void
9982 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
9983 {
9984 uint32_t swfw_sync;
9985
9986 if (sc->sc_flags & WM_F_LOCK_SWSM) {
9987 while (wm_get_swsm_semaphore(sc) != 0)
9988 continue;
9989 }
9990 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
9991 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
9992 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
9993 if (sc->sc_flags & WM_F_LOCK_SWSM)
9994 wm_put_swsm_semaphore(sc);
9995 }
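
/*
 * Editorial sketch, not part of the driver: how a SWFW resource mask
 * maps onto SW_FW_SYNC in wm_get_swfw_semaphore() above.  Software
 * flags sit at SWFW_SOFT_SHIFT and the mirrored firmware flags at
 * SWFW_FIRM_SHIFT; a resource is free only when both copies of its
 * bit are clear.  The function name is hypothetical.
 */
static inline int
wm_swfw_is_free_sketch(uint32_t swfw_sync, uint16_t mask)
{
	uint32_t swmask = (uint32_t)mask << SWFW_SOFT_SHIFT;
	uint32_t fwmask = (uint32_t)mask << SWFW_FIRM_SHIFT;

	return (swfw_sync & (swmask | fwmask)) == 0;
}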
9996
9997 static int
9998 wm_get_swfwhw_semaphore(struct wm_softc *sc)
9999 {
10000 uint32_t ext_ctrl;
10001 int timeout;
10002
10003 for (timeout = 0; timeout < 200; timeout++) {
10004 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10005 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
10006 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10007
10008 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10009 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
10010 return 0;
10011 delay(5000);
10012 }
10013 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
10014 device_xname(sc->sc_dev), ext_ctrl);
10015 return 1;
10016 }
10017
10018 static void
10019 wm_put_swfwhw_semaphore(struct wm_softc *sc)
10020 {
10021 uint32_t ext_ctrl;
10022 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10023 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10024 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10025 }
10026
10027 static int
10028 wm_get_hw_semaphore_82573(struct wm_softc *sc)
10029 {
10030 int i = 0;
10031 uint32_t reg;
10032
10033 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10034 do {
10035 CSR_WRITE(sc, WMREG_EXTCNFCTR,
10036 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
10037 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10038 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
10039 break;
10040 delay(2*1000);
10041 i++;
10042 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
10043
10044 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
10045 wm_put_hw_semaphore_82573(sc);
10046 log(LOG_ERR, "%s: Driver can't access the PHY\n",
10047 device_xname(sc->sc_dev));
10048 return -1;
10049 }
10050
10051 return 0;
10052 }
10053
10054 static void
10055 wm_put_hw_semaphore_82573(struct wm_softc *sc)
10056 {
10057 uint32_t reg;
10058
10059 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10060 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10061 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10062 }
10063
10064 /*
10065 * Management mode and power management related subroutines.
10066 * BMC, AMT, suspend/resume and EEE.
10067 */
10068
10069 static int
10070 wm_check_mng_mode(struct wm_softc *sc)
10071 {
10072 int rv;
10073
10074 switch (sc->sc_type) {
10075 case WM_T_ICH8:
10076 case WM_T_ICH9:
10077 case WM_T_ICH10:
10078 case WM_T_PCH:
10079 case WM_T_PCH2:
10080 case WM_T_PCH_LPT:
10081 rv = wm_check_mng_mode_ich8lan(sc);
10082 break;
10083 case WM_T_82574:
10084 case WM_T_82583:
10085 rv = wm_check_mng_mode_82574(sc);
10086 break;
10087 case WM_T_82571:
10088 case WM_T_82572:
10089 case WM_T_82573:
10090 case WM_T_80003:
10091 rv = wm_check_mng_mode_generic(sc);
10092 break;
10093 default:
10094 /* nothing to do */
10095 rv = 0;
10096 break;
10097 }
10098
10099 return rv;
10100 }
10101
10102 static int
10103 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
10104 {
10105 uint32_t fwsm;
10106
10107 fwsm = CSR_READ(sc, WMREG_FWSM);
10108
10109 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
10110 return 1;
10111
10112 return 0;
10113 }
10114
10115 static int
10116 wm_check_mng_mode_82574(struct wm_softc *sc)
10117 {
10118 uint16_t data;
10119
10120 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
10121
10122 if ((data & NVM_CFG2_MNGM_MASK) != 0)
10123 return 1;
10124
10125 return 0;
10126 }
10127
10128 static int
10129 wm_check_mng_mode_generic(struct wm_softc *sc)
10130 {
10131 uint32_t fwsm;
10132
10133 fwsm = CSR_READ(sc, WMREG_FWSM);
10134
10135 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
10136 return 1;
10137
10138 return 0;
10139 }
10140
10141 static int
10142 wm_enable_mng_pass_thru(struct wm_softc *sc)
10143 {
10144 uint32_t manc, fwsm, factps;
10145
10146 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
10147 return 0;
10148
10149 manc = CSR_READ(sc, WMREG_MANC);
10150
10151 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
10152 device_xname(sc->sc_dev), manc));
10153 if ((manc & MANC_RECV_TCO_EN) == 0)
10154 return 0;
10155
10156 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
10157 fwsm = CSR_READ(sc, WMREG_FWSM);
10158 factps = CSR_READ(sc, WMREG_FACTPS);
10159 if (((factps & FACTPS_MNGCG) == 0)
10160 && ((fwsm & FWSM_MODE_MASK)
10161 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
10162 return 1;
10163 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
10164 uint16_t data;
10165
10166 factps = CSR_READ(sc, WMREG_FACTPS);
10167 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
10168 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
10169 device_xname(sc->sc_dev), factps, data));
10170 if (((factps & FACTPS_MNGCG) == 0)
10171 && ((data & NVM_CFG2_MNGM_MASK)
10172 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
10173 return 1;
10174 } else if (((manc & MANC_SMBUS_EN) != 0)
10175 && ((manc & MANC_ASF_EN) == 0))
10176 return 1;
10177
10178 return 0;
10179 }
10180
10181 static int
10182 wm_check_reset_block(struct wm_softc *sc)
10183 {
10184 uint32_t reg;
10185
10186 switch (sc->sc_type) {
10187 case WM_T_ICH8:
10188 case WM_T_ICH9:
10189 case WM_T_ICH10:
10190 case WM_T_PCH:
10191 case WM_T_PCH2:
10192 case WM_T_PCH_LPT:
10193 reg = CSR_READ(sc, WMREG_FWSM);
10194 if ((reg & FWSM_RSPCIPHY) != 0)
10195 return 0;
10196 else
10197 return -1;
10198 break;
10199 case WM_T_82571:
10200 case WM_T_82572:
10201 case WM_T_82573:
10202 case WM_T_82574:
10203 case WM_T_82583:
10204 case WM_T_80003:
10205 reg = CSR_READ(sc, WMREG_MANC);
10206 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
10207 return -1;
10208 else
10209 return 0;
10210 break;
10211 default:
10212 /* no problem */
10213 break;
10214 }
10215
10216 return 0;
10217 }
10218
10219 static void
10220 wm_get_hw_control(struct wm_softc *sc)
10221 {
10222 uint32_t reg;
10223
10224 switch (sc->sc_type) {
10225 case WM_T_82573:
10226 reg = CSR_READ(sc, WMREG_SWSM);
10227 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
10228 break;
10229 case WM_T_82571:
10230 case WM_T_82572:
10231 case WM_T_82574:
10232 case WM_T_82583:
10233 case WM_T_80003:
10234 case WM_T_ICH8:
10235 case WM_T_ICH9:
10236 case WM_T_ICH10:
10237 case WM_T_PCH:
10238 case WM_T_PCH2:
10239 case WM_T_PCH_LPT:
10240 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10241 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
10242 break;
10243 default:
10244 break;
10245 }
10246 }
10247
10248 static void
10249 wm_release_hw_control(struct wm_softc *sc)
10250 {
10251 uint32_t reg;
10252
10253 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
10254 return;
10255
10256 if (sc->sc_type == WM_T_82573) {
10257 reg = CSR_READ(sc, WMREG_SWSM);
10258 reg &= ~SWSM_DRV_LOAD;
10259 CSR_WRITE(sc, WMREG_SWSM, reg);
10260 } else {
10261 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10262 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
10263 }
10264 }
10265
10266 static void
10267 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
10268 {
10269 uint32_t reg;
10270
10271 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10272
10273 if (on != 0)
10274 reg |= EXTCNFCTR_GATE_PHY_CFG;
10275 else
10276 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
10277
10278 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10279 }
10280
10281 static void
10282 wm_smbustopci(struct wm_softc *sc)
10283 {
10284 uint32_t fwsm;
10285
10286 fwsm = CSR_READ(sc, WMREG_FWSM);
10287 if (((fwsm & FWSM_FW_VALID) == 0)
10288 && ((wm_check_reset_block(sc) == 0))) {
10289 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
10290 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
10291 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10292 CSR_WRITE_FLUSH(sc);
10293 delay(10);
10294 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
10295 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10296 CSR_WRITE_FLUSH(sc);
10297 delay(50*1000);
10298
10299 /*
10300 * Gate automatic PHY configuration by hardware on non-managed
10301 * 82579
10302 */
10303 if (sc->sc_type == WM_T_PCH2)
10304 wm_gate_hw_phy_config_ich8lan(sc, 1);
10305 }
10306 }
10307
10308 static void
10309 wm_init_manageability(struct wm_softc *sc)
10310 {
10311
10312 if (sc->sc_flags & WM_F_HAS_MANAGE) {
10313 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
10314 uint32_t manc = CSR_READ(sc, WMREG_MANC);
10315
10316 /* Disable hardware interception of ARP */
10317 manc &= ~MANC_ARP_EN;
10318
10319 /* Enable receiving management packets to the host */
10320 if (sc->sc_type >= WM_T_82571) {
10321 manc |= MANC_EN_MNG2HOST;
10322 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
10323 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
10324 }
10325
10326 CSR_WRITE(sc, WMREG_MANC, manc);
10327 }
10328 }
10329
10330 static void
10331 wm_release_manageability(struct wm_softc *sc)
10332 {
10333
10334 if (sc->sc_flags & WM_F_HAS_MANAGE) {
10335 uint32_t manc = CSR_READ(sc, WMREG_MANC);
10336
10337 manc |= MANC_ARP_EN;
10338 if (sc->sc_type >= WM_T_82571)
10339 manc &= ~MANC_EN_MNG2HOST;
10340
10341 CSR_WRITE(sc, WMREG_MANC, manc);
10342 }
10343 }
10344
10345 static void
10346 wm_get_wakeup(struct wm_softc *sc)
10347 {
10348
10349 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
10350 switch (sc->sc_type) {
10351 case WM_T_82573:
10352 case WM_T_82583:
10353 sc->sc_flags |= WM_F_HAS_AMT;
10354 /* FALLTHROUGH */
10355 case WM_T_80003:
10356 case WM_T_82541:
10357 case WM_T_82547:
10358 case WM_T_82571:
10359 case WM_T_82572:
10360 case WM_T_82574:
10361 case WM_T_82575:
10362 case WM_T_82576:
10363 case WM_T_82580:
10364 case WM_T_I350:
10365 case WM_T_I354:
10366 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
10367 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
10368 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
10369 break;
10370 case WM_T_ICH8:
10371 case WM_T_ICH9:
10372 case WM_T_ICH10:
10373 case WM_T_PCH:
10374 case WM_T_PCH2:
10375 case WM_T_PCH_LPT:
10376 sc->sc_flags |= WM_F_HAS_AMT;
10377 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
10378 break;
10379 default:
10380 break;
10381 }
10382
10383 /* 1: HAS_MANAGE */
10384 if (wm_enable_mng_pass_thru(sc) != 0)
10385 sc->sc_flags |= WM_F_HAS_MANAGE;
10386
10387 #ifdef WM_DEBUG
10388 printf("\n");
10389 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
10390 printf("HAS_AMT,");
10391 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
10392 printf("ARC_SUBSYS_VALID,");
10393 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
10394 printf("ASF_FIRMWARE_PRES,");
10395 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
10396 printf("HAS_MANAGE,");
10397 printf("\n");
10398 #endif
10399 /*
10400 * Note that the WOL flags are set after the EEPROM stuff has
10401 * been reset.
10402 */
10403 }
10404
10405 #ifdef WM_WOL
10406 /* WOL in the newer chipset interfaces (pchlan) */
10407 static void
10408 wm_enable_phy_wakeup(struct wm_softc *sc)
10409 {
10410 #if 0
10411 uint16_t preg;
10412
10413 /* Copy MAC RARs to PHY RARs */
10414
10415 /* Copy MAC MTA to PHY MTA */
10416
10417 /* Configure PHY Rx Control register */
10418
10419 /* Enable PHY wakeup in MAC register */
10420
10421 /* Configure and enable PHY wakeup in PHY registers */
10422
10423 /* Activate PHY wakeup */
10424
10425 /* XXX */
10426 #endif
10427 }
10428
10429 /* Power down workaround on D3 */
10430 static void
10431 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
10432 {
10433 uint32_t reg;
10434 int i;
10435
10436 for (i = 0; i < 2; i++) {
10437 /* Disable link */
10438 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10439 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
10440 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10441
10442 /*
10443 * Call gig speed drop workaround on Gig disable before
10444 * accessing any PHY registers
10445 */
10446 if (sc->sc_type == WM_T_ICH8)
10447 wm_gig_downshift_workaround_ich8lan(sc);
10448
10449 /* Write VR power-down enable */
10450 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
10451 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
10452 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
10453 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
10454
10455 /* Read it back and test */
10456 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
10457 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
10458 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
10459 break;
10460
10461 /* Issue PHY reset and repeat at most one more time */
10462 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10463 }
10464 }
10465
10466 static void
10467 wm_enable_wakeup(struct wm_softc *sc)
10468 {
10469 uint32_t reg, pmreg;
10470 pcireg_t pmode;
10471
10472 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
10473 &pmreg, NULL) == 0)
10474 return;
10475
10476 /* Advertise the wakeup capability */
10477 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
10478 | CTRL_SWDPIN(3));
10479 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
10480
10481 /* ICH workaround */
10482 switch (sc->sc_type) {
10483 case WM_T_ICH8:
10484 case WM_T_ICH9:
10485 case WM_T_ICH10:
10486 case WM_T_PCH:
10487 case WM_T_PCH2:
10488 case WM_T_PCH_LPT:
10489 /* Disable gig during WOL */
10490 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10491 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
10492 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10493 if (sc->sc_type == WM_T_PCH)
10494 wm_gmii_reset(sc);
10495
10496 /* Power down workaround */
10497 if (sc->sc_phytype == WMPHY_82577) {
10498 struct mii_softc *child;
10499
10500 /* Assume that the PHY is copper */
10501 child = LIST_FIRST(&sc->sc_mii.mii_phys);
10502 if (child->mii_mpd_rev <= 2)
10503 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
10504 (768 << 5) | 25, 0x0444); /* magic num */
10505 }
10506 break;
10507 default:
10508 break;
10509 }
10510
10511 /* Keep the laser running on fiber adapters */
10512 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
10513 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
10514 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10515 reg |= CTRL_EXT_SWDPIN(3);
10516 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10517 }
10518
10519 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
10520 #if 0 /* for the multicast packet */
10521 reg |= WUFC_MC;
10522 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
10523 #endif
10524
10525 if (sc->sc_type == WM_T_PCH) {
10526 wm_enable_phy_wakeup(sc);
10527 } else {
10528 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
10529 CSR_WRITE(sc, WMREG_WUFC, reg);
10530 }
10531
10532 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10533 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10534 || (sc->sc_type == WM_T_PCH2))
10535 && (sc->sc_phytype == WMPHY_IGP_3))
10536 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
10537
10538 /* Request PME */
10539 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
10540 #if 0
10541 /* Disable WOL */
10542 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
10543 #else
10544 /* For WOL */
10545 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
10546 #endif
10547 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
10548 }
10549 #endif /* WM_WOL */
10550
10551 /* EEE */
10552
10553 static void
10554 wm_set_eee_i350(struct wm_softc *sc)
10555 {
10556 uint32_t ipcnfg, eeer;
10557
10558 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
10559 eeer = CSR_READ(sc, WMREG_EEER);
10560
10561 if ((sc->sc_flags & WM_F_EEE) != 0) {
10562 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
10563 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
10564 | EEER_LPI_FC);
10565 } else {
10566 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
10567 ipcnfg &= ~IPCNFG_10BASE_TE;
10568 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
10569 | EEER_LPI_FC);
10570 }
10571
10572 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
10573 CSR_WRITE(sc, WMREG_EEER, eeer);
10574 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
10575 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
10576 }
10577
10578 /*
10579 * Workarounds (mainly PHY related).
10580 * Basically, PHY's workarounds are in the PHY drivers.
10581 */
10582
10583 /* Work-around for 82566 Kumeran PCS lock loss */
10584 static void
10585 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
10586 {
10587 int miistatus, active, i;
10588 int reg;
10589
10590 miistatus = sc->sc_mii.mii_media_status;
10591
10592 /* If the link is not up, do nothing */
10593 if ((miistatus & IFM_ACTIVE) == 0)
10594 return;
10595
10596 active = sc->sc_mii.mii_media_active;
10597
10598 /* Nothing to do if the link is other than 1Gbps */
10599 if (IFM_SUBTYPE(active) != IFM_1000_T)
10600 return;
10601
10602 for (i = 0; i < 10; i++) {
10603 /* read twice */
10604 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
10605 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
10606 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) != 0)
10607 goto out; /* GOOD! */
10608
10609 /* Reset the PHY */
10610 wm_gmii_reset(sc);
10611 delay(5*1000);
10612 }
10613
10614 /* Disable GigE link negotiation */
10615 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10616 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
10617 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10618
10619 /*
10620 * Call gig speed drop workaround on Gig disable before accessing
10621 * any PHY registers.
10622 */
10623 wm_gig_downshift_workaround_ich8lan(sc);
10624
10625 out:
10626 return;
10627 }
10628
10629 /* WOL from S5 stops working */
10630 static void
10631 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
10632 {
10633 uint16_t kmrn_reg;
10634
10635 /* Only for igp3 */
10636 if (sc->sc_phytype == WMPHY_IGP_3) {
10637 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
10638 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
10639 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
10640 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
10641 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
10642 }
10643 }
10644
10645 /*
10646 * Workaround for pch's PHYs
10647 * XXX should be moved to new PHY driver?
10648 */
10649 static void
10650 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
10651 {
10652 if (sc->sc_phytype == WMPHY_82577)
10653 wm_set_mdio_slow_mode_hv(sc);
10654
10655 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
10656
10657 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
10658
10659 /* 82578 */
10660 if (sc->sc_phytype == WMPHY_82578) {
10661 /* PCH rev. < 3 */
10662 if (sc->sc_rev < 3) {
10663 /* XXX 6 bit shift? Why? Is it page2? */
10664 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
10665 0x66c0);
10666 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
10667 0xffff);
10668 }
10669
10670 /* XXX phy rev. < 2 */
10671 }
10672
10673 /* Select page 0 */
10674
10675 /* XXX acquire semaphore */
10676 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
10677 /* XXX release semaphore */
10678
10679 /*
10680 * Configure the K1 Si workaround during PHY reset, assuming there
10681 * is link, so that K1 is disabled when the link runs at 1Gbps.
10682 */
10683 wm_k1_gig_workaround_hv(sc, 1);
10684 }
10685
10686 static void
10687 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
10688 {
10689
10690 wm_set_mdio_slow_mode_hv(sc);
10691 }
10692
10693 static void
10694 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
10695 {
10696 int k1_enable = sc->sc_nvm_k1_enabled;
10697
10698 /* XXX acquire semaphore */
10699
10700 if (link) {
10701 k1_enable = 0;
10702
10703 /* Link stall fix for link up */
10704 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
10705 } else {
10706 /* Link stall fix for link down */
10707 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
10708 }
10709
10710 wm_configure_k1_ich8lan(sc, k1_enable);
10711
10712 /* XXX release semaphore */
10713 }
10714
10715 static void
10716 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
10717 {
10718 uint32_t reg;
10719
10720 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
10721 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
10722 reg | HV_KMRN_MDIO_SLOW);
10723 }
10724
10725 static void
10726 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
10727 {
10728 uint32_t ctrl, ctrl_ext, tmp;
10729 uint16_t kmrn_reg;
10730
10731 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
10732
10733 if (k1_enable)
10734 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
10735 else
10736 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
10737
10738 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
10739
10740 delay(20);
10741
10742 ctrl = CSR_READ(sc, WMREG_CTRL);
10743 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10744
10745 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
10746 tmp |= CTRL_FRCSPD;
10747
10748 CSR_WRITE(sc, WMREG_CTRL, tmp);
10749 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
10750 CSR_WRITE_FLUSH(sc);
10751 delay(20);
10752
10753 CSR_WRITE(sc, WMREG_CTRL, ctrl);
10754 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10755 CSR_WRITE_FLUSH(sc);
10756 delay(20);
10757 }
10758
10759 /* Special case: the 82575 needs to do manual init ... */
10760 static void
10761 wm_reset_init_script_82575(struct wm_softc *sc)
10762 {
10763 /*
10764 * Remark: this is untested code - we have no board without EEPROM.
10765 * Same setup as mentioned in the FreeBSD driver for the i82575.
10766 */
10767
10768 /* SerDes configuration via SERDESCTRL */
10769 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
10770 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
10771 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
10772 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
10773
10774 /* CCM configuration via CCMCTL register */
10775 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
10776 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
10777
10778 /* PCIe lanes configuration */
10779 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
10780 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
10781 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
10782 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
10783
10784 /* PCIe PLL Configuration */
10785 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
10786 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
10787 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
10788 }
10789
10790 static void
10791 wm_reset_mdicnfg_82580(struct wm_softc *sc)
10792 {
10793 uint32_t reg;
10794 uint16_t nvmword;
10795 int rv;
10796
10797 if ((sc->sc_flags & WM_F_SGMII) == 0)
10798 return;
10799
10800 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
10801 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
10802 if (rv != 0) {
10803 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
10804 __func__);
10805 return;
10806 }
10807
10808 reg = CSR_READ(sc, WMREG_MDICNFG);
10809 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
10810 reg |= MDICNFG_DEST;
10811 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
10812 reg |= MDICNFG_COM_MDIO;
10813 CSR_WRITE(sc, WMREG_MDICNFG, reg);
10814 }
10815
10816 /*
10817 * I210 Errata 25 and I211 Errata 10
10818 * Slow System Clock.
10819 */
10820 static void
10821 wm_pll_workaround_i210(struct wm_softc *sc)
10822 {
10823 uint32_t mdicnfg, wuc;
10824 uint32_t reg;
10825 pcireg_t pcireg;
10826 uint32_t pmreg;
10827 uint16_t nvmword, tmp_nvmword;
10828 int phyval;
10829 bool wa_done = false;
10830 int i;
10831
10832 /* Save WUC and MDICNFG registers */
10833 wuc = CSR_READ(sc, WMREG_WUC);
10834 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
10835
10836 reg = mdicnfg & ~MDICNFG_DEST;
10837 CSR_WRITE(sc, WMREG_MDICNFG, reg);
10838
10839 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
10840 nvmword = INVM_DEFAULT_AL;
10841 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
10842
10843 /* Get Power Management cap offset */
10844 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
10845 &pmreg, NULL) == 0)
10846 return;
10847 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
10848 phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
10849 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
10850
10851 if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
10852 break; /* OK */
10853 }
10854
10855 wa_done = true;
10856 /* Directly reset the internal PHY */
10857 reg = CSR_READ(sc, WMREG_CTRL);
10858 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
10859
10860 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10861 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
10862 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10863
10864 CSR_WRITE(sc, WMREG_WUC, 0);
10865 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
10866 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
10867
10868 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
10869 pmreg + PCI_PMCSR);
10870 pcireg |= PCI_PMCSR_STATE_D3;
10871 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
10872 pmreg + PCI_PMCSR, pcireg);
10873 delay(1000);
10874 pcireg &= ~PCI_PMCSR_STATE_D3;
10875 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
10876 pmreg + PCI_PMCSR, pcireg);
10877
10878 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
10879 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
10880
10881 /* Restore WUC register */
10882 CSR_WRITE(sc, WMREG_WUC, wuc);
10883 }
10884
10885 /* Restore MDICNFG setting */
10886 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
10887 if (wa_done)
10888 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
10889 }
10890