/*	$NetBSD: if_wm.c,v 1.336 2015/06/26 06:57:17 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.336 2015/06/26 06:57:17 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>		/* XXX for struct ip */
#include <netinet/in_systm.h>	/* XXX for struct ip */
#include <netinet/ip.h>		/* XXX for struct ip */
#include <netinet/ip6.h>	/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>	/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define WM_DEBUG_LINK	0x01
#define WM_DEBUG_TX	0x02
#define WM_DEBUG_RX	0x04
#define WM_DEBUG_GMII	0x08
#define WM_DEBUG_MANAGE	0x10
#define WM_DEBUG_NVM	0x20
int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

#ifdef __HAVE_PCI_MSI_MSIX
#if 0 /* off by default */
#define WM_MSI_MSIX	1
#endif
#endif

/*
 * This device driver divides interrupts into TX, RX and link state.
 * The MSI-X vector indexes are defined below.
 */
#define WM_NINTR		3
#define WM_TX_INTR_INDEX	0
#define WM_RX_INTR_INDEX	1
#define WM_LINK_INTR_INDEX	2
#define WM_MAX_NINTR		WM_NINTR

/*
 * This device driver sets affinity to each interrupt as below (round-robin).
 * If the number of CPUs is less than the number of interrupts, this driver
 * uses the same CPU for multiple interrupts.
 */
#define WM_TX_INTR_CPUID	0
#define WM_RX_INTR_CPUID	1
#define WM_LINK_INTR_CPUID	2

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define WM_NTXSEGS		256
#define WM_IFQUEUELEN		256
#define WM_TXQUEUELEN_MAX	64
#define WM_TXQUEUELEN_MAX_82547	16
#define WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define WM_NTXDESC_82542	256
#define WM_NTXDESC_82544	4096
#define WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
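
/*
 * Illustrative note (not part of the original source): WM_NTXDESC(sc) is
 * always a power of two, so the macros above wrap ring indexes with a
 * mask instead of a modulo.  With WM_NTXDESC(sc) == 4096 the mask is
 * 0xfff, and
 *
 *	WM_NEXTTX(sc, 4095) == ((4095 + 1) & 0xfff) == 0
 *
 * i.e. the index wraps back to the start of the ring with a single AND.
 */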

#define WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC		256
#define WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
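
/*
 * Illustrative arithmetic behind the comment above (assuming a 9000-byte
 * jumbo MTU, which is not stated in this file): a ~9018-byte frame needs
 * ceil(9018 / MCLBYTES) == ceil(9018 / 2048) == 5 buffers, and
 * 256 descriptors / 5 buffers per packet ~= 51 chains, hence the round
 * figure of "room for 50" jumbo packets.
 */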

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
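
/*
 * A minimal sketch of what the offset macros evaluate to (illustrative
 * only, modulo structure padding): the Rx descriptors sit at the start
 * of the clump and the Tx descriptors follow, so
 *
 *	WM_CDRXOFF(0) == 0
 *	WM_CDRXOFF(1) == sizeof(wiseman_rxdesc_t)
 *	WM_CDTXOFF(0) == WM_NRXDESC * sizeof(wiseman_rxdesc_t)
 *
 * One clump, one DMA segment, simple pointer arithmetic for both rings.
 */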

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define WM_MEDIATYPE_UNKNOWN	0x00
#define WM_MEDIATYPE_FIBER	0x01
#define WM_MEDIATYPE_COPPER	0x02
#define WM_MEDIATYPE_SERDES	0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segments */
	size_t sc_cd_size;		/* control data size */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
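
/*
 * A minimal usage sketch for the locking macros (illustrative; the
 * function name is hypothetical, not part of this driver).  Because the
 * macros test the mutex pointer first, the same code runs both with and
 * without WM_MPSAFE -- in the !MPSAFE case the pointers are NULL and the
 * macros compile down to nothing:
 *
 *	static void
 *	wm_example_reset(struct wm_softc *sc)	// hypothetical
 *	{
 *		WM_BOTH_LOCK(sc);
 *		KASSERT(WM_BOTH_LOCKED(sc));
 *		// ... touch sc_tx* and sc_rx* state ...
 *		WM_BOTH_UNLOCK(sc);
 *	}
 */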

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
#define WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define WM_EVCNT_INCR(ev)	/* nothing */
#define WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
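
/*
 * Worked example for the _LO/_HI split (illustrative; the address is
 * made up): the chip takes ring base addresses as two 32-bit register
 * writes, so for a 64-bit bus address of 0x123456000
 *
 *	WM_CDTXADDR_LO(sc, 0) == 0x23456000
 *	WM_CDTXADDR_HI(sc, 0) == 0x00000001
 *
 * and on platforms with a 32-bit bus_addr_t the high half is always 0.
 */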

#define WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
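
/*
 * A worked example of the wrap handling above (illustrative only): with
 * WM_NTXDESC(sc) == 4096, WM_CDTXSYNC(sc, 4094, 4, ops) performs two
 * bus_dmamap_sync() calls, one covering descriptors 4094-4095 and one
 * covering descriptors 0-1, so a packet whose descriptors straddle the
 * end of the ring is synced in both pieces.
 */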

#define WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
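
/*
 * The numbers behind the "scoot" above (illustrative only): with
 * sc_align_tweak == 2, the 14-byte Ethernet header starts at offset 2,
 * so the IP header lands at offset 16, a 4-byte boundary.  The price is
 * that only (2048 - 2) bytes of the buffer remain usable, which is why
 * the tweak must be 0 whenever received frames may need the full 2K.
 */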

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *);
static void	wm_rxeof(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
#ifdef WM_MSI_MSIX
static int	wm_txintr_msix(void *);
static int	wm_rxintr_msix(void *);
static int	wm_linkintr_msix(void *);
#endif

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	wm_chip_type wmp_type;
	uint32_t wmp_flags;
#define WMP_F_UNKNOWN	WM_MEDIATYPE_UNKNOWN
#define WMP_F_FIBER	WM_MEDIATYPE_FIBER
#define WMP_F_COPPER	WM_MEDIATYPE_COPPER
#define WMP_F_SERDES	WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571, WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571, WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571, WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571, WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571, WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT, WMP_F_COPPER },
	{ 0, 0,
	  NULL,
	  0, 0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}
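
/*
 * A minimal caller sketch for the helper above (illustrative; WMREG_SCTL
 * as the target register is an assumption, not taken from this file).
 * The helper busy-waits up to SCTL_CTL_POLL_TIMEOUT * 5us for the READY
 * bit, so a caller simply does
 *
 *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, off, data);
 *
 * and relies on the warning path if the chip never reports ready.
 */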

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
#ifndef WM_MSI_MSIX
	pci_intr_handle_t ih;
#else
	bool intr_established = false;
#endif
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, nvmword;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Disable MSI for Errata:
	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
	 *
	 *  82544: Errata 25
	 *  82540: Errata  6 (easy to reproduce device timeout)
	 *  82545: Errata  4 (easy to reproduce device timeout)
	 *  82546: Errata 26 (easy to reproduce device timeout)
	 *  82541: Errata  7 (easy to reproduce device timeout)
	 */
	if (sc->sc_type <= WM_T_82541_2)
		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
1534 /*
1535 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1536 * (and newer?) chips have no I/O BAR; that's fine,
1537 * because those chips don't have this bug to work
1538 * around.
1539 *
1540 * The i8254x apparently doesn't respond when the
1541 * I/O BAR is 0, which looks as if it hasn't been
1542 * configured.
1543 */
1544 preg = pci_conf_read(pc, pa->pa_tag, i);
1545 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1546 aprint_error_dev(sc->sc_dev,
1547 "WARNING: I/O BAR at zero.\n");
1548 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1549 0, &sc->sc_iot, &sc->sc_ioh,
1550 NULL, &sc->sc_ios) == 0) {
1551 sc->sc_flags |= WM_F_IOH_VALID;
1552 } else {
1553 aprint_error_dev(sc->sc_dev,
1554 "WARNING: unable to map I/O space\n");
1555 }
1556 }
1557
1558 }
1559
1560 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1561 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1562 preg |= PCI_COMMAND_MASTER_ENABLE;
1563 if (sc->sc_type < WM_T_82542_2_1)
1564 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1565 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1566
1567 /* power up chip */
1568 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1569 NULL)) && error != EOPNOTSUPP) {
1570 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1571 return;
1572 }
1573
1574 #ifndef WM_MSI_MSIX
1575 /*
1576 * Map and establish our interrupt.
1577 */
1578 if (pci_intr_map(pa, &ih)) {
1579 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1580 return;
1581 }
1582 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
1583 #ifdef WM_MPSAFE
1584 pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
1585 #endif
1586 sc->sc_ihs[0] = pci_intr_establish(pc, ih, IPL_NET, wm_intr_legacy, sc);
1587 if (sc->sc_ihs[0] == NULL) {
1588 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1589 if (intrstr != NULL)
1590 aprint_error(" at %s", intrstr);
1591 aprint_error("\n");
1592 return;
1593 }
1594 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1595 sc->sc_nintrs = 1;
1596 #else /* WM_MSI_MSIX */
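/*
 * Try MSI-X first with three vectors (TX, RX and link state
 * change); fall back to a single MSI vector, and finally to
 * INTx.
 */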
1597 if (pci_msix_alloc_exact(pa, &sc->sc_intrs, WM_NINTR) == 0) {
1598 /* 1st, try to use MSI-X */
1599 void *vih;
1600 kcpuset_t *affinity;
1601
1602 kcpuset_create(&affinity, false);
1603
1604 /*
1605 * for TX
1606 */
1607 intrstr = pci_intr_string(pc, sc->sc_intrs[WM_TX_INTR_INDEX],
1608 intrbuf, sizeof(intrbuf));
1609 #ifdef WM_MPSAFE
1610 pci_intr_setattr(pc, &sc->sc_intrs[WM_TX_INTR_INDEX],
1611 PCI_INTR_MPSAFE, true);
1612 #endif
1613 vih = pci_intr_establish(pc, sc->sc_intrs[WM_TX_INTR_INDEX],
1614 IPL_NET, wm_txintr_msix, sc);
1615 if (vih == NULL) {
1616 aprint_error_dev(sc->sc_dev,
1617 "unable to establish MSI-X(for TX)%s%s\n",
1618 intrstr ? " at " : "", intrstr ? intrstr : "");
1619 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1620 WM_NINTR);
1621 goto msi;
1622 }
1623 kcpuset_zero(affinity);
1624 /* Round-robin affinity */
1625 kcpuset_set(affinity, WM_TX_INTR_CPUID % ncpu);
1626 error = pci_intr_distribute(vih, affinity, NULL);
1627 if (error == 0) {
1628 aprint_normal_dev(sc->sc_dev,
1629 "for TX interrupting at %s affinity to %u\n",
1630 intrstr, WM_TX_INTR_CPUID % ncpu);
1631 } else {
1632 aprint_normal_dev(sc->sc_dev,
1633 "for TX interrupting at %s\n",
1634 intrstr);
1635 }
1636 sc->sc_ihs[WM_TX_INTR_INDEX] = vih;
1637
1638 /*
1639 * for RX
1640 */
1641 intrstr = pci_intr_string(pc, sc->sc_intrs[WM_RX_INTR_INDEX],
1642 intrbuf, sizeof(intrbuf));
1643 #ifdef WM_MPSAFE
1644 pci_intr_setattr(pc, &sc->sc_intrs[WM_RX_INTR_INDEX],
1645 PCI_INTR_MPSAFE, true);
1646 #endif
1647 vih = pci_intr_establish(pc, sc->sc_intrs[WM_RX_INTR_INDEX],
1648 IPL_NET, wm_rxintr_msix, sc);
1649 if (vih == NULL) {
1650 aprint_error_dev(sc->sc_dev,
1651 "unable to establish MSI-X(for RX)%s%s\n",
1652 intrstr ? " at " : "", intrstr ? intrstr : "");
1653 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1654 WM_NINTR);
1655 goto msi;
1656 }
1657 kcpuset_zero(affinity);
1658 kcpuset_set(affinity, WM_RX_INTR_CPUID % ncpu);
1659 error = pci_intr_distribute(vih, affinity, NULL);
1660 if (error == 0) {
1661 aprint_normal_dev(sc->sc_dev,
1662 "for RX interrupting at %s affinity to %u\n",
1663 intrstr, WM_RX_INTR_CPUID % ncpu);
1664 } else {
1665 aprint_normal_dev(sc->sc_dev,
1666 "for RX interrupting at %s\n",
1667 intrstr);
1668 }
1669 sc->sc_ihs[WM_RX_INTR_INDEX] = vih;
1670
1671 /*
1672 * for link state changing
1673 */
1674 intrstr = pci_intr_string(pc, sc->sc_intrs[WM_LINK_INTR_INDEX],
1675 intrbuf, sizeof(intrbuf));
1676 #ifdef WM_MPSAFE
1677 pci_intr_setattr(pc, &sc->sc_intrs[WM_LINK_INTR_INDEX],
1678 PCI_INTR_MPSAFE, true);
1679 #endif
1680 vih = pci_intr_establish(pc, sc->sc_intrs[WM_LINK_INTR_INDEX],
1681 IPL_NET, wm_linkintr_msix, sc);
1682 if (vih == NULL) {
1683 aprint_error_dev(sc->sc_dev,
1684 "unable to establish MSI-X(for LINK)%s%s\n",
1685 intrstr ? " at " : "", intrstr ? intrstr : "");
1686 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1687 WM_NINTR);
1688 goto msi;
1689 }
1690 kcpuset_zero(affinity);
1691 kcpuset_set(affinity, WM_LINK_INTR_CPUID % ncpu);
1692 error = pci_intr_distribute(vih, affinity, NULL);
1693 if (error == 0) {
1694 aprint_normal_dev(sc->sc_dev,
1695 "for LINK interrupting at %s affinity to %u\n",
1696 intrstr, WM_LINK_INTR_CPUID % ncpu);
1697 } else {
1698 aprint_normal_dev(sc->sc_dev,
1699 "for LINK interrupting at %s\n",
1700 intrstr);
1701 }
1702 sc->sc_ihs[WM_LINK_INTR_INDEX] = vih;
1703
1704 sc->sc_nintrs = WM_NINTR;
1705 kcpuset_destroy(affinity);
1706 intr_established = true;
1707 }
1708
1709 msi:
1710 if ((intr_established == false)
1711 && (pci_msi_alloc_exact(pa, &sc->sc_intrs, 1) == 0)) {
1712 /* 2nd, try to use MSI */
1713 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
1714 sizeof(intrbuf));
1715 #ifdef WM_MPSAFE
1716 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
1717 #endif
1718 sc->sc_ihs[0] = pci_intr_establish(pc, sc->sc_intrs[0],
1719 IPL_NET, wm_intr_legacy, sc);
1720 if (sc->sc_ihs[0] == NULL) {
1721 aprint_error_dev(sc->sc_dev, "unable to establish MSI\n");
1722 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1723 1);
1724 goto intx;
1725 }
1726 aprint_normal_dev(sc->sc_dev, "MSI at %s\n", intrstr);
1727
1728 sc->sc_nintrs = 1;
1729 intr_established = true;
1730 }
1731
1732 intx:
1733 if ((intr_established == false)
1734 && (pci_intx_alloc(pa, &sc->sc_intrs) == 0)) {
1735 /* Last, try to use INTx */
1736 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
1737 sizeof(intrbuf));
1738 #ifdef WM_MPSAFE
1739 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
1740 #endif
1741 sc->sc_ihs[0] = pci_intr_establish(pc, sc->sc_intrs[0],
1742 IPL_NET, wm_intr_legacy, sc);
1743 if (sc->sc_ihs[0] == NULL) {
1744 aprint_error_dev(sc->sc_dev, "unable to establish MSI\n");
1745 pci_intr_release(sc->sc_pc, sc->sc_intrs, 1);
1746 goto int_failed;
1747 }
1748 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1749
1750 sc->sc_nintrs = 1;
1751 intr_established = true;
1752 }
1753
1754 int_failed:
1755 if (intr_established == false) {
1756 aprint_error_dev(sc->sc_dev, "failed to allocate interrput\n");
1757 return;
1758 }
1759 #endif /* WM_MSI_MSIX */
1760
1761 /*
1762 * Check the function ID (unit number of the chip).
1763 */
1764 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1765 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1766 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1767 || (sc->sc_type == WM_T_82580)
1768 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1769 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1770 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1771 else
1772 sc->sc_funcid = 0;
1773
1774 /*
1775 * Determine a few things about the bus we're connected to.
1776 */
1777 if (sc->sc_type < WM_T_82543) {
1778 /* We don't really know the bus characteristics here. */
1779 sc->sc_bus_speed = 33;
1780 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1781 /*
1782 * CSA (Communication Streaming Architecture) is about as fast
1783 * as a 32-bit 66MHz PCI bus.
1784 */
1785 sc->sc_flags |= WM_F_CSA;
1786 sc->sc_bus_speed = 66;
1787 aprint_verbose_dev(sc->sc_dev,
1788 "Communication Streaming Architecture\n");
1789 if (sc->sc_type == WM_T_82547) {
1790 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1791 callout_setfunc(&sc->sc_txfifo_ch,
1792 wm_82547_txfifo_stall, sc);
1793 aprint_verbose_dev(sc->sc_dev,
1794 "using 82547 Tx FIFO stall work-around\n");
1795 }
1796 } else if (sc->sc_type >= WM_T_82571) {
1797 sc->sc_flags |= WM_F_PCIE;
1798 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1799 && (sc->sc_type != WM_T_ICH10)
1800 && (sc->sc_type != WM_T_PCH)
1801 && (sc->sc_type != WM_T_PCH2)
1802 && (sc->sc_type != WM_T_PCH_LPT)) {
1803 /* ICH* and PCH* have no PCIe capability registers */
1804 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1805 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1806 NULL) == 0)
1807 aprint_error_dev(sc->sc_dev,
1808 "unable to find PCIe capability\n");
1809 }
1810 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1811 } else {
1812 reg = CSR_READ(sc, WMREG_STATUS);
1813 if (reg & STATUS_BUS64)
1814 sc->sc_flags |= WM_F_BUS64;
1815 if ((reg & STATUS_PCIX_MODE) != 0) {
1816 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1817
1818 sc->sc_flags |= WM_F_PCIX;
1819 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1820 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1821 aprint_error_dev(sc->sc_dev,
1822 "unable to find PCIX capability\n");
1823 else if (sc->sc_type != WM_T_82545_3 &&
1824 sc->sc_type != WM_T_82546_3) {
1825 /*
1826 * Work around a problem caused by the BIOS
1827 * setting the max memory read byte count
1828 * incorrectly.
1829 */
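/*
 * Both fields encode the byte count as a power-of-two
 * multiple of 512 bytes (0 -> 512, 1 -> 1024, 2 -> 2048,
 * 3 -> 4096), hence the "512 << n" in the message below.
 */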
1830 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1831 sc->sc_pcixe_capoff + PCIX_CMD);
1832 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1833 sc->sc_pcixe_capoff + PCIX_STATUS);
1834
1835 bytecnt =
1836 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1837 PCIX_CMD_BYTECNT_SHIFT;
1838 maxb =
1839 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1840 PCIX_STATUS_MAXB_SHIFT;
1841 if (bytecnt > maxb) {
1842 aprint_verbose_dev(sc->sc_dev,
1843 "resetting PCI-X MMRBC: %d -> %d\n",
1844 512 << bytecnt, 512 << maxb);
1845 pcix_cmd = (pcix_cmd &
1846 ~PCIX_CMD_BYTECNT_MASK) |
1847 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1848 pci_conf_write(pa->pa_pc, pa->pa_tag,
1849 sc->sc_pcixe_capoff + PCIX_CMD,
1850 pcix_cmd);
1851 }
1852 }
1853 }
1854 /*
1855 * The quad port adapter is special; it has a PCIX-PCIX
1856 * bridge on the board, and can run the secondary bus at
1857 * a higher speed.
1858 */
1859 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1860 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1861 : 66;
1862 } else if (sc->sc_flags & WM_F_PCIX) {
1863 switch (reg & STATUS_PCIXSPD_MASK) {
1864 case STATUS_PCIXSPD_50_66:
1865 sc->sc_bus_speed = 66;
1866 break;
1867 case STATUS_PCIXSPD_66_100:
1868 sc->sc_bus_speed = 100;
1869 break;
1870 case STATUS_PCIXSPD_100_133:
1871 sc->sc_bus_speed = 133;
1872 break;
1873 default:
1874 aprint_error_dev(sc->sc_dev,
1875 "unknown PCIXSPD %d; assuming 66MHz\n",
1876 reg & STATUS_PCIXSPD_MASK);
1877 sc->sc_bus_speed = 66;
1878 break;
1879 }
1880 } else
1881 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1882 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1883 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1884 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1885 }
1886
1887 /*
1888 * Allocate the control data structures, and create and load the
1889 * DMA map for it.
1890 *
1891 * NOTE: All Tx descriptors must be in the same 4G segment of
1892 * memory. So must Rx descriptors. We simplify by allocating
1893 * both sets within the same 4G segment.
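 * The boundary argument of 0x100000000 passed to
 * bus_dmamem_alloc() below is what enforces this.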
1894 */
1895 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1896 WM_NTXDESC_82542 : WM_NTXDESC_82544;
1897 sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1898 sizeof(struct wm_control_data_82542) :
1899 sizeof(struct wm_control_data_82544);
1900 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1901 (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1902 &sc->sc_cd_rseg, 0)) != 0) {
1903 aprint_error_dev(sc->sc_dev,
1904 "unable to allocate control data, error = %d\n",
1905 error);
1906 goto fail_0;
1907 }
1908
1909 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1910 sc->sc_cd_rseg, sc->sc_cd_size,
1911 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1912 aprint_error_dev(sc->sc_dev,
1913 "unable to map control data, error = %d\n", error);
1914 goto fail_1;
1915 }
1916
1917 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1918 sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1919 aprint_error_dev(sc->sc_dev,
1920 "unable to create control data DMA map, error = %d\n",
1921 error);
1922 goto fail_2;
1923 }
1924
1925 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1926 sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1927 aprint_error_dev(sc->sc_dev,
1928 "unable to load control data DMA map, error = %d\n",
1929 error);
1930 goto fail_3;
1931 }
1932
1933 /* Create the transmit buffer DMA maps. */
1934 WM_TXQUEUELEN(sc) =
1935 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1936 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1937 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1938 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1939 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1940 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1941 aprint_error_dev(sc->sc_dev,
1942 "unable to create Tx DMA map %d, error = %d\n",
1943 i, error);
1944 goto fail_4;
1945 }
1946 }
1947
1948 /* Create the receive buffer DMA maps. */
1949 for (i = 0; i < WM_NRXDESC; i++) {
1950 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1951 MCLBYTES, 0, 0,
1952 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1953 aprint_error_dev(sc->sc_dev,
1954 "unable to create Rx DMA map %d error = %d\n",
1955 i, error);
1956 goto fail_5;
1957 }
1958 sc->sc_rxsoft[i].rxs_mbuf = NULL;
1959 }
1960
1961 /* clear interesting stat counters */
1962 CSR_READ(sc, WMREG_COLC);
1963 CSR_READ(sc, WMREG_RXERRC);
1964
1965 /* get PHY control from SMBus to PCIe */
1966 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1967 || (sc->sc_type == WM_T_PCH_LPT))
1968 wm_smbustopci(sc);
1969
1970 /* Reset the chip to a known state. */
1971 wm_reset(sc);
1972
1973 /* Get some information about the EEPROM. */
1974 switch (sc->sc_type) {
1975 case WM_T_82542_2_0:
1976 case WM_T_82542_2_1:
1977 case WM_T_82543:
1978 case WM_T_82544:
1979 /* Microwire */
1980 sc->sc_nvm_wordsize = 64;
1981 sc->sc_nvm_addrbits = 6;
1982 break;
1983 case WM_T_82540:
1984 case WM_T_82545:
1985 case WM_T_82545_3:
1986 case WM_T_82546:
1987 case WM_T_82546_3:
1988 /* Microwire */
1989 reg = CSR_READ(sc, WMREG_EECD);
1990 if (reg & EECD_EE_SIZE) {
1991 sc->sc_nvm_wordsize = 256;
1992 sc->sc_nvm_addrbits = 8;
1993 } else {
1994 sc->sc_nvm_wordsize = 64;
1995 sc->sc_nvm_addrbits = 6;
1996 }
1997 sc->sc_flags |= WM_F_LOCK_EECD;
1998 break;
1999 case WM_T_82541:
2000 case WM_T_82541_2:
2001 case WM_T_82547:
2002 case WM_T_82547_2:
2003 sc->sc_flags |= WM_F_LOCK_EECD;
2004 reg = CSR_READ(sc, WMREG_EECD);
2005 if (reg & EECD_EE_TYPE) {
2006 /* SPI */
2007 sc->sc_flags |= WM_F_EEPROM_SPI;
2008 wm_nvm_set_addrbits_size_eecd(sc);
2009 } else {
2010 /* Microwire */
2011 if ((reg & EECD_EE_ABITS) != 0) {
2012 sc->sc_nvm_wordsize = 256;
2013 sc->sc_nvm_addrbits = 8;
2014 } else {
2015 sc->sc_nvm_wordsize = 64;
2016 sc->sc_nvm_addrbits = 6;
2017 }
2018 }
2019 break;
2020 case WM_T_82571:
2021 case WM_T_82572:
2022 /* SPI */
2023 sc->sc_flags |= WM_F_EEPROM_SPI;
2024 wm_nvm_set_addrbits_size_eecd(sc);
2025 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
2026 break;
2027 case WM_T_82573:
2028 sc->sc_flags |= WM_F_LOCK_SWSM;
2029 /* FALLTHROUGH */
2030 case WM_T_82574:
2031 case WM_T_82583:
2032 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2033 sc->sc_flags |= WM_F_EEPROM_FLASH;
2034 sc->sc_nvm_wordsize = 2048;
2035 } else {
2036 /* SPI */
2037 sc->sc_flags |= WM_F_EEPROM_SPI;
2038 wm_nvm_set_addrbits_size_eecd(sc);
2039 }
2040 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2041 break;
2042 case WM_T_82575:
2043 case WM_T_82576:
2044 case WM_T_82580:
2045 case WM_T_I350:
2046 case WM_T_I354:
2047 case WM_T_80003:
2048 /* SPI */
2049 sc->sc_flags |= WM_F_EEPROM_SPI;
2050 wm_nvm_set_addrbits_size_eecd(sc);
2051 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
2052 | WM_F_LOCK_SWSM;
2053 break;
2054 case WM_T_ICH8:
2055 case WM_T_ICH9:
2056 case WM_T_ICH10:
2057 case WM_T_PCH:
2058 case WM_T_PCH2:
2059 case WM_T_PCH_LPT:
2060 /* FLASH */
2061 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2062 sc->sc_nvm_wordsize = 2048;
2063 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
2064 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2065 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2066 aprint_error_dev(sc->sc_dev,
2067 "can't map FLASH registers\n");
2068 goto fail_5;
2069 }
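/*
 * GFPREG describes the flash region reserved for the NIC:
 * the bits below 16 hold the first sector and the bits from
 * 16 up hold the last (both masked with ICH_GFPREG_BASE_MASK).
 * The region contains two NVM banks, so halve it and convert
 * from bytes to 16-bit words below.
 */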
2070 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2071 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2072 ICH_FLASH_SECTOR_SIZE;
2073 sc->sc_ich8_flash_bank_size =
2074 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2075 sc->sc_ich8_flash_bank_size -=
2076 (reg & ICH_GFPREG_BASE_MASK);
2077 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2078 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2079 break;
2080 case WM_T_I210:
2081 case WM_T_I211:
2082 if (wm_nvm_get_flash_presence_i210(sc)) {
2083 wm_nvm_set_addrbits_size_eecd(sc);
2084 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2085 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
2086 } else {
2087 sc->sc_nvm_wordsize = INVM_SIZE;
2088 sc->sc_flags |= WM_F_EEPROM_INVM;
2089 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
2090 }
2091 break;
2092 default:
2093 break;
2094 }
2095
2096 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2097 switch (sc->sc_type) {
2098 case WM_T_82571:
2099 case WM_T_82572:
2100 reg = CSR_READ(sc, WMREG_SWSM2);
2101 if ((reg & SWSM2_LOCK) == 0) {
2102 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2103 force_clear_smbi = true;
2104 } else
2105 force_clear_smbi = false;
2106 break;
2107 case WM_T_82573:
2108 case WM_T_82574:
2109 case WM_T_82583:
2110 force_clear_smbi = true;
2111 break;
2112 default:
2113 force_clear_smbi = false;
2114 break;
2115 }
2116 if (force_clear_smbi) {
2117 reg = CSR_READ(sc, WMREG_SWSM);
2118 if ((reg & SWSM_SMBI) != 0)
2119 aprint_error_dev(sc->sc_dev,
2120 "Please update the Bootagent\n");
2121 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2122 }
2123
2124 /*
2125 * Defer printing the EEPROM type until after verifying the checksum.
2126 * This allows the EEPROM type to be printed correctly in the case
2127 * that no EEPROM is attached.
2128 */
2129 /*
2130 * Validate the EEPROM checksum. If the checksum fails, flag
2131 * this for later, so we can fail future reads from the EEPROM.
2132 */
2133 if (wm_nvm_validate_checksum(sc)) {
2134 /*
2135 * Validate a second time, because some PCI-e parts fail
2136 * the first check due to the link being in a sleep state.
2137 */
2138 if (wm_nvm_validate_checksum(sc))
2139 sc->sc_flags |= WM_F_EEPROM_INVALID;
2140 }
2141
2142 /* Set device properties (macflags) */
2143 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2144
2145 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2146 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2147 else {
2148 aprint_verbose_dev(sc->sc_dev, "%u words ",
2149 sc->sc_nvm_wordsize);
2150 if (sc->sc_flags & WM_F_EEPROM_INVM)
2151 aprint_verbose("iNVM");
2152 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2153 aprint_verbose("FLASH(HW)");
2154 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2155 aprint_verbose("FLASH");
2156 else {
2157 if (sc->sc_flags & WM_F_EEPROM_SPI)
2158 eetype = "SPI";
2159 else
2160 eetype = "MicroWire";
2161 aprint_verbose("(%d address bits) %s EEPROM",
2162 sc->sc_nvm_addrbits, eetype);
2163 }
2164 }
2165 wm_nvm_version(sc);
2166 aprint_verbose("\n");
2167
2168 /* Check for I21[01] PLL workaround */
2169 if (sc->sc_type == WM_T_I210)
2170 sc->sc_flags |= WM_F_PLL_WA_I210;
2171 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2172 /* NVM images from release 3.25 include the fix */
2173 if ((sc->sc_nvm_ver_major > 3)
2174 || ((sc->sc_nvm_ver_major == 3)
2175 && (sc->sc_nvm_ver_minor >= 25)))
2176 sc->sc_flags &= ~WM_F_PLL_WA_I210;
2177 else {
2178 aprint_verbose_dev(sc->sc_dev,
2179 "ROM image version %d.%d is older than 3.25\n",
2180 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2181 sc->sc_flags |= WM_F_PLL_WA_I210;
2182 }
2183 }
2184 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2185 wm_pll_workaround_i210(sc);
2186
2187 switch (sc->sc_type) {
2188 case WM_T_82571:
2189 case WM_T_82572:
2190 case WM_T_82573:
2191 case WM_T_82574:
2192 case WM_T_82583:
2193 case WM_T_80003:
2194 case WM_T_ICH8:
2195 case WM_T_ICH9:
2196 case WM_T_ICH10:
2197 case WM_T_PCH:
2198 case WM_T_PCH2:
2199 case WM_T_PCH_LPT:
2200 if (wm_check_mng_mode(sc) != 0)
2201 wm_get_hw_control(sc);
2202 break;
2203 default:
2204 break;
2205 }
2206 wm_get_wakeup(sc);
2207 /*
2208 * Read the Ethernet address from the EEPROM, unless it was
2209 * already found in the device properties.
2210 */
2211 ea = prop_dictionary_get(dict, "mac-address");
2212 if (ea != NULL) {
2213 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2214 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2215 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2216 } else {
2217 if (wm_read_mac_addr(sc, enaddr) != 0) {
2218 aprint_error_dev(sc->sc_dev,
2219 "unable to read Ethernet address\n");
2220 goto fail_5;
2221 }
2222 }
2223
2224 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2225 ether_sprintf(enaddr));
2226
2227 /*
2228 * Read the config info from the EEPROM, and set up various
2229 * bits in the control registers based on their contents.
2230 */
2231 pn = prop_dictionary_get(dict, "i82543-cfg1");
2232 if (pn != NULL) {
2233 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2234 cfg1 = (uint16_t) prop_number_integer_value(pn);
2235 } else {
2236 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2237 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2238 goto fail_5;
2239 }
2240 }
2241
2242 pn = prop_dictionary_get(dict, "i82543-cfg2");
2243 if (pn != NULL) {
2244 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2245 cfg2 = (uint16_t) prop_number_integer_value(pn);
2246 } else {
2247 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2248 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2249 goto fail_5;
2250 }
2251 }
2252
2253 /* check for WM_F_WOL */
2254 switch (sc->sc_type) {
2255 case WM_T_82542_2_0:
2256 case WM_T_82542_2_1:
2257 case WM_T_82543:
2258 /* dummy? */
2259 eeprom_data = 0;
2260 apme_mask = NVM_CFG3_APME;
2261 break;
2262 case WM_T_82544:
2263 apme_mask = NVM_CFG2_82544_APM_EN;
2264 eeprom_data = cfg2;
2265 break;
2266 case WM_T_82546:
2267 case WM_T_82546_3:
2268 case WM_T_82571:
2269 case WM_T_82572:
2270 case WM_T_82573:
2271 case WM_T_82574:
2272 case WM_T_82583:
2273 case WM_T_80003:
2274 default:
2275 apme_mask = NVM_CFG3_APME;
2276 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2277 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2278 break;
2279 case WM_T_82575:
2280 case WM_T_82576:
2281 case WM_T_82580:
2282 case WM_T_I350:
2283 case WM_T_I354: /* XXX ok? */
2284 case WM_T_ICH8:
2285 case WM_T_ICH9:
2286 case WM_T_ICH10:
2287 case WM_T_PCH:
2288 case WM_T_PCH2:
2289 case WM_T_PCH_LPT:
2290 /* XXX The funcid should be checked on some devices */
2291 apme_mask = WUC_APME;
2292 eeprom_data = CSR_READ(sc, WMREG_WUC);
2293 break;
2294 }
2295
2296 /* Check for the WM_F_WOL flag after reading the EEPROM settings */
2297 if ((eeprom_data & apme_mask) != 0)
2298 sc->sc_flags |= WM_F_WOL;
2299 #ifdef WM_DEBUG
2300 if ((sc->sc_flags & WM_F_WOL) != 0)
2301 printf("WOL\n");
2302 #endif
2303
2304 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2305 /* Check NVM for autonegotiation */
2306 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2307 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2308 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2309 }
2310 }
2311
2312 /*
2313 * XXX need special handling for some multi-port cards
2314 * to disable a particular port.
2315 */
2316
2317 if (sc->sc_type >= WM_T_82544) {
2318 pn = prop_dictionary_get(dict, "i82543-swdpin");
2319 if (pn != NULL) {
2320 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2321 swdpin = (uint16_t) prop_number_integer_value(pn);
2322 } else {
2323 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2324 aprint_error_dev(sc->sc_dev,
2325 "unable to read SWDPIN\n");
2326 goto fail_5;
2327 }
2328 }
2329 }
2330
2331 if (cfg1 & NVM_CFG1_ILOS)
2332 sc->sc_ctrl |= CTRL_ILOS;
2333
2334 /*
2335 * XXX
2336 * This code isn't correct because pins 2 and 3 are located
2337 * at different positions on newer chips. Check all the
2338 * datasheets. Until this is resolved, only run it on chips
2339 * up to and including the 82580.
2340 */
2341 if (sc->sc_type <= WM_T_82580) {
2342 if (sc->sc_type >= WM_T_82544) {
2343 sc->sc_ctrl |=
2344 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2345 CTRL_SWDPIO_SHIFT;
2346 sc->sc_ctrl |=
2347 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2348 CTRL_SWDPINS_SHIFT;
2349 } else {
2350 sc->sc_ctrl |=
2351 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2352 CTRL_SWDPIO_SHIFT;
2353 }
2354 }
2355
2356 /* XXX For other than 82580? */
2357 if (sc->sc_type == WM_T_82580) {
2358 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2359 printf("CFG3 = %08x\n", (uint32_t)nvmword);
2360 if (nvmword & __BIT(13)) {
2361 printf("SET ILOS\n");
2362 sc->sc_ctrl |= CTRL_ILOS;
2363 }
2364 }
2365
2366 #if 0
2367 if (sc->sc_type >= WM_T_82544) {
2368 if (cfg1 & NVM_CFG1_IPS0)
2369 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2370 if (cfg1 & NVM_CFG1_IPS1)
2371 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2372 sc->sc_ctrl_ext |=
2373 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2374 CTRL_EXT_SWDPIO_SHIFT;
2375 sc->sc_ctrl_ext |=
2376 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2377 CTRL_EXT_SWDPINS_SHIFT;
2378 } else {
2379 sc->sc_ctrl_ext |=
2380 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2381 CTRL_EXT_SWDPIO_SHIFT;
2382 }
2383 #endif
2384
2385 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2386 #if 0
2387 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2388 #endif
2389
2390 /*
2391 * Set up some register offsets that are different between
2392 * the i82542 and the i82543 and later chips.
2393 */
2394 if (sc->sc_type < WM_T_82543) {
2395 sc->sc_rdt_reg = WMREG_OLD_RDT0;
2396 sc->sc_tdt_reg = WMREG_OLD_TDT;
2397 } else {
2398 sc->sc_rdt_reg = WMREG_RDT;
2399 sc->sc_tdt_reg = WMREG_TDT;
2400 }
2401
2402 if (sc->sc_type == WM_T_PCH) {
2403 uint16_t val;
2404
2405 /* Save the NVM K1 bit setting */
2406 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2407
2408 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2409 sc->sc_nvm_k1_enabled = 1;
2410 else
2411 sc->sc_nvm_k1_enabled = 0;
2412 }
2413
2414 /*
2415 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
2416 * media structures accordingly.
2417 */
2418 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2419 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2420 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2421 || sc->sc_type == WM_T_82573
2422 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2423 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2424 wm_gmii_mediainit(sc, wmp->wmp_product);
2425 } else if (sc->sc_type < WM_T_82543 ||
2426 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2427 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2428 aprint_error_dev(sc->sc_dev,
2429 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2430 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2431 }
2432 wm_tbi_mediainit(sc);
2433 } else {
2434 switch (sc->sc_type) {
2435 case WM_T_82575:
2436 case WM_T_82576:
2437 case WM_T_82580:
2438 case WM_T_I350:
2439 case WM_T_I354:
2440 case WM_T_I210:
2441 case WM_T_I211:
2442 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2443 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2444 switch (link_mode) {
2445 case CTRL_EXT_LINK_MODE_1000KX:
2446 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2447 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2448 break;
2449 case CTRL_EXT_LINK_MODE_SGMII:
2450 if (wm_sgmii_uses_mdio(sc)) {
2451 aprint_verbose_dev(sc->sc_dev,
2452 "SGMII(MDIO)\n");
2453 sc->sc_flags |= WM_F_SGMII;
2454 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2455 break;
2456 }
2457 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2458 /*FALLTHROUGH*/
2459 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2460 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2461 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2462 if (link_mode
2463 == CTRL_EXT_LINK_MODE_SGMII) {
2464 sc->sc_mediatype
2465 = WM_MEDIATYPE_COPPER;
2466 sc->sc_flags |= WM_F_SGMII;
2467 } else {
2468 sc->sc_mediatype
2469 = WM_MEDIATYPE_SERDES;
2470 aprint_verbose_dev(sc->sc_dev,
2471 "SERDES\n");
2472 }
2473 break;
2474 }
2475 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2476 aprint_verbose_dev(sc->sc_dev,
2477 "SERDES\n");
2478
2479 /* Change current link mode setting */
2480 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2481 switch (sc->sc_mediatype) {
2482 case WM_MEDIATYPE_COPPER:
2483 reg |= CTRL_EXT_LINK_MODE_SGMII;
2484 break;
2485 case WM_MEDIATYPE_SERDES:
2486 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2487 break;
2488 default:
2489 break;
2490 }
2491 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2492 break;
2493 case CTRL_EXT_LINK_MODE_GMII:
2494 default:
2495 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2496 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2497 break;
2498 }
2499
2500 reg &= ~CTRL_EXT_I2C_ENA;
2501 if ((sc->sc_flags & WM_F_SGMII) != 0)
2502 reg |= CTRL_EXT_I2C_ENA;
2505 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2506
2507 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2508 wm_gmii_mediainit(sc, wmp->wmp_product);
2509 else
2510 wm_tbi_mediainit(sc);
2511 break;
2512 default:
2513 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2514 aprint_error_dev(sc->sc_dev,
2515 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2516 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2517 wm_gmii_mediainit(sc, wmp->wmp_product);
2518 }
2519 }
2520
2521 ifp = &sc->sc_ethercom.ec_if;
2522 xname = device_xname(sc->sc_dev);
2523 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2524 ifp->if_softc = sc;
2525 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2526 ifp->if_ioctl = wm_ioctl;
2527 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2528 ifp->if_start = wm_nq_start;
2529 else
2530 ifp->if_start = wm_start;
2531 ifp->if_watchdog = wm_watchdog;
2532 ifp->if_init = wm_init;
2533 ifp->if_stop = wm_stop;
2534 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2535 IFQ_SET_READY(&ifp->if_snd);
2536
2537 /* Check for jumbo frame */
2538 switch (sc->sc_type) {
2539 case WM_T_82573:
2540 /* XXX limited to 9234 if ASPM is disabled */
2541 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2542 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2543 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2544 break;
2545 case WM_T_82571:
2546 case WM_T_82572:
2547 case WM_T_82574:
2548 case WM_T_82575:
2549 case WM_T_82576:
2550 case WM_T_82580:
2551 case WM_T_I350:
2552 case WM_T_I354: /* XXXX ok? */
2553 case WM_T_I210:
2554 case WM_T_I211:
2555 case WM_T_80003:
2556 case WM_T_ICH9:
2557 case WM_T_ICH10:
2558 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2559 case WM_T_PCH_LPT:
2560 /* XXX limited to 9234 */
2561 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2562 break;
2563 case WM_T_PCH:
2564 /* XXX limited to 4096 */
2565 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2566 break;
2567 case WM_T_82542_2_0:
2568 case WM_T_82542_2_1:
2569 case WM_T_82583:
2570 case WM_T_ICH8:
2571 /* No support for jumbo frame */
2572 break;
2573 default:
2574 /* ETHER_MAX_LEN_JUMBO */
2575 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2576 break;
2577 }
2578
2579 /* If we're a i82543 or greater, we can support VLANs. */
2580 if (sc->sc_type >= WM_T_82543)
2581 sc->sc_ethercom.ec_capabilities |=
2582 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2583
2584 /*
2585 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2586 * on i82543 and later.
2587 */
2588 if (sc->sc_type >= WM_T_82543) {
2589 ifp->if_capabilities |=
2590 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2591 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2592 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2593 IFCAP_CSUM_TCPv6_Tx |
2594 IFCAP_CSUM_UDPv6_Tx;
2595 }
2596
2597 /*
2598 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2599 *
2600 * 82541GI (8086:1076) ... no
2601 * 82572EI (8086:10b9) ... yes
2602 */
2603 if (sc->sc_type >= WM_T_82571) {
2604 ifp->if_capabilities |=
2605 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2606 }
2607
2608 /*
2609 * If we're a i82544 or greater (except i82547), we can do
2610 * TCP segmentation offload.
2611 */
2612 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2613 ifp->if_capabilities |= IFCAP_TSOv4;
2614 }
2615
2616 if (sc->sc_type >= WM_T_82571) {
2617 ifp->if_capabilities |= IFCAP_TSOv6;
2618 }
2619
2620 #ifdef WM_MPSAFE
2621 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2622 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2623 #else
2624 sc->sc_tx_lock = NULL;
2625 sc->sc_rx_lock = NULL;
2626 #endif
2627
2628 /* Attach the interface. */
2629 if_attach(ifp);
2630 ether_ifattach(ifp, enaddr);
2631 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2632 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2633 RND_FLAG_DEFAULT);
2634
2635 #ifdef WM_EVENT_COUNTERS
2636 /* Attach event counters. */
2637 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2638 NULL, xname, "txsstall");
2639 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2640 NULL, xname, "txdstall");
2641 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2642 NULL, xname, "txfifo_stall");
2643 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2644 NULL, xname, "txdw");
2645 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2646 NULL, xname, "txqe");
2647 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2648 NULL, xname, "rxintr");
2649 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2650 NULL, xname, "linkintr");
2651
2652 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2653 NULL, xname, "rxipsum");
2654 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2655 NULL, xname, "rxtusum");
2656 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2657 NULL, xname, "txipsum");
2658 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2659 NULL, xname, "txtusum");
2660 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2661 NULL, xname, "txtusum6");
2662
2663 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2664 NULL, xname, "txtso");
2665 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2666 NULL, xname, "txtso6");
2667 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2668 NULL, xname, "txtsopain");
2669
2670 for (i = 0; i < WM_NTXSEGS; i++) {
2671 snprintf(wm_txseg_evcnt_names[i],
2672 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2673 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2674 NULL, xname, wm_txseg_evcnt_names[i]);
2675 }
2676
2677 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2678 NULL, xname, "txdrop");
2679
2680 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2681 NULL, xname, "tu");
2682
2683 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2684 NULL, xname, "tx_xoff");
2685 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2686 NULL, xname, "tx_xon");
2687 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2688 NULL, xname, "rx_xoff");
2689 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2690 NULL, xname, "rx_xon");
2691 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2692 NULL, xname, "rx_macctl");
2693 #endif /* WM_EVENT_COUNTERS */
2694
2695 if (pmf_device_register(self, wm_suspend, wm_resume))
2696 pmf_class_network_register(self, ifp);
2697 else
2698 aprint_error_dev(self, "couldn't establish power handler\n");
2699
2700 sc->sc_flags |= WM_F_ATTACHED;
2701 return;
2702
2703 /*
2704 * Free any resources we've allocated during the failed attach
2705 * attempt. Do this in reverse order and fall through.
2706 */
2707 fail_5:
2708 for (i = 0; i < WM_NRXDESC; i++) {
2709 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2710 bus_dmamap_destroy(sc->sc_dmat,
2711 sc->sc_rxsoft[i].rxs_dmamap);
2712 }
2713 fail_4:
2714 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2715 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2716 bus_dmamap_destroy(sc->sc_dmat,
2717 sc->sc_txsoft[i].txs_dmamap);
2718 }
2719 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2720 fail_3:
2721 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2722 fail_2:
2723 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2724 sc->sc_cd_size);
2725 fail_1:
2726 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2727 fail_0:
2728 return;
2729 }
2730
2731 /* The detach function (ca_detach) */
2732 static int
2733 wm_detach(device_t self, int flags __unused)
2734 {
2735 struct wm_softc *sc = device_private(self);
2736 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2737 int i;
2738 #ifndef WM_MPSAFE
2739 int s;
2740 #endif
2741
2742 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2743 return 0;
2744
2745 #ifndef WM_MPSAFE
2746 s = splnet();
2747 #endif
2748 /* Stop the interface. Callouts are stopped in it. */
2749 wm_stop(ifp, 1);
2750
2751 #ifndef WM_MPSAFE
2752 splx(s);
2753 #endif
2754
2755 pmf_device_deregister(self);
2756
2757 /* Tell the firmware about the release */
2758 WM_BOTH_LOCK(sc);
2759 wm_release_manageability(sc);
2760 wm_release_hw_control(sc);
2761 WM_BOTH_UNLOCK(sc);
2762
2763 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2764
2765 /* Delete all remaining media. */
2766 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2767
2768 ether_ifdetach(ifp);
2769 if_detach(ifp);
2770
2771
2772 /* Unload RX dmamaps and free mbufs */
2773 WM_RX_LOCK(sc);
2774 wm_rxdrain(sc);
2775 WM_RX_UNLOCK(sc);
2776 /* Must unlock here */
2777
2778 /* Free dmamap. It's the same as the end of the wm_attach() function */
2779 for (i = 0; i < WM_NRXDESC; i++) {
2780 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2781 bus_dmamap_destroy(sc->sc_dmat,
2782 sc->sc_rxsoft[i].rxs_dmamap);
2783 }
2784 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2785 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2786 bus_dmamap_destroy(sc->sc_dmat,
2787 sc->sc_txsoft[i].txs_dmamap);
2788 }
2789 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2790 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2791 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2792 sc->sc_cd_size);
2793 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2794
2795 /* Disestablish the interrupt handler */
2796 for (i = 0; i < sc->sc_nintrs; i++) {
2797 if (sc->sc_ihs[i] != NULL) {
2798 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2799 sc->sc_ihs[i] = NULL;
2800 }
2801 }
2802 #ifdef WM_MSI_MSIX
2803 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2804 #endif /* WM_MSI_MSIX */
2805
2806 /* Unmap the registers */
2807 if (sc->sc_ss) {
2808 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2809 sc->sc_ss = 0;
2810 }
2811 if (sc->sc_ios) {
2812 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2813 sc->sc_ios = 0;
2814 }
2815 if (sc->sc_flashs) {
2816 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2817 sc->sc_flashs = 0;
2818 }
2819
2820 if (sc->sc_tx_lock)
2821 mutex_obj_free(sc->sc_tx_lock);
2822 if (sc->sc_rx_lock)
2823 mutex_obj_free(sc->sc_rx_lock);
2824
2825 return 0;
2826 }
2827
2828 static bool
2829 wm_suspend(device_t self, const pmf_qual_t *qual)
2830 {
2831 struct wm_softc *sc = device_private(self);
2832
2833 wm_release_manageability(sc);
2834 wm_release_hw_control(sc);
2835 #ifdef WM_WOL
2836 wm_enable_wakeup(sc);
2837 #endif
2838
2839 return true;
2840 }
2841
2842 static bool
2843 wm_resume(device_t self, const pmf_qual_t *qual)
2844 {
2845 struct wm_softc *sc = device_private(self);
2846
2847 wm_init_manageability(sc);
2848
2849 return true;
2850 }
2851
2852 /*
2853 * wm_watchdog: [ifnet interface function]
2854 *
2855 * Watchdog timer handler.
2856 */
2857 static void
2858 wm_watchdog(struct ifnet *ifp)
2859 {
2860 struct wm_softc *sc = ifp->if_softc;
2861
2862 /*
2863 * Since we're using delayed interrupts, sweep up
2864 * before we report an error.
2865 */
2866 WM_TX_LOCK(sc);
2867 wm_txeof(sc);
2868 WM_TX_UNLOCK(sc);
2869
2870 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2871 #ifdef WM_DEBUG
2872 int i, j;
2873 struct wm_txsoft *txs;
2874 #endif
2875 log(LOG_ERR,
2876 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2877 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2878 sc->sc_txnext);
2879 ifp->if_oerrors++;
2880 #ifdef WM_DEBUG
2881 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2882 i = WM_NEXTTXS(sc, i)) {
2883 txs = &sc->sc_txsoft[i];
2884 printf("txs %d tx %d -> %d\n",
2885 i, txs->txs_firstdesc, txs->txs_lastdesc);
2886 for (j = txs->txs_firstdesc; ;
2887 j = WM_NEXTTX(sc, j)) {
2888 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2889 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2890 printf("\t %#08x%08x\n",
2891 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2892 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2893 if (j == txs->txs_lastdesc)
2894 break;
2895 }
2896 }
2897 #endif
2898 /* Reset the interface. */
2899 (void) wm_init(ifp);
2900 }
2901
2902 /* Try to get more packets going. */
2903 ifp->if_start(ifp);
2904 }
2905
2906 /*
2907 * wm_tick:
2908 *
2909 * One second timer, used to check link status, sweep up
2910 * completed transmit jobs, etc.
2911 */
2912 static void
2913 wm_tick(void *arg)
2914 {
2915 struct wm_softc *sc = arg;
2916 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2917 #ifndef WM_MPSAFE
2918 int s;
2919
2920 s = splnet();
2921 #endif
2922
2923 WM_TX_LOCK(sc);
2924
2925 if (sc->sc_stopping)
2926 goto out;
2927
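/* The MAC statistics registers clear on read, so accumulate them. */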
2928 if (sc->sc_type >= WM_T_82542_2_1) {
2929 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2930 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2931 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2932 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2933 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2934 }
2935
2936 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2937 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2938 + CSR_READ(sc, WMREG_CRCERRS)
2939 + CSR_READ(sc, WMREG_ALGNERRC)
2940 + CSR_READ(sc, WMREG_SYMERRC)
2941 + CSR_READ(sc, WMREG_RXERRC)
2942 + CSR_READ(sc, WMREG_SEC)
2943 + CSR_READ(sc, WMREG_CEXTERR)
2944 + CSR_READ(sc, WMREG_RLEC);
2945 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2946
2947 if (sc->sc_flags & WM_F_HAS_MII)
2948 mii_tick(&sc->sc_mii);
2949 else if ((sc->sc_type >= WM_T_82575)
2950 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2951 wm_serdes_tick(sc);
2952 else
2953 wm_tbi_tick(sc);
2954
2955 out:
2956 WM_TX_UNLOCK(sc);
2957 #ifndef WM_MPSAFE
2958 splx(s);
2959 #endif
2960
2961 if (!sc->sc_stopping)
2962 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2963 }
2964
2965 static int
2966 wm_ifflags_cb(struct ethercom *ec)
2967 {
2968 struct ifnet *ifp = &ec->ec_if;
2969 struct wm_softc *sc = ifp->if_softc;
2970 int change = ifp->if_flags ^ sc->sc_if_flags;
2971 int rc = 0;
2972
2973 WM_BOTH_LOCK(sc);
2974
2975 if (change != 0)
2976 sc->sc_if_flags = ifp->if_flags;
2977
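/* Flag changes we can't handle in place require a full re-init. */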
2978 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2979 rc = ENETRESET;
2980 goto out;
2981 }
2982
2983 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2984 wm_set_filter(sc);
2985
2986 wm_set_vlan(sc);
2987
2988 out:
2989 WM_BOTH_UNLOCK(sc);
2990
2991 return rc;
2992 }
2993
2994 /*
2995 * wm_ioctl: [ifnet interface function]
2996 *
2997 * Handle control requests from the operator.
2998 */
2999 static int
3000 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3001 {
3002 struct wm_softc *sc = ifp->if_softc;
3003 struct ifreq *ifr = (struct ifreq *) data;
3004 struct ifaddr *ifa = (struct ifaddr *)data;
3005 struct sockaddr_dl *sdl;
3006 int s, error;
3007
3008 #ifndef WM_MPSAFE
3009 s = splnet();
3010 #endif
3011 switch (cmd) {
3012 case SIOCSIFMEDIA:
3013 case SIOCGIFMEDIA:
3014 WM_BOTH_LOCK(sc);
3015 /* Flow control requires full-duplex mode. */
3016 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3017 (ifr->ifr_media & IFM_FDX) == 0)
3018 ifr->ifr_media &= ~IFM_ETH_FMASK;
3019 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3020 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3021 /* We can do both TXPAUSE and RXPAUSE. */
3022 ifr->ifr_media |=
3023 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3024 }
3025 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3026 }
3027 WM_BOTH_UNLOCK(sc);
3028 #ifdef WM_MPSAFE
3029 s = splnet();
3030 #endif
3031 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3032 #ifdef WM_MPSAFE
3033 splx(s);
3034 #endif
3035 break;
3036 case SIOCINITIFADDR:
3037 WM_BOTH_LOCK(sc);
3038 if (ifa->ifa_addr->sa_family == AF_LINK) {
3039 sdl = satosdl(ifp->if_dl->ifa_addr);
3040 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3041 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3042 /* unicast address is first multicast entry */
3043 wm_set_filter(sc);
3044 error = 0;
3045 WM_BOTH_UNLOCK(sc);
3046 break;
3047 }
3048 WM_BOTH_UNLOCK(sc);
3049 /*FALLTHROUGH*/
3050 default:
3051 #ifdef WM_MPSAFE
3052 s = splnet();
3053 #endif
3054 /* It may call wm_start, so unlock here */
3055 error = ether_ioctl(ifp, cmd, data);
3056 #ifdef WM_MPSAFE
3057 splx(s);
3058 #endif
3059 if (error != ENETRESET)
3060 break;
3061
3062 error = 0;
3063
3064 if (cmd == SIOCSIFCAP) {
3065 error = (*ifp->if_init)(ifp);
3066 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3067 ;
3068 else if (ifp->if_flags & IFF_RUNNING) {
3069 /*
3070 * Multicast list has changed; set the hardware filter
3071 * accordingly.
3072 */
3073 WM_BOTH_LOCK(sc);
3074 wm_set_filter(sc);
3075 WM_BOTH_UNLOCK(sc);
3076 }
3077 break;
3078 }
3079
3080 #ifndef WM_MPSAFE
3081 splx(s);
3082 #endif
3083 return error;
3084 }
3085
3086 /* MAC address related */
3087
3088 /*
3089 * Get the offset of the MAC address and return it.
3090 * If an error occurs, use offset 0.
3091 */
3092 static uint16_t
3093 wm_check_alt_mac_addr(struct wm_softc *sc)
3094 {
3095 uint16_t myea[ETHER_ADDR_LEN / 2];
3096 uint16_t offset = NVM_OFF_MACADDR;
3097
3098 /* Try to read alternative MAC address pointer */
3099 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3100 return 0;
3101
3102 /* Check whether the pointer is valid. */
3103 if ((offset == 0x0000) || (offset == 0xffff))
3104 return 0;
3105
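/* Step to this function's entry within the alternate MAC area. */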
3106 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3107 /*
3108 * Check whether the alternative MAC address is valid.
3109 * Some cards have a non-0xffff pointer but don't actually
3110 * use an alternative MAC address.
3111 *
3112 * Check that the broadcast (group) bit is not set.
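 * (Bit 0 of the first octet is the Ethernet group bit;
 * myea[0] holds the first two octets with the low byte
 * first, so a valid unicast address has it clear.)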
3113 */
3114 if (wm_nvm_read(sc, offset, 1, myea) == 0)
3115 if (((myea[0] & 0xff) & 0x01) == 0)
3116 return offset; /* Found */
3117
3118 /* Not found */
3119 return 0;
3120 }
3121
3122 static int
3123 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3124 {
3125 uint16_t myea[ETHER_ADDR_LEN / 2];
3126 uint16_t offset = NVM_OFF_MACADDR;
3127 int do_invert = 0;
3128
3129 switch (sc->sc_type) {
3130 case WM_T_82580:
3131 case WM_T_I350:
3132 case WM_T_I354:
3133 /* EEPROM Top Level Partitioning */
3134 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3135 break;
3136 case WM_T_82571:
3137 case WM_T_82575:
3138 case WM_T_82576:
3139 case WM_T_80003:
3140 case WM_T_I210:
3141 case WM_T_I211:
3142 offset = wm_check_alt_mac_addr(sc);
3143 if (offset == 0)
3144 if ((sc->sc_funcid & 0x01) == 1)
3145 do_invert = 1;
3146 break;
3147 default:
3148 if ((sc->sc_funcid & 0x01) == 1)
3149 do_invert = 1;
3150 break;
3151 }
3152
3153 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
3154 myea) != 0)
3155 goto bad;
3156
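/* Each 16-bit NVM word holds two address octets, low byte first. */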
3157 enaddr[0] = myea[0] & 0xff;
3158 enaddr[1] = myea[0] >> 8;
3159 enaddr[2] = myea[1] & 0xff;
3160 enaddr[3] = myea[1] >> 8;
3161 enaddr[4] = myea[2] & 0xff;
3162 enaddr[5] = myea[2] >> 8;
3163
3164 /*
3165 * Toggle the LSB of the MAC address on the second port
3166 * of some dual port cards.
3167 */
3168 if (do_invert != 0)
3169 enaddr[5] ^= 1;
3170
3171 return 0;
3172
3173 bad:
3174 return -1;
3175 }
3176
3177 /*
3178 * wm_set_ral:
3179 *
3180 * Set an entry in the receive address list.
3181 */
3182 static void
3183 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3184 {
3185 uint32_t ral_lo, ral_hi;
3186
3187 if (enaddr != NULL) {
3188 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3189 (enaddr[3] << 24);
3190 ral_hi = enaddr[4] | (enaddr[5] << 8);
3191 ral_hi |= RAL_AV;
3192 } else {
3193 ral_lo = 0;
3194 ral_hi = 0;
3195 }
3196
3197 if (sc->sc_type >= WM_T_82544) {
3198 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3199 ral_lo);
3200 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3201 ral_hi);
3202 } else {
3203 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3204 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3205 }
3206 }
3207
3208 /*
3209 * wm_mchash:
3210 *
3211 * Compute the hash of the multicast address for the 4096-bit
3212 * multicast filter.
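 *
 * The hash concatenates the last address octet with the high
 * nibble of the fifth one (the shift tables vary the split per
 * sc_mchash_type); the caller uses hash >> 5 to select an MTA
 * register and hash & 0x1f to select a bit within it.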
3213 */
3214 static uint32_t
3215 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3216 {
3217 static const int lo_shift[4] = { 4, 3, 2, 0 };
3218 static const int hi_shift[4] = { 4, 5, 6, 8 };
3219 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3220 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3221 uint32_t hash;
3222
3223 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3224 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3225 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3226 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3227 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3228 return (hash & 0x3ff);
3229 }
3230 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3231 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3232
3233 return (hash & 0xfff);
3234 }
3235
3236 /*
3237 * wm_set_filter:
3238 *
3239 * Set up the receive filter.
3240 */
3241 static void
3242 wm_set_filter(struct wm_softc *sc)
3243 {
3244 struct ethercom *ec = &sc->sc_ethercom;
3245 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3246 struct ether_multi *enm;
3247 struct ether_multistep step;
3248 bus_addr_t mta_reg;
3249 uint32_t hash, reg, bit;
3250 int i, size;
3251
3252 if (sc->sc_type >= WM_T_82544)
3253 mta_reg = WMREG_CORDOVA_MTA;
3254 else
3255 mta_reg = WMREG_MTA;
3256
3257 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3258
3259 if (ifp->if_flags & IFF_BROADCAST)
3260 sc->sc_rctl |= RCTL_BAM;
3261 if (ifp->if_flags & IFF_PROMISC) {
3262 sc->sc_rctl |= RCTL_UPE;
3263 goto allmulti;
3264 }
3265
3266 /*
3267 * Set the station address in the first RAL slot, and
3268 * clear the remaining slots.
3269 */
3270 if (sc->sc_type == WM_T_ICH8)
3271 size = WM_RAL_TABSIZE_ICH8 - 1;
3272 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3273 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3274 || (sc->sc_type == WM_T_PCH_LPT))
3275 size = WM_RAL_TABSIZE_ICH8;
3276 else if (sc->sc_type == WM_T_82575)
3277 size = WM_RAL_TABSIZE_82575;
3278 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3279 size = WM_RAL_TABSIZE_82576;
3280 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3281 size = WM_RAL_TABSIZE_I350;
3282 else
3283 size = WM_RAL_TABSIZE;
3284 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3285 for (i = 1; i < size; i++)
3286 wm_set_ral(sc, NULL, i);
3287
3288 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3289 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3290 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3291 size = WM_ICH8_MC_TABSIZE;
3292 else
3293 size = WM_MC_TABSIZE;
3294 /* Clear out the multicast table. */
3295 for (i = 0; i < size; i++)
3296 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3297
3298 ETHER_FIRST_MULTI(step, ec, enm);
3299 while (enm != NULL) {
3300 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3301 /*
3302 * We must listen to a range of multicast addresses.
3303 * For now, just accept all multicasts, rather than
3304 * trying to set only those filter bits needed to match
3305 * the range. (At this time, the only use of address
3306 * ranges is for IP multicast routing, for which the
3307 * range is big enough to require all bits set.)
3308 */
3309 goto allmulti;
3310 }
3311
3312 hash = wm_mchash(sc, enm->enm_addrlo);
3313
3314 reg = (hash >> 5);
3315 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3316 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3317 || (sc->sc_type == WM_T_PCH2)
3318 || (sc->sc_type == WM_T_PCH_LPT))
3319 reg &= 0x1f;
3320 else
3321 reg &= 0x7f;
3322 bit = hash & 0x1f;
3323
3324 hash = CSR_READ(sc, mta_reg + (reg << 2));
3325 hash |= 1U << bit;
3326
3327 /* 82544 errata: writing an odd MTA register can disturb the previous one; save and re-write it */
3328 if (sc->sc_type == WM_T_82544 && (reg & 1) == 1) {
3329 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3330 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3331 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3332 } else
3333 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3334
3335 ETHER_NEXT_MULTI(step, enm);
3336 }
3337
3338 ifp->if_flags &= ~IFF_ALLMULTI;
3339 goto setit;
3340
3341 allmulti:
3342 ifp->if_flags |= IFF_ALLMULTI;
3343 sc->sc_rctl |= RCTL_MPE;
3344
3345 setit:
3346 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3347 }
3348
3349 /* Reset and init related */
3350
3351 static void
3352 wm_set_vlan(struct wm_softc *sc)
3353 {
3354 /* Deal with VLAN enables. */
3355 if (VLAN_ATTACHED(&sc->sc_ethercom))
3356 sc->sc_ctrl |= CTRL_VME;
3357 else
3358 sc->sc_ctrl &= ~CTRL_VME;
3359
3360 /* Write the control registers. */
3361 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3362 }
3363
3364 static void
3365 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3366 {
3367 uint32_t gcr;
3368 pcireg_t ctrl2;
3369
3370 gcr = CSR_READ(sc, WMREG_GCR);
3371
3372 /* Only take action if the timeout value is still at its default of 0 */
3373 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3374 goto out;
3375
3376 if ((gcr & GCR_CAP_VER2) == 0) {
3377 gcr |= GCR_CMPL_TMOUT_10MS;
3378 goto out;
3379 }
3380
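/*
 * On capability-version-2 parts the timeout comes from the
 * standard PCIe Device Control 2 register rather than GCR,
 * so program a 16ms completion timeout there instead.
 */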
3381 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3382 sc->sc_pcixe_capoff + PCIE_DCSR2);
3383 ctrl2 |= WM_PCIE_DCSR2_16MS;
3384 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3385 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3386
3387 out:
3388 /* Disable completion timeout resend */
3389 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3390
3391 CSR_WRITE(sc, WMREG_GCR, gcr);
3392 }
3393
3394 void
3395 wm_get_auto_rd_done(struct wm_softc *sc)
3396 {
3397 int i;
3398
3399 /* wait for eeprom to reload */
3400 switch (sc->sc_type) {
3401 case WM_T_82571:
3402 case WM_T_82572:
3403 case WM_T_82573:
3404 case WM_T_82574:
3405 case WM_T_82583:
3406 case WM_T_82575:
3407 case WM_T_82576:
3408 case WM_T_82580:
3409 case WM_T_I350:
3410 case WM_T_I354:
3411 case WM_T_I210:
3412 case WM_T_I211:
3413 case WM_T_80003:
3414 case WM_T_ICH8:
3415 case WM_T_ICH9:
3416 for (i = 0; i < 10; i++) {
3417 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3418 break;
3419 delay(1000);
3420 }
3421 if (i == 10) {
3422 log(LOG_ERR, "%s: auto read from eeprom failed to "
3423 "complete\n", device_xname(sc->sc_dev));
3424 }
3425 break;
3426 default:
3427 break;
3428 }
3429 }
3430
3431 void
3432 wm_lan_init_done(struct wm_softc *sc)
3433 {
3434 uint32_t reg = 0;
3435 int i;
3436
3437 /* wait for eeprom to reload */
3438 switch (sc->sc_type) {
3439 case WM_T_ICH10:
3440 case WM_T_PCH:
3441 case WM_T_PCH2:
3442 case WM_T_PCH_LPT:
3443 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3444 reg = CSR_READ(sc, WMREG_STATUS);
3445 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3446 break;
3447 delay(100);
3448 }
3449 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3450 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3451 "complete\n", device_xname(sc->sc_dev), __func__);
3452 }
3453 break;
3454 default:
3455 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3456 __func__);
3457 break;
3458 }
3459
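/* Clear the bit so the next LAN init can be detected. */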
3460 reg &= ~STATUS_LAN_INIT_DONE;
3461 CSR_WRITE(sc, WMREG_STATUS, reg);
3462 }
3463
3464 void
3465 wm_get_cfg_done(struct wm_softc *sc)
3466 {
3467 int mask;
3468 uint32_t reg;
3469 int i;
3470
3471 /* wait for eeprom to reload */
3472 switch (sc->sc_type) {
3473 case WM_T_82542_2_0:
3474 case WM_T_82542_2_1:
3475 /* null */
3476 break;
3477 case WM_T_82543:
3478 case WM_T_82544:
3479 case WM_T_82540:
3480 case WM_T_82545:
3481 case WM_T_82545_3:
3482 case WM_T_82546:
3483 case WM_T_82546_3:
3484 case WM_T_82541:
3485 case WM_T_82541_2:
3486 case WM_T_82547:
3487 case WM_T_82547_2:
3488 case WM_T_82573:
3489 case WM_T_82574:
3490 case WM_T_82583:
3491 /* generic */
3492 delay(10*1000);
3493 break;
3494 case WM_T_80003:
3495 case WM_T_82571:
3496 case WM_T_82572:
3497 case WM_T_82575:
3498 case WM_T_82576:
3499 case WM_T_82580:
3500 case WM_T_I350:
3501 case WM_T_I354:
3502 case WM_T_I210:
3503 case WM_T_I211:
3504 if (sc->sc_type == WM_T_82571) {
3505 			/* On the 82571, all functions share CFGDONE_0 */
3506 mask = EEMNGCTL_CFGDONE_0;
3507 } else
3508 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3509 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3510 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3511 break;
3512 delay(1000);
3513 }
3514 if (i >= WM_PHY_CFG_TIMEOUT) {
3515 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3516 device_xname(sc->sc_dev), __func__));
3517 }
3518 break;
3519 case WM_T_ICH8:
3520 case WM_T_ICH9:
3521 case WM_T_ICH10:
3522 case WM_T_PCH:
3523 case WM_T_PCH2:
3524 case WM_T_PCH_LPT:
3525 delay(10*1000);
3526 if (sc->sc_type >= WM_T_ICH10)
3527 wm_lan_init_done(sc);
3528 else
3529 wm_get_auto_rd_done(sc);
3530
3531 reg = CSR_READ(sc, WMREG_STATUS);
3532 if ((reg & STATUS_PHYRA) != 0)
3533 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3534 break;
3535 default:
3536 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3537 __func__);
3538 break;
3539 }
3540 }
3541
3542 /* Init hardware bits */
3543 void
3544 wm_initialize_hardware_bits(struct wm_softc *sc)
3545 {
3546 uint32_t tarc0, tarc1, reg;
3547
3548 /* For 82571 variant, 80003 and ICHs */
3549 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3550 || (sc->sc_type >= WM_T_80003)) {
3551
3552 /* Transmit Descriptor Control 0 */
3553 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3554 reg |= TXDCTL_COUNT_DESC;
3555 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3556
3557 /* Transmit Descriptor Control 1 */
3558 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3559 reg |= TXDCTL_COUNT_DESC;
3560 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3561
3562 /* TARC0 */
3563 tarc0 = CSR_READ(sc, WMREG_TARC0);
3564 switch (sc->sc_type) {
3565 case WM_T_82571:
3566 case WM_T_82572:
3567 case WM_T_82573:
3568 case WM_T_82574:
3569 case WM_T_82583:
3570 case WM_T_80003:
3571 /* Clear bits 30..27 */
3572 tarc0 &= ~__BITS(30, 27);
3573 break;
3574 default:
3575 break;
3576 }
3577
3578 switch (sc->sc_type) {
3579 case WM_T_82571:
3580 case WM_T_82572:
3581 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3582
3583 tarc1 = CSR_READ(sc, WMREG_TARC1);
3584 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3585 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3586 /* 8257[12] Errata No.7 */
3587 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
3588
3589 /* TARC1 bit 28 */
3590 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3591 tarc1 &= ~__BIT(28);
3592 else
3593 tarc1 |= __BIT(28);
3594 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3595
3596 /*
3597 * 8257[12] Errata No.13
3598 			 * Disable Dynamic Clock Gating.
3599 */
3600 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3601 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3602 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3603 break;
3604 case WM_T_82573:
3605 case WM_T_82574:
3606 case WM_T_82583:
3607 if ((sc->sc_type == WM_T_82574)
3608 || (sc->sc_type == WM_T_82583))
3609 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3610
3611 /* Extended Device Control */
3612 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3613 reg &= ~__BIT(23); /* Clear bit 23 */
3614 reg |= __BIT(22); /* Set bit 22 */
3615 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3616
3617 /* Device Control */
3618 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3619 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3620
3621 /* PCIe Control Register */
3622 if ((sc->sc_type == WM_T_82574)
3623 || (sc->sc_type == WM_T_82583)) {
3624 /*
3625 * Document says this bit must be set for
3626 * proper operation.
3627 */
3628 reg = CSR_READ(sc, WMREG_GCR);
3629 reg |= __BIT(22);
3630 CSR_WRITE(sc, WMREG_GCR, reg);
3631
3632 /*
3633 				 * Apply a workaround for the hardware erratum
3634 				 * documented in the errata sheets: some error
3635 				 * prone or unreliable PCIe completions can
3636 				 * occur, particularly with ASPM enabled.
3637 				 * Without the fix, the issue can cause Tx
3638 				 * timeouts.
3639 */
3640 reg = CSR_READ(sc, WMREG_GCR2);
3641 reg |= __BIT(0);
3642 CSR_WRITE(sc, WMREG_GCR2, reg);
3643 }
3644 break;
3645 case WM_T_80003:
3646 /* TARC0 */
3647 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3648 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3649 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3650
3651 /* TARC1 bit 28 */
3652 tarc1 = CSR_READ(sc, WMREG_TARC1);
3653 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3654 tarc1 &= ~__BIT(28);
3655 else
3656 tarc1 |= __BIT(28);
3657 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3658 break;
3659 case WM_T_ICH8:
3660 case WM_T_ICH9:
3661 case WM_T_ICH10:
3662 case WM_T_PCH:
3663 case WM_T_PCH2:
3664 case WM_T_PCH_LPT:
3665 /* TARC 0 */
3666 if (sc->sc_type == WM_T_ICH8) {
3667 /* Set TARC0 bits 29 and 28 */
3668 tarc0 |= __BITS(29, 28);
3669 }
3670 /* Set TARC0 bits 23,24,26,27 */
3671 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3672
3673 /* CTRL_EXT */
3674 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3675 reg |= __BIT(22); /* Set bit 22 */
3676 /*
3677 * Enable PHY low-power state when MAC is at D3
3678 * w/o WoL
3679 */
3680 if (sc->sc_type >= WM_T_PCH)
3681 reg |= CTRL_EXT_PHYPDEN;
3682 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3683
3684 /* TARC1 */
3685 tarc1 = CSR_READ(sc, WMREG_TARC1);
3686 /* bit 28 */
3687 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3688 tarc1 &= ~__BIT(28);
3689 else
3690 tarc1 |= __BIT(28);
3691 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3692 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3693
3694 /* Device Status */
3695 if (sc->sc_type == WM_T_ICH8) {
3696 reg = CSR_READ(sc, WMREG_STATUS);
3697 reg &= ~__BIT(31);
3698 CSR_WRITE(sc, WMREG_STATUS, reg);
3699
3700 }
3701
3702 /*
3703 * Work-around descriptor data corruption issue during
3704 * NFS v2 UDP traffic, just disable the NFS filtering
3705 * capability.
3706 */
3707 reg = CSR_READ(sc, WMREG_RFCTL);
3708 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3709 CSR_WRITE(sc, WMREG_RFCTL, reg);
3710 break;
3711 default:
3712 break;
3713 }
3714 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3715
3716 /*
3717 * 8257[12] Errata No.52 and some others.
3718 * Avoid RSS Hash Value bug.
3719 */
3720 switch (sc->sc_type) {
3721 case WM_T_82571:
3722 case WM_T_82572:
3723 case WM_T_82573:
3724 case WM_T_80003:
3725 case WM_T_ICH8:
3726 reg = CSR_READ(sc, WMREG_RFCTL);
3727 reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3728 CSR_WRITE(sc, WMREG_RFCTL, reg);
3729 break;
3730 default:
3731 break;
3732 }
3733 }
3734 }
3735
3736 static uint32_t
3737 wm_rxpbs_adjust_82580(uint32_t val)
3738 {
3739 uint32_t rv = 0;
3740
3741 if (val < __arraycount(wm_82580_rxpbs_table))
3742 rv = wm_82580_rxpbs_table[val];
3743
3744 return rv;
3745 }
3746
3747 /*
3748 * wm_reset:
3749 *
3750 * Reset the i82542 chip.
3751 */
3752 static void
3753 wm_reset(struct wm_softc *sc)
3754 {
3755 int phy_reset = 0;
3756 int error = 0;
3757 uint32_t reg, mask;
3758
3759 /*
3760 * Allocate on-chip memory according to the MTU size.
3761 * The Packet Buffer Allocation register must be written
3762 * before the chip is reset.
3763 */
3764 switch (sc->sc_type) {
3765 case WM_T_82547:
3766 case WM_T_82547_2:
3767 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3768 PBA_22K : PBA_30K;
3769 sc->sc_txfifo_head = 0;
3770 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3771 sc->sc_txfifo_size =
3772 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3773 sc->sc_txfifo_stall = 0;
3774 break;
3775 case WM_T_82571:
3776 case WM_T_82572:
3777 	case WM_T_82575: /* XXX need special handling for jumbo frames */
3778 case WM_T_80003:
3779 sc->sc_pba = PBA_32K;
3780 break;
3781 case WM_T_82573:
3782 sc->sc_pba = PBA_12K;
3783 break;
3784 case WM_T_82574:
3785 case WM_T_82583:
3786 sc->sc_pba = PBA_20K;
3787 break;
3788 case WM_T_82576:
3789 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3790 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3791 break;
3792 case WM_T_82580:
3793 case WM_T_I350:
3794 case WM_T_I354:
3795 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3796 break;
3797 case WM_T_I210:
3798 case WM_T_I211:
3799 sc->sc_pba = PBA_34K;
3800 break;
3801 case WM_T_ICH8:
3802 /* Workaround for a bit corruption issue in FIFO memory */
3803 sc->sc_pba = PBA_8K;
3804 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3805 break;
3806 case WM_T_ICH9:
3807 case WM_T_ICH10:
3808 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3809 PBA_14K : PBA_10K;
3810 break;
3811 case WM_T_PCH:
3812 case WM_T_PCH2:
3813 case WM_T_PCH_LPT:
3814 sc->sc_pba = PBA_26K;
3815 break;
3816 default:
3817 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3818 PBA_40K : PBA_48K;
3819 break;
3820 }
3821 /*
3822 	 * Only old or non-multiqueue devices have the PBA register.
3823 * XXX Need special handling for 82575.
3824 */
3825 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3826 || (sc->sc_type == WM_T_82575))
3827 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
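	/*
	 * Illustrative note (not driver logic): on the 82547, the packet
	 * buffer is partitioned between Rx and Tx.  Assuming the PBA_*
	 * constants are in kilobyte units and PBA_BYTE_SHIFT converts
	 * kilobytes to bytes, the default-MTU case above works out to:
	 *
	 *	sc_pba         = 30 (Rx gets 30KB of the 40KB buffer)
	 *	sc_txfifo_size = (40 - 30) << PBA_BYTE_SHIFT = 10KB for Tx
	 *
	 * With jumbo frames (MTU > 8192), Rx shrinks to 22KB and the Tx
	 * FIFO grows to 18KB.
	 */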
3828
3829 /* Prevent the PCI-E bus from sticking */
3830 if (sc->sc_flags & WM_F_PCIE) {
3831 int timeout = 800;
3832
3833 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3834 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3835
3836 while (timeout--) {
3837 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3838 == 0)
3839 break;
3840 delay(100);
3841 }
3842 }
3843
3844 /* Set the completion timeout for interface */
3845 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3846 || (sc->sc_type == WM_T_82580)
3847 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3848 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3849 wm_set_pcie_completion_timeout(sc);
3850
3851 /* Clear interrupt */
3852 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3853 if (sc->sc_nintrs > 1) {
3854 if (sc->sc_type != WM_T_82574) {
3855 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3856 CSR_WRITE(sc, WMREG_EIAC, 0);
3857 } else {
3858 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3859 }
3860 }
3861
3862 /* Stop the transmit and receive processes. */
3863 CSR_WRITE(sc, WMREG_RCTL, 0);
3864 sc->sc_rctl &= ~RCTL_EN;
3865 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3866 CSR_WRITE_FLUSH(sc);
3867
3868 /* XXX set_tbi_sbp_82543() */
3869
3870 delay(10*1000);
3871
3872 /* Must acquire the MDIO ownership before MAC reset */
3873 switch (sc->sc_type) {
3874 case WM_T_82573:
3875 case WM_T_82574:
3876 case WM_T_82583:
3877 error = wm_get_hw_semaphore_82573(sc);
3878 break;
3879 default:
3880 break;
3881 }
3882
3883 /*
3884 * 82541 Errata 29? & 82547 Errata 28?
3885 * See also the description about PHY_RST bit in CTRL register
3886 * in 8254x_GBe_SDM.pdf.
3887 */
3888 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3889 CSR_WRITE(sc, WMREG_CTRL,
3890 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3891 CSR_WRITE_FLUSH(sc);
3892 delay(5000);
3893 }
3894
3895 switch (sc->sc_type) {
3896 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3897 case WM_T_82541:
3898 case WM_T_82541_2:
3899 case WM_T_82547:
3900 case WM_T_82547_2:
3901 /*
3902 * On some chipsets, a reset through a memory-mapped write
3903 * cycle can cause the chip to reset before completing the
3904 		 * write cycle. This causes major headaches that can be
3905 * avoided by issuing the reset via indirect register writes
3906 * through I/O space.
3907 *
3908 * So, if we successfully mapped the I/O BAR at attach time,
3909 * use that. Otherwise, try our luck with a memory-mapped
3910 * reset.
3911 */
3912 if (sc->sc_flags & WM_F_IOH_VALID)
3913 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3914 else
3915 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3916 break;
3917 case WM_T_82545_3:
3918 case WM_T_82546_3:
3919 /* Use the shadow control register on these chips. */
3920 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3921 break;
3922 case WM_T_80003:
3923 mask = swfwphysem[sc->sc_funcid];
3924 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3925 wm_get_swfw_semaphore(sc, mask);
3926 CSR_WRITE(sc, WMREG_CTRL, reg);
3927 wm_put_swfw_semaphore(sc, mask);
3928 break;
3929 case WM_T_ICH8:
3930 case WM_T_ICH9:
3931 case WM_T_ICH10:
3932 case WM_T_PCH:
3933 case WM_T_PCH2:
3934 case WM_T_PCH_LPT:
3935 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3936 if (wm_check_reset_block(sc) == 0) {
3937 /*
3938 * Gate automatic PHY configuration by hardware on
3939 * non-managed 82579
3940 */
3941 if ((sc->sc_type == WM_T_PCH2)
3942 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3943 != 0))
3944 wm_gate_hw_phy_config_ich8lan(sc, 1);
3945
3946
3947 reg |= CTRL_PHY_RESET;
3948 phy_reset = 1;
3949 }
3950 wm_get_swfwhw_semaphore(sc);
3951 CSR_WRITE(sc, WMREG_CTRL, reg);
3952 		/* Don't insert a completion barrier when resetting */
3953 delay(20*1000);
3954 wm_put_swfwhw_semaphore(sc);
3955 break;
3956 case WM_T_82580:
3957 case WM_T_I350:
3958 case WM_T_I354:
3959 case WM_T_I210:
3960 case WM_T_I211:
3961 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3962 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3963 CSR_WRITE_FLUSH(sc);
3964 delay(5000);
3965 break;
3966 case WM_T_82542_2_0:
3967 case WM_T_82542_2_1:
3968 case WM_T_82543:
3969 case WM_T_82540:
3970 case WM_T_82545:
3971 case WM_T_82546:
3972 case WM_T_82571:
3973 case WM_T_82572:
3974 case WM_T_82573:
3975 case WM_T_82574:
3976 case WM_T_82575:
3977 case WM_T_82576:
3978 case WM_T_82583:
3979 default:
3980 /* Everything else can safely use the documented method. */
3981 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3982 break;
3983 }
3984
3985 /* Must release the MDIO ownership after MAC reset */
3986 switch (sc->sc_type) {
3987 case WM_T_82573:
3988 case WM_T_82574:
3989 case WM_T_82583:
3990 if (error == 0)
3991 wm_put_hw_semaphore_82573(sc);
3992 break;
3993 default:
3994 break;
3995 }
3996
3997 if (phy_reset != 0)
3998 wm_get_cfg_done(sc);
3999
4000 /* reload EEPROM */
4001 switch (sc->sc_type) {
4002 case WM_T_82542_2_0:
4003 case WM_T_82542_2_1:
4004 case WM_T_82543:
4005 case WM_T_82544:
4006 delay(10);
4007 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4008 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4009 CSR_WRITE_FLUSH(sc);
4010 delay(2000);
4011 break;
4012 case WM_T_82540:
4013 case WM_T_82545:
4014 case WM_T_82545_3:
4015 case WM_T_82546:
4016 case WM_T_82546_3:
4017 delay(5*1000);
4018 /* XXX Disable HW ARPs on ASF enabled adapters */
4019 break;
4020 case WM_T_82541:
4021 case WM_T_82541_2:
4022 case WM_T_82547:
4023 case WM_T_82547_2:
4024 delay(20000);
4025 /* XXX Disable HW ARPs on ASF enabled adapters */
4026 break;
4027 case WM_T_82571:
4028 case WM_T_82572:
4029 case WM_T_82573:
4030 case WM_T_82574:
4031 case WM_T_82583:
4032 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4033 delay(10);
4034 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4035 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4036 CSR_WRITE_FLUSH(sc);
4037 }
4038 /* check EECD_EE_AUTORD */
4039 wm_get_auto_rd_done(sc);
4040 /*
4041 		 * PHY configuration from NVM starts only after
4042 		 * EECD_AUTO_RD is set.
4043 */
4044 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4045 || (sc->sc_type == WM_T_82583))
4046 delay(25*1000);
4047 break;
4048 case WM_T_82575:
4049 case WM_T_82576:
4050 case WM_T_82580:
4051 case WM_T_I350:
4052 case WM_T_I354:
4053 case WM_T_I210:
4054 case WM_T_I211:
4055 case WM_T_80003:
4056 /* check EECD_EE_AUTORD */
4057 wm_get_auto_rd_done(sc);
4058 break;
4059 case WM_T_ICH8:
4060 case WM_T_ICH9:
4061 case WM_T_ICH10:
4062 case WM_T_PCH:
4063 case WM_T_PCH2:
4064 case WM_T_PCH_LPT:
4065 break;
4066 default:
4067 panic("%s: unknown type\n", __func__);
4068 }
4069
4070 /* Check whether EEPROM is present or not */
4071 switch (sc->sc_type) {
4072 case WM_T_82575:
4073 case WM_T_82576:
4074 case WM_T_82580:
4075 case WM_T_I350:
4076 case WM_T_I354:
4077 case WM_T_ICH8:
4078 case WM_T_ICH9:
4079 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4080 /* Not found */
4081 sc->sc_flags |= WM_F_EEPROM_INVALID;
4082 if (sc->sc_type == WM_T_82575)
4083 wm_reset_init_script_82575(sc);
4084 }
4085 break;
4086 default:
4087 break;
4088 }
4089
4090 if ((sc->sc_type == WM_T_82580)
4091 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4092 /* clear global device reset status bit */
4093 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4094 }
4095
4096 /* Clear any pending interrupt events. */
4097 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4098 reg = CSR_READ(sc, WMREG_ICR);
4099 if (sc->sc_nintrs > 1) {
4100 if (sc->sc_type != WM_T_82574) {
4101 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4102 CSR_WRITE(sc, WMREG_EIAC, 0);
4103 } else
4104 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4105 }
4106
4107 /* reload sc_ctrl */
4108 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4109
4110 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4111 wm_set_eee_i350(sc);
4112
4113 /* dummy read from WUC */
4114 if (sc->sc_type == WM_T_PCH)
4115 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4116 /*
4117 * For PCH, this write will make sure that any noise will be detected
4118 * as a CRC error and be dropped rather than show up as a bad packet
4119 	 * to the DMA engine.
4120 */
4121 if (sc->sc_type == WM_T_PCH)
4122 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4123
4124 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4125 CSR_WRITE(sc, WMREG_WUC, 0);
4126
4127 wm_reset_mdicnfg_82580(sc);
4128
4129 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4130 wm_pll_workaround_i210(sc);
4131 }
4132
4133 /*
4134 * wm_add_rxbuf:
4135 *
4136  *	Add a receive buffer to the indicated descriptor.
4137 */
4138 static int
4139 wm_add_rxbuf(struct wm_softc *sc, int idx)
4140 {
4141 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4142 struct mbuf *m;
4143 int error;
4144
4145 KASSERT(WM_RX_LOCKED(sc));
4146
4147 MGETHDR(m, M_DONTWAIT, MT_DATA);
4148 if (m == NULL)
4149 return ENOBUFS;
4150
4151 MCLGET(m, M_DONTWAIT);
4152 if ((m->m_flags & M_EXT) == 0) {
4153 m_freem(m);
4154 return ENOBUFS;
4155 }
4156
4157 if (rxs->rxs_mbuf != NULL)
4158 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4159
4160 rxs->rxs_mbuf = m;
4161
4162 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4163 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4164 BUS_DMA_READ|BUS_DMA_NOWAIT);
4165 if (error) {
4166 /* XXX XXX XXX */
4167 aprint_error_dev(sc->sc_dev,
4168 "unable to load rx DMA map %d, error = %d\n",
4169 idx, error);
4170 panic("wm_add_rxbuf");
4171 }
4172
4173 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4174 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4175
4176 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4177 if ((sc->sc_rctl & RCTL_EN) != 0)
4178 WM_INIT_RXDESC(sc, idx);
4179 } else
4180 WM_INIT_RXDESC(sc, idx);
4181
4182 return 0;
4183 }
4184
4185 /*
4186 * wm_rxdrain:
4187 *
4188 * Drain the receive queue.
4189 */
4190 static void
4191 wm_rxdrain(struct wm_softc *sc)
4192 {
4193 struct wm_rxsoft *rxs;
4194 int i;
4195
4196 KASSERT(WM_RX_LOCKED(sc));
4197
4198 for (i = 0; i < WM_NRXDESC; i++) {
4199 rxs = &sc->sc_rxsoft[i];
4200 if (rxs->rxs_mbuf != NULL) {
4201 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4202 m_freem(rxs->rxs_mbuf);
4203 rxs->rxs_mbuf = NULL;
4204 }
4205 }
4206 }
4207
4208 /*
4209 * wm_init: [ifnet interface function]
4210 *
4211 * Initialize the interface.
4212 */
4213 static int
4214 wm_init(struct ifnet *ifp)
4215 {
4216 struct wm_softc *sc = ifp->if_softc;
4217 int ret;
4218
4219 WM_BOTH_LOCK(sc);
4220 ret = wm_init_locked(ifp);
4221 WM_BOTH_UNLOCK(sc);
4222
4223 return ret;
4224 }
4225
4226 static int
4227 wm_init_locked(struct ifnet *ifp)
4228 {
4229 struct wm_softc *sc = ifp->if_softc;
4230 struct wm_rxsoft *rxs;
4231 int i, j, trynum, error = 0;
4232 uint32_t reg;
4233
4234 KASSERT(WM_BOTH_LOCKED(sc));
4235 /*
4236 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4237 	 * There is a small but measurable benefit to avoiding the adjustment
4238 	 * of the descriptor so that the headers are aligned, for normal MTU,
4239 * on such platforms. One possibility is that the DMA itself is
4240 * slightly more efficient if the front of the entire packet (instead
4241 * of the front of the headers) is aligned.
4242 *
4243 * Note we must always set align_tweak to 0 if we are using
4244 * jumbo frames.
4245 */
4246 #ifdef __NO_STRICT_ALIGNMENT
4247 sc->sc_align_tweak = 0;
4248 #else
4249 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4250 sc->sc_align_tweak = 0;
4251 else
4252 sc->sc_align_tweak = 2;
4253 #endif /* __NO_STRICT_ALIGNMENT */
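	/*
	 * Illustrative note: ETHER_HDR_LEN is 14, so without the tweak a
	 * packet received at the start of a buffer leaves the IP header
	 * only 2-byte aligned.  Offsetting the buffer by sc_align_tweak = 2
	 * pushes the IP header to offset 16, a 4-byte boundary, which
	 * strict-alignment platforms require.  The tweak must be 0 when a
	 * full-sized frame would no longer fit in the cluster.
	 */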
4254
4255 /* Cancel any pending I/O. */
4256 wm_stop_locked(ifp, 0);
4257
4258 /* update statistics before reset */
4259 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4260 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4261
4262 /* Reset the chip to a known state. */
4263 wm_reset(sc);
4264
4265 switch (sc->sc_type) {
4266 case WM_T_82571:
4267 case WM_T_82572:
4268 case WM_T_82573:
4269 case WM_T_82574:
4270 case WM_T_82583:
4271 case WM_T_80003:
4272 case WM_T_ICH8:
4273 case WM_T_ICH9:
4274 case WM_T_ICH10:
4275 case WM_T_PCH:
4276 case WM_T_PCH2:
4277 case WM_T_PCH_LPT:
4278 if (wm_check_mng_mode(sc) != 0)
4279 wm_get_hw_control(sc);
4280 break;
4281 default:
4282 break;
4283 }
4284
4285 /* Init hardware bits */
4286 wm_initialize_hardware_bits(sc);
4287
4288 /* Reset the PHY. */
4289 if (sc->sc_flags & WM_F_HAS_MII)
4290 wm_gmii_reset(sc);
4291
4292 /* Calculate (E)ITR value */
4293 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4294 sc->sc_itr = 450; /* For EITR */
4295 } else if (sc->sc_type >= WM_T_82543) {
4296 /*
4297 		 * Set up the interrupt throttling register (units of 256ns).
4298 		 * Note that a footnote in Intel's documentation says this
4299 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4300 		 * or 10Mbit mode. Empirically, the same appears to hold
4301 		 * for the 1024ns units of the other interrupt-related timer
4302 		 * registers -- so, really, we ought to divide this value
4303 		 * by 4 when the link speed is low.
4304 *
4305 * XXX implement this division at link speed change!
4306 */
4307
4308 /*
4309 * For N interrupts/sec, set this value to:
4310 * 1000000000 / (N * 256). Note that we set the
4311 * absolute and packet timer values to this value
4312 * divided by 4 to get "simple timer" behavior.
4313 */
4314
4315 sc->sc_itr = 1500; /* 2604 ints/sec */
4316 }
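	/*
	 * A minimal sketch (not part of the driver) of how the ITR value
	 * above follows from a target interrupt rate, per the formula in
	 * the comment: value = 1000000000 / (N * 256).  The helper name
	 * wm_itr_from_rate is hypothetical.
	 *
	 *	static inline uint32_t
	 *	wm_itr_from_rate(uint32_t ints_per_sec)
	 *	{
	 *
	 *		// 256ns units per the ITR register definition
	 *		return 1000000000U / (ints_per_sec * 256);
	 *	}
	 *
	 * E.g. wm_itr_from_rate(2604) ~= 1500, matching sc_itr above.
	 */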
4317
4318 /* Initialize the transmit descriptor ring. */
4319 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4320 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4321 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4322 sc->sc_txfree = WM_NTXDESC(sc);
4323 sc->sc_txnext = 0;
4324
4325 if (sc->sc_type < WM_T_82543) {
4326 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4327 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4328 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4329 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4330 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4331 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4332 } else {
4333 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4334 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4335 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4336 CSR_WRITE(sc, WMREG_TDH, 0);
4337
4338 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4339 /*
4340 * Don't write TDT before TCTL.EN is set.
4341 * See the document.
4342 */
4343 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
4344 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4345 | TXDCTL_WTHRESH(0));
4346 else {
4347 /* ITR / 4 */
4348 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
4349 if (sc->sc_type >= WM_T_82540) {
4350 /* should be same */
4351 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
4352 }
4353
4354 CSR_WRITE(sc, WMREG_TDT, 0);
4355 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
4356 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4357 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4358 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4359 }
4360 }
4361
4362 /* Initialize the transmit job descriptors. */
4363 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4364 sc->sc_txsoft[i].txs_mbuf = NULL;
4365 sc->sc_txsfree = WM_TXQUEUELEN(sc);
4366 sc->sc_txsnext = 0;
4367 sc->sc_txsdirty = 0;
4368
4369 /*
4370 * Initialize the receive descriptor and receive job
4371 * descriptor rings.
4372 */
4373 if (sc->sc_type < WM_T_82543) {
4374 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4375 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4376 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4377 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4378 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4379 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4380
4381 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4382 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4383 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4384 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4385 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4386 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4387 } else {
4388 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4389 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4390 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4391
4392 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4393 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4394 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4395 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4396 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4397 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4398 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4399 | RXDCTL_WTHRESH(1));
4400 } else {
4401 CSR_WRITE(sc, WMREG_RDH, 0);
4402 CSR_WRITE(sc, WMREG_RDT, 0);
4403 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4404 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
4405 }
4406 }
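	/*
	 * Illustrative note: SRRCTL_BSIZEPKT is expressed in units of
	 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, hence the panic above when
	 * MCLBYTES is not a multiple of that unit.  Assuming the shift is
	 * 10 (1KB units), the usual MCLBYTES = 2048 programs a field
	 * value of 2.
	 */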
4407 for (i = 0; i < WM_NRXDESC; i++) {
4408 rxs = &sc->sc_rxsoft[i];
4409 if (rxs->rxs_mbuf == NULL) {
4410 if ((error = wm_add_rxbuf(sc, i)) != 0) {
4411 log(LOG_ERR, "%s: unable to allocate or map "
4412 "rx buffer %d, error = %d\n",
4413 device_xname(sc->sc_dev), i, error);
4414 /*
4415 * XXX Should attempt to run with fewer receive
4416 * XXX buffers instead of just failing.
4417 */
4418 wm_rxdrain(sc);
4419 goto out;
4420 }
4421 } else {
4422 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4423 WM_INIT_RXDESC(sc, i);
4424 /*
4425 			 * For 82575 and newer devices, the RX descriptors
4426 			 * must be initialized after RCTL.EN is set in
4427 			 * wm_set_filter().
4428 */
4429 }
4430 }
4431 sc->sc_rxptr = 0;
4432 sc->sc_rxdiscard = 0;
4433 WM_RXCHAIN_RESET(sc);
4434
4435 /*
4436 * Clear out the VLAN table -- we don't use it (yet).
4437 */
4438 CSR_WRITE(sc, WMREG_VET, 0);
4439 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4440 trynum = 10; /* Due to hw errata */
4441 else
4442 trynum = 1;
4443 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4444 for (j = 0; j < trynum; j++)
4445 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4446
4447 /*
4448 * Set up flow-control parameters.
4449 *
4450 * XXX Values could probably stand some tuning.
4451 */
4452 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4453 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4454 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4455 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4456 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4457 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4458 }
4459
4460 sc->sc_fcrtl = FCRTL_DFLT;
4461 if (sc->sc_type < WM_T_82543) {
4462 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4463 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4464 } else {
4465 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4466 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4467 }
4468
4469 if (sc->sc_type == WM_T_80003)
4470 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4471 else
4472 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4473
4474 /* Writes the control register. */
4475 wm_set_vlan(sc);
4476
4477 if (sc->sc_flags & WM_F_HAS_MII) {
4478 int val;
4479
4480 switch (sc->sc_type) {
4481 case WM_T_80003:
4482 case WM_T_ICH8:
4483 case WM_T_ICH9:
4484 case WM_T_ICH10:
4485 case WM_T_PCH:
4486 case WM_T_PCH2:
4487 case WM_T_PCH_LPT:
4488 /*
4489 			 * Set the MAC to wait the maximum time between each
4490 			 * iteration and to increase the max iterations when
4491 			 * polling the PHY; this fixes erroneous timeouts at
4492 			 * 10Mbps.
4493 */
4494 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4495 0xFFFF);
4496 val = wm_kmrn_readreg(sc,
4497 KUMCTRLSTA_OFFSET_INB_PARAM);
4498 val |= 0x3F;
4499 wm_kmrn_writereg(sc,
4500 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4501 break;
4502 default:
4503 break;
4504 }
4505
4506 if (sc->sc_type == WM_T_80003) {
4507 val = CSR_READ(sc, WMREG_CTRL_EXT);
4508 val &= ~CTRL_EXT_LINK_MODE_MASK;
4509 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4510
4511 			/* Bypass RX and TX FIFOs */
4512 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4513 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4514 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4515 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4516 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4517 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4518 }
4519 }
4520 #if 0
4521 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4522 #endif
4523
4524 /* Set up checksum offload parameters. */
4525 reg = CSR_READ(sc, WMREG_RXCSUM);
4526 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4527 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4528 reg |= RXCSUM_IPOFL;
4529 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4530 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4531 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4532 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4533 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4534
4535 /* Set up MSI-X */
4536 if (sc->sc_nintrs > 1) {
4537 uint32_t ivar;
4538
4539 if (sc->sc_type == WM_T_82575) {
4540 /* Interrupt control */
4541 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4542 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4543 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4544
4545 /* TX */
4546 CSR_WRITE(sc, WMREG_MSIXBM(WM_TX_INTR_INDEX),
4547 EITR_TX_QUEUE0);
4548 /* RX */
4549 CSR_WRITE(sc, WMREG_MSIXBM(WM_RX_INTR_INDEX),
4550 EITR_RX_QUEUE0);
4551 /* Link status */
4552 CSR_WRITE(sc, WMREG_MSIXBM(WM_LINK_INTR_INDEX),
4553 EITR_OTHER);
4554 } else if (sc->sc_type == WM_T_82574) {
4555 /* Interrupt control */
4556 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4557 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4558 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4559
4560 /* TX, RX and Link status */
4561 ivar = __SHIFTIN((IVAR_VALID_82574 | WM_TX_INTR_INDEX),
4562 IVAR_TX_MASK_Q_82574(0));
4563 ivar |= __SHIFTIN((IVAR_VALID_82574 |WM_RX_INTR_INDEX),
4564 IVAR_RX_MASK_Q_82574(0));
4565 ivar |=__SHIFTIN((IVAR_VALID_82574|WM_LINK_INTR_INDEX),
4566 IVAR_OTHER_MASK);
4567 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4568 } else {
4569 /* Interrupt control */
4570 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
4571 | GPIE_MULTI_MSIX | GPIE_EIAME
4572 | GPIE_PBA);
4573
4574 switch (sc->sc_type) {
4575 case WM_T_82580:
4576 case WM_T_I350:
4577 case WM_T_I354:
4578 case WM_T_I210:
4579 case WM_T_I211:
4580 /* TX */
4581 ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
4582 ivar &= ~IVAR_TX_MASK_Q(0);
4583 ivar |= __SHIFTIN(
4584 (WM_TX_INTR_INDEX | IVAR_VALID),
4585 IVAR_TX_MASK_Q(0));
4586 CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
4587
4588 /* RX */
4589 ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
4590 ivar &= ~IVAR_RX_MASK_Q(0);
4591 ivar |= __SHIFTIN(
4592 (WM_RX_INTR_INDEX | IVAR_VALID),
4593 IVAR_RX_MASK_Q(0));
4594 CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
4595 break;
4596 case WM_T_82576:
4597 /* TX */
4598 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
4599 ivar &= ~IVAR_TX_MASK_Q_82576(0);
4600 ivar |= __SHIFTIN(
4601 (WM_TX_INTR_INDEX | IVAR_VALID),
4602 IVAR_TX_MASK_Q_82576(0));
4603 CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
4604
4605 /* RX */
4606 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
4607 ivar &= ~IVAR_RX_MASK_Q_82576(0);
4608 ivar |= __SHIFTIN(
4609 (WM_RX_INTR_INDEX | IVAR_VALID),
4610 IVAR_RX_MASK_Q_82576(0));
4611 CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
4612 break;
4613 default:
4614 break;
4615 }
4616
4617 /* Link status */
4618 ivar = __SHIFTIN((WM_LINK_INTR_INDEX | IVAR_VALID),
4619 IVAR_MISC_OTHER);
4620 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4621 }
4622 }
4623
4624 /* Set up the interrupt registers. */
4625 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4626 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4627 ICR_RXO | ICR_RXT0;
4628 if (sc->sc_nintrs > 1) {
4629 uint32_t mask;
4630 switch (sc->sc_type) {
4631 case WM_T_82574:
4632 CSR_WRITE(sc, WMREG_EIAC_82574,
4633 WMREG_EIAC_82574_MSIX_MASK);
4634 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4635 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4636 break;
4637 default:
4638 if (sc->sc_type == WM_T_82575)
4639 				mask = EITR_RX_QUEUE0 | EITR_TX_QUEUE0
4640 | EITR_OTHER;
4641 else
4642 mask = (1 << WM_RX_INTR_INDEX)
4643 | (1 << WM_TX_INTR_INDEX)
4644 | (1 << WM_LINK_INTR_INDEX);
4645 CSR_WRITE(sc, WMREG_EIAC, mask);
4646 CSR_WRITE(sc, WMREG_EIAM, mask);
4647 CSR_WRITE(sc, WMREG_EIMS, mask);
4648 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4649 break;
4650 }
4651 } else
4652 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4653
4654 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4655 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4656 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4657 reg = CSR_READ(sc, WMREG_KABGTXD);
4658 reg |= KABGTXD_BGSQLBIAS;
4659 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4660 }
4661
4662 /* Set up the inter-packet gap. */
4663 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4664
4665 if (sc->sc_type >= WM_T_82543) {
4666 /*
4667 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
4668 * the multi queue function with MSI-X.
4669 */
4670 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4671 CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
4672 else
4673 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4674 }
4675
4676 /* Set the VLAN ethernetype. */
4677 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4678
4679 /*
4680 * Set up the transmit control register; we start out with
4681 	 * a collision distance suitable for FDX, but update it when
4682 * we resolve the media type.
4683 */
4684 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4685 | TCTL_CT(TX_COLLISION_THRESHOLD)
4686 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4687 if (sc->sc_type >= WM_T_82571)
4688 sc->sc_tctl |= TCTL_MULR;
4689 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4690
4691 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4692 /* Write TDT after TCTL.EN is set. See the document. */
4693 CSR_WRITE(sc, WMREG_TDT, 0);
4694 }
4695
4696 if (sc->sc_type == WM_T_80003) {
4697 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4698 reg &= ~TCTL_EXT_GCEX_MASK;
4699 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4700 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4701 }
4702
4703 /* Set the media. */
4704 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4705 goto out;
4706
4707 /* Configure for OS presence */
4708 wm_init_manageability(sc);
4709
4710 /*
4711 * Set up the receive control register; we actually program
4712 * the register when we set the receive filter. Use multicast
4713 * address offset type 0.
4714 *
4715 * Only the i82544 has the ability to strip the incoming
4716 * CRC, so we don't enable that feature.
4717 */
4718 sc->sc_mchash_type = 0;
4719 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4720 | RCTL_MO(sc->sc_mchash_type);
4721
4722 /*
4723 * The I350 has a bug where it always strips the CRC whether
4724 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
4725 */
4726 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4727 || (sc->sc_type == WM_T_I210))
4728 sc->sc_rctl |= RCTL_SECRC;
4729
4730 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4731 && (ifp->if_mtu > ETHERMTU)) {
4732 sc->sc_rctl |= RCTL_LPE;
4733 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4734 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4735 }
4736
4737 if (MCLBYTES == 2048) {
4738 sc->sc_rctl |= RCTL_2k;
4739 } else {
4740 if (sc->sc_type >= WM_T_82543) {
4741 switch (MCLBYTES) {
4742 case 4096:
4743 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4744 break;
4745 case 8192:
4746 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4747 break;
4748 case 16384:
4749 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4750 break;
4751 default:
4752 panic("wm_init: MCLBYTES %d unsupported",
4753 MCLBYTES);
4754 break;
4755 }
4756 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4757 }
4758
4759 /* Set the receive filter. */
4760 wm_set_filter(sc);
4761
4762 /* Enable ECC */
4763 switch (sc->sc_type) {
4764 case WM_T_82571:
4765 reg = CSR_READ(sc, WMREG_PBA_ECC);
4766 reg |= PBA_ECC_CORR_EN;
4767 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4768 break;
4769 case WM_T_PCH_LPT:
4770 reg = CSR_READ(sc, WMREG_PBECCSTS);
4771 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4772 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4773
4774 reg = CSR_READ(sc, WMREG_CTRL);
4775 reg |= CTRL_MEHE;
4776 CSR_WRITE(sc, WMREG_CTRL, reg);
4777 break;
4778 default:
4779 break;
4780 }
4781
4782 	/* On 82575 and later, set RDT only if RX is enabled */
4783 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4784 for (i = 0; i < WM_NRXDESC; i++)
4785 WM_INIT_RXDESC(sc, i);
4786
4787 sc->sc_stopping = false;
4788
4789 /* Start the one second link check clock. */
4790 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4791
4792 /* ...all done! */
4793 ifp->if_flags |= IFF_RUNNING;
4794 ifp->if_flags &= ~IFF_OACTIVE;
4795
4796 out:
4797 sc->sc_if_flags = ifp->if_flags;
4798 if (error)
4799 log(LOG_ERR, "%s: interface not running\n",
4800 device_xname(sc->sc_dev));
4801 return error;
4802 }
4803
4804 /*
4805 * wm_stop: [ifnet interface function]
4806 *
4807 * Stop transmission on the interface.
4808 */
4809 static void
4810 wm_stop(struct ifnet *ifp, int disable)
4811 {
4812 struct wm_softc *sc = ifp->if_softc;
4813
4814 WM_BOTH_LOCK(sc);
4815 wm_stop_locked(ifp, disable);
4816 WM_BOTH_UNLOCK(sc);
4817 }
4818
4819 static void
4820 wm_stop_locked(struct ifnet *ifp, int disable)
4821 {
4822 struct wm_softc *sc = ifp->if_softc;
4823 struct wm_txsoft *txs;
4824 int i;
4825
4826 KASSERT(WM_BOTH_LOCKED(sc));
4827
4828 sc->sc_stopping = true;
4829
4830 /* Stop the one second clock. */
4831 callout_stop(&sc->sc_tick_ch);
4832
4833 /* Stop the 82547 Tx FIFO stall check timer. */
4834 if (sc->sc_type == WM_T_82547)
4835 callout_stop(&sc->sc_txfifo_ch);
4836
4837 if (sc->sc_flags & WM_F_HAS_MII) {
4838 /* Down the MII. */
4839 mii_down(&sc->sc_mii);
4840 } else {
4841 #if 0
4842 /* Should we clear PHY's status properly? */
4843 wm_reset(sc);
4844 #endif
4845 }
4846
4847 /* Stop the transmit and receive processes. */
4848 CSR_WRITE(sc, WMREG_TCTL, 0);
4849 CSR_WRITE(sc, WMREG_RCTL, 0);
4850 sc->sc_rctl &= ~RCTL_EN;
4851
4852 /*
4853 * Clear the interrupt mask to ensure the device cannot assert its
4854 * interrupt line.
4855 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
4856 * service any currently pending or shared interrupt.
4857 */
4858 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4859 sc->sc_icr = 0;
4860 if (sc->sc_nintrs > 1) {
4861 if (sc->sc_type != WM_T_82574) {
4862 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4863 CSR_WRITE(sc, WMREG_EIAC, 0);
4864 } else
4865 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4866 }
4867
4868 /* Release any queued transmit buffers. */
4869 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4870 txs = &sc->sc_txsoft[i];
4871 if (txs->txs_mbuf != NULL) {
4872 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4873 m_freem(txs->txs_mbuf);
4874 txs->txs_mbuf = NULL;
4875 }
4876 }
4877
4878 /* Mark the interface as down and cancel the watchdog timer. */
4879 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4880 ifp->if_timer = 0;
4881
4882 if (disable)
4883 wm_rxdrain(sc);
4884
4885 #if 0 /* notyet */
4886 if (sc->sc_type >= WM_T_82544)
4887 CSR_WRITE(sc, WMREG_WUC, 0);
4888 #endif
4889 }
4890
4891 /*
4892 * wm_tx_offload:
4893 *
4894 * Set up TCP/IP checksumming parameters for the
4895 * specified packet.
4896 */
4897 static int
4898 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4899 uint8_t *fieldsp)
4900 {
4901 struct mbuf *m0 = txs->txs_mbuf;
4902 struct livengood_tcpip_ctxdesc *t;
4903 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4904 uint32_t ipcse;
4905 struct ether_header *eh;
4906 int offset, iphl;
4907 uint8_t fields;
4908
4909 /*
4910 * XXX It would be nice if the mbuf pkthdr had offset
4911 * fields for the protocol headers.
4912 */
4913
4914 eh = mtod(m0, struct ether_header *);
4915 switch (htons(eh->ether_type)) {
4916 case ETHERTYPE_IP:
4917 case ETHERTYPE_IPV6:
4918 offset = ETHER_HDR_LEN;
4919 break;
4920
4921 case ETHERTYPE_VLAN:
4922 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4923 break;
4924
4925 default:
4926 /*
4927 * Don't support this protocol or encapsulation.
4928 */
4929 *fieldsp = 0;
4930 *cmdp = 0;
4931 return 0;
4932 }
4933
4934 if ((m0->m_pkthdr.csum_flags &
4935 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4936 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4937 } else {
4938 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4939 }
4940 ipcse = offset + iphl - 1;
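	/*
	 * Illustrative note: for an untagged IPv4 packet with no IP
	 * options, offset = ETHER_HDR_LEN = 14 and iphl = 20, so
	 * ipcse = 14 + 20 - 1 = 33, the offset of the last byte covered
	 * by the IP header checksum.
	 */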
4941
4942 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4943 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4944 seg = 0;
4945 fields = 0;
4946
4947 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4948 int hlen = offset + iphl;
4949 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4950
4951 if (__predict_false(m0->m_len <
4952 (hlen + sizeof(struct tcphdr)))) {
4953 /*
4954 * TCP/IP headers are not in the first mbuf; we need
4955 * to do this the slow and painful way. Let's just
4956 * hope this doesn't happen very often.
4957 */
4958 struct tcphdr th;
4959
4960 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4961
4962 m_copydata(m0, hlen, sizeof(th), &th);
4963 if (v4) {
4964 struct ip ip;
4965
4966 m_copydata(m0, offset, sizeof(ip), &ip);
4967 ip.ip_len = 0;
4968 m_copyback(m0,
4969 offset + offsetof(struct ip, ip_len),
4970 sizeof(ip.ip_len), &ip.ip_len);
4971 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4972 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4973 } else {
4974 struct ip6_hdr ip6;
4975
4976 m_copydata(m0, offset, sizeof(ip6), &ip6);
4977 ip6.ip6_plen = 0;
4978 m_copyback(m0,
4979 offset + offsetof(struct ip6_hdr, ip6_plen),
4980 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4981 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4982 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4983 }
4984 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4985 sizeof(th.th_sum), &th.th_sum);
4986
4987 hlen += th.th_off << 2;
4988 } else {
4989 /*
4990 * TCP/IP headers are in the first mbuf; we can do
4991 * this the easy way.
4992 */
4993 struct tcphdr *th;
4994
4995 if (v4) {
4996 struct ip *ip =
4997 (void *)(mtod(m0, char *) + offset);
4998 th = (void *)(mtod(m0, char *) + hlen);
4999
5000 ip->ip_len = 0;
5001 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5002 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5003 } else {
5004 struct ip6_hdr *ip6 =
5005 (void *)(mtod(m0, char *) + offset);
5006 th = (void *)(mtod(m0, char *) + hlen);
5007
5008 ip6->ip6_plen = 0;
5009 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5010 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5011 }
5012 hlen += th->th_off << 2;
5013 }
5014
5015 if (v4) {
5016 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5017 cmdlen |= WTX_TCPIP_CMD_IP;
5018 } else {
5019 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5020 ipcse = 0;
5021 }
5022 cmd |= WTX_TCPIP_CMD_TSE;
5023 cmdlen |= WTX_TCPIP_CMD_TSE |
5024 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
5025 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
5026 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
5027 }
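		/*
		 * Illustrative note: for a standard TCPv4 segment with no
		 * options, hlen = 14 (Ethernet) + 20 (IP) + 20 (TCP) = 54,
		 * so with an MSS of 1460 the context descriptor carries
		 * WTX_TCPIP_SEG_HDRLEN(54) | WTX_TCPIP_SEG_MSS(1460).
		 */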
5028
5029 /*
5030 * NOTE: Even if we're not using the IP or TCP/UDP checksum
5031 * offload feature, if we load the context descriptor, we
5032 * MUST provide valid values for IPCSS and TUCSS fields.
5033 */
5034
5035 ipcs = WTX_TCPIP_IPCSS(offset) |
5036 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
5037 WTX_TCPIP_IPCSE(ipcse);
5038 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
5039 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
5040 fields |= WTX_IXSM;
5041 }
5042
5043 offset += iphl;
5044
5045 if (m0->m_pkthdr.csum_flags &
5046 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
5047 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5048 fields |= WTX_TXSM;
5049 tucs = WTX_TCPIP_TUCSS(offset) |
5050 WTX_TCPIP_TUCSO(offset +
5051 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
5052 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5053 } else if ((m0->m_pkthdr.csum_flags &
5054 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
5055 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5056 fields |= WTX_TXSM;
5057 tucs = WTX_TCPIP_TUCSS(offset) |
5058 WTX_TCPIP_TUCSO(offset +
5059 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
5060 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5061 } else {
5062 /* Just initialize it to a valid TCP context. */
5063 tucs = WTX_TCPIP_TUCSS(offset) |
5064 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
5065 WTX_TCPIP_TUCSE(0) /* rest of packet */;
5066 }
5067
5068 /* Fill in the context descriptor. */
5069 t = (struct livengood_tcpip_ctxdesc *)
5070 &sc->sc_txdescs[sc->sc_txnext];
5071 t->tcpip_ipcs = htole32(ipcs);
5072 t->tcpip_tucs = htole32(tucs);
5073 t->tcpip_cmdlen = htole32(cmdlen);
5074 t->tcpip_seg = htole32(seg);
5075 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5076
5077 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5078 txs->txs_ndesc++;
5079
5080 *cmdp = cmd;
5081 *fieldsp = fields;
5082
5083 return 0;
5084 }
5085
5086 static void
5087 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5088 {
5089 struct mbuf *m;
5090 int i;
5091
5092 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5093 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5094 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5095 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5096 m->m_data, m->m_len, m->m_flags);
5097 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5098 i, i == 1 ? "" : "s");
5099 }
5100
5101 /*
5102 * wm_82547_txfifo_stall:
5103 *
5104 * Callout used to wait for the 82547 Tx FIFO to drain,
5105 * reset the FIFO pointers, and restart packet transmission.
5106 */
5107 static void
5108 wm_82547_txfifo_stall(void *arg)
5109 {
5110 struct wm_softc *sc = arg;
5111 #ifndef WM_MPSAFE
5112 int s;
5113
5114 s = splnet();
5115 #endif
5116 WM_TX_LOCK(sc);
5117
5118 if (sc->sc_stopping)
5119 goto out;
5120
5121 if (sc->sc_txfifo_stall) {
5122 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
5123 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5124 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5125 /*
5126 * Packets have drained. Stop transmitter, reset
5127 * FIFO pointers, restart transmitter, and kick
5128 * the packet queue.
5129 */
5130 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5131 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5132 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
5133 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
5134 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
5135 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
5136 CSR_WRITE(sc, WMREG_TCTL, tctl);
5137 CSR_WRITE_FLUSH(sc);
5138
5139 sc->sc_txfifo_head = 0;
5140 sc->sc_txfifo_stall = 0;
5141 wm_start_locked(&sc->sc_ethercom.ec_if);
5142 } else {
5143 /*
5144 * Still waiting for packets to drain; try again in
5145 * another tick.
5146 */
5147 callout_schedule(&sc->sc_txfifo_ch, 1);
5148 }
5149 }
5150
5151 out:
5152 WM_TX_UNLOCK(sc);
5153 #ifndef WM_MPSAFE
5154 splx(s);
5155 #endif
5156 }
5157
5158 /*
5159 * wm_82547_txfifo_bugchk:
5160 *
5161 * Check for bug condition in the 82547 Tx FIFO. We need to
5162 * prevent enqueueing a packet that would wrap around the end
5163  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
5164 *
5165 * We do this by checking the amount of space before the end
5166 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5167 * the Tx FIFO, wait for all remaining packets to drain, reset
5168 * the internal FIFO pointers to the beginning, and restart
5169 * transmission on the interface.
5170 */
5171 #define WM_FIFO_HDR 0x10
5172 #define WM_82547_PAD_LEN 0x3e0
5173 static int
5174 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5175 {
5176 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
5177 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5178
5179 /* Just return if already stalled. */
5180 if (sc->sc_txfifo_stall)
5181 return 1;
5182
5183 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5184 /* Stall only occurs in half-duplex mode. */
5185 goto send_packet;
5186 }
5187
5188 if (len >= WM_82547_PAD_LEN + space) {
5189 sc->sc_txfifo_stall = 1;
5190 callout_schedule(&sc->sc_txfifo_ch, 1);
5191 return 1;
5192 }
5193
5194 send_packet:
5195 sc->sc_txfifo_head += len;
5196 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
5197 sc->sc_txfifo_head -= sc->sc_txfifo_size;
5198
5199 return 0;
5200 }
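/*
 * Illustrative note: for a full-sized 1514-byte Ethernet frame,
 * len = roundup(1514 + 0x10, 0x10) = 1536 bytes are charged against
 * the FIFO, and a stall is declared once len >= WM_82547_PAD_LEN
 * (0x3e0 = 992) plus the space remaining before the end of the FIFO.
 */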
5201
5202 /*
5203 * wm_start: [ifnet interface function]
5204 *
5205 * Start packet transmission on the interface.
5206 */
5207 static void
5208 wm_start(struct ifnet *ifp)
5209 {
5210 struct wm_softc *sc = ifp->if_softc;
5211
5212 WM_TX_LOCK(sc);
5213 if (!sc->sc_stopping)
5214 wm_start_locked(ifp);
5215 WM_TX_UNLOCK(sc);
5216 }
5217
5218 static void
5219 wm_start_locked(struct ifnet *ifp)
5220 {
5221 struct wm_softc *sc = ifp->if_softc;
5222 struct mbuf *m0;
5223 struct m_tag *mtag;
5224 struct wm_txsoft *txs;
5225 bus_dmamap_t dmamap;
5226 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
5227 bus_addr_t curaddr;
5228 bus_size_t seglen, curlen;
5229 uint32_t cksumcmd;
5230 uint8_t cksumfields;
5231
5232 KASSERT(WM_TX_LOCKED(sc));
5233
5234 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5235 return;
5236
5237 /* Remember the previous number of free descriptors. */
5238 ofree = sc->sc_txfree;
5239
5240 /*
5241 * Loop through the send queue, setting up transmit descriptors
5242 * until we drain the queue, or use up all available transmit
5243 * descriptors.
5244 */
5245 for (;;) {
5246 m0 = NULL;
5247
5248 /* Get a work queue entry. */
5249 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5250 wm_txeof(sc);
5251 if (sc->sc_txsfree == 0) {
5252 DPRINTF(WM_DEBUG_TX,
5253 ("%s: TX: no free job descriptors\n",
5254 device_xname(sc->sc_dev)));
5255 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5256 break;
5257 }
5258 }
5259
5260 /* Grab a packet off the queue. */
5261 IFQ_DEQUEUE(&ifp->if_snd, m0);
5262 if (m0 == NULL)
5263 break;
5264
5265 DPRINTF(WM_DEBUG_TX,
5266 ("%s: TX: have packet to transmit: %p\n",
5267 device_xname(sc->sc_dev), m0));
5268
5269 txs = &sc->sc_txsoft[sc->sc_txsnext];
5270 dmamap = txs->txs_dmamap;
5271
5272 use_tso = (m0->m_pkthdr.csum_flags &
5273 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
5274
5275 /*
5276 * So says the Linux driver:
5277 * The controller does a simple calculation to make sure
5278 * there is enough room in the FIFO before initiating the
5279 * DMA for each buffer. The calc is:
5280 * 4 = ceil(buffer len / MSS)
5281 * To make sure we don't overrun the FIFO, adjust the max
5282 * buffer len if the MSS drops.
5283 */
5284 dmamap->dm_maxsegsz =
5285 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
5286 ? m0->m_pkthdr.segsz << 2
5287 : WTX_MAX_LEN;
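		/*
		 * Illustrative note: capping each DMA segment at 4 * MSS
		 * keeps ceil(buffer len / MSS) <= 4, the bound the
		 * controller assumes.  E.g. with an MSS of 1448 the cap
		 * is 1448 << 2 = 5792 bytes.
		 */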
5288
5289 /*
5290 * Load the DMA map. If this fails, the packet either
5291 * didn't fit in the allotted number of segments, or we
5292 * were short on resources. For the too-many-segments
5293 * case, we simply report an error and drop the packet,
5294 * since we can't sanely copy a jumbo packet to a single
5295 * buffer.
5296 */
5297 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5298 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5299 if (error) {
5300 if (error == EFBIG) {
5301 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5302 log(LOG_ERR, "%s: Tx packet consumes too many "
5303 "DMA segments, dropping...\n",
5304 device_xname(sc->sc_dev));
5305 wm_dump_mbuf_chain(sc, m0);
5306 m_freem(m0);
5307 continue;
5308 }
5309 /* Short on resources, just stop for now. */
5310 DPRINTF(WM_DEBUG_TX,
5311 ("%s: TX: dmamap load failed: %d\n",
5312 device_xname(sc->sc_dev), error));
5313 break;
5314 }
5315
5316 segs_needed = dmamap->dm_nsegs;
5317 if (use_tso) {
5318 /* For sentinel descriptor; see below. */
5319 segs_needed++;
5320 }
5321
5322 /*
5323 * Ensure we have enough descriptors free to describe
5324 * the packet. Note, we always reserve one descriptor
5325 * at the end of the ring due to the semantics of the
5326 * TDT register, plus one more in the event we need
5327 * to load offload context.
5328 */
5329 if (segs_needed > sc->sc_txfree - 2) {
5330 /*
5331 * Not enough free descriptors to transmit this
5332 * packet. We haven't committed anything yet,
5333 * so just unload the DMA map, put the packet
5334 			 * back on the queue, and punt. Notify the upper
5335 * layer that there are no more slots left.
5336 */
5337 DPRINTF(WM_DEBUG_TX,
5338 ("%s: TX: need %d (%d) descriptors, have %d\n",
5339 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5340 segs_needed, sc->sc_txfree - 1));
5341 ifp->if_flags |= IFF_OACTIVE;
5342 bus_dmamap_unload(sc->sc_dmat, dmamap);
5343 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5344 break;
5345 }
5346
5347 /*
5348 * Check for 82547 Tx FIFO bug. We need to do this
5349 * once we know we can transmit the packet, since we
5350 * do some internal FIFO space accounting here.
5351 */
5352 if (sc->sc_type == WM_T_82547 &&
5353 wm_82547_txfifo_bugchk(sc, m0)) {
5354 DPRINTF(WM_DEBUG_TX,
5355 ("%s: TX: 82547 Tx FIFO bug detected\n",
5356 device_xname(sc->sc_dev)));
5357 ifp->if_flags |= IFF_OACTIVE;
5358 bus_dmamap_unload(sc->sc_dmat, dmamap);
5359 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
5360 break;
5361 }
5362
5363 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5364
5365 DPRINTF(WM_DEBUG_TX,
5366 ("%s: TX: packet has %d (%d) DMA segments\n",
5367 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5368
5369 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5370
5371 /*
5372 * Store a pointer to the packet so that we can free it
5373 * later.
5374 *
5375 		 * Initially, we take the number of descriptors the
5376 		 * packet uses to be the number of DMA segments. This may be
5377 * incremented by 1 if we do checksum offload (a descriptor
5378 * is used to set the checksum context).
5379 */
5380 txs->txs_mbuf = m0;
5381 txs->txs_firstdesc = sc->sc_txnext;
5382 txs->txs_ndesc = segs_needed;
5383
5384 /* Set up offload parameters for this packet. */
5385 if (m0->m_pkthdr.csum_flags &
5386 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5387 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5388 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5389 if (wm_tx_offload(sc, txs, &cksumcmd,
5390 &cksumfields) != 0) {
5391 /* Error message already displayed. */
5392 bus_dmamap_unload(sc->sc_dmat, dmamap);
5393 continue;
5394 }
5395 } else {
5396 cksumcmd = 0;
5397 cksumfields = 0;
5398 }
5399
5400 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
5401
5402 /* Sync the DMA map. */
5403 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5404 BUS_DMASYNC_PREWRITE);
5405
5406 /* Initialize the transmit descriptor. */
5407 for (nexttx = sc->sc_txnext, seg = 0;
5408 seg < dmamap->dm_nsegs; seg++) {
5409 for (seglen = dmamap->dm_segs[seg].ds_len,
5410 curaddr = dmamap->dm_segs[seg].ds_addr;
5411 seglen != 0;
5412 curaddr += curlen, seglen -= curlen,
5413 nexttx = WM_NEXTTX(sc, nexttx)) {
5414 curlen = seglen;
5415
5416 /*
5417 * So says the Linux driver:
5418 				 * Workaround for premature descriptor
5419 * write-backs in TSO mode. Append a
5420 * 4-byte sentinel descriptor.
5421 */
5422 if (use_tso &&
5423 seg == dmamap->dm_nsegs - 1 &&
5424 curlen > 8)
5425 curlen -= 4;
5426
5427 wm_set_dma_addr(
5428 &sc->sc_txdescs[nexttx].wtx_addr,
5429 curaddr);
5430 sc->sc_txdescs[nexttx].wtx_cmdlen =
5431 htole32(cksumcmd | curlen);
5432 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
5433 0;
5434 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
5435 cksumfields;
5436 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5437 lasttx = nexttx;
5438
5439 DPRINTF(WM_DEBUG_TX,
5440 ("%s: TX: desc %d: low %#" PRIx64 ", "
5441 "len %#04zx\n",
5442 device_xname(sc->sc_dev), nexttx,
5443 (uint64_t)curaddr, curlen));
5444 }
5445 }
5446
5447 KASSERT(lasttx != -1);
5448
5449 /*
5450 * Set up the command byte on the last descriptor of
5451 * the packet. If we're in the interrupt delay window,
5452 * delay the interrupt.
5453 */
5454 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5455 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5456
5457 /*
5458 * If VLANs are enabled and the packet has a VLAN tag, set
5459 * up the descriptor to encapsulate the packet for us.
5460 *
5461 * This is only valid on the last descriptor of the packet.
5462 */
5463 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5464 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5465 htole32(WTX_CMD_VLE);
5466 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
5467 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5468 }
5469
5470 txs->txs_lastdesc = lasttx;
5471
5472 DPRINTF(WM_DEBUG_TX,
5473 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5474 device_xname(sc->sc_dev),
5475 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5476
5477 /* Sync the descriptors we're using. */
5478 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5479 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5480
5481 /* Give the packet to the chip. */
5482 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5483
5484 DPRINTF(WM_DEBUG_TX,
5485 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5486
5487 DPRINTF(WM_DEBUG_TX,
5488 ("%s: TX: finished transmitting packet, job %d\n",
5489 device_xname(sc->sc_dev), sc->sc_txsnext));
5490
5491 /* Advance the tx pointer. */
5492 sc->sc_txfree -= txs->txs_ndesc;
5493 sc->sc_txnext = nexttx;
5494
5495 sc->sc_txsfree--;
5496 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5497
5498 /* Pass the packet to any BPF listeners. */
5499 bpf_mtap(ifp, m0);
5500 }
5501
5502 if (m0 != NULL) {
5503 ifp->if_flags |= IFF_OACTIVE;
5504 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5506 m_freem(m0);
5507 }
5508
5509 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5510 /* No more slots; notify upper layer. */
5511 ifp->if_flags |= IFF_OACTIVE;
5512 }
5513
5514 if (sc->sc_txfree != ofree) {
5515 /* Set a watchdog timer in case the chip flakes out. */
5516 ifp->if_timer = 5;
5517 }
5518 }
5519
5520 /*
5521 * wm_nq_tx_offload:
5522 *
5523 * Set up TCP/IP checksumming parameters for the
5524 * specified packet, for NEWQUEUE devices
5525 */
5526 static int
5527 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
5528 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
5529 {
5530 struct mbuf *m0 = txs->txs_mbuf;
5531 struct m_tag *mtag;
5532 uint32_t vl_len, mssidx, cmdc;
5533 struct ether_header *eh;
5534 int offset, iphl;
5535
5536 /*
5537 * XXX It would be nice if the mbuf pkthdr had offset
5538 * fields for the protocol headers.
5539 */
5540 *cmdlenp = 0;
5541 *fieldsp = 0;
5542
5543 eh = mtod(m0, struct ether_header *);
	switch (ntohs(eh->ether_type)) {
5545 case ETHERTYPE_IP:
5546 case ETHERTYPE_IPV6:
5547 offset = ETHER_HDR_LEN;
5548 break;
5549
5550 case ETHERTYPE_VLAN:
5551 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5552 break;
5553
5554 default:
5555 /* Don't support this protocol or encapsulation. */
5556 *do_csum = false;
5557 return 0;
5558 }
5559 *do_csum = true;
5560 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
5561 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
5562
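	/*
	 * The advanced context descriptor packs the MAC header length,
	 * the IP header length and the VLAN tag into a single vl_len
	 * word; assemble it field by field below.
	 */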
5563 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
5564 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
5565
5566 if ((m0->m_pkthdr.csum_flags &
5567 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
5568 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5569 } else {
5570 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5571 }
5572 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
5573 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
5574
5575 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5576 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
5577 << NQTXC_VLLEN_VLAN_SHIFT);
5578 *cmdlenp |= NQTX_CMD_VLE;
5579 }
5580
5581 mssidx = 0;
5582
5583 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5584 int hlen = offset + iphl;
5585 int tcp_hlen;
5586 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5587
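		/*
		 * For TSO the hardware regenerates the IP length and the
		 * TCP checksum for every segment it emits, so zero the
		 * on-wire length field and seed th_sum with the
		 * pseudo-header checksum computed without the length.
		 */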
5588 if (__predict_false(m0->m_len <
5589 (hlen + sizeof(struct tcphdr)))) {
5590 /*
5591 * TCP/IP headers are not in the first mbuf; we need
5592 * to do this the slow and painful way. Let's just
5593 * hope this doesn't happen very often.
5594 */
5595 struct tcphdr th;
5596
5597 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5598
5599 m_copydata(m0, hlen, sizeof(th), &th);
5600 if (v4) {
5601 struct ip ip;
5602
5603 m_copydata(m0, offset, sizeof(ip), &ip);
5604 ip.ip_len = 0;
5605 m_copyback(m0,
5606 offset + offsetof(struct ip, ip_len),
5607 sizeof(ip.ip_len), &ip.ip_len);
5608 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5609 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5610 } else {
5611 struct ip6_hdr ip6;
5612
5613 m_copydata(m0, offset, sizeof(ip6), &ip6);
5614 ip6.ip6_plen = 0;
5615 m_copyback(m0,
5616 offset + offsetof(struct ip6_hdr, ip6_plen),
5617 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5618 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5619 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5620 }
5621 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5622 sizeof(th.th_sum), &th.th_sum);
5623
5624 tcp_hlen = th.th_off << 2;
5625 } else {
5626 /*
5627 * TCP/IP headers are in the first mbuf; we can do
5628 * this the easy way.
5629 */
5630 struct tcphdr *th;
5631
5632 if (v4) {
5633 struct ip *ip =
5634 (void *)(mtod(m0, char *) + offset);
5635 th = (void *)(mtod(m0, char *) + hlen);
5636
5637 ip->ip_len = 0;
5638 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5639 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5640 } else {
5641 struct ip6_hdr *ip6 =
5642 (void *)(mtod(m0, char *) + offset);
5643 th = (void *)(mtod(m0, char *) + hlen);
5644
5645 ip6->ip6_plen = 0;
5646 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5647 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5648 }
5649 tcp_hlen = th->th_off << 2;
5650 }
5651 hlen += tcp_hlen;
5652 *cmdlenp |= NQTX_CMD_TSE;
5653
5654 if (v4) {
5655 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5656 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5657 } else {
5658 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5659 *fieldsp |= NQTXD_FIELDS_TUXSM;
5660 }
5661 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5662 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5663 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5664 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5665 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5666 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
5667 } else {
5668 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5669 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5670 }
5671
5672 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5673 *fieldsp |= NQTXD_FIELDS_IXSM;
5674 cmdc |= NQTXC_CMD_IP4;
5675 }
5676
5677 if (m0->m_pkthdr.csum_flags &
5678 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5679 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5680 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5681 cmdc |= NQTXC_CMD_TCP;
5682 } else {
5683 cmdc |= NQTXC_CMD_UDP;
5684 }
5685 cmdc |= NQTXC_CMD_IP4;
5686 *fieldsp |= NQTXD_FIELDS_TUXSM;
5687 }
5688 if (m0->m_pkthdr.csum_flags &
5689 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5690 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5691 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5692 cmdc |= NQTXC_CMD_TCP;
5693 } else {
5694 cmdc |= NQTXC_CMD_UDP;
5695 }
5696 cmdc |= NQTXC_CMD_IP6;
5697 *fieldsp |= NQTXD_FIELDS_TUXSM;
5698 }
5699
5700 /* Fill in the context descriptor. */
5701 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5702 htole32(vl_len);
5703 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5704 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5705 htole32(cmdc);
5706 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5707 htole32(mssidx);
5708 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5709 DPRINTF(WM_DEBUG_TX,
5710 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5711 sc->sc_txnext, 0, vl_len));
5712 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5713 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5714 txs->txs_ndesc++;
5715 return 0;
5716 }
5717
5718 /*
5719 * wm_nq_start: [ifnet interface function]
5720 *
5721 * Start packet transmission on the interface for NEWQUEUE devices
5722 */
5723 static void
5724 wm_nq_start(struct ifnet *ifp)
5725 {
5726 struct wm_softc *sc = ifp->if_softc;
5727
5728 WM_TX_LOCK(sc);
5729 if (!sc->sc_stopping)
5730 wm_nq_start_locked(ifp);
5731 WM_TX_UNLOCK(sc);
5732 }
5733
5734 static void
5735 wm_nq_start_locked(struct ifnet *ifp)
5736 {
5737 struct wm_softc *sc = ifp->if_softc;
5738 struct mbuf *m0;
5739 struct m_tag *mtag;
5740 struct wm_txsoft *txs;
5741 bus_dmamap_t dmamap;
5742 int error, nexttx, lasttx = -1, seg, segs_needed;
5743 bool do_csum, sent;
5744
5745 KASSERT(WM_TX_LOCKED(sc));
5746
5747 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5748 return;
5749
5750 sent = false;
5751
5752 /*
5753 * Loop through the send queue, setting up transmit descriptors
5754 * until we drain the queue, or use up all available transmit
5755 * descriptors.
5756 */
5757 for (;;) {
5758 m0 = NULL;
5759
5760 /* Get a work queue entry. */
5761 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5762 wm_txeof(sc);
5763 if (sc->sc_txsfree == 0) {
5764 DPRINTF(WM_DEBUG_TX,
5765 ("%s: TX: no free job descriptors\n",
5766 device_xname(sc->sc_dev)));
5767 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5768 break;
5769 }
5770 }
5771
5772 /* Grab a packet off the queue. */
5773 IFQ_DEQUEUE(&ifp->if_snd, m0);
5774 if (m0 == NULL)
5775 break;
5776
5777 DPRINTF(WM_DEBUG_TX,
5778 ("%s: TX: have packet to transmit: %p\n",
5779 device_xname(sc->sc_dev), m0));
5780
5781 txs = &sc->sc_txsoft[sc->sc_txsnext];
5782 dmamap = txs->txs_dmamap;
5783
5784 /*
5785 * Load the DMA map. If this fails, the packet either
5786 * didn't fit in the allotted number of segments, or we
5787 * were short on resources. For the too-many-segments
5788 * case, we simply report an error and drop the packet,
5789 * since we can't sanely copy a jumbo packet to a single
5790 * buffer.
5791 */
5792 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5793 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5794 if (error) {
5795 if (error == EFBIG) {
5796 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5797 log(LOG_ERR, "%s: Tx packet consumes too many "
5798 "DMA segments, dropping...\n",
5799 device_xname(sc->sc_dev));
5800 wm_dump_mbuf_chain(sc, m0);
5801 m_freem(m0);
5802 continue;
5803 }
5804 /* Short on resources, just stop for now. */
5805 DPRINTF(WM_DEBUG_TX,
5806 ("%s: TX: dmamap load failed: %d\n",
5807 device_xname(sc->sc_dev), error));
5808 break;
5809 }
5810
5811 segs_needed = dmamap->dm_nsegs;
5812
5813 /*
5814 * Ensure we have enough descriptors free to describe
5815 * the packet. Note, we always reserve one descriptor
5816 * at the end of the ring due to the semantics of the
5817 * TDT register, plus one more in the event we need
5818 * to load offload context.
5819 */
5820 if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, drop the packet,
			 * and punt.  Notify the upper layer that there
			 * are no more slots left.
			 */
5828 DPRINTF(WM_DEBUG_TX,
5829 ("%s: TX: need %d (%d) descriptors, have %d\n",
5830 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5831 segs_needed, sc->sc_txfree - 1));
5832 ifp->if_flags |= IFF_OACTIVE;
5833 bus_dmamap_unload(sc->sc_dmat, dmamap);
5834 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5835 break;
5836 }
5837
5838 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5839
5840 DPRINTF(WM_DEBUG_TX,
5841 ("%s: TX: packet has %d (%d) DMA segments\n",
5842 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5843
5844 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5845
5846 /*
5847 * Store a pointer to the packet so that we can free it
5848 * later.
5849 *
5850 * Initially, we consider the number of descriptors the
5851 * packet uses the number of DMA segments. This may be
5852 * incremented by 1 if we do checksum offload (a descriptor
5853 * is used to set the checksum context).
5854 */
5855 txs->txs_mbuf = m0;
5856 txs->txs_firstdesc = sc->sc_txnext;
5857 txs->txs_ndesc = segs_needed;
5858
5859 /* Set up offload parameters for this packet. */
5860 uint32_t cmdlen, fields, dcmdlen;
5861 if (m0->m_pkthdr.csum_flags &
5862 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5863 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5864 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5865 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5866 &do_csum) != 0) {
5867 /* Error message already displayed. */
5868 bus_dmamap_unload(sc->sc_dmat, dmamap);
5869 continue;
5870 }
5871 } else {
5872 do_csum = false;
5873 cmdlen = 0;
5874 fields = 0;
5875 }
5876
5877 /* Sync the DMA map. */
5878 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5879 BUS_DMASYNC_PREWRITE);
5880
5881 /* Initialize the first transmit descriptor. */
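		/*
		 * If no offload context was set up, a legacy descriptor
		 * can describe the first segment; otherwise the advanced
		 * data descriptor format must be used so the checksum/TSO
		 * context programmed above is applied.
		 */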
5882 nexttx = sc->sc_txnext;
5883 if (!do_csum) {
5884 /* setup a legacy descriptor */
5885 wm_set_dma_addr(
5886 &sc->sc_txdescs[nexttx].wtx_addr,
5887 dmamap->dm_segs[0].ds_addr);
5888 sc->sc_txdescs[nexttx].wtx_cmdlen =
5889 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5890 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5891 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5892 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5893 NULL) {
5894 sc->sc_txdescs[nexttx].wtx_cmdlen |=
5895 htole32(WTX_CMD_VLE);
5896 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5897 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5898 } else {
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
5900 }
5901 dcmdlen = 0;
5902 } else {
5903 /* setup an advanced data descriptor */
5904 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5905 htole64(dmamap->dm_segs[0].ds_addr);
5906 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5907 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
5909 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5910 htole32(fields);
5911 DPRINTF(WM_DEBUG_TX,
5912 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5913 device_xname(sc->sc_dev), nexttx,
5914 (uint64_t)dmamap->dm_segs[0].ds_addr));
5915 DPRINTF(WM_DEBUG_TX,
5916 ("\t 0x%08x%08x\n", fields,
5917 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5918 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5919 }
5920
5921 lasttx = nexttx;
5922 nexttx = WM_NEXTTX(sc, nexttx);
		/*
		 * Fill in the remaining descriptors.  The legacy and
		 * advanced formats are identical here.
		 */
5927 for (seg = 1; seg < dmamap->dm_nsegs;
5928 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5929 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5930 htole64(dmamap->dm_segs[seg].ds_addr);
5931 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5932 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5933 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5934 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5935 lasttx = nexttx;
5936
5937 DPRINTF(WM_DEBUG_TX,
5938 ("%s: TX: desc %d: %#" PRIx64 ", "
5939 "len %#04zx\n",
5940 device_xname(sc->sc_dev), nexttx,
5941 (uint64_t)dmamap->dm_segs[seg].ds_addr,
5942 dmamap->dm_segs[seg].ds_len));
5943 }
5944
5945 KASSERT(lasttx != -1);
5946
5947 /*
5948 * Set up the command byte on the last descriptor of
5949 * the packet. If we're in the interrupt delay window,
5950 * delay the interrupt.
5951 */
5952 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5953 (NQTX_CMD_EOP | NQTX_CMD_RS));
5954 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5955 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5956
5957 txs->txs_lastdesc = lasttx;
5958
5959 DPRINTF(WM_DEBUG_TX,
5960 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5961 device_xname(sc->sc_dev),
5962 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5963
5964 /* Sync the descriptors we're using. */
5965 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5966 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5967
5968 /* Give the packet to the chip. */
5969 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5970 sent = true;
5971
5972 DPRINTF(WM_DEBUG_TX,
5973 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5974
5975 DPRINTF(WM_DEBUG_TX,
5976 ("%s: TX: finished transmitting packet, job %d\n",
5977 device_xname(sc->sc_dev), sc->sc_txsnext));
5978
5979 /* Advance the tx pointer. */
5980 sc->sc_txfree -= txs->txs_ndesc;
5981 sc->sc_txnext = nexttx;
5982
5983 sc->sc_txsfree--;
5984 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5985
5986 /* Pass the packet to any BPF listeners. */
5987 bpf_mtap(ifp, m0);
5988 }
5989
5990 if (m0 != NULL) {
5991 ifp->if_flags |= IFF_OACTIVE;
5992 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5994 m_freem(m0);
5995 }
5996
5997 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5998 /* No more slots; notify upper layer. */
5999 ifp->if_flags |= IFF_OACTIVE;
6000 }
6001
6002 if (sent) {
6003 /* Set a watchdog timer in case the chip flakes out. */
6004 ifp->if_timer = 5;
6005 }
6006 }
6007
6008 /* Interrupt */
6009
6010 /*
6011 * wm_txeof:
6012 *
6013 * Helper; handle transmit interrupts.
6014 */
6015 static int
6016 wm_txeof(struct wm_softc *sc)
6017 {
6018 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6019 struct wm_txsoft *txs;
6020 bool processed = false;
6021 int count = 0;
6022 int i;
6023 uint8_t status;
6024
6025 if (sc->sc_stopping)
6026 return 0;
6027
6028 ifp->if_flags &= ~IFF_OACTIVE;
6029
6030 /*
6031 * Go through the Tx list and free mbufs for those
6032 * frames which have been transmitted.
6033 */
6034 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
6035 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
6036 txs = &sc->sc_txsoft[i];
6037
6038 DPRINTF(WM_DEBUG_TX,
6039 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
6040
6041 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
6042 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6043
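		/*
		 * The hardware sets the DD (descriptor done) bit in the
		 * status byte of the last descriptor once the whole job
		 * has been transmitted.
		 */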
6044 status =
6045 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
6046 if ((status & WTX_ST_DD) == 0) {
6047 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
6048 BUS_DMASYNC_PREREAD);
6049 break;
6050 }
6051
6052 processed = true;
6053 count++;
6054 DPRINTF(WM_DEBUG_TX,
6055 ("%s: TX: job %d done: descs %d..%d\n",
6056 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
6057 txs->txs_lastdesc));
6058
6059 /*
6060 * XXX We should probably be using the statistics
6061 * XXX registers, but I don't know if they exist
6062 * XXX on chips before the i82544.
6063 */
6064
6065 #ifdef WM_EVENT_COUNTERS
6066 if (status & WTX_ST_TU)
6067 WM_EVCNT_INCR(&sc->sc_ev_tu);
6068 #endif /* WM_EVENT_COUNTERS */
6069
6070 if (status & (WTX_ST_EC|WTX_ST_LC)) {
6071 ifp->if_oerrors++;
6072 if (status & WTX_ST_LC)
6073 log(LOG_WARNING, "%s: late collision\n",
6074 device_xname(sc->sc_dev));
6075 else if (status & WTX_ST_EC) {
6076 ifp->if_collisions += 16;
6077 log(LOG_WARNING, "%s: excessive collisions\n",
6078 device_xname(sc->sc_dev));
6079 }
6080 } else
6081 ifp->if_opackets++;
6082
6083 sc->sc_txfree += txs->txs_ndesc;
6084 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
6085 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6086 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
6087 m_freem(txs->txs_mbuf);
6088 txs->txs_mbuf = NULL;
6089 }
6090
6091 /* Update the dirty transmit buffer pointer. */
6092 sc->sc_txsdirty = i;
6093 DPRINTF(WM_DEBUG_TX,
6094 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
6095
6096 if (count != 0)
6097 rnd_add_uint32(&sc->rnd_source, count);
6098
6099 /*
6100 * If there are no more pending transmissions, cancel the watchdog
6101 * timer.
6102 */
6103 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
6104 ifp->if_timer = 0;
6105
6106 return processed;
6107 }
6108
6109 /*
6110 * wm_rxeof:
6111 *
6112 * Helper; handle receive interrupts.
6113 */
6114 static void
6115 wm_rxeof(struct wm_softc *sc)
6116 {
6117 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6118 struct wm_rxsoft *rxs;
6119 struct mbuf *m;
6120 int i, len;
6121 int count = 0;
6122 uint8_t status, errors;
6123 uint16_t vlantag;
6124
6125 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
6126 rxs = &sc->sc_rxsoft[i];
6127
6128 DPRINTF(WM_DEBUG_RX,
6129 ("%s: RX: checking descriptor %d\n",
6130 device_xname(sc->sc_dev), i));
6131
6132 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6133
6134 status = sc->sc_rxdescs[i].wrx_status;
6135 errors = sc->sc_rxdescs[i].wrx_errors;
6136 len = le16toh(sc->sc_rxdescs[i].wrx_len);
6137 vlantag = sc->sc_rxdescs[i].wrx_special;
6138
6139 if ((status & WRX_ST_DD) == 0) {
6140 /* We have processed all of the receive descriptors. */
6141 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
6142 break;
6143 }
6144
6145 count++;
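		/*
		 * sc_rxdiscard is set when a buffer in the middle of a
		 * packet could not be replaced; keep recycling
		 * descriptors until the EOP descriptor clears the state.
		 */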
6146 if (__predict_false(sc->sc_rxdiscard)) {
6147 DPRINTF(WM_DEBUG_RX,
6148 ("%s: RX: discarding contents of descriptor %d\n",
6149 device_xname(sc->sc_dev), i));
6150 WM_INIT_RXDESC(sc, i);
6151 if (status & WRX_ST_EOP) {
6152 /* Reset our state. */
6153 DPRINTF(WM_DEBUG_RX,
6154 ("%s: RX: resetting rxdiscard -> 0\n",
6155 device_xname(sc->sc_dev)));
6156 sc->sc_rxdiscard = 0;
6157 }
6158 continue;
6159 }
6160
6161 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6162 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
6163
6164 m = rxs->rxs_mbuf;
6165
6166 /*
6167 * Add a new receive buffer to the ring, unless of
6168 * course the length is zero. Treat the latter as a
6169 * failed mapping.
6170 */
6171 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
6172 /*
6173 * Failed, throw away what we've done so
6174 * far, and discard the rest of the packet.
6175 */
6176 ifp->if_ierrors++;
6177 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6178 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
6179 WM_INIT_RXDESC(sc, i);
6180 if ((status & WRX_ST_EOP) == 0)
6181 sc->sc_rxdiscard = 1;
6182 if (sc->sc_rxhead != NULL)
6183 m_freem(sc->sc_rxhead);
6184 WM_RXCHAIN_RESET(sc);
6185 DPRINTF(WM_DEBUG_RX,
6186 ("%s: RX: Rx buffer allocation failed, "
6187 "dropping packet%s\n", device_xname(sc->sc_dev),
6188 sc->sc_rxdiscard ? " (discard)" : ""));
6189 continue;
6190 }
6191
6192 m->m_len = len;
6193 sc->sc_rxlen += len;
6194 DPRINTF(WM_DEBUG_RX,
6195 ("%s: RX: buffer at %p len %d\n",
6196 device_xname(sc->sc_dev), m->m_data, len));
6197
6198 /* If this is not the end of the packet, keep looking. */
6199 if ((status & WRX_ST_EOP) == 0) {
6200 WM_RXCHAIN_LINK(sc, m);
6201 DPRINTF(WM_DEBUG_RX,
6202 ("%s: RX: not yet EOP, rxlen -> %d\n",
6203 device_xname(sc->sc_dev), sc->sc_rxlen));
6204 continue;
6205 }
6206
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on I350, I354 and
		 * I21[01] (not all chips can be configured to strip it),
		 * so we normally need to trim it.  We may also need to
		 * adjust the length of the previous mbuf in the chain if
		 * the current mbuf is too short.  Due to an erratum, the
		 * RCTL_SECRC bit in the RCTL register is always set on
		 * I350, so the FCS is already stripped there and we
		 * don't trim it.
		 */
6217 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
6218 && (sc->sc_type != WM_T_I210)
6219 && (sc->sc_type != WM_T_I211)) {
6220 if (m->m_len < ETHER_CRC_LEN) {
6221 sc->sc_rxtail->m_len
6222 -= (ETHER_CRC_LEN - m->m_len);
6223 m->m_len = 0;
6224 } else
6225 m->m_len -= ETHER_CRC_LEN;
6226 len = sc->sc_rxlen - ETHER_CRC_LEN;
6227 } else
6228 len = sc->sc_rxlen;
6229
6230 WM_RXCHAIN_LINK(sc, m);
6231
6232 *sc->sc_rxtailp = NULL;
6233 m = sc->sc_rxhead;
6234
6235 WM_RXCHAIN_RESET(sc);
6236
6237 DPRINTF(WM_DEBUG_RX,
6238 ("%s: RX: have entire packet, len -> %d\n",
6239 device_xname(sc->sc_dev), len));
6240
6241 /* If an error occurred, update stats and drop the packet. */
6242 if (errors &
6243 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
6244 if (errors & WRX_ER_SE)
6245 log(LOG_WARNING, "%s: symbol error\n",
6246 device_xname(sc->sc_dev));
6247 else if (errors & WRX_ER_SEQ)
6248 log(LOG_WARNING, "%s: receive sequence error\n",
6249 device_xname(sc->sc_dev));
6250 else if (errors & WRX_ER_CE)
6251 log(LOG_WARNING, "%s: CRC error\n",
6252 device_xname(sc->sc_dev));
6253 m_freem(m);
6254 continue;
6255 }
6256
6257 /* No errors. Receive the packet. */
6258 m->m_pkthdr.rcvif = ifp;
6259 m->m_pkthdr.len = len;
6260
6261 /*
6262 * If VLANs are enabled, VLAN packets have been unwrapped
6263 * for us. Associate the tag with the packet.
6264 */
		/* XXX should check for I350 and I354 */
6266 if ((status & WRX_ST_VP) != 0) {
6267 VLAN_INPUT_TAG(ifp, m,
6268 le16toh(vlantag),
6269 continue);
6270 }
6271
6272 /* Set up checksum info for this packet. */
6273 if ((status & WRX_ST_IXSM) == 0) {
6274 if (status & WRX_ST_IPCS) {
6275 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
6276 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
6277 if (errors & WRX_ER_IPE)
6278 m->m_pkthdr.csum_flags |=
6279 M_CSUM_IPv4_BAD;
6280 }
6281 if (status & WRX_ST_TCPCS) {
6282 /*
6283 * Note: we don't know if this was TCP or UDP,
6284 * so we just set both bits, and expect the
6285 * upper layers to deal.
6286 */
6287 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
6288 m->m_pkthdr.csum_flags |=
6289 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6290 M_CSUM_TCPv6 | M_CSUM_UDPv6;
6291 if (errors & WRX_ER_TCPE)
6292 m->m_pkthdr.csum_flags |=
6293 M_CSUM_TCP_UDP_BAD;
6294 }
6295 }
6296
6297 ifp->if_ipackets++;
6298
6299 WM_RX_UNLOCK(sc);
6300
6301 /* Pass this up to any BPF listeners. */
6302 bpf_mtap(ifp, m);
6303
6304 /* Pass it on. */
6305 (*ifp->if_input)(ifp, m);
6306
6307 WM_RX_LOCK(sc);
6308
6309 if (sc->sc_stopping)
6310 break;
6311 }
6312
6313 /* Update the receive pointer. */
6314 sc->sc_rxptr = i;
6315 if (count != 0)
6316 rnd_add_uint32(&sc->rnd_source, count);
6317
6318 DPRINTF(WM_DEBUG_RX,
6319 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
6320 }
6321
6322 /*
6323 * wm_linkintr_gmii:
6324 *
6325 * Helper; handle link interrupts for GMII.
6326 */
6327 static void
6328 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
6329 {
6330
6331 KASSERT(WM_TX_LOCKED(sc));
6332
6333 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6334 __func__));
6335
6336 if (icr & ICR_LSC) {
6337 DPRINTF(WM_DEBUG_LINK,
6338 ("%s: LINK: LSC -> mii_pollstat\n",
6339 device_xname(sc->sc_dev)));
6340 mii_pollstat(&sc->sc_mii);
6341 if (sc->sc_type == WM_T_82543) {
6342 int miistatus, active;
6343
6344 /*
6345 * With 82543, we need to force speed and
6346 * duplex on the MAC equal to what the PHY
6347 * speed and duplex configuration is.
6348 */
6349 miistatus = sc->sc_mii.mii_media_status;
6350
6351 if (miistatus & IFM_ACTIVE) {
6352 active = sc->sc_mii.mii_media_active;
6353 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6354 switch (IFM_SUBTYPE(active)) {
6355 case IFM_10_T:
6356 sc->sc_ctrl |= CTRL_SPEED_10;
6357 break;
6358 case IFM_100_TX:
6359 sc->sc_ctrl |= CTRL_SPEED_100;
6360 break;
6361 case IFM_1000_T:
6362 sc->sc_ctrl |= CTRL_SPEED_1000;
6363 break;
6364 default:
6365 /*
6366 * fiber?
					 * Should not enter here.
6368 */
6369 printf("unknown media (%x)\n",
6370 active);
6371 break;
6372 }
6373 if (active & IFM_FDX)
6374 sc->sc_ctrl |= CTRL_FD;
6375 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6376 }
6377 } else if ((sc->sc_type == WM_T_ICH8)
6378 && (sc->sc_phytype == WMPHY_IGP_3)) {
6379 wm_kmrn_lock_loss_workaround_ich8lan(sc);
6380 } else if (sc->sc_type == WM_T_PCH) {
6381 wm_k1_gig_workaround_hv(sc,
6382 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
6383 }
6384
6385 if ((sc->sc_phytype == WMPHY_82578)
6386 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
6387 == IFM_1000_T)) {
6388
6389 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
6390 delay(200*1000); /* XXX too big */
6391
6392 /* Link stall fix for link up */
6393 wm_gmii_hv_writereg(sc->sc_dev, 1,
6394 HV_MUX_DATA_CTRL,
6395 HV_MUX_DATA_CTRL_GEN_TO_MAC
6396 | HV_MUX_DATA_CTRL_FORCE_SPEED);
6397 wm_gmii_hv_writereg(sc->sc_dev, 1,
6398 HV_MUX_DATA_CTRL,
6399 HV_MUX_DATA_CTRL_GEN_TO_MAC);
6400 }
6401 }
6402 } else if (icr & ICR_RXSEQ) {
6403 DPRINTF(WM_DEBUG_LINK,
6404 ("%s: LINK Receive sequence error\n",
6405 device_xname(sc->sc_dev)));
6406 }
6407 }
6408
6409 /*
6410 * wm_linkintr_tbi:
6411 *
6412 * Helper; handle link interrupts for TBI mode.
6413 */
6414 static void
6415 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
6416 {
6417 uint32_t status;
6418
6419 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6420 __func__));
6421
6422 status = CSR_READ(sc, WMREG_STATUS);
6423 if (icr & ICR_LSC) {
6424 if (status & STATUS_LU) {
6425 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
6426 device_xname(sc->sc_dev),
6427 (status & STATUS_FD) ? "FDX" : "HDX"));
6428 /*
6429 * NOTE: CTRL will update TFCE and RFCE automatically,
6430 * so we should update sc->sc_ctrl
6431 */
6432
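			/*
			 * Pick the Tx collision distance to match the
			 * negotiated duplex and mirror the hardware
			 * flow-control state into FCRTL below.
			 */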
6433 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6434 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6435 sc->sc_fcrtl &= ~FCRTL_XONE;
6436 if (status & STATUS_FD)
6437 sc->sc_tctl |=
6438 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6439 else
6440 sc->sc_tctl |=
6441 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6442 if (sc->sc_ctrl & CTRL_TFCE)
6443 sc->sc_fcrtl |= FCRTL_XONE;
6444 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6445 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6446 WMREG_OLD_FCRTL : WMREG_FCRTL,
6447 sc->sc_fcrtl);
6448 sc->sc_tbi_linkup = 1;
6449 } else {
6450 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
6451 device_xname(sc->sc_dev)));
6452 sc->sc_tbi_linkup = 0;
6453 }
6454 /* Update LED */
6455 wm_tbi_serdes_set_linkled(sc);
6456 } else if (icr & ICR_RXSEQ) {
6457 DPRINTF(WM_DEBUG_LINK,
6458 ("%s: LINK: Receive sequence error\n",
6459 device_xname(sc->sc_dev)));
6460 }
6461 }
6462
6463 /*
6464 * wm_linkintr_serdes:
6465 *
 *	Helper; handle link interrupts for SERDES mode.
6467 */
6468 static void
6469 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
6470 {
6471 struct mii_data *mii = &sc->sc_mii;
6472 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6473 uint32_t pcs_adv, pcs_lpab, reg;
6474
6475 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6476 __func__));
6477
6478 if (icr & ICR_LSC) {
6479 /* Check PCS */
6480 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6481 if ((reg & PCS_LSTS_LINKOK) != 0) {
6482 mii->mii_media_status |= IFM_ACTIVE;
6483 sc->sc_tbi_linkup = 1;
6484 } else {
			mii->mii_media_active |= IFM_NONE;
6486 sc->sc_tbi_linkup = 0;
6487 wm_tbi_serdes_set_linkled(sc);
6488 return;
6489 }
6490 mii->mii_media_active |= IFM_1000_SX;
6491 if ((reg & PCS_LSTS_FDX) != 0)
6492 mii->mii_media_active |= IFM_FDX;
6493 else
6494 mii->mii_media_active |= IFM_HDX;
6495 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6496 /* Check flow */
6497 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6498 if ((reg & PCS_LSTS_AN_COMP) == 0) {
6499 DPRINTF(WM_DEBUG_LINK,
6500 ("XXX LINKOK but not ACOMP\n"));
6501 return;
6502 }
6503 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
6504 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
6505 DPRINTF(WM_DEBUG_LINK,
6506 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
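			/*
			 * Resolve flow control as in IEEE 802.3 Annex 28B:
			 * symmetric PAUSE advertised by both ends enables
			 * pause in both directions; the asymmetric-pause
			 * combinations enable Tx-only or Rx-only pause.
			 */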
6507 if ((pcs_adv & TXCW_SYM_PAUSE)
6508 && (pcs_lpab & TXCW_SYM_PAUSE)) {
6509 mii->mii_media_active |= IFM_FLOW
6510 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
6511 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
6512 && (pcs_adv & TXCW_ASYM_PAUSE)
6513 && (pcs_lpab & TXCW_SYM_PAUSE)
6514 && (pcs_lpab & TXCW_ASYM_PAUSE))
6515 mii->mii_media_active |= IFM_FLOW
6516 | IFM_ETH_TXPAUSE;
6517 else if ((pcs_adv & TXCW_SYM_PAUSE)
6518 && (pcs_adv & TXCW_ASYM_PAUSE)
6519 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
6520 && (pcs_lpab & TXCW_ASYM_PAUSE))
6521 mii->mii_media_active |= IFM_FLOW
6522 | IFM_ETH_RXPAUSE;
6523 }
6524 /* Update LED */
6525 wm_tbi_serdes_set_linkled(sc);
6526 } else {
6527 DPRINTF(WM_DEBUG_LINK,
6528 ("%s: LINK: Receive sequence error\n",
6529 device_xname(sc->sc_dev)));
6530 }
6531 }
6532
6533 /*
6534 * wm_linkintr:
6535 *
6536 * Helper; handle link interrupts.
6537 */
6538 static void
6539 wm_linkintr(struct wm_softc *sc, uint32_t icr)
6540 {
6541
6542 if (sc->sc_flags & WM_F_HAS_MII)
6543 wm_linkintr_gmii(sc, icr);
6544 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
6545 && (sc->sc_type >= WM_T_82575))
6546 wm_linkintr_serdes(sc, icr);
6547 else
6548 wm_linkintr_tbi(sc, icr);
6549 }
6550
6551 /*
6552 * wm_intr_legacy:
6553 *
6554 * Interrupt service routine for INTx and MSI.
6555 */
6556 static int
6557 wm_intr_legacy(void *arg)
6558 {
6559 struct wm_softc *sc = arg;
6560 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6561 uint32_t icr, rndval = 0;
6562 int handled = 0;
6563
6564 DPRINTF(WM_DEBUG_TX,
6565 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
6566 while (1 /* CONSTCOND */) {
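		/* Note: reading ICR also acknowledges (clears) the causes. */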
6567 icr = CSR_READ(sc, WMREG_ICR);
6568 if ((icr & sc->sc_icr) == 0)
6569 break;
6570 if (rndval == 0)
6571 rndval = icr;
6572
6573 WM_RX_LOCK(sc);
6574
6575 if (sc->sc_stopping) {
6576 WM_RX_UNLOCK(sc);
6577 break;
6578 }
6579
6580 handled = 1;
6581
6582 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6583 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
6584 DPRINTF(WM_DEBUG_RX,
6585 ("%s: RX: got Rx intr 0x%08x\n",
6586 device_xname(sc->sc_dev),
6587 icr & (ICR_RXDMT0|ICR_RXT0)));
6588 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6589 }
6590 #endif
6591 wm_rxeof(sc);
6592
6593 WM_RX_UNLOCK(sc);
6594 WM_TX_LOCK(sc);
6595
6596 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6597 if (icr & ICR_TXDW) {
6598 DPRINTF(WM_DEBUG_TX,
6599 ("%s: TX: got TXDW interrupt\n",
6600 device_xname(sc->sc_dev)));
6601 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6602 }
6603 #endif
6604 wm_txeof(sc);
6605
6606 if (icr & (ICR_LSC|ICR_RXSEQ)) {
6607 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6608 wm_linkintr(sc, icr);
6609 }
6610
6611 WM_TX_UNLOCK(sc);
6612
6613 if (icr & ICR_RXO) {
6614 #if defined(WM_DEBUG)
6615 log(LOG_WARNING, "%s: Receive overrun\n",
6616 device_xname(sc->sc_dev));
6617 #endif /* defined(WM_DEBUG) */
6618 }
6619 }
6620
6621 rnd_add_uint32(&sc->rnd_source, rndval);
6622
6623 if (handled) {
6624 /* Try to get more packets going. */
6625 ifp->if_start(ifp);
6626 }
6627
6628 return handled;
6629 }
6630
6631 #ifdef WM_MSI_MSIX
6632 /*
6633 * wm_txintr_msix:
6634 *
6635 * Interrupt service routine for TX complete interrupt for MSI-X.
6636 */
6637 static int
6638 wm_txintr_msix(void *arg)
6639 {
6640 struct wm_softc *sc = arg;
6641 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6642 int handled = 0;
6643
6644 DPRINTF(WM_DEBUG_TX,
6645 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
6646
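	/*
	 * Mask this queue's Tx interrupt while it is being serviced;
	 * it is re-enabled below once wm_txeof() has run.
	 */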
6647 if (sc->sc_type == WM_T_82574)
6648 CSR_WRITE(sc, WMREG_IMC, ICR_TXQ0); /* 82574 only */
6649 else if (sc->sc_type == WM_T_82575)
6650 CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE0);
6651 else
6652 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_TX_INTR_INDEX);
6653
6654 WM_TX_LOCK(sc);
6655
6656 if (sc->sc_stopping)
6657 goto out;
6658
6659 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6660 handled = wm_txeof(sc);
6661
6662 out:
6663 WM_TX_UNLOCK(sc);
6664
6665 if (sc->sc_type == WM_T_82574)
6666 CSR_WRITE(sc, WMREG_IMS, ICR_TXQ0); /* 82574 only */
6667 else if (sc->sc_type == WM_T_82575)
6668 CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE0);
6669 else
6670 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_TX_INTR_INDEX);
6671
6672 if (handled) {
6673 /* Try to get more packets going. */
6674 ifp->if_start(ifp);
6675 }
6676
6677 return handled;
6678 }
6679
6680 /*
6681 * wm_rxintr_msix:
6682 *
6683 * Interrupt service routine for RX interrupt for MSI-X.
6684 */
6685 static int
6686 wm_rxintr_msix(void *arg)
6687 {
6688 struct wm_softc *sc = arg;
6689
6690 DPRINTF(WM_DEBUG_TX,
6691 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
6692
6693 if (sc->sc_type == WM_T_82574)
6694 CSR_WRITE(sc, WMREG_IMC, ICR_RXQ0); /* 82574 only */
6695 else if (sc->sc_type == WM_T_82575)
6696 CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE0);
6697 else
6698 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_RX_INTR_INDEX);
6699
6700 WM_RX_LOCK(sc);
6701
6702 if (sc->sc_stopping)
6703 goto out;
6704
6705 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6706 wm_rxeof(sc);
6707
6708 out:
6709 WM_RX_UNLOCK(sc);
6710
6711 if (sc->sc_type == WM_T_82574)
6712 CSR_WRITE(sc, WMREG_IMS, ICR_RXQ0);
6713 else if (sc->sc_type == WM_T_82575)
6714 CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE0);
6715 else
6716 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_RX_INTR_INDEX);
6717
6718 return 1;
6719 }
6720
6721 /*
6722 * wm_linkintr_msix:
6723 *
6724 * Interrupt service routine for link status change for MSI-X.
6725 */
6726 static int
6727 wm_linkintr_msix(void *arg)
6728 {
6729 struct wm_softc *sc = arg;
6730
6731 DPRINTF(WM_DEBUG_TX,
6732 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
6733
6734 if (sc->sc_type == WM_T_82574)
6735 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER); /* 82574 only */
6736 else if (sc->sc_type == WM_T_82575)
6737 CSR_WRITE(sc, WMREG_EIMC, EITR_OTHER);
6738 else
6739 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_LINK_INTR_INDEX);
6740 WM_TX_LOCK(sc);
6741 if (sc->sc_stopping)
6742 goto out;
6743
6744 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6745 wm_linkintr(sc, ICR_LSC);
6746
6747 out:
6748 WM_TX_UNLOCK(sc);
6749
6750 if (sc->sc_type == WM_T_82574)
6751 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
6752 else if (sc->sc_type == WM_T_82575)
6753 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
6754 else
6755 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_LINK_INTR_INDEX);
6756
6757 return 1;
6758 }
6759 #endif /* WM_MSI_MSIX */
6760
6761 /*
6762 * Media related.
6763 * GMII, SGMII, TBI (and SERDES)
6764 */
6765
6766 /* Common */
6767
6768 /*
6769 * wm_tbi_serdes_set_linkled:
6770 *
6771 * Update the link LED on TBI and SERDES devices.
6772 */
6773 static void
6774 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
6775 {
6776
6777 if (sc->sc_tbi_linkup)
6778 sc->sc_ctrl |= CTRL_SWDPIN(0);
6779 else
6780 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6781
6782 /* 82540 or newer devices are active low */
6783 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6784
6785 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6786 }
6787
6788 /* GMII related */
6789
6790 /*
6791 * wm_gmii_reset:
6792 *
6793 * Reset the PHY.
6794 */
6795 static void
6796 wm_gmii_reset(struct wm_softc *sc)
6797 {
6798 uint32_t reg;
6799 int rv;
6800
6801 /* get phy semaphore */
6802 switch (sc->sc_type) {
6803 case WM_T_82571:
6804 case WM_T_82572:
6805 case WM_T_82573:
6806 case WM_T_82574:
6807 case WM_T_82583:
6808 /* XXX should get sw semaphore, too */
6809 rv = wm_get_swsm_semaphore(sc);
6810 break;
6811 case WM_T_82575:
6812 case WM_T_82576:
6813 case WM_T_82580:
6814 case WM_T_I350:
6815 case WM_T_I354:
6816 case WM_T_I210:
6817 case WM_T_I211:
6818 case WM_T_80003:
6819 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6820 break;
6821 case WM_T_ICH8:
6822 case WM_T_ICH9:
6823 case WM_T_ICH10:
6824 case WM_T_PCH:
6825 case WM_T_PCH2:
6826 case WM_T_PCH_LPT:
6827 rv = wm_get_swfwhw_semaphore(sc);
6828 break;
6829 default:
		/* nothing to do */
6831 rv = 0;
6832 break;
6833 }
6834 if (rv != 0) {
6835 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6836 __func__);
6837 return;
6838 }
6839
6840 switch (sc->sc_type) {
6841 case WM_T_82542_2_0:
6842 case WM_T_82542_2_1:
6843 /* null */
6844 break;
6845 case WM_T_82543:
6846 /*
6847 * With 82543, we need to force speed and duplex on the MAC
6848 * equal to what the PHY speed and duplex configuration is.
6849 * In addition, we need to perform a hardware reset on the PHY
6850 * to take it out of reset.
6851 */
6852 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6853 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6854
6855 /* The PHY reset pin is active-low. */
6856 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6857 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6858 CTRL_EXT_SWDPIN(4));
6859 reg |= CTRL_EXT_SWDPIO(4);
6860
6861 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6862 CSR_WRITE_FLUSH(sc);
6863 delay(10*1000);
6864
6865 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6866 CSR_WRITE_FLUSH(sc);
6867 delay(150);
6868 #if 0
6869 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6870 #endif
6871 delay(20*1000); /* XXX extra delay to get PHY ID? */
6872 break;
6873 case WM_T_82544: /* reset 10000us */
6874 case WM_T_82540:
6875 case WM_T_82545:
6876 case WM_T_82545_3:
6877 case WM_T_82546:
6878 case WM_T_82546_3:
6879 case WM_T_82541:
6880 case WM_T_82541_2:
6881 case WM_T_82547:
6882 case WM_T_82547_2:
6883 case WM_T_82571: /* reset 100us */
6884 case WM_T_82572:
6885 case WM_T_82573:
6886 case WM_T_82574:
6887 case WM_T_82575:
6888 case WM_T_82576:
6889 case WM_T_82580:
6890 case WM_T_I350:
6891 case WM_T_I354:
6892 case WM_T_I210:
6893 case WM_T_I211:
6894 case WM_T_82583:
6895 case WM_T_80003:
6896 /* generic reset */
6897 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6898 CSR_WRITE_FLUSH(sc);
6899 delay(20000);
6900 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6901 CSR_WRITE_FLUSH(sc);
6902 delay(20000);
6903
6904 if ((sc->sc_type == WM_T_82541)
6905 || (sc->sc_type == WM_T_82541_2)
6906 || (sc->sc_type == WM_T_82547)
6907 || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
			/* XXX add code to set the LED after PHY reset */
6910 }
6911 break;
6912 case WM_T_ICH8:
6913 case WM_T_ICH9:
6914 case WM_T_ICH10:
6915 case WM_T_PCH:
6916 case WM_T_PCH2:
6917 case WM_T_PCH_LPT:
6918 /* generic reset */
6919 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6920 CSR_WRITE_FLUSH(sc);
6921 delay(100);
6922 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6923 CSR_WRITE_FLUSH(sc);
6924 delay(150);
6925 break;
6926 default:
6927 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6928 __func__);
6929 break;
6930 }
6931
6932 /* release PHY semaphore */
6933 switch (sc->sc_type) {
6934 case WM_T_82571:
6935 case WM_T_82572:
6936 case WM_T_82573:
6937 case WM_T_82574:
6938 case WM_T_82583:
6939 /* XXX should put sw semaphore, too */
6940 wm_put_swsm_semaphore(sc);
6941 break;
6942 case WM_T_82575:
6943 case WM_T_82576:
6944 case WM_T_82580:
6945 case WM_T_I350:
6946 case WM_T_I354:
6947 case WM_T_I210:
6948 case WM_T_I211:
6949 case WM_T_80003:
6950 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6951 break;
6952 case WM_T_ICH8:
6953 case WM_T_ICH9:
6954 case WM_T_ICH10:
6955 case WM_T_PCH:
6956 case WM_T_PCH2:
6957 case WM_T_PCH_LPT:
6958 wm_put_swfwhw_semaphore(sc);
6959 break;
	default:
		/* nothing to do */
		break;
6964 }
6965
6966 /* get_cfg_done */
6967 wm_get_cfg_done(sc);
6968
6969 /* extra setup */
6970 switch (sc->sc_type) {
6971 case WM_T_82542_2_0:
6972 case WM_T_82542_2_1:
6973 case WM_T_82543:
6974 case WM_T_82544:
6975 case WM_T_82540:
6976 case WM_T_82545:
6977 case WM_T_82545_3:
6978 case WM_T_82546:
6979 case WM_T_82546_3:
6980 case WM_T_82541_2:
6981 case WM_T_82547_2:
6982 case WM_T_82571:
6983 case WM_T_82572:
6984 case WM_T_82573:
6985 case WM_T_82574:
6986 case WM_T_82575:
6987 case WM_T_82576:
6988 case WM_T_82580:
6989 case WM_T_I350:
6990 case WM_T_I354:
6991 case WM_T_I210:
6992 case WM_T_I211:
6993 case WM_T_82583:
6994 case WM_T_80003:
6995 /* null */
6996 break;
6997 case WM_T_82541:
6998 case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
7000 break;
7001 case WM_T_ICH8:
7002 case WM_T_ICH9:
7003 case WM_T_ICH10:
7004 case WM_T_PCH:
7005 case WM_T_PCH2:
7006 case WM_T_PCH_LPT:
		/* Allow time for h/w to get to a quiescent state after reset */
7008 delay(10*1000);
7009
7010 if (sc->sc_type == WM_T_PCH)
7011 wm_hv_phy_workaround_ich8lan(sc);
7012
7013 if (sc->sc_type == WM_T_PCH2)
7014 wm_lv_phy_workaround_ich8lan(sc);
7015
7016 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
7017 /*
7018 * dummy read to clear the phy wakeup bit after lcd
7019 * reset
7020 */
7021 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
7022 }
7023
7024 /*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM
7027 */
7028
7029 /* Configure the LCD with the OEM bits in NVM */
7030 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
7031 || (sc->sc_type == WM_T_PCH_LPT)) {
7032 /*
7033 * Disable LPLU.
7034 * XXX It seems that 82567 has LPLU, too.
7035 */
7036 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
7038 reg |= HV_OEM_BITS_ANEGNOW;
7039 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
7040 }
7041 break;
7042 default:
7043 panic("%s: unknown type\n", __func__);
7044 break;
7045 }
7046 }
7047
7048 /*
7049 * wm_get_phy_id_82575:
7050 *
7051 * Return PHY ID. Return -1 if it failed.
7052 */
7053 static int
7054 wm_get_phy_id_82575(struct wm_softc *sc)
7055 {
7056 uint32_t reg;
7057 int phyid = -1;
7058
7059 /* XXX */
7060 if ((sc->sc_flags & WM_F_SGMII) == 0)
7061 return -1;
7062
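	/*
	 * When the SGMII PHY is reached over MDIO, the hardware latches
	 * the PHY address in MDIC (82575/82576) or MDICNFG (82580 and
	 * newer), so read it back from there.
	 */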
7063 if (wm_sgmii_uses_mdio(sc)) {
7064 switch (sc->sc_type) {
7065 case WM_T_82575:
7066 case WM_T_82576:
7067 reg = CSR_READ(sc, WMREG_MDIC);
7068 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
7069 break;
7070 case WM_T_82580:
7071 case WM_T_I350:
7072 case WM_T_I354:
7073 case WM_T_I210:
7074 case WM_T_I211:
7075 reg = CSR_READ(sc, WMREG_MDICNFG);
7076 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
7077 break;
7078 default:
7079 return -1;
7080 }
7081 }
7082
7083 return phyid;
7084 }
7085
7086
7087 /*
7088 * wm_gmii_mediainit:
7089 *
7090 * Initialize media for use on 1000BASE-T devices.
7091 */
7092 static void
7093 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
7094 {
7095 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7096 struct mii_data *mii = &sc->sc_mii;
7097 uint32_t reg;
7098
7099 /* We have GMII. */
7100 sc->sc_flags |= WM_F_HAS_MII;
7101
7102 if (sc->sc_type == WM_T_80003)
7103 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7104 else
7105 sc->sc_tipg = TIPG_1000T_DFLT;
7106
7107 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
7108 if ((sc->sc_type == WM_T_82580)
7109 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
7110 || (sc->sc_type == WM_T_I211)) {
7111 reg = CSR_READ(sc, WMREG_PHPM);
7112 reg &= ~PHPM_GO_LINK_D;
7113 CSR_WRITE(sc, WMREG_PHPM, reg);
7114 }
7115
7116 /*
7117 * Let the chip set speed/duplex on its own based on
7118 * signals from the PHY.
7119 * XXXbouyer - I'm not sure this is right for the 80003,
7120 * the em driver only sets CTRL_SLU here - but it seems to work.
7121 */
7122 sc->sc_ctrl |= CTRL_SLU;
7123 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7124
7125 /* Initialize our media structures and probe the GMII. */
7126 mii->mii_ifp = ifp;
7127
7128 /*
7129 * Determine the PHY access method.
7130 *
7131 * For SGMII, use SGMII specific method.
7132 *
7133 * For some devices, we can determine the PHY access method
7134 * from sc_type.
7135 *
7136 * For ICH and PCH variants, it's difficult to determine the PHY
7137 * access method by sc_type, so use the PCI product ID for some
7138 * devices.
7139 * For other ICH8 variants, try to use igp's method. If the PHY
7140 * can't detect, then use bm's method.
7141 */
7142 switch (prodid) {
7143 case PCI_PRODUCT_INTEL_PCH_M_LM:
7144 case PCI_PRODUCT_INTEL_PCH_M_LC:
7145 /* 82577 */
7146 sc->sc_phytype = WMPHY_82577;
7147 break;
7148 case PCI_PRODUCT_INTEL_PCH_D_DM:
7149 case PCI_PRODUCT_INTEL_PCH_D_DC:
7150 /* 82578 */
7151 sc->sc_phytype = WMPHY_82578;
7152 break;
7153 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
7154 case PCI_PRODUCT_INTEL_PCH2_LV_V:
7155 /* 82579 */
7156 sc->sc_phytype = WMPHY_82579;
7157 break;
7158 case PCI_PRODUCT_INTEL_82801I_BM:
7159 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
7160 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
7161 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
7162 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
7163 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
7164 /* 82567 */
7165 sc->sc_phytype = WMPHY_BM;
7166 mii->mii_readreg = wm_gmii_bm_readreg;
7167 mii->mii_writereg = wm_gmii_bm_writereg;
7168 break;
7169 default:
7170 if (((sc->sc_flags & WM_F_SGMII) != 0)
7171 && !wm_sgmii_uses_mdio(sc)){
7172 /* SGMII */
7173 mii->mii_readreg = wm_sgmii_readreg;
7174 mii->mii_writereg = wm_sgmii_writereg;
7175 } else if (sc->sc_type >= WM_T_80003) {
7176 /* 80003 */
7177 mii->mii_readreg = wm_gmii_i80003_readreg;
7178 mii->mii_writereg = wm_gmii_i80003_writereg;
7179 } else if (sc->sc_type >= WM_T_I210) {
7180 /* I210 and I211 */
7181 mii->mii_readreg = wm_gmii_gs40g_readreg;
7182 mii->mii_writereg = wm_gmii_gs40g_writereg;
7183 } else if (sc->sc_type >= WM_T_82580) {
7184 /* 82580, I350 and I354 */
7185 sc->sc_phytype = WMPHY_82580;
7186 mii->mii_readreg = wm_gmii_82580_readreg;
7187 mii->mii_writereg = wm_gmii_82580_writereg;
7188 } else if (sc->sc_type >= WM_T_82544) {
			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
7190 mii->mii_readreg = wm_gmii_i82544_readreg;
7191 mii->mii_writereg = wm_gmii_i82544_writereg;
7192 } else {
7193 mii->mii_readreg = wm_gmii_i82543_readreg;
7194 mii->mii_writereg = wm_gmii_i82543_writereg;
7195 }
7196 break;
7197 }
7198 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
7199 /* All PCH* use _hv_ */
7200 mii->mii_readreg = wm_gmii_hv_readreg;
7201 mii->mii_writereg = wm_gmii_hv_writereg;
7202 }
7203 mii->mii_statchg = wm_gmii_statchg;
7204
7205 wm_gmii_reset(sc);
7206
7207 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7208 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
7209 wm_gmii_mediastatus);
7210
7211 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
7212 || (sc->sc_type == WM_T_82580)
7213 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
7214 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
7215 if ((sc->sc_flags & WM_F_SGMII) == 0) {
7216 /* Attach only one port */
7217 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
7218 MII_OFFSET_ANY, MIIF_DOPAUSE);
7219 } else {
7220 int i, id;
7221 uint32_t ctrl_ext;
7222
7223 id = wm_get_phy_id_82575(sc);
7224 if (id != -1) {
7225 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
7226 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
7227 }
7228 if ((id == -1)
7229 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
7230 /* Power on sgmii phy if it is disabled */
7231 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7232 CSR_WRITE(sc, WMREG_CTRL_EXT,
7233 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
7234 CSR_WRITE_FLUSH(sc);
7235 delay(300*1000); /* XXX too long */
7236
				/* Try PHY addresses 1 through 7 */
7238 for (i = 1; i < 8; i++)
7239 mii_attach(sc->sc_dev, &sc->sc_mii,
7240 0xffffffff, i, MII_OFFSET_ANY,
7241 MIIF_DOPAUSE);
7242
7243 /* restore previous sfp cage power state */
7244 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7245 }
7246 }
7247 } else {
7248 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7249 MII_OFFSET_ANY, MIIF_DOPAUSE);
7250 }
7251
7252 /*
7253 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
7254 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
7255 */
7256 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
7257 (LIST_FIRST(&mii->mii_phys) == NULL)) {
7258 wm_set_mdio_slow_mode_hv(sc);
7259 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7260 MII_OFFSET_ANY, MIIF_DOPAUSE);
7261 }
7262
7263 /*
7264 * (For ICH8 variants)
7265 * If PHY detection failed, use BM's r/w function and retry.
7266 */
7267 if (LIST_FIRST(&mii->mii_phys) == NULL) {
7268 /* if failed, retry with *_bm_* */
7269 mii->mii_readreg = wm_gmii_bm_readreg;
7270 mii->mii_writereg = wm_gmii_bm_writereg;
7271
7272 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7273 MII_OFFSET_ANY, MIIF_DOPAUSE);
7274 }
7275
7276 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
7278 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
7279 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
7280 sc->sc_phytype = WMPHY_NONE;
7281 } else {
7282 /*
7283 * PHY Found!
7284 * Check PHY type.
7285 */
7286 uint32_t model;
7287 struct mii_softc *child;
7288
7289 child = LIST_FIRST(&mii->mii_phys);
7290 if (device_is_a(child->mii_dev, "igphy")) {
7291 struct igphy_softc *isc = (struct igphy_softc *)child;
7292
7293 model = isc->sc_mii.mii_mpd_model;
7294 if (model == MII_MODEL_yyINTEL_I82566)
7295 sc->sc_phytype = WMPHY_IGP_3;
7296 }
7297
7298 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
7299 }
7300 }
7301
7302 /*
7303 * wm_gmii_mediachange: [ifmedia interface function]
7304 *
7305 * Set hardware to newly-selected media on a 1000BASE-T device.
7306 */
7307 static int
7308 wm_gmii_mediachange(struct ifnet *ifp)
7309 {
7310 struct wm_softc *sc = ifp->if_softc;
7311 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7312 int rc;
7313
7314 if ((ifp->if_flags & IFF_UP) == 0)
7315 return 0;
7316
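	/*
	 * For autoselect, or on anything newer than the 82543, let the
	 * MAC derive speed/duplex from the PHY; otherwise force the MAC
	 * settings to match the configured media.
	 */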
7317 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7318 sc->sc_ctrl |= CTRL_SLU;
7319 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7320 || (sc->sc_type > WM_T_82543)) {
7321 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
7322 } else {
7323 sc->sc_ctrl &= ~CTRL_ASDE;
7324 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7325 if (ife->ifm_media & IFM_FDX)
7326 sc->sc_ctrl |= CTRL_FD;
7327 switch (IFM_SUBTYPE(ife->ifm_media)) {
7328 case IFM_10_T:
7329 sc->sc_ctrl |= CTRL_SPEED_10;
7330 break;
7331 case IFM_100_TX:
7332 sc->sc_ctrl |= CTRL_SPEED_100;
7333 break;
7334 case IFM_1000_T:
7335 sc->sc_ctrl |= CTRL_SPEED_1000;
7336 break;
7337 default:
7338 panic("wm_gmii_mediachange: bad media 0x%x",
7339 ife->ifm_media);
7340 }
7341 }
7342 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7343 if (sc->sc_type <= WM_T_82543)
7344 wm_gmii_reset(sc);
7345
7346 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
7347 return 0;
7348 return rc;
7349 }
7350
7351 /*
7352 * wm_gmii_mediastatus: [ifmedia interface function]
7353 *
7354 * Get the current interface media status on a 1000BASE-T device.
7355 */
7356 static void
7357 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7358 {
7359 struct wm_softc *sc = ifp->if_softc;
7360
7361 ether_mediastatus(ifp, ifmr);
7362 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
7363 | sc->sc_flowflags;
7364 }
7365
7366 #define MDI_IO CTRL_SWDPIN(2)
7367 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
7368 #define MDI_CLK CTRL_SWDPIN(3)
7369
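/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang `nbits' bits of `data' (MSB first) out the 82543's
 *	software-controlled MDIO pin, pulsing MDC for each bit.
 */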
7370 static void
7371 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
7372 {
7373 uint32_t i, v;
7374
7375 v = CSR_READ(sc, WMREG_CTRL);
7376 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
7377 v |= MDI_DIR | CTRL_SWDPIO(3);
7378
7379 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
7380 if (data & i)
7381 v |= MDI_IO;
7382 else
7383 v &= ~MDI_IO;
7384 CSR_WRITE(sc, WMREG_CTRL, v);
7385 CSR_WRITE_FLUSH(sc);
7386 delay(10);
7387 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7388 CSR_WRITE_FLUSH(sc);
7389 delay(10);
7390 CSR_WRITE(sc, WMREG_CTRL, v);
7391 CSR_WRITE_FLUSH(sc);
7392 delay(10);
7393 }
7394 }
7395
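/*
 * wm_i82543_mii_recvbits:
 *
 *	Bit-bang the turnaround cycle, then clock 16 bits of data in
 *	from the 82543's software-controlled MDIO pin.
 */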
7396 static uint32_t
7397 wm_i82543_mii_recvbits(struct wm_softc *sc)
7398 {
7399 uint32_t v, i, data = 0;
7400
7401 v = CSR_READ(sc, WMREG_CTRL);
7402 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
7403 v |= CTRL_SWDPIO(3);
7404
7405 CSR_WRITE(sc, WMREG_CTRL, v);
7406 CSR_WRITE_FLUSH(sc);
7407 delay(10);
7408 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7409 CSR_WRITE_FLUSH(sc);
7410 delay(10);
7411 CSR_WRITE(sc, WMREG_CTRL, v);
7412 CSR_WRITE_FLUSH(sc);
7413 delay(10);
7414
7415 for (i = 0; i < 16; i++) {
7416 data <<= 1;
7417 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7418 CSR_WRITE_FLUSH(sc);
7419 delay(10);
7420 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
7421 data |= 1;
7422 CSR_WRITE(sc, WMREG_CTRL, v);
7423 CSR_WRITE_FLUSH(sc);
7424 delay(10);
7425 }
7426
7427 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7428 CSR_WRITE_FLUSH(sc);
7429 delay(10);
7430 CSR_WRITE(sc, WMREG_CTRL, v);
7431 CSR_WRITE_FLUSH(sc);
7432 delay(10);
7433
7434 return data;
7435 }
7436
7437 #undef MDI_IO
7438 #undef MDI_DIR
7439 #undef MDI_CLK
7440
7441 /*
7442 * wm_gmii_i82543_readreg: [mii interface function]
7443 *
7444 * Read a PHY register on the GMII (i82543 version).
7445 */
7446 static int
7447 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
7448 {
7449 struct wm_softc *sc = device_private(self);
7450 int rv;
7451
7452 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
7453 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
7454 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
7455 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
7456
7457 DPRINTF(WM_DEBUG_GMII,
7458 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
7459 device_xname(sc->sc_dev), phy, reg, rv));
7460
7461 return rv;
7462 }
7463
7464 /*
7465 * wm_gmii_i82543_writereg: [mii interface function]
7466 *
7467 * Write a PHY register on the GMII (i82543 version).
7468 */
7469 static void
7470 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
7471 {
7472 struct wm_softc *sc = device_private(self);
7473
7474 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
7475 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
7476 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
7477 (MII_COMMAND_START << 30), 32);
7478 }
7479
7480 /*
7481 * wm_gmii_i82544_readreg: [mii interface function]
7482 *
7483 * Read a PHY register on the GMII.
7484 */
7485 static int
7486 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
7487 {
7488 struct wm_softc *sc = device_private(self);
7489 uint32_t mdic = 0;
7490 int i, rv;
7491
7492 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
7493 MDIC_REGADD(reg));
7494
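	/* Poll until the MDI transaction completes or we give up. */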
7495 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7496 mdic = CSR_READ(sc, WMREG_MDIC);
7497 if (mdic & MDIC_READY)
7498 break;
7499 delay(50);
7500 }
7501
7502 if ((mdic & MDIC_READY) == 0) {
7503 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
7504 device_xname(sc->sc_dev), phy, reg);
7505 rv = 0;
7506 } else if (mdic & MDIC_E) {
7507 #if 0 /* This is normal if no PHY is present. */
7508 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
7509 device_xname(sc->sc_dev), phy, reg);
7510 #endif
7511 rv = 0;
7512 } else {
7513 rv = MDIC_DATA(mdic);
7514 if (rv == 0xffff)
7515 rv = 0;
7516 }
7517
7518 return rv;
7519 }
7520
7521 /*
7522 * wm_gmii_i82544_writereg: [mii interface function]
7523 *
7524 * Write a PHY register on the GMII.
7525 */
7526 static void
7527 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
7528 {
7529 struct wm_softc *sc = device_private(self);
7530 uint32_t mdic = 0;
7531 int i;
7532
7533 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
7534 MDIC_REGADD(reg) | MDIC_DATA(val));
7535
7536 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7537 mdic = CSR_READ(sc, WMREG_MDIC);
7538 if (mdic & MDIC_READY)
7539 break;
7540 delay(50);
7541 }
7542
7543 if ((mdic & MDIC_READY) == 0)
7544 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
7545 device_xname(sc->sc_dev), phy, reg);
7546 else if (mdic & MDIC_E)
7547 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
7548 device_xname(sc->sc_dev), phy, reg);
7549 }
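
/*
 * Minimal sketch of the MDIC ready-polling idiom shared by the read and
 * write paths above; the helper name and the ETIMEDOUT return value are
 * illustrative only.  Callers still need to check MDIC_E afterwards.
 */
#if 0
static int
wm_mdic_wait_ready(struct wm_softc *sc, uint32_t *mdicp)
{
	uint32_t mdic = 0;
	int i;

	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(50);
	}
	*mdicp = mdic;
	return (mdic & MDIC_READY) ? 0 : ETIMEDOUT;
}
#endif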
7550
7551 /*
7552 * wm_gmii_i80003_readreg: [mii interface function]
7553 *
 * Read a PHY register on the Kumeran interface.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7557 */
7558 static int
7559 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
7560 {
7561 struct wm_softc *sc = device_private(self);
7562 int sem;
7563 int rv;
7564
7565 if (phy != 1) /* only one PHY on kumeran bus */
7566 return 0;
7567
7568 sem = swfwphysem[sc->sc_funcid];
7569 if (wm_get_swfw_semaphore(sc, sem)) {
7570 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7571 __func__);
7572 return 0;
7573 }
7574
7575 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7576 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7577 reg >> GG82563_PAGE_SHIFT);
7578 } else {
7579 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7580 reg >> GG82563_PAGE_SHIFT);
7581 }
	/* Wait another 200us to work around a bug in the MDIC ready bit */
7583 delay(200);
7584 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7585 delay(200);
7586
7587 wm_put_swfw_semaphore(sc, sem);
7588 return rv;
7589 }
7590
7591 /*
7592 * wm_gmii_i80003_writereg: [mii interface function]
7593 *
 * Write a PHY register on the Kumeran interface.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7597 */
7598 static void
7599 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
7600 {
7601 struct wm_softc *sc = device_private(self);
7602 int sem;
7603
7604 if (phy != 1) /* only one PHY on kumeran bus */
7605 return;
7606
7607 sem = swfwphysem[sc->sc_funcid];
7608 if (wm_get_swfw_semaphore(sc, sem)) {
7609 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7610 __func__);
7611 return;
7612 }
7613
7614 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7615 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7616 reg >> GG82563_PAGE_SHIFT);
7617 } else {
7618 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7619 reg >> GG82563_PAGE_SHIFT);
7620 }
	/* Wait another 200us to work around a bug in the MDIC ready bit */
7622 delay(200);
7623 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7624 delay(200);
7625
7626 wm_put_swfw_semaphore(sc, sem);
7627 }
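
/*
 * Worked example (values assume the usual GG82563 definitions, where
 * GG82563_PAGE_SHIFT is 5 and GG82563_MAX_REG_ADDRESS is 0x1f): a
 * combined register number splits into a page and an in-page offset.
 * The function name is hypothetical.
 */
#if 0
static void
wm_gg82563_split_example(void)
{
	int reg    = 0x1a5;				/* example register */
	int page   = reg >> GG82563_PAGE_SHIFT;		/* 0x0d, if shift is 5 */
	int offset = reg & GG82563_MAX_REG_ADDRESS;	/* 0x05, if mask is 0x1f */
}
#endif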
7628
7629 /*
7630 * wm_gmii_bm_readreg: [mii interface function]
7631 *
 * Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7635 */
7636 static int
7637 wm_gmii_bm_readreg(device_t self, int phy, int reg)
7638 {
7639 struct wm_softc *sc = device_private(self);
7640 int sem;
7641 int rv;
7642
7643 sem = swfwphysem[sc->sc_funcid];
7644 if (wm_get_swfw_semaphore(sc, sem)) {
7645 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7646 __func__);
7647 return 0;
7648 }
7649
7650 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7651 if (phy == 1)
7652 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7653 reg);
7654 else
7655 wm_gmii_i82544_writereg(self, phy,
7656 GG82563_PHY_PAGE_SELECT,
7657 reg >> GG82563_PAGE_SHIFT);
7658 }
7659
7660 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7661 wm_put_swfw_semaphore(sc, sem);
7662 return rv;
7663 }
7664
7665 /*
7666 * wm_gmii_bm_writereg: [mii interface function]
7667 *
 * Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7671 */
7672 static void
7673 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7674 {
7675 struct wm_softc *sc = device_private(self);
7676 int sem;
7677
7678 sem = swfwphysem[sc->sc_funcid];
7679 if (wm_get_swfw_semaphore(sc, sem)) {
7680 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7681 __func__);
7682 return;
7683 }
7684
7685 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7686 if (phy == 1)
7687 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7688 reg);
7689 else
7690 wm_gmii_i82544_writereg(self, phy,
7691 GG82563_PHY_PAGE_SELECT,
7692 reg >> GG82563_PAGE_SHIFT);
7693 }
7694
7695 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7696 wm_put_swfw_semaphore(sc, sem);
7697 }
7698
7699 static void
7700 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7701 {
7702 struct wm_softc *sc = device_private(self);
7703 uint16_t regnum = BM_PHY_REG_NUM(offset);
7704 uint16_t wuce;
7705
7706 /* XXX Gig must be disabled for MDIO accesses to page 800 */
7707 if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
7709 }
7710
7711 /* Set page 769 */
7712 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7713 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7714
7715 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7716
7717 wuce &= ~BM_WUC_HOST_WU_BIT;
7718 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7719 wuce | BM_WUC_ENABLE_BIT);
7720
7721 /* Select page 800 */
7722 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7723 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7724
7725 /* Write page 800 */
7726 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7727
7728 if (rd)
7729 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7730 else
7731 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7732
7733 /* Set page 769 */
7734 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7735 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7736
7737 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7738 }
7739
7740 /*
7741 * wm_gmii_hv_readreg: [mii interface function]
7742 *
 * Read a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7746 */
7747 static int
7748 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7749 {
7750 struct wm_softc *sc = device_private(self);
7751 uint16_t page = BM_PHY_REG_PAGE(reg);
7752 uint16_t regnum = BM_PHY_REG_NUM(reg);
7753 uint16_t val;
7754 int rv;
7755
7756 if (wm_get_swfwhw_semaphore(sc)) {
7757 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7758 __func__);
7759 return 0;
7760 }
7761
7762 /* XXX Workaround failure in MDIO access while cable is disconnected */
7763 if (sc->sc_phytype == WMPHY_82577) {
7764 /* XXX must write */
7765 }
7766
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);
		return val;
	}

	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function; this is not implemented yet.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);
		return 0;
	}
7781
7782 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7783 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7784 page << BME1000_PAGE_SHIFT);
7785 }
7786
7787 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7788 wm_put_swfwhw_semaphore(sc);
7789 return rv;
7790 }
7791
7792 /*
7793 * wm_gmii_hv_writereg: [mii interface function]
7794 *
 * Write a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7798 */
7799 static void
7800 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7801 {
7802 struct wm_softc *sc = device_private(self);
7803 uint16_t page = BM_PHY_REG_PAGE(reg);
7804 uint16_t regnum = BM_PHY_REG_NUM(reg);
7805
7806 if (wm_get_swfwhw_semaphore(sc)) {
7807 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7808 __func__);
7809 return;
7810 }
7811
7812 /* XXX Workaround failure in MDIO access while cable is disconnected */
7813
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		uint16_t tmp;

		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);
		return;
	}

	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function; this is not implemented yet.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);
		return;
	}
7831
7832 /*
7833 * XXX Workaround MDIO accesses being disabled after entering IEEE
7834 * Power Down (whenever bit 11 of the PHY control register is set)
7835 */
7836
7837 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7838 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7839 page << BME1000_PAGE_SHIFT);
7840 }
7841
7842 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7843 wm_put_swfwhw_semaphore(sc);
7844 }
7845
7846 /*
7847 * wm_gmii_82580_readreg: [mii interface function]
7848 *
7849 * Read a PHY register on the 82580 and I350.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7852 */
7853 static int
7854 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7855 {
7856 struct wm_softc *sc = device_private(self);
7857 int sem;
7858 int rv;
7859
7860 sem = swfwphysem[sc->sc_funcid];
7861 if (wm_get_swfw_semaphore(sc, sem)) {
7862 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7863 __func__);
7864 return 0;
7865 }
7866
7867 rv = wm_gmii_i82544_readreg(self, phy, reg);
7868
7869 wm_put_swfw_semaphore(sc, sem);
7870 return rv;
7871 }
7872
7873 /*
7874 * wm_gmii_82580_writereg: [mii interface function]
7875 *
7876 * Write a PHY register on the 82580 and I350.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7879 */
7880 static void
7881 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7882 {
7883 struct wm_softc *sc = device_private(self);
7884 int sem;
7885
7886 sem = swfwphysem[sc->sc_funcid];
7887 if (wm_get_swfw_semaphore(sc, sem)) {
7888 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7889 __func__);
7890 return;
7891 }
7892
7893 wm_gmii_i82544_writereg(self, phy, reg, val);
7894
7895 wm_put_swfw_semaphore(sc, sem);
7896 }
7897
7898 /*
7899 * wm_gmii_gs40g_readreg: [mii interface function]
7900 *
 * Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7904 */
7905 static int
7906 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
7907 {
7908 struct wm_softc *sc = device_private(self);
7909 int sem;
7910 int page, offset;
7911 int rv;
7912
7913 /* Acquire semaphore */
7914 sem = swfwphysem[sc->sc_funcid];
7915 if (wm_get_swfw_semaphore(sc, sem)) {
7916 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7917 __func__);
7918 return 0;
7919 }
7920
7921 /* Page select */
7922 page = reg >> GS40G_PAGE_SHIFT;
7923 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
7924
7925 /* Read reg */
7926 offset = reg & GS40G_OFFSET_MASK;
7927 rv = wm_gmii_i82544_readreg(self, phy, offset);
7928
7929 wm_put_swfw_semaphore(sc, sem);
7930 return rv;
7931 }
7932
7933 /*
7934 * wm_gmii_gs40g_writereg: [mii interface function]
7935 *
7936 * Write a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7939 */
7940 static void
7941 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
7942 {
7943 struct wm_softc *sc = device_private(self);
7944 int sem;
7945 int page, offset;
7946
7947 /* Acquire semaphore */
7948 sem = swfwphysem[sc->sc_funcid];
7949 if (wm_get_swfw_semaphore(sc, sem)) {
7950 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7951 __func__);
7952 return;
7953 }
7954
7955 /* Page select */
7956 page = reg >> GS40G_PAGE_SHIFT;
7957 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
7958
7959 /* Write reg */
7960 offset = reg & GS40G_OFFSET_MASK;
7961 wm_gmii_i82544_writereg(self, phy, offset, val);
7962
7963 /* Release semaphore */
7964 wm_put_swfw_semaphore(sc, sem);
7965 }
7966
7967 /*
7968 * wm_gmii_statchg: [mii interface function]
7969 *
7970 * Callback from MII layer when media changes.
7971 */
7972 static void
7973 wm_gmii_statchg(struct ifnet *ifp)
7974 {
7975 struct wm_softc *sc = ifp->if_softc;
7976 struct mii_data *mii = &sc->sc_mii;
7977
7978 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7979 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7980 sc->sc_fcrtl &= ~FCRTL_XONE;
7981
7982 /*
7983 * Get flow control negotiation result.
7984 */
7985 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7986 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7987 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7988 mii->mii_media_active &= ~IFM_ETH_FMASK;
7989 }
7990
7991 if (sc->sc_flowflags & IFM_FLOW) {
7992 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7993 sc->sc_ctrl |= CTRL_TFCE;
7994 sc->sc_fcrtl |= FCRTL_XONE;
7995 }
7996 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7997 sc->sc_ctrl |= CTRL_RFCE;
7998 }
7999
8000 if (sc->sc_mii.mii_media_active & IFM_FDX) {
8001 DPRINTF(WM_DEBUG_LINK,
8002 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
8003 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8004 } else {
8005 DPRINTF(WM_DEBUG_LINK,
8006 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
8007 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8008 }
8009
8010 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8011 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8012 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
8013 : WMREG_FCRTL, sc->sc_fcrtl);
8014 if (sc->sc_type == WM_T_80003) {
8015 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
8016 case IFM_1000_T:
8017 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8018 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
8019 sc->sc_tipg = TIPG_1000T_80003_DFLT;
8020 break;
8021 default:
8022 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8023 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
8024 sc->sc_tipg = TIPG_10_100_80003_DFLT;
8025 break;
8026 }
8027 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
8028 }
8029 }
8030
8031 /*
8032 * wm_kmrn_readreg:
8033 *
8034 * Read a kumeran register
8035 */
8036 static int
8037 wm_kmrn_readreg(struct wm_softc *sc, int reg)
8038 {
8039 int rv;
8040
8041 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8042 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8043 aprint_error_dev(sc->sc_dev,
8044 "%s: failed to get semaphore\n", __func__);
8045 return 0;
8046 }
8047 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8048 if (wm_get_swfwhw_semaphore(sc)) {
8049 aprint_error_dev(sc->sc_dev,
8050 "%s: failed to get semaphore\n", __func__);
8051 return 0;
8052 }
8053 }
8054
8055 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8056 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8057 KUMCTRLSTA_REN);
8058 CSR_WRITE_FLUSH(sc);
8059 delay(2);
8060
8061 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
8062
8063 if (sc->sc_flags & WM_F_LOCK_SWFW)
8064 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8065 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8066 wm_put_swfwhw_semaphore(sc);
8067
8068 return rv;
8069 }
8070
8071 /*
8072 * wm_kmrn_writereg:
8073 *
8074 * Write a kumeran register
8075 */
8076 static void
8077 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
8078 {
8079
8080 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8081 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8082 aprint_error_dev(sc->sc_dev,
8083 "%s: failed to get semaphore\n", __func__);
8084 return;
8085 }
8086 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8087 if (wm_get_swfwhw_semaphore(sc)) {
8088 aprint_error_dev(sc->sc_dev,
8089 "%s: failed to get semaphore\n", __func__);
8090 return;
8091 }
8092 }
8093
8094 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8095 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8096 (val & KUMCTRLSTA_MASK));
8097
8098 if (sc->sc_flags & WM_F_LOCK_SWFW)
8099 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8100 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8101 wm_put_swfwhw_semaphore(sc);
8102 }
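
/*
 * A minimal sketch of the unlocked core of the pair above: the register
 * offset is packed into the KUMCTRLSTA_OFFSET field, reads additionally
 * set KUMCTRLSTA_REN, and data travels in the low KUMCTRLSTA_MASK bits.
 * The helper name is hypothetical.
 */
#if 0
static uint32_t
wm_kmrn_read_request(struct wm_softc *sc, int reg)
{

	/* Issue the read request, then pick the reply out of the data field */
	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    KUMCTRLSTA_REN);
	CSR_WRITE_FLUSH(sc);
	delay(2);
	return CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
}
#endif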
8103
8104 /* SGMII related */
8105
8106 /*
8107 * wm_sgmii_uses_mdio
8108 *
8109 * Check whether the transaction is to the internal PHY or the external
8110 * MDIO interface. Return true if it's MDIO.
8111 */
8112 static bool
8113 wm_sgmii_uses_mdio(struct wm_softc *sc)
8114 {
8115 uint32_t reg;
8116 bool ismdio = false;
8117
8118 switch (sc->sc_type) {
8119 case WM_T_82575:
8120 case WM_T_82576:
8121 reg = CSR_READ(sc, WMREG_MDIC);
8122 ismdio = ((reg & MDIC_DEST) != 0);
8123 break;
8124 case WM_T_82580:
8125 case WM_T_I350:
8126 case WM_T_I354:
8127 case WM_T_I210:
8128 case WM_T_I211:
8129 reg = CSR_READ(sc, WMREG_MDICNFG);
8130 ismdio = ((reg & MDICNFG_DEST) != 0);
8131 break;
8132 default:
8133 break;
8134 }
8135
8136 return ismdio;
8137 }
8138
8139 /*
8140 * wm_sgmii_readreg: [mii interface function]
8141 *
8142 * Read a PHY register on the SGMII
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8145 */
8146 static int
8147 wm_sgmii_readreg(device_t self, int phy, int reg)
8148 {
8149 struct wm_softc *sc = device_private(self);
8150 uint32_t i2ccmd;
8151 int i, rv;
8152
8153 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8154 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8155 __func__);
8156 return 0;
8157 }
8158
8159 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8160 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8161 | I2CCMD_OPCODE_READ;
8162 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8163
8164 /* Poll the ready bit */
8165 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8166 delay(50);
8167 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8168 if (i2ccmd & I2CCMD_READY)
8169 break;
8170 }
8171 if ((i2ccmd & I2CCMD_READY) == 0)
8172 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
8173 if ((i2ccmd & I2CCMD_ERROR) != 0)
8174 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8175
8176 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
8177
8178 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8179 return rv;
8180 }
8181
8182 /*
8183 * wm_sgmii_writereg: [mii interface function]
8184 *
8185 * Write a PHY register on the SGMII.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8188 */
8189 static void
8190 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
8191 {
8192 struct wm_softc *sc = device_private(self);
8193 uint32_t i2ccmd;
8194 int i;
8195 int val_swapped;
8196
8197 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8198 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8199 __func__);
8200 return;
8201 }
8202 /* Swap the data bytes for the I2C interface */
8203 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
8204 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8205 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8206 | I2CCMD_OPCODE_WRITE | val_swapped;
8207 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8208
8209 /* Poll the ready bit */
8210 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8211 delay(50);
8212 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8213 if (i2ccmd & I2CCMD_READY)
8214 break;
8215 }
8216 if ((i2ccmd & I2CCMD_READY) == 0)
8217 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
8218 if ((i2ccmd & I2CCMD_ERROR) != 0)
8219 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8220
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8222 }
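
/*
 * The I2CCMD data field is byte-swapped with respect to host order, so
 * both the read path and the write path above swap the two bytes.  A
 * minimal sketch of the transform; the helper name is hypothetical.
 */
#if 0
static inline uint16_t
wm_i2c_bswap16(uint16_t v)
{

	return ((v >> 8) & 0x00ff) | ((v << 8) & 0xff00);
}
#endif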
8223
8224 /* TBI related */
8225
8226 /*
8227 * wm_tbi_mediainit:
8228 *
8229 * Initialize media for use on 1000BASE-X devices.
8230 */
8231 static void
8232 wm_tbi_mediainit(struct wm_softc *sc)
8233 {
8234 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8235 const char *sep = "";
8236
8237 if (sc->sc_type < WM_T_82543)
8238 sc->sc_tipg = TIPG_WM_DFLT;
8239 else
8240 sc->sc_tipg = TIPG_LG_DFLT;
8241
8242 sc->sc_tbi_serdes_anegticks = 5;
8243
8244 /* Initialize our media structures */
8245 sc->sc_mii.mii_ifp = ifp;
8246 sc->sc_ethercom.ec_mii = &sc->sc_mii;
8247
8248 if ((sc->sc_type >= WM_T_82575)
8249 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
8250 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
8251 wm_serdes_mediachange, wm_serdes_mediastatus);
8252 else
8253 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
8254 wm_tbi_mediachange, wm_tbi_mediastatus);
8255
8256 /*
8257 * SWD Pins:
8258 *
8259 * 0 = Link LED (output)
8260 * 1 = Loss Of Signal (input)
8261 */
8262 sc->sc_ctrl |= CTRL_SWDPIO(0);
8263
8264 /* XXX Perhaps this is only for TBI */
8265 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
8266 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
8267
8268 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
8269 sc->sc_ctrl &= ~CTRL_LRST;
8270
8271 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8272
8273 #define ADD(ss, mm, dd) \
8274 do { \
8275 aprint_normal("%s%s", sep, ss); \
8276 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
8277 sep = ", "; \
8278 } while (/*CONSTCOND*/0)
8279
8280 aprint_normal_dev(sc->sc_dev, "");
8281
8282 /* Only 82545 is LX */
8283 if (sc->sc_type == WM_T_82545) {
8284 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
8285 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
8286 } else {
8287 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
8288 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
8289 }
8290 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
8291 aprint_normal("\n");
8292
8293 #undef ADD
8294
8295 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
8296 }
8297
8298 /*
8299 * wm_tbi_mediachange: [ifmedia interface function]
8300 *
8301 * Set hardware to newly-selected media on a 1000BASE-X device.
8302 */
8303 static int
8304 wm_tbi_mediachange(struct ifnet *ifp)
8305 {
8306 struct wm_softc *sc = ifp->if_softc;
8307 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8308 uint32_t status;
8309 int i;
8310
8311 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
8312 /* XXX need some work for >= 82571 and < 82575 */
8313 if (sc->sc_type < WM_T_82575)
8314 return 0;
8315 }
8316
8317 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8318 || (sc->sc_type >= WM_T_82575))
8319 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8320
8321 sc->sc_ctrl &= ~CTRL_LRST;
8322 sc->sc_txcw = TXCW_ANE;
8323 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8324 sc->sc_txcw |= TXCW_FD | TXCW_HD;
8325 else if (ife->ifm_media & IFM_FDX)
8326 sc->sc_txcw |= TXCW_FD;
8327 else
8328 sc->sc_txcw |= TXCW_HD;
8329
8330 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
8331 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
8332
8333 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
8334 device_xname(sc->sc_dev), sc->sc_txcw));
8335 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8336 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8337 CSR_WRITE_FLUSH(sc);
8338 delay(1000);
8339
8340 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
8341 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
8342
	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
	 * set if the optics detect a signal; on older chips the pin
	 * always reads as 0.
	 */
8347 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
8348 /* Have signal; wait for the link to come up. */
8349 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
8350 delay(10000);
8351 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
8352 break;
8353 }
8354
8355 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
8356 device_xname(sc->sc_dev),i));
8357
8358 status = CSR_READ(sc, WMREG_STATUS);
8359 DPRINTF(WM_DEBUG_LINK,
8360 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
8361 device_xname(sc->sc_dev),status, STATUS_LU));
8362 if (status & STATUS_LU) {
8363 /* Link is up. */
8364 DPRINTF(WM_DEBUG_LINK,
8365 ("%s: LINK: set media -> link up %s\n",
8366 device_xname(sc->sc_dev),
8367 (status & STATUS_FD) ? "FDX" : "HDX"));
8368
			/*
			 * NOTE: The hardware updates TFCE and RFCE in CTRL
			 * automatically, so we should update sc->sc_ctrl.
			 */
8373 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
8374 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8375 sc->sc_fcrtl &= ~FCRTL_XONE;
8376 if (status & STATUS_FD)
8377 sc->sc_tctl |=
8378 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8379 else
8380 sc->sc_tctl |=
8381 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8382 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
8383 sc->sc_fcrtl |= FCRTL_XONE;
8384 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8385 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
8386 WMREG_OLD_FCRTL : WMREG_FCRTL,
8387 sc->sc_fcrtl);
8388 sc->sc_tbi_linkup = 1;
8389 } else {
8390 if (i == WM_LINKUP_TIMEOUT)
8391 wm_check_for_link(sc);
8392 /* Link is down. */
8393 DPRINTF(WM_DEBUG_LINK,
8394 ("%s: LINK: set media -> link down\n",
8395 device_xname(sc->sc_dev)));
8396 sc->sc_tbi_linkup = 0;
8397 }
8398 } else {
8399 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
8400 device_xname(sc->sc_dev)));
8401 sc->sc_tbi_linkup = 0;
8402 }
8403
8404 wm_tbi_serdes_set_linkled(sc);
8405
8406 return 0;
8407 }
8408
8409 /*
8410 * wm_tbi_mediastatus: [ifmedia interface function]
8411 *
8412 * Get the current interface media status on a 1000BASE-X device.
8413 */
8414 static void
8415 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8416 {
8417 struct wm_softc *sc = ifp->if_softc;
8418 uint32_t ctrl, status;
8419
8420 ifmr->ifm_status = IFM_AVALID;
8421 ifmr->ifm_active = IFM_ETHER;
8422
8423 status = CSR_READ(sc, WMREG_STATUS);
8424 if ((status & STATUS_LU) == 0) {
8425 ifmr->ifm_active |= IFM_NONE;
8426 return;
8427 }
8428
8429 ifmr->ifm_status |= IFM_ACTIVE;
8430 /* Only 82545 is LX */
8431 if (sc->sc_type == WM_T_82545)
8432 ifmr->ifm_active |= IFM_1000_LX;
8433 else
8434 ifmr->ifm_active |= IFM_1000_SX;
8435 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
8436 ifmr->ifm_active |= IFM_FDX;
8437 else
8438 ifmr->ifm_active |= IFM_HDX;
8439 ctrl = CSR_READ(sc, WMREG_CTRL);
8440 if (ctrl & CTRL_RFCE)
8441 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
8442 if (ctrl & CTRL_TFCE)
8443 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
8444 }
8445
8446 /* XXX TBI only */
8447 static int
8448 wm_check_for_link(struct wm_softc *sc)
8449 {
8450 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8451 uint32_t rxcw;
8452 uint32_t ctrl;
8453 uint32_t status;
8454 uint32_t sig;
8455
8456 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
8457 /* XXX need some work for >= 82571 */
8458 if (sc->sc_type >= WM_T_82571) {
8459 sc->sc_tbi_linkup = 1;
8460 return 0;
8461 }
8462 }
8463
8464 rxcw = CSR_READ(sc, WMREG_RXCW);
8465 ctrl = CSR_READ(sc, WMREG_CTRL);
8466 status = CSR_READ(sc, WMREG_STATUS);
8467
8468 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8469
8470 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8471 device_xname(sc->sc_dev), __func__,
8472 ((ctrl & CTRL_SWDPIN(1)) == sig),
8473 ((status & STATUS_LU) != 0),
8474 ((rxcw & RXCW_C) != 0)
8475 ));
8476
8477 /*
8478 * SWDPIN LU RXCW
8479 * 0 0 0
8480 * 0 0 1 (should not happen)
8481 * 0 1 0 (should not happen)
8482 * 0 1 1 (should not happen)
8483 * 1 0 0 Disable autonego and force linkup
8484 * 1 0 1 got /C/ but not linkup yet
8485 * 1 1 0 (linkup)
8486 * 1 1 1 If IFM_AUTO, back to autonego
8487 *
8488 */
8489 if (((ctrl & CTRL_SWDPIN(1)) == sig)
8490 && ((status & STATUS_LU) == 0)
8491 && ((rxcw & RXCW_C) == 0)) {
8492 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8493 __func__));
8494 sc->sc_tbi_linkup = 0;
8495 /* Disable auto-negotiation in the TXCW register */
8496 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8497
		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: The hardware updated TFCE and RFCE in CTRL
		 * automatically, so we should update sc->sc_ctrl.
		 */
8504 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8505 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8506 } else if (((status & STATUS_LU) != 0)
8507 && ((rxcw & RXCW_C) != 0)
8508 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8509 sc->sc_tbi_linkup = 1;
8510 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8511 __func__));
8512 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8513 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8514 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8515 && ((rxcw & RXCW_C) != 0)) {
8516 DPRINTF(WM_DEBUG_LINK, ("/C/"));
8517 } else {
8518 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8519 status));
8520 }
8521
8522 return 0;
8523 }
8524
8525 /*
8526 * wm_tbi_tick:
8527 *
8528 * Check the link on TBI devices.
8529 * This function acts as mii_tick().
8530 */
8531 static void
8532 wm_tbi_tick(struct wm_softc *sc)
8533 {
8534 struct mii_data *mii = &sc->sc_mii;
8535 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8536 uint32_t status;
8537
8538 KASSERT(WM_TX_LOCKED(sc));
8539
8540 status = CSR_READ(sc, WMREG_STATUS);
8541
8542 /* XXX is this needed? */
8543 (void)CSR_READ(sc, WMREG_RXCW);
8544 (void)CSR_READ(sc, WMREG_CTRL);
8545
8546 /* set link status */
8547 if ((status & STATUS_LU) == 0) {
8548 DPRINTF(WM_DEBUG_LINK,
8549 ("%s: LINK: checklink -> down\n",
8550 device_xname(sc->sc_dev)));
8551 sc->sc_tbi_linkup = 0;
8552 } else if (sc->sc_tbi_linkup == 0) {
8553 DPRINTF(WM_DEBUG_LINK,
8554 ("%s: LINK: checklink -> up %s\n",
8555 device_xname(sc->sc_dev),
8556 (status & STATUS_FD) ? "FDX" : "HDX"));
8557 sc->sc_tbi_linkup = 1;
8558 sc->sc_tbi_serdes_ticks = 0;
8559 }
8560
8561 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
8562 goto setled;
8563
8564 if ((status & STATUS_LU) == 0) {
8565 sc->sc_tbi_linkup = 0;
8566 /* If the timer expired, retry autonegotiation */
8567 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8568 && (++sc->sc_tbi_serdes_ticks
8569 >= sc->sc_tbi_serdes_anegticks)) {
8570 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8571 sc->sc_tbi_serdes_ticks = 0;
8572 /*
8573 * Reset the link, and let autonegotiation do
8574 * its thing
8575 */
8576 sc->sc_ctrl |= CTRL_LRST;
8577 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8578 CSR_WRITE_FLUSH(sc);
8579 delay(1000);
8580 sc->sc_ctrl &= ~CTRL_LRST;
8581 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8582 CSR_WRITE_FLUSH(sc);
8583 delay(1000);
8584 CSR_WRITE(sc, WMREG_TXCW,
8585 sc->sc_txcw & ~TXCW_ANE);
8586 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8587 }
8588 }
8589
8590 setled:
8591 wm_tbi_serdes_set_linkled(sc);
8592 }
8593
8594 /* SERDES related */
8595 static void
8596 wm_serdes_power_up_link_82575(struct wm_softc *sc)
8597 {
8598 uint32_t reg;
8599
8600 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
8601 && ((sc->sc_flags & WM_F_SGMII) == 0))
8602 return;
8603
8604 reg = CSR_READ(sc, WMREG_PCS_CFG);
8605 reg |= PCS_CFG_PCS_EN;
8606 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
8607
8608 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8609 reg &= ~CTRL_EXT_SWDPIN(3);
8610 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8611 CSR_WRITE_FLUSH(sc);
8612 }
8613
8614 static int
8615 wm_serdes_mediachange(struct ifnet *ifp)
8616 {
8617 struct wm_softc *sc = ifp->if_softc;
8618 bool pcs_autoneg = true; /* XXX */
8619 uint32_t ctrl_ext, pcs_lctl, reg;
8620
8621 /* XXX Currently, this function is not called on 8257[12] */
8622 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8623 || (sc->sc_type >= WM_T_82575))
8624 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8625
8626 wm_serdes_power_up_link_82575(sc);
8627
8628 sc->sc_ctrl |= CTRL_SLU;
8629
8630 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
8631 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
8632
8633 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8634 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
8635 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
8636 case CTRL_EXT_LINK_MODE_SGMII:
8637 pcs_autoneg = true;
8638 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
8639 break;
8640 case CTRL_EXT_LINK_MODE_1000KX:
8641 pcs_autoneg = false;
8642 /* FALLTHROUGH */
8643 default:
		if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
8645 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
8646 pcs_autoneg = false;
8647 }
8648 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
8649 | CTRL_FRCFDX;
8650 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
8651 }
8652 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8653
8654 if (pcs_autoneg) {
8655 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
8656 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
8657
8658 reg = CSR_READ(sc, WMREG_PCS_ANADV);
8659 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
8660 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
8661 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
8662 } else
8663 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
8664
	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);

8668 return 0;
8669 }
8670
8671 static void
8672 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8673 {
8674 struct wm_softc *sc = ifp->if_softc;
8675 struct mii_data *mii = &sc->sc_mii;
8676 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8677 uint32_t pcs_adv, pcs_lpab, reg;
8678
8679 ifmr->ifm_status = IFM_AVALID;
8680 ifmr->ifm_active = IFM_ETHER;
8681
8682 /* Check PCS */
8683 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8684 if ((reg & PCS_LSTS_LINKOK) == 0) {
8685 ifmr->ifm_active |= IFM_NONE;
8686 sc->sc_tbi_linkup = 0;
8687 goto setled;
8688 }
8689
8690 sc->sc_tbi_linkup = 1;
8691 ifmr->ifm_status |= IFM_ACTIVE;
8692 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
8693 if ((reg & PCS_LSTS_FDX) != 0)
8694 ifmr->ifm_active |= IFM_FDX;
8695 else
8696 ifmr->ifm_active |= IFM_HDX;
8697 mii->mii_media_active &= ~IFM_ETH_FMASK;
8698 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
8699 /* Check flow */
8700 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8701 if ((reg & PCS_LSTS_AN_COMP) == 0) {
8702 printf("XXX LINKOK but not ACOMP\n");
8703 goto setled;
8704 }
8705 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
8706 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
8707 printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
8708 if ((pcs_adv & TXCW_SYM_PAUSE)
8709 && (pcs_lpab & TXCW_SYM_PAUSE)) {
8710 mii->mii_media_active |= IFM_FLOW
8711 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
8712 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
8713 && (pcs_adv & TXCW_ASYM_PAUSE)
8714 && (pcs_lpab & TXCW_SYM_PAUSE)
8715 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8716 mii->mii_media_active |= IFM_FLOW
8717 | IFM_ETH_TXPAUSE;
8718 } else if ((pcs_adv & TXCW_SYM_PAUSE)
8719 && (pcs_adv & TXCW_ASYM_PAUSE)
8720 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
8721 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8722 mii->mii_media_active |= IFM_FLOW
8723 | IFM_ETH_RXPAUSE;
		}
8726 }
8727 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8728 | (mii->mii_media_active & IFM_ETH_FMASK);
8729 setled:
8730 wm_tbi_serdes_set_linkled(sc);
8731 }
8732
8733 /*
8734 * wm_serdes_tick:
8735 *
8736 * Check the link on serdes devices.
8737 */
8738 static void
8739 wm_serdes_tick(struct wm_softc *sc)
8740 {
8741 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8742 struct mii_data *mii = &sc->sc_mii;
8743 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8744 uint32_t reg;
8745
8746 KASSERT(WM_TX_LOCKED(sc));
8747
8748 mii->mii_media_status = IFM_AVALID;
8749 mii->mii_media_active = IFM_ETHER;
8750
8751 /* Check PCS */
8752 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8753 if ((reg & PCS_LSTS_LINKOK) != 0) {
8754 mii->mii_media_status |= IFM_ACTIVE;
8755 sc->sc_tbi_linkup = 1;
8756 sc->sc_tbi_serdes_ticks = 0;
8757 mii->mii_media_active |= IFM_1000_SX; /* XXX */
8758 if ((reg & PCS_LSTS_FDX) != 0)
8759 mii->mii_media_active |= IFM_FDX;
8760 else
8761 mii->mii_media_active |= IFM_HDX;
8762 } else {
		mii->mii_media_active |= IFM_NONE;
8764 sc->sc_tbi_linkup = 0;
8765 /* If the timer expired, retry autonegotiation */
8766 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8767 && (++sc->sc_tbi_serdes_ticks
8768 >= sc->sc_tbi_serdes_anegticks)) {
8769 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8770 sc->sc_tbi_serdes_ticks = 0;
8771 /* XXX */
8772 wm_serdes_mediachange(ifp);
8773 }
8774 }
8775
8776 wm_tbi_serdes_set_linkled(sc);
8777 }
8778
8779 /* SFP related */
8780
8781 static int
8782 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
8783 {
8784 uint32_t i2ccmd;
8785 int i;
8786
8787 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
8788 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8789
8790 /* Poll the ready bit */
8791 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8792 delay(50);
8793 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8794 if (i2ccmd & I2CCMD_READY)
8795 break;
8796 }
8797 if ((i2ccmd & I2CCMD_READY) == 0)
8798 return -1;
8799 if ((i2ccmd & I2CCMD_ERROR) != 0)
8800 return -1;
8801
8802 *data = i2ccmd & 0x00ff;
8803
8804 return 0;
8805 }
8806
8807 static uint32_t
8808 wm_sfp_get_media_type(struct wm_softc *sc)
8809 {
8810 uint32_t ctrl_ext;
8811 uint8_t val = 0;
8812 int timeout = 3;
8813 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
8814 int rv = -1;
8815
8816 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8817 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
8818 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
8819 CSR_WRITE_FLUSH(sc);
8820
8821 /* Read SFP module data */
8822 while (timeout) {
8823 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
8824 if (rv == 0)
8825 break;
8826 delay(100*1000); /* XXX too big */
8827 timeout--;
8828 }
8829 if (rv != 0)
8830 goto out;
8831 switch (val) {
8832 case SFF_SFP_ID_SFF:
8833 aprint_normal_dev(sc->sc_dev,
8834 "Module/Connector soldered to board\n");
8835 break;
8836 case SFF_SFP_ID_SFP:
8837 aprint_normal_dev(sc->sc_dev, "SFP\n");
8838 break;
8839 case SFF_SFP_ID_UNKNOWN:
8840 goto out;
8841 default:
8842 break;
8843 }
8844
8845 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
8846 if (rv != 0) {
8847 goto out;
8848 }
8849
8850 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
8851 mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
8853 sc->sc_flags |= WM_F_SGMII;
8854 mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
8856 sc->sc_flags |= WM_F_SGMII;
8857 mediatype = WM_MEDIATYPE_SERDES;
8858 }
8859
8860 out:
8861 /* Restore I2C interface setting */
8862 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8863
8864 return mediatype;
8865 }

/*
 * NVM related.
 * Microwire, SPI (with or without EERD) and Flash.
 */
8870
/* Both SPI and Microwire */
8872
8873 /*
8874 * wm_eeprom_sendbits:
8875 *
8876 * Send a series of bits to the EEPROM.
8877 */
8878 static void
8879 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
8880 {
8881 uint32_t reg;
8882 int x;
8883
8884 reg = CSR_READ(sc, WMREG_EECD);
8885
8886 for (x = nbits; x > 0; x--) {
8887 if (bits & (1U << (x - 1)))
8888 reg |= EECD_DI;
8889 else
8890 reg &= ~EECD_DI;
8891 CSR_WRITE(sc, WMREG_EECD, reg);
8892 CSR_WRITE_FLUSH(sc);
8893 delay(2);
8894 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8895 CSR_WRITE_FLUSH(sc);
8896 delay(2);
8897 CSR_WRITE(sc, WMREG_EECD, reg);
8898 CSR_WRITE_FLUSH(sc);
8899 delay(2);
8900 }
8901 }
8902
8903 /*
8904 * wm_eeprom_recvbits:
8905 *
8906 * Receive a series of bits from the EEPROM.
8907 */
8908 static void
8909 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
8910 {
8911 uint32_t reg, val;
8912 int x;
8913
8914 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
8915
8916 val = 0;
8917 for (x = nbits; x > 0; x--) {
8918 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
8919 CSR_WRITE_FLUSH(sc);
8920 delay(2);
8921 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
8922 val |= (1U << (x - 1));
8923 CSR_WRITE(sc, WMREG_EECD, reg);
8924 CSR_WRITE_FLUSH(sc);
8925 delay(2);
8926 }
8927 *valp = val;
8928 }
8929
8930 /* Microwire */
8931
8932 /*
8933 * wm_nvm_read_uwire:
8934 *
8935 * Read a word from the EEPROM using the MicroWire protocol.
8936 */
8937 static int
8938 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8939 {
8940 uint32_t reg, val;
8941 int i;
8942
8943 for (i = 0; i < wordcnt; i++) {
8944 /* Clear SK and DI. */
8945 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
8946 CSR_WRITE(sc, WMREG_EECD, reg);
8947
		/*
		 * XXX: Workaround for a bug in qemu-0.12.x and prior
		 * and in Xen.
		 *
		 * We use this workaround only for the 82540 because
		 * qemu's e1000 acts as an 82540.
		 */
8955 if (sc->sc_type == WM_T_82540) {
8956 reg |= EECD_SK;
8957 CSR_WRITE(sc, WMREG_EECD, reg);
8958 reg &= ~EECD_SK;
8959 CSR_WRITE(sc, WMREG_EECD, reg);
8960 CSR_WRITE_FLUSH(sc);
8961 delay(2);
8962 }
8963 /* XXX: end of workaround */
8964
8965 /* Set CHIP SELECT. */
8966 reg |= EECD_CS;
8967 CSR_WRITE(sc, WMREG_EECD, reg);
8968 CSR_WRITE_FLUSH(sc);
8969 delay(2);
8970
8971 /* Shift in the READ command. */
8972 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
8973
8974 /* Shift in address. */
8975 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
8976
8977 /* Shift out the data. */
8978 wm_eeprom_recvbits(sc, &val, 16);
8979 data[i] = val & 0xffff;
8980
8981 /* Clear CHIP SELECT. */
8982 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
8983 CSR_WRITE(sc, WMREG_EECD, reg);
8984 CSR_WRITE_FLUSH(sc);
8985 delay(2);
8986 }
8987
8988 return 0;
8989 }
8990
8991 /* SPI */
8992
8993 /*
8994 * Set SPI and FLASH related information from the EECD register.
8995 * For 82541 and 82547, the word size is taken from EEPROM.
8996 */
8997 static int
8998 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
8999 {
9000 int size;
9001 uint32_t reg;
9002 uint16_t data;
9003
9004 reg = CSR_READ(sc, WMREG_EECD);
9005 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
9006
9007 /* Read the size of NVM from EECD by default */
9008 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9009 switch (sc->sc_type) {
9010 case WM_T_82541:
9011 case WM_T_82541_2:
9012 case WM_T_82547:
9013 case WM_T_82547_2:
9014 /* Set dummy value to access EEPROM */
9015 sc->sc_nvm_wordsize = 64;
9016 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
9017 reg = data;
9018 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9019 if (size == 0)
9020 size = 6; /* 64 word size */
9021 else
9022 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
9023 break;
9024 case WM_T_80003:
9025 case WM_T_82571:
9026 case WM_T_82572:
9027 case WM_T_82573: /* SPI case */
9028 case WM_T_82574: /* SPI case */
9029 case WM_T_82583: /* SPI case */
9030 size += NVM_WORD_SIZE_BASE_SHIFT;
9031 if (size > 14)
9032 size = 14;
9033 break;
9034 case WM_T_82575:
9035 case WM_T_82576:
9036 case WM_T_82580:
9037 case WM_T_I350:
9038 case WM_T_I354:
9039 case WM_T_I210:
9040 case WM_T_I211:
9041 size += NVM_WORD_SIZE_BASE_SHIFT;
9042 if (size > 15)
9043 size = 15;
9044 break;
9045 default:
9046 aprint_error_dev(sc->sc_dev,
9047 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
9050 }
9051
9052 sc->sc_nvm_wordsize = 1 << size;
9053
9054 return 0;
9055 }
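
/*
 * Worked example for the size computation above: the EECD size field is
 * an exponent biased by NVM_WORD_SIZE_BASE_SHIFT, so with a base shift
 * of 6 a raw field value of 2 yields 1 << (2 + 6) == 256 words.  The
 * function name is hypothetical.
 */
#if 0
static int
wm_nvm_words_from_eecd_field(int field)
{

	/* e.g., field 2 with a base shift of 6 -> 1 << 8 == 256 words */
	return 1 << (field + NVM_WORD_SIZE_BASE_SHIFT);
}
#endif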
9056
9057 /*
9058 * wm_nvm_ready_spi:
9059 *
9060 * Wait for a SPI EEPROM to be ready for commands.
9061 */
9062 static int
9063 wm_nvm_ready_spi(struct wm_softc *sc)
9064 {
9065 uint32_t val;
9066 int usec;
9067
9068 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
9069 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
9070 wm_eeprom_recvbits(sc, &val, 8);
9071 if ((val & SPI_SR_RDY) == 0)
9072 break;
9073 }
9074 if (usec >= SPI_MAX_RETRIES) {
9075 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
9076 return 1;
9077 }
9078 return 0;
9079 }
9080
9081 /*
9082 * wm_nvm_read_spi:
9083 *
 * Read a word from the EEPROM using the SPI protocol.
9085 */
9086 static int
9087 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9088 {
9089 uint32_t reg, val;
9090 int i;
9091 uint8_t opc;
9092
9093 /* Clear SK and CS. */
9094 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
9095 CSR_WRITE(sc, WMREG_EECD, reg);
9096 CSR_WRITE_FLUSH(sc);
9097 delay(2);
9098
9099 if (wm_nvm_ready_spi(sc))
9100 return 1;
9101
9102 /* Toggle CS to flush commands. */
9103 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
9104 CSR_WRITE_FLUSH(sc);
9105 delay(2);
9106 CSR_WRITE(sc, WMREG_EECD, reg);
9107 CSR_WRITE_FLUSH(sc);
9108 delay(2);
9109
9110 opc = SPI_OPC_READ;
9111 if (sc->sc_nvm_addrbits == 8 && word >= 128)
9112 opc |= SPI_OPC_A8;
9113
9114 wm_eeprom_sendbits(sc, opc, 8);
9115 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
9116
9117 for (i = 0; i < wordcnt; i++) {
9118 wm_eeprom_recvbits(sc, &val, 16);
9119 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
9120 }
9121
9122 /* Raise CS and clear SK. */
9123 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
9124 CSR_WRITE(sc, WMREG_EECD, reg);
9125 CSR_WRITE_FLUSH(sc);
9126 delay(2);
9127
9128 return 0;
9129 }
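
/*
 * Note on the addressing above, with a small example: SPI serial EEPROMs
 * are byte addressed, hence the "word << 1".  Parts with 8-bit addressing
 * expose the ninth address bit through the SPI_OPC_A8 opcode modifier, so
 * word 0x90 (byte address 0x120) needs A8 set.  The function name is
 * hypothetical.
 */
#if 0
static uint32_t
wm_spi_byte_address(int word)
{

	/* word 0x90 -> byte 0x120; bit 8 is set, so SPI_OPC_A8 is needed */
	return word << 1;
}
#endif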
9130
/* Reading via the EERD register */
9132
9133 static int
9134 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
9135 {
9136 uint32_t attempts = 100000;
9137 uint32_t i, reg = 0;
9138 int32_t done = -1;
9139
9140 for (i = 0; i < attempts; i++) {
9141 reg = CSR_READ(sc, rw);
9142
9143 if (reg & EERD_DONE) {
9144 done = 0;
9145 break;
9146 }
9147 delay(5);
9148 }
9149
9150 return done;
9151 }
9152
9153 static int
9154 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
9155 uint16_t *data)
9156 {
9157 int i, eerd = 0;
9158 int error = 0;
9159
9160 for (i = 0; i < wordcnt; i++) {
9161 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
9162
9163 CSR_WRITE(sc, WMREG_EERD, eerd);
9164 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
9165 if (error != 0)
9166 break;
9167
9168 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
9169 }
9170
9171 return error;
9172 }
9173
9174 /* Flash */
9175
9176 static int
9177 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
9178 {
9179 uint32_t eecd;
9180 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
9181 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
9182 uint8_t sig_byte = 0;
9183
9184 switch (sc->sc_type) {
9185 case WM_T_ICH8:
9186 case WM_T_ICH9:
9187 eecd = CSR_READ(sc, WMREG_EECD);
9188 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
9189 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
9190 return 0;
9191 }
9192 /* FALLTHROUGH */
9193 default:
9194 /* Default to 0 */
9195 *bank = 0;
9196
9197 /* Check bank 0 */
9198 wm_read_ich8_byte(sc, act_offset, &sig_byte);
9199 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9200 *bank = 0;
9201 return 0;
9202 }
9203
9204 /* Check bank 1 */
9205 wm_read_ich8_byte(sc, act_offset + bank1_offset,
9206 &sig_byte);
9207 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9208 *bank = 1;
9209 return 0;
9210 }
9211 }
9212
9213 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
9214 device_xname(sc->sc_dev)));
9215 return -1;
9216 }
9217
9218 /******************************************************************************
9219 * This function does initial flash setup so that a new read/write/erase cycle
9220 * can be started.
9221 *
9222 * sc - The pointer to the hw structure
9223 ****************************************************************************/
9224 static int32_t
9225 wm_ich8_cycle_init(struct wm_softc *sc)
9226 {
9227 uint16_t hsfsts;
9228 int32_t error = 1;
9229 int32_t i = 0;
9230
9231 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9232
	/* Maybe check the Flash Descriptor Valid bit in HW status */
9234 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
9235 return error;
9236 }
9237
	/* Clear FCERR and DAEL in HW status by writing 1s */
9240 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
9241
9242 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9243
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads as 1 after a
	 * hardware reset, which could then be used to tell whether a cycle
	 * is in progress or has completed.  We should also have some
	 * software semaphore mechanism guarding FDONE or the
	 * cycle-in-progress bit so that access by two threads can be
	 * serialized, or some way to keep two threads from starting a
	 * cycle at the same time.
	 */
9254
9255 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
9256 /*
9257 * There is no cycle running at present, so we can start a
9258 * cycle
9259 */
9260
9261 /* Begin by setting Flash Cycle Done. */
9262 hsfsts |= HSFSTS_DONE;
9263 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9264 error = 0;
9265 } else {
		/*
		 * Otherwise, poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
9270 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
9271 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9272 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
9273 error = 0;
9274 break;
9275 }
9276 delay(1);
9277 }
9278 if (error == 0) {
			/*
			 * The previous cycle ended before the timeout;
			 * now set Flash Cycle Done.
			 */
9283 hsfsts |= HSFSTS_DONE;
9284 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9285 }
9286 }
9287 return error;
9288 }
9289
9290 /******************************************************************************
9291 * This function starts a flash cycle and waits for its completion
9292 *
9293 * sc - The pointer to the hw structure
9294 ****************************************************************************/
9295 static int32_t
9296 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
9297 {
9298 uint16_t hsflctl;
9299 uint16_t hsfsts;
9300 int32_t error = 1;
9301 uint32_t i = 0;
9302
	/* Start a cycle by writing 1 to Flash Cycle Go in HW Flash Control */
9304 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
9305 hsflctl |= HSFCTL_GO;
9306 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
9307
	/* Wait until the FDONE bit is set */
9309 do {
9310 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9311 if (hsfsts & HSFSTS_DONE)
9312 break;
9313 delay(1);
9314 i++;
9315 } while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
9317 error = 0;
9318
9319 return error;
9320 }
9321
9322 /******************************************************************************
9323 * Reads a byte or word from the NVM using the ICH8 flash access registers.
9324 *
9325 * sc - The pointer to the hw structure
9326 * index - The index of the byte or word to read.
9327 * size - Size of data to read, 1=byte 2=word
9328 * data - Pointer to the word to store the value read.
9329 *****************************************************************************/
9330 static int32_t
9331 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
9332 uint32_t size, uint16_t *data)
9333 {
9334 uint16_t hsfsts;
9335 uint16_t hsflctl;
9336 uint32_t flash_linear_address;
9337 uint32_t flash_data = 0;
9338 int32_t error = 1;
9339 int32_t count = 0;
9340
	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
9343 return error;
9344
9345 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
9346 sc->sc_ich8_flash_base;
9347
9348 do {
9349 delay(1);
9350 /* Steps */
9351 error = wm_ich8_cycle_init(sc);
9352 if (error)
9353 break;
9354
9355 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* A byte count field of 0/1 selects a 1- or 2-byte read. */
9357 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
9358 & HSFCTL_BCOUNT_MASK;
9359 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
9360 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
9361
9362 /*
9363 * Write the last 24 bits of index into Flash Linear address
9364 * field in Flash Address
9365 */
9366 /* TODO: TBD maybe check the index against the size of flash */
9367
9368 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
9369
9370 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
9371
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read the data out of Flash
		 * Data0, least significant byte first.
		 */
9378 if (error == 0) {
9379 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
9380 if (size == 1)
9381 *data = (uint8_t)(flash_data & 0x000000FF);
9382 else if (size == 2)
9383 *data = (uint16_t)(flash_data & 0x0000FFFF);
9384 break;
9385 } else {
9386 /*
9387 * If we've gotten here, then things are probably
9388 * completely hosed, but if the error condition is
9389 * detected, it won't hurt to give it another try...
9390 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
9391 */
9392 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9393 if (hsfsts & HSFSTS_ERR) {
9394 /* Repeat for some time before giving up. */
9395 continue;
9396 } else if ((hsfsts & HSFSTS_DONE) == 0)
9397 break;
9398 }
9399 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
9400
9401 return error;
9402 }
9403
9404 /******************************************************************************
9405 * Reads a single byte from the NVM using the ICH8 flash access registers.
9406 *
9407 * sc - pointer to wm_hw structure
9408 * index - The index of the byte to read.
9409 * data - Pointer to a byte to store the value read.
9410 *****************************************************************************/
9411 static int32_t
9412 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
9413 {
9414 int32_t status;
9415 uint16_t word = 0;
9416
9417 status = wm_read_ich8_data(sc, index, 1, &word);
9418 if (status == 0)
9419 *data = (uint8_t)word;
9420 else
9421 *data = 0;
9422
9423 return status;
9424 }
9425
9426 /******************************************************************************
9427 * Reads a word from the NVM using the ICH8 flash access registers.
9428 *
9429 * sc - pointer to wm_hw structure
9430 * index - The starting byte index of the word to read.
9431 * data - Pointer to a word to store the value read.
9432 *****************************************************************************/
9433 static int32_t
9434 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
9435 {
9436 int32_t status;
9437
9438 status = wm_read_ich8_data(sc, index, 2, data);
9439 return status;
9440 }
9441
9442 /******************************************************************************
9443 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
9444 * register.
9445 *
9446 * sc - Struct containing variables accessed by shared code
9447 * offset - offset of word in the EEPROM to read
9448 * data - word read from the EEPROM
9449 * words - number of words to read
9450 *****************************************************************************/
9451 static int
9452 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
9453 {
9454 int32_t error = 0;
9455 uint32_t flash_bank = 0;
9456 uint32_t act_offset = 0;
9457 uint32_t bank_offset = 0;
9458 uint16_t word = 0;
9459 uint16_t i = 0;
9460
9461 /*
9462 * We need to know which is the valid flash bank. In the event
9463 * that we didn't allocate eeprom_shadow_ram, we may not be
9464 * managing flash_bank. So it cannot be trusted and needs
9465 * to be updated with each read.
9466 */
9467 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
9468 if (error) {
9469 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
9470 device_xname(sc->sc_dev)));
9471 flash_bank = 0;
9472 }
9473
	/*
	 * Adjust the offset if we're on bank 1, converting the bank size
	 * from words to bytes.
	 */
9478 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
9479
9480 error = wm_get_swfwhw_semaphore(sc);
9481 if (error) {
9482 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9483 __func__);
9484 return error;
9485 }
9486
9487 for (i = 0; i < words; i++) {
9488 /* The NVM part needs a byte offset, hence * 2 */
9489 act_offset = bank_offset + ((offset + i) * 2);
9490 error = wm_read_ich8_word(sc, act_offset, &word);
9491 if (error) {
9492 aprint_error_dev(sc->sc_dev,
9493 "%s: failed to read NVM\n", __func__);
9494 break;
9495 }
9496 data[i] = word;
9497 }
9498
9499 wm_put_swfwhw_semaphore(sc);
9500 return error;
9501 }

/* iNVM */

static int
wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
{
	int32_t rv = -1;	/* Fail unless the word is found below. */
	uint32_t invm_dword;
	uint16_t i;
	uint8_t record_type, word_address;

	for (i = 0; i < INVM_SIZE; i++) {
		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
			break;
		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				rv = 0;
				break;
			}
		}
	}

	return rv;
}
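
/*
 * Usage sketch (hypothetical caller): pull a single autoloaded word,
 * e.g. the first station-address word, straight out of the iNVM array;
 * a nonzero return means the word was never autoloaded.
 */
#if 0
	uint16_t w;

	if (wm_nvm_read_word_invm(sc, NVM_OFF_MACADDR, &w) == 0) {
		/* w now holds NVM word NVM_OFF_MACADDR */
	}
#endif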

static int
wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
{
	int rv = 0;
	int i;

	for (i = 0; i < words; i++) {
		switch (offset + i) {
		case NVM_OFF_MACADDR:
		case NVM_OFF_MACADDR1:
		case NVM_OFF_MACADDR2:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = 0xffff;
				rv = -1;
			}
			break;
		case NVM_OFF_CFG2:
			rv = wm_nvm_read_word_invm(sc, offset, data);
			if (rv != 0) {
				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_CFG4:
			rv = wm_nvm_read_word_invm(sc, offset, data);
			if (rv != 0) {
				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_LED_1_CFG:
			rv = wm_nvm_read_word_invm(sc, offset, data);
			if (rv != 0) {
				*data = NVM_LED_1_CFG_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_LED_0_2_CFG:
			rv = wm_nvm_read_word_invm(sc, offset, data);
			if (rv != 0) {
				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_ID_LED_SETTINGS:
			rv = wm_nvm_read_word_invm(sc, offset, data);
			if (rv != 0) {
				*data = ID_LED_RESERVED_FFFF;
				rv = 0;
			}
			break;
		default:
			DPRINTF(WM_DEBUG_NVM,
			    ("NVM word 0x%02x is not mapped.\n", offset));
			*data = NVM_RESERVED_WORD;
			break;
		}
	}

	return rv;
}

/* Lock, detect NVM type, validate checksum, read version and data */

/*
 * wm_nvm_acquire:
 *
 *	Perform the EEPROM handshake required on some chips.
 */
static int
wm_nvm_acquire(struct wm_softc *sc)
{
	uint32_t reg;
	int x;
	int ret = 0;

	/* Always succeeds for flash-backed NVM. */
	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
		return 0;

	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
		ret = wm_get_swfwhw_semaphore(sc);
	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
		/* This will also do wm_get_swsm_semaphore() if needed */
		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
		ret = wm_get_swsm_semaphore(sc);
	}

	if (ret) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return 1;
	}

	if (sc->sc_flags & WM_F_LOCK_EECD) {
		reg = CSR_READ(sc, WMREG_EECD);

		/* Request EEPROM access. */
		reg |= EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* ... and wait for it to be granted. */
		for (x = 0; x < 1000; x++) {
			reg = CSR_READ(sc, WMREG_EECD);
			if (reg & EECD_EE_GNT)
				break;
			delay(5);
		}
		if ((reg & EECD_EE_GNT) == 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not acquire EEPROM GNT\n");
			reg &= ~EECD_EE_REQ;
			CSR_WRITE(sc, WMREG_EECD, reg);
			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
				wm_put_swfwhw_semaphore(sc);
			if (sc->sc_flags & WM_F_LOCK_SWFW)
				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
			else if (sc->sc_flags & WM_F_LOCK_SWSM)
				wm_put_swsm_semaphore(sc);
			return 1;
		}
	}

	return 0;
}
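
/*
 * Usage sketch (hypothetical caller): the acquire/release pair brackets
 * every raw NVM access in the read paths below:
 */
#if 0
	if (wm_nvm_acquire(sc) == 0) {
		/* ... EECD bit-bang, SPI or EERD access here ... */
		wm_nvm_release(sc);
	}
#endif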

/*
 * wm_nvm_release:
 *
 *	Release the EEPROM mutex.
 */
static void
wm_nvm_release(struct wm_softc *sc)
{
	uint32_t reg;

	/* Nothing to release for flash-backed NVM. */
	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
		return;

	if (sc->sc_flags & WM_F_LOCK_EECD) {
		reg = CSR_READ(sc, WMREG_EECD);
		reg &= ~EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);
	}

	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
		wm_put_swfwhw_semaphore(sc);
	if (sc->sc_flags & WM_F_LOCK_SWFW)
		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
	else if (sc->sc_flags & WM_F_LOCK_SWSM)
		wm_put_swsm_semaphore(sc);
}

static int
wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
{
	uint32_t eecd = 0;

	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
	    || sc->sc_type == WM_T_82583) {
		eecd = CSR_READ(sc, WMREG_EECD);

		/* Isolate bits 15 and 16 */
		eecd = ((eecd >> 15) & 0x03);

		/* If both bits are set, the device is Flash type */
		if (eecd == 0x03)
			return 0;
	}
	return 1;
}

static int
wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
{
	uint32_t eec;

	eec = CSR_READ(sc, WMREG_EEC);
	if ((eec & EEC_FLASH_DETECTED) != 0)
		return 1;

	return 0;
}

/*
 * wm_nvm_validate_checksum:
 *
 *	The checksum is defined as the sum of the first 64 (16 bit) words.
 */
static int
wm_nvm_validate_checksum(struct wm_softc *sc)
{
	uint16_t checksum;
	uint16_t eeprom_data;
#ifdef WM_DEBUG
	uint16_t csum_wordaddr, valid_checksum;
#endif
	int i;

	checksum = 0;

	/* Don't check for I211 */
	if (sc->sc_type == WM_T_I211)
		return 0;

#ifdef WM_DEBUG
	if (sc->sc_type == WM_T_PCH_LPT) {
		csum_wordaddr = NVM_OFF_COMPAT;
		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
	} else {
		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
	}

	/* Dump the EEPROM image for debugging */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
		if ((eeprom_data & valid_checksum) == 0) {
			DPRINTF(WM_DEBUG_NVM,
			    ("%s: NVM needs to be updated (%04x != %04x)\n",
				device_xname(sc->sc_dev), eeprom_data,
				valid_checksum));
		}
	}

	if ((wm_debug & WM_DEBUG_NVM) != 0) {
		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
		for (i = 0; i < NVM_SIZE; i++) {
			if (wm_nvm_read(sc, i, 1, &eeprom_data))
				printf("XXXX ");
			else
				printf("%04hx ", eeprom_data);
			if (i % 8 == 7)
				printf("\n");
		}
	}
#endif /* WM_DEBUG */

	for (i = 0; i < NVM_SIZE; i++) {
		if (wm_nvm_read(sc, i, 1, &eeprom_data))
			return 1;
		checksum += eeprom_data;
	}

	if (checksum != (uint16_t) NVM_CHECKSUM) {
#ifdef WM_DEBUG
		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
#endif
	}

	return 0;
}
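
/*
 * A minimal sketch of the checksum arithmetic: an image is considered
 * valid when its first NVM_SIZE (64) words sum to NVM_CHECKSUM modulo
 * 2^16, so the last of those words is chosen to make the sum come out
 * right.  Hypothetical image-side computation, not used by the driver:
 */
#if 0
	uint16_t sum = 0, csum_word;

	for (i = 0; i < NVM_SIZE - 1; i++) {
		wm_nvm_read(sc, i, 1, &eeprom_data);
		sum += eeprom_data;
	}
	csum_word = (uint16_t)(NVM_CHECKSUM - sum);	/* word 0x3f */
#endif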

static void
wm_nvm_version(struct wm_softc *sc)
{
	uint16_t major, minor, build, patch;
	uint16_t uid0, uid1;
	uint16_t nvm_data;
	uint16_t off;
	bool check_version = false;
	bool check_optionrom = false;
	bool have_build = false;

	/*
	 * Version format:
	 *
	 *	XYYZ
	 *	X0YZ
	 *	X0YY
	 *
	 * Examples:
	 *
	 *	82571	0x50a2	5.10.2?	(the spec update notes 5.6-5.10)
	 *	82571	0x50a6	5.10.6?
	 *	82572	0x506a	5.6.10?
	 *	82572EI	0x5069	5.6.9?
	 *	82574L	0x1080	1.8.0?	(the spec update notes 2.1.4)
	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0?	(document says it's the default value)
	 */
	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
		check_version = true;
		check_optionrom = true;
		have_build = true;
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
			check_version = true;
		break;
	case WM_T_I211:
		/* XXX wm_nvm_version_invm(sc); */
		return;
	case WM_T_I210:
		if (!wm_nvm_get_flash_presence_i210(sc)) {
			/* XXX wm_nvm_version_invm(sc); */
			return;
		}
		/* FALLTHROUGH */
	case WM_T_I350:
	case WM_T_I354:
		check_version = true;
		check_optionrom = true;
		break;
	default:
		return;
	}
	if (check_version) {
		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
			build = nvm_data & NVM_BUILD_MASK;
			have_build = true;
		} else
			minor = nvm_data & 0x00ff;

		/* Convert the BCD-style minor field to decimal */
		minor = (minor / 16) * 10 + (minor % 16);

		aprint_verbose(", version %d.%d", major, minor);
		if (have_build)
			aprint_verbose(".%d", build);
		sc->sc_nvm_ver_major = major;
		sc->sc_nvm_ver_minor = minor;
	}
	if (check_optionrom) {
		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
		/* Option ROM version */
		if ((off != 0x0000) && (off != 0xffff)) {
			off += NVM_COMBO_VER_OFF;
			wm_nvm_read(sc, off + 1, 1, &uid1);
			wm_nvm_read(sc, off, 1, &uid0);
			if ((uid0 != 0) && (uid0 != 0xffff)
			    && (uid1 != 0) && (uid1 != 0xffff)) {
				/* 16 bits */
				major = uid0 >> 8;
				build = (uid0 << 8) | (uid1 >> 8);
				patch = uid1 & 0x00ff;
				aprint_verbose(", option ROM Version %d.%d.%d",
				    major, build, patch);
			}
		}
	}

	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
}
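
/*
 * Worked example of the decode above, for the 82571's 0x50a2 image
 * (assuming the 4/8/4-bit field split implied by the masks):
 *
 *	major = (0x50a2 & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT = 0x5
 *	minor = (0x50a2 & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT = 0x0a
 *	build =  0x50a2 & NVM_BUILD_MASK                     = 0x2
 *
 * The decimal conversion then turns 0x0a into 10, giving "5.10.2".
 */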

/*
 * wm_nvm_read:
 *
 *	Read data from the serial EEPROM.
 */
static int
wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	int rv;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		return 1;

	if (wm_nvm_acquire(sc))
		return 1;

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_INVM)
		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_SPI)
		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
	else
		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);

	wm_nvm_release(sc);
	return rv;
}
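
/*
 * Usage sketch (hypothetical caller): wm_nvm_read() is the only entry
 * point the rest of the driver needs; the dispatch above picks the
 * right backend.  E.g. fetching the station address, assuming the
 * three words are consecutive as the NVM_OFF_MACADDR* cases above
 * suggest:
 */
#if 0
	uint16_t myea[3];

	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) == 0) {
		/* myea[0..2] hold the station address as LE words */
	}
#endif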

/*
 * Hardware semaphores.
 * Very complex...
 */

static int
wm_get_swsm_semaphore(struct wm_softc *sc)
{
	int32_t timeout;
	uint32_t swsm;

	if (sc->sc_flags & WM_F_LOCK_SWSM) {
		/* Get the SW semaphore. */
		timeout = sc->sc_nvm_wordsize + 1;
		while (timeout) {
			swsm = CSR_READ(sc, WMREG_SWSM);

			if ((swsm & SWSM_SMBI) == 0)
				break;

			delay(50);
			timeout--;
		}

		if (timeout == 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not acquire SWSM SMBI\n");
			return 1;
		}
	}

	/* Get the FW semaphore. */
	timeout = sc->sc_nvm_wordsize + 1;
	while (timeout) {
		swsm = CSR_READ(sc, WMREG_SWSM);
		swsm |= SWSM_SWESMBI;
		CSR_WRITE(sc, WMREG_SWSM, swsm);
		/* If we managed to set the bit we got the semaphore. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		if (swsm & SWSM_SWESMBI)
			break;

		delay(50);
		timeout--;
	}

	if (timeout == 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not acquire SWSM SWESMBI\n");
		/* Release semaphores */
		wm_put_swsm_semaphore(sc);
		return 1;
	}
	return 0;
}

static void
wm_put_swsm_semaphore(struct wm_softc *sc)
{
	uint32_t swsm;

	swsm = CSR_READ(sc, WMREG_SWSM);
	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
	CSR_WRITE(sc, WMREG_SWSM, swsm);
}
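
/*
 * Note (sketch of the protocol, as commonly described for this family):
 * the SWSM handshake above is two-stage.  SMBI arbitrates among
 * software agents (it reads back as 0 only for the winner), while
 * SWESMBI arbitrates software against firmware (the write only
 * "sticks" when firmware is not holding the resource).  Releasing
 * clears both bits at once.
 */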

static int
wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;
	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
		if (sc->sc_flags & WM_F_LOCK_SWSM) {
			if (wm_get_swsm_semaphore(sc)) {
				aprint_error_dev(sc->sc_dev,
				    "%s: failed to get semaphore\n",
				    __func__);
				return 1;
			}
		}
		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
		if ((swfw_sync & (swmask | fwmask)) == 0) {
			swfw_sync |= swmask;
			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
			if (sc->sc_flags & WM_F_LOCK_SWSM)
				wm_put_swsm_semaphore(sc);
			return 0;
		}
		if (sc->sc_flags & WM_F_LOCK_SWSM)
			wm_put_swsm_semaphore(sc);
		delay(5000);
	}
	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
	    device_xname(sc->sc_dev), mask, swfw_sync);
	return 1;
}

static void
wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;

	if (sc->sc_flags & WM_F_LOCK_SWSM) {
		while (wm_get_swsm_semaphore(sc) != 0)
			continue;
	}
	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
	if (sc->sc_flags & WM_F_LOCK_SWSM)
		wm_put_swsm_semaphore(sc);
}
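
/*
 * SW_FW_SYNC pairs each resource with two bits: the software claim
 * (mask << SWFW_SOFT_SHIFT) and the firmware claim
 * (mask << SWFW_FIRM_SHIFT); a resource is free only when both are
 * clear.  Typical pairing around an access (sketch, using the EEPROM
 * resource bit that appears above):
 */
#if 0
	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) == 0) {
		/* ... access the shared resource ... */
		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
	}
#endif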

static int
wm_get_swfwhw_semaphore(struct wm_softc *sc)
{
	uint32_t ext_ctrl;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);

		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
			return 0;
		delay(5000);
	}
	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
	    device_xname(sc->sc_dev), ext_ctrl);
	return 1;
}

static void
wm_put_swfwhw_semaphore(struct wm_softc *sc)
{
	uint32_t ext_ctrl;

	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
}

static int
wm_get_hw_semaphore_82573(struct wm_softc *sc)
{
	int i = 0;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
	do {
		CSR_WRITE(sc, WMREG_EXTCNFCTR,
		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
			break;
		delay(2*1000);
		i++;
	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);

	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
		wm_put_hw_semaphore_82573(sc);
		log(LOG_ERR, "%s: Driver can't access the PHY\n",
		    device_xname(sc->sc_dev));
		return -1;
	}

	return 0;
}

static void
wm_put_hw_semaphore_82573(struct wm_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
}

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */

static int
wm_check_mng_mode(struct wm_softc *sc)
{
	int rv;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		rv = wm_check_mng_mode_ich8lan(sc);
		break;
	case WM_T_82574:
	case WM_T_82583:
		rv = wm_check_mng_mode_82574(sc);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_80003:
		rv = wm_check_mng_mode_generic(sc);
		break;
	default:
		/* Nothing to do */
		rv = 0;
		break;
	}

	return rv;
}

static int
wm_check_mng_mode_ich8lan(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}

static int
wm_check_mng_mode_82574(struct wm_softc *sc)
{
	uint16_t data;

	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);

	if ((data & NVM_CFG2_MNGM_MASK) != 0)
		return 1;

	return 0;
}

static int
wm_check_mng_mode_generic(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}

static int
wm_enable_mng_pass_thru(struct wm_softc *sc)
{
	uint32_t manc, fwsm, factps;

	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
		return 0;

	manc = CSR_READ(sc, WMREG_MANC);

	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
		device_xname(sc->sc_dev), manc));
	if ((manc & MANC_RECV_TCO_EN) == 0)
		return 0;

	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
		fwsm = CSR_READ(sc, WMREG_FWSM);
		factps = CSR_READ(sc, WMREG_FACTPS);
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((fwsm & FWSM_MODE_MASK)
			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
			return 1;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
		uint16_t data;

		factps = CSR_READ(sc, WMREG_FACTPS);
		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
			device_xname(sc->sc_dev), factps, data));
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((data & NVM_CFG2_MNGM_MASK)
			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
			return 1;
	} else if (((manc & MANC_SMBUS_EN) != 0)
	    && ((manc & MANC_ASF_EN) == 0))
		return 1;

	return 0;
}

static int
wm_check_reset_block(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		reg = CSR_READ(sc, WMREG_FWSM);
		if ((reg & FWSM_RSPCIPHY) != 0)
			return 0;
		else
			return -1;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
			return -1;
		else
			return 0;
	default:
		/* No problem */
		break;
	}

	return 0;
}
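
/*
 * Usage sketch (hypothetical call site): reset paths consult this
 * before touching the PHY, so that firmware-owned PHYs are left alone:
 */
#if 0
	if (wm_check_reset_block(sc) == 0)
		wm_gmii_reset(sc);	/* Safe: firmware isn't blocking */
#endif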

static void
wm_get_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_82573:
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}

static void
wm_release_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
		return;

	if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
	} else {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
	}
}
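
/*
 * The DRV_LOAD bit is the driver/firmware handoff: setting it tells
 * the management firmware (BMC/AMT) that a host driver owns the
 * interface, clearing it hands the port back.  Typical lifetime
 * (sketch, hypothetical call sites):
 */
#if 0
	wm_get_hw_control(sc);		/* attach / init */
	/* ... interface in use ... */
	wm_release_hw_control(sc);	/* detach / suspend */
#endif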

static void
wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
{
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_EXTCNFCTR);

	if (on != 0)
		reg |= EXTCNFCTR_GATE_PHY_CFG;
	else
		reg &= ~EXTCNFCTR_GATE_PHY_CFG;

	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
}

static void
wm_smbustopci(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && (wm_check_reset_block(sc) == 0)) {
		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		CSR_WRITE_FLUSH(sc);
		delay(10);
		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		CSR_WRITE_FLUSH(sc);
		delay(50*1000);

		/*
		 * Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if (sc->sc_type == WM_T_PCH2)
			wm_gate_hw_phy_config_ich8lan(sc, 1);
	}
}

static void
wm_init_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		/* Disable hardware interception of ARP */
		manc &= ~MANC_ARP_EN;

		/* Enable receiving management packets to the host */
		if (sc->sc_type >= WM_T_82571) {
			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_release_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		manc |= MANC_ARP_EN;
		if (sc->sc_type >= WM_T_82571)
			manc &= ~MANC_EN_MNG2HOST;

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flag is set separately, after the EEPROM
	 * contents have been read.
	 */
}

#ifdef WM_WOL
/* WOL in the newer chipset interfaces (pchlan) */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}

/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call the gig speed drop workaround on Gig disable
		 * before accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue a PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}

static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg;
	pcireg_t pmode;
	int pmreg;

	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if (child->mii_mpd_rev <= 2)
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0	/* For multicast packets */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type == WM_T_PCH) {
		wm_enable_phy_wakeup(sc);
	} else {
		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2))
	    && (sc->sc_phytype == WMPHY_IGP_3))
		wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}
#endif /* WM_WOL */

/* EEE */

static void
wm_set_eee_i350(struct wm_softc *sc)
{
	uint32_t ipcnfg, eeer;

	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
	eeer = CSR_READ(sc, WMREG_EEER);

	if ((sc->sc_flags & WM_F_EEE) != 0) {
		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	} else {
		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		ipcnfg &= ~IPCNFG_10BASE_TE;
		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	}

	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
	CSR_WRITE(sc, WMREG_EEER, eeer);
	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
}
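
/*
 * Usage sketch (hypothetical caller): the WM_F_EEE flag is the single
 * switch; this routine then programs IPCNFG/EEER to match it:
 */
#if 0
	sc->sc_flags |= WM_F_EEE;	/* or &= ~WM_F_EEE to disable */
	wm_set_eee_i350(sc);
#endif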

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds live in the PHY drivers.
 */

/* Workaround for 82566 Kumeran PCS lock loss */
static void
wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
{
	int miistatus, active, i;
	int reg;

	miistatus = sc->sc_mii.mii_media_status;

	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;

	active = sc->sc_mii.mii_media_active;

	/* Nothing to do if the link is other than 1Gbps */
	if (IFM_SUBTYPE(active) != IFM_1000_T)
		return;

	for (i = 0; i < 10; i++) {
		/* Read twice */
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! */

		/* Reset the PHY */
		wm_gmii_reset(sc);
		delay(5*1000);
	}

	/* Disable GigE link negotiation */
	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

	/*
	 * Call the gig speed drop workaround on Gig disable before
	 * accessing any PHY registers.
	 */
	wm_gig_downshift_workaround_ich8lan(sc);

 out:
	return;
}

/* WOL from S5 stops working */
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t kmrn_reg;

	/* Only for igp3 */
	if (sc->sc_phytype == WMPHY_IGP_3) {
		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
	}
}

/*
 * Workarounds for the PCH PHYs.
 * XXX should be moved to a new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled when the link runs at 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}

static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}

static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* Special case: the 82575 needs a manual init script ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code; we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	int pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Get the Power Management capability offset first, before
	 * touching any registers, so an early return leaves the
	 * hardware state unchanged. */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Save the WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF)
			break; /* OK */

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Bounce the device through D3 and back to D0 */
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore the WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore the MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}