/* $NetBSD: if_wm.c,v 1.355 2015/10/13 08:03:59 knakahara Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- Multi queue
 *	- Image Unique ID
 *	- LPLU other than PCH*
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.355 2015/10/13 08:03:59 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

/* Wrapped in do/while so DPRINTF() is safe inside if/else bodies. */
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

#ifdef __HAVE_PCI_MSI_MSIX
#define	WM_MSI_MSIX	1 /* Enable by default */
#endif

/*
 * This device driver divides the interrupts into TX, RX and link state.
 * The MSI-X vector indexes are below.
 */
#define	WM_MSIX_NINTR		3
#define	WM_MSIX_TXINTR_IDX	0
#define	WM_MSIX_RXINTR_IDX	1
#define	WM_MSIX_LINKINTR_IDX	2
#define	WM_MAX_NINTR		WM_MSIX_NINTR

/*
 * This device driver sets the affinity of each interrupt as below
 * (round-robin).  If the number of CPUs is less than the number of
 * interrupts, this driver uses the same CPU for multiple interrupts.
 */
#define	WM_MSIX_TXINTR_CPUID	0
#define	WM_MSIX_RXINTR_CPUID	1
#define	WM_MSIX_LINKINTR_CPUID	2
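
/*
 * Illustrative sketch (not driver code): the CPU actually used is the
 * intended CPU ID modulo the number of CPUs, so on a 2-CPU machine the
 * link interrupt (CPU ID 2) shares CPU 0 with the TX interrupt:
 *
 *	cpuid = WM_MSIX_LINKINTR_CPUID % ncpu;	(2 % 2 == 0)
 */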

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
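
/*
 * Illustrative example (not driver code): because the ring and queue
 * sizes are powers of two, the index wrap is a simple mask rather than
 * a modulo, e.g. with WM_NTXDESC(sc) == 4096:
 *
 *	WM_NEXTTX(sc, 4095) == ((4095 + 1) & 4095) == 0
 */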

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;
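
/*
 * Both descriptor formats share one DMA area; the union above is sized
 * for the largest ring (WM_NTXDESC_82544), and only the first
 * WM_NTXDESC(sc) entries are used on chips with smaller rings.
 */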

#define	WM_CDTXOFF(x)	(sizeof(wiseman_txdesc_t) * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t	swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};
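
/*
 * The table above is indexed by the RXPBS register field; see
 * wm_rxpbs_adjust_82580() (the values appear to be packet buffer
 * sizes in KB).
 */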

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* TX control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	txdescs_t *sc_txdescs_u;
	bus_dmamap_t sc_txdesc_dmamap;	/* control data DMA map */
	bus_dma_segment_t sc_txdesc_seg; /* control data segment */
	int sc_txdesc_rseg;		/* real number of control segment */
	size_t sc_txdesc_size;		/* control data size */
#define	sc_txdesc_dma	sc_txdesc_dmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_txdescs_u->sctxu_txdescs
#define	sc_nq_txdescs	sc_txdescs_u->sctxu_nq_txdescs

	/* RX control data structures. */
	wiseman_rxdesc_t *sc_rxdescs;
	bus_dmamap_t sc_rxdesc_dmamap;	/* control data DMA map */
	bus_dma_segment_t sc_rxdesc_seg; /* control data segment */
	int sc_rxdesc_rseg;		/* real number of control segment */
	size_t sc_rxdesc_size;		/* control data size */
#define	sc_rxdesc_dma	sc_rxdesc_dmamap->dm_segs[0].ds_addr

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

/* Wrapped in do/while so the macros are safe inside if/else bodies. */
#define	WM_TX_LOCK(_sc)							\
	do { if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock); } while (0)
#define	WM_TX_UNLOCK(_sc)						\
	do { if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock); } while (0)
#define	WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define	WM_RX_LOCK(_sc)							\
	do { if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock); } while (0)
#define	WM_RX_UNLOCK(_sc)						\
	do { if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock); } while (0)
#define	WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define	WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define	WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define	WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
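
/*
 * Typical usage sketch (illustrative, not a new code path): callers
 * take the TX lock, re-check sc_stopping, and call the *_locked
 * variant, e.g.:
 *
 *	WM_TX_LOCK(sc);
 *	if (!sc->sc_stopping)
 *		wm_start_locked(ifp);
 *	WM_TX_UNLOCK(sc);
 *
 * When the locks are NULL (non-MPSAFE configuration) the macros
 * degenerate to no-ops and the kernel lock serializes access instead.
 */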

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
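
/*
 * Example (illustrative): chaining two buffers of a multi-buffer
 * packet leaves sc_rxhead pointing at the first mbuf and sc_rxtail
 * at the last:
 *
 *	WM_RXCHAIN_RESET(sc);
 *	WM_RXCHAIN_LINK(sc, m0);	sc_rxhead == m0
 *	WM_RXCHAIN_LINK(sc, m1);	m0->m_next == m1, sc_rxtail == m1
 */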

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
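
/*
 * CSR_WRITE_FLUSH() forces posted PCI writes out to the chip by doing
 * a (discarded) read of the STATUS register.  Illustrative usage
 * pattern, assuming a reset-style register write:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10000);
 */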

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_txdesc_dma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_rxdesc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
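
/*
 * Example (illustrative): for a 64-bit descriptor address of
 * 0x0000000123456000, the _LO macro yields 0x23456000 and the _HI
 * macro yields 0x1; with a 32-bit bus_addr_t the high word is 0.
 */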

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_softc *, int, int, int);
static inline void wm_cdrxsync(struct wm_softc *, int, int);
static inline void wm_init_rxdesc(struct wm_softc *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
	uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *);
static void	wm_free_tx_descs(struct wm_softc *);
static void	wm_init_tx_descs(struct wm_softc *);
static int	wm_alloc_rx_descs(struct wm_softc *);
static void	wm_free_rx_descs(struct wm_softc *);
static void	wm_init_rx_descs(struct wm_softc *);
static int	wm_alloc_tx_buffer(struct wm_softc *);
static void	wm_free_tx_buffer(struct wm_softc *);
static void	wm_init_tx_buffer(struct wm_softc *);
static int	wm_alloc_rx_buffer(struct wm_softc *);
static void	wm_free_rx_buffer(struct wm_softc *);
static int	wm_init_rx_buffer(struct wm_softc *);
static void	wm_init_tx_queue(struct wm_softc *);
static int	wm_init_rx_queue(struct wm_softc *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
	uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *);
static void	wm_rxeof(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
#ifdef WM_MSI_MSIX
static int	wm_txintr_msix(void *);
static int	wm_rxintr_msix(void *);
static int	wm_linkintr_msix(void *);
#endif

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
	unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

#ifdef WM_MSI_MSIX
struct _msix_matrix {
	const char *intrname;
	int (*func)(void *);
	int intridx;
	int cpuid;
} msix_matrix[WM_MSIX_NINTR] = {
	{ "TX", wm_txintr_msix, WM_MSIX_TXINTR_IDX, WM_MSIX_TXINTR_CPUID },
	{ "RX", wm_rxintr_msix, WM_MSIX_RXINTR_IDX, WM_MSIX_RXINTR_CPUID },
	{ "LINK", wm_linkintr_msix, WM_MSIX_LINKINTR_IDX,
	    WM_MSIX_LINKINTR_CPUID },
};
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
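
/*
 * Illustrative note: these helpers implement the indirect I/O-mapped
 * register access described in wm_attach() below; the register offset
 * goes to the address window at offset 0 of the I/O BAR and the data
 * to the data window at offset 4.  A workaround path could, for
 * example, issue (assumed usage, for illustration only):
 *
 *	wm_io_write(sc, WMREG_CTRL, CTRL_RST);
 */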

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Descriptor sync/init functions.
 */
static inline void
wm_cdtxsync(struct wm_softc *sc, int start, int num, int ops)
{

	/* If it will wrap around, sync to the end of the ring. */
	if ((start + num) > WM_NTXDESC(sc)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap,
		    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
		    (WM_NTXDESC(sc) - start), ops);
		num -= (WM_NTXDESC(sc) - start);
		start = 0;
	}

	/* Now sync whatever is left. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap,
	    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
}
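
/*
 * Illustrative example: with a 4096-entry ring, syncing 8 descriptors
 * starting at index 4092 is split into two bus_dmamap_sync() calls,
 * one for indexes 4092-4095 and one for indexes 0-3.  A typical call
 * from the transmit path looks like:
 *
 *	wm_cdtxsync(sc, sc->sc_txnext, txs->txs_ndesc,
 *	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 */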

static inline void
wm_cdrxsync(struct wm_softc *sc, int start, int ops)
{

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap,
	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
}

static inline void
wm_init_rxdesc(struct wm_softc *sc, int start)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[start];
	wiseman_rxdesc_t *rxd = &sc->sc_rxdescs[start];
	struct mbuf *m = rxs->rxs_mbuf;

	/*
	 * Note: We scoot the packet forward 2 bytes in the buffer
	 * so that the payload after the Ethernet header is aligned
	 * to a 4-byte boundary.
	 *
	 * XXX BRAINDAMAGE ALERT!
	 * The stupid chip uses the same size for every buffer, which
	 * is set in the Receive Control register.  We are using the 2K
	 * size option, but what we REALLY want is (2K - 2)!  For this
	 * reason, we can't "scoot" packets longer than the standard
	 * Ethernet MTU.  On strict-alignment platforms, if the total
	 * size exceeds (2K - 2) we set align_tweak to 0 and let
	 * the upper layer copy the headers.
	 */
	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;

	wm_set_dma_addr(&rxd->wrx_addr,
	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
	rxd->wrx_len = 0;
	rxd->wrx_cksum = 0;
	rxd->wrx_status = 0;
	rxd->wrx_errors = 0;
	rxd->wrx_special = 0;
	wm_cdrxsync(sc, start, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	CSR_WRITE(sc, sc->sc_rdt_reg, start);
}
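
/*
 * Sketch of the align_tweak decision described above (illustrative;
 * the actual assignment lives elsewhere in the driver, and the exact
 * overhead terms are assumed here):
 *
 *	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
 *		sc->sc_align_tweak = 0;		(cannot scoot jumbo frames)
 *	else
 *		sc->sc_align_tweak = 2;		(align payload to 4 bytes)
 */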

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
#ifndef WM_MSI_MSIX
	pci_intr_handle_t ih;
#else
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
#endif
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, nvmword;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Disable MSI for Errata:
	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
	 *
	 *  82544: Errata 25
	 *  82540: Errata  6 (easy to reproduce device timeout)
	 *  82545: Errata  4 (easy to reproduce device timeout)
	 *  82546: Errata 26 (easy to reproduce device timeout)
	 *  82541: Errata  7 (easy to reproduce device timeout)
	 *
	 * "Byte Enables 2 and 3 are not set on MSI writes"
	 *
	 *  82571 & 82572: Errata 63
	 */
	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
	    || (sc->sc_type == WM_T_82572))
		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1527 /*
1528 * Map the device. All devices support memory-mapped access,
1529 * and it is required for normal operation.
1530 */
1531 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1532 switch (memtype) {
1533 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1534 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1535 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1536 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1537 break;
1538 default:
1539 memh_valid = 0;
1540 break;
1541 }
1542
1543 if (memh_valid) {
1544 sc->sc_st = memt;
1545 sc->sc_sh = memh;
1546 sc->sc_ss = memsize;
1547 } else {
1548 aprint_error_dev(sc->sc_dev,
1549 "unable to map device registers\n");
1550 return;
1551 }
1552
1553 /*
1554 * In addition, i82544 and later support I/O mapped indirect
1555 * register access. It is not desirable (nor supported in
1556 * this driver) to use it for normal operation, though it is
1557 * required to work around bugs in some chip versions.
1558 */
1559 if (sc->sc_type >= WM_T_82544) {
1560 /* First we have to find the I/O BAR. */
1561 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1562 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1563 if (memtype == PCI_MAPREG_TYPE_IO)
1564 break;
1565 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1566 PCI_MAPREG_MEM_TYPE_64BIT)
1567 i += 4; /* skip high bits, too */
1568 }
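/*
 * A 64-bit memory BAR occupies two consecutive 32-bit registers
 * (e.g. 0x10 and 0x14), which is why the high half is skipped
 * above while scanning for the I/O BAR.
 */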
1569 if (i < PCI_MAPREG_END) {
1570 /*
1571 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1572 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1573 * That's not a problem, because those newer chips
1574 * don't have this bug.
1575 *
1576 * The i8254x apparently doesn't respond when the
1577 * I/O BAR is 0, which looks somewhat like it hasn't
1578 * been configured.
1579 */
1580 preg = pci_conf_read(pc, pa->pa_tag, i);
1581 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1582 aprint_error_dev(sc->sc_dev,
1583 "WARNING: I/O BAR at zero.\n");
1584 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1585 0, &sc->sc_iot, &sc->sc_ioh,
1586 NULL, &sc->sc_ios) == 0) {
1587 sc->sc_flags |= WM_F_IOH_VALID;
1588 } else {
1589 aprint_error_dev(sc->sc_dev,
1590 "WARNING: unable to map I/O space\n");
1591 }
1592 }
1593
1594 }
1595
1596 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1597 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1598 preg |= PCI_COMMAND_MASTER_ENABLE;
1599 if (sc->sc_type < WM_T_82542_2_1)
1600 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1601 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1602
1603 /* power up chip */
1604 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1605 NULL)) && error != EOPNOTSUPP) {
1606 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1607 return;
1608 }
1609
1610 #ifndef WM_MSI_MSIX
1611 /*
1612 * Map and establish our interrupt.
1613 */
1614 if (pci_intr_map(pa, &ih)) {
1615 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1616 return;
1617 }
1618 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
1619 #ifdef WM_MPSAFE
1620 pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
1621 #endif
1622 sc->sc_ihs[0] = pci_intr_establish_xname(pc, ih, IPL_NET,
1623 wm_intr_legacy, sc, device_xname(sc->sc_dev));
1624 if (sc->sc_ihs[0] == NULL) {
1625 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1626 if (intrstr != NULL)
1627 aprint_error(" at %s", intrstr);
1628 aprint_error("\n");
1629 return;
1630 }
1631 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1632 sc->sc_nintrs = 1;
1633 #else /* WM_MSI_MSIX */
1634 /* Allocation settings */
1635 max_type = PCI_INTR_TYPE_MSIX;
1636 counts[PCI_INTR_TYPE_MSIX] = WM_MAX_NINTR;
1637 counts[PCI_INTR_TYPE_MSI] = 1;
1638 counts[PCI_INTR_TYPE_INTX] = 1;
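/*
 * pci_intr_alloc() hands back the most capable interrupt type
 * available, up to max_type; when establishing a handler fails
 * below, max_type is lowered (MSI-X -> MSI -> INTx) and the
 * allocation is retried.
 */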
1639
1640 alloc_retry:
1641 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1642 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1643 return;
1644 }
1645
1646 if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1647 void *vih;
1648 kcpuset_t *affinity;
1649 char intr_xname[INTRDEVNAMEBUF];
1650
1651 kcpuset_create(&affinity, false);
1652
1653 for (i = 0; i < WM_MSIX_NINTR; i++) {
1654 intrstr = pci_intr_string(pc,
1655 sc->sc_intrs[msix_matrix[i].intridx], intrbuf,
1656 sizeof(intrbuf));
1657 #ifdef WM_MPSAFE
1658 pci_intr_setattr(pc,
1659 &sc->sc_intrs[msix_matrix[i].intridx],
1660 PCI_INTR_MPSAFE, true);
1661 #endif
1662 memset(intr_xname, 0, sizeof(intr_xname));
1663 strlcat(intr_xname, device_xname(sc->sc_dev),
1664 sizeof(intr_xname));
1665 strlcat(intr_xname, msix_matrix[i].intrname,
1666 sizeof(intr_xname));
1667 vih = pci_intr_establish_xname(pc,
1668 sc->sc_intrs[msix_matrix[i].intridx], IPL_NET,
1669 msix_matrix[i].func, sc, intr_xname);
1670 if (vih == NULL) {
1671 aprint_error_dev(sc->sc_dev,
1672 "unable to establish MSI-X(for %s)%s%s\n",
1673 msix_matrix[i].intrname,
1674 intrstr ? " at " : "",
1675 intrstr ? intrstr : "");
1676 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1677 WM_MSIX_NINTR);
1678 kcpuset_destroy(affinity);
1679
1680 /* Set up for MSI: disable MSI-X */
1681 max_type = PCI_INTR_TYPE_MSI;
1682 counts[PCI_INTR_TYPE_MSI] = 1;
1683 counts[PCI_INTR_TYPE_INTX] = 1;
1684 goto alloc_retry;
1685 }
1686 kcpuset_zero(affinity);
1687 /* Round-robin affinity */
1688 kcpuset_set(affinity, msix_matrix[i].cpuid % ncpu);
1689 error = interrupt_distribute(vih, affinity, NULL);
1690 if (error == 0) {
1691 aprint_normal_dev(sc->sc_dev,
1692 "for %s interrupting at %s affinity to %u\n",
1693 msix_matrix[i].intrname, intrstr,
1694 msix_matrix[i].cpuid % ncpu);
1695 } else {
1696 aprint_normal_dev(sc->sc_dev,
1697 "for %s interrupting at %s\n",
1698 msix_matrix[i].intrname, intrstr);
1699 }
1700 sc->sc_ihs[msix_matrix[i].intridx] = vih;
1701 }
1702
1703 sc->sc_nintrs = WM_MSIX_NINTR;
1704 kcpuset_destroy(affinity);
1705 } else {
1706 /* MSI or INTx */
1707 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
1708 sizeof(intrbuf));
1709 #ifdef WM_MPSAFE
1710 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
1711 #endif
1712 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
1713 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
1714 if (sc->sc_ihs[0] == NULL) {
1715 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
1716 (pci_intr_type(sc->sc_intrs[0])
1717 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
1718 pci_intr_release(sc->sc_pc, sc->sc_intrs, 1);
1719 switch (pci_intr_type(sc->sc_intrs[0])) {
1720 case PCI_INTR_TYPE_MSI:
1721 /* The next try is for INTx: disable MSI */
1722 max_type = PCI_INTR_TYPE_INTX;
1723 counts[PCI_INTR_TYPE_INTX] = 1;
1724 goto alloc_retry;
1725 case PCI_INTR_TYPE_INTX:
1726 default:
1727 return;
1728 }
1729 }
1730 aprint_normal_dev(sc->sc_dev, "%s at %s\n",
1731 (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI)
1732 ? "MSI" : "interrupting", intrstr);
1733
1734 sc->sc_nintrs = 1;
1735 }
1736 #endif /* WM_MSI_MSIX */
1737
1738 /*
1739 * Check the function ID (unit number of the chip).
1740 */
1741 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1742 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1743 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1744 || (sc->sc_type == WM_T_82580)
1745 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1746 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1747 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1748 else
1749 sc->sc_funcid = 0;
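/* For example, the second port of a dual-port 82546 reads function ID 1 */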
1750
1751 /*
1752 * Determine a few things about the bus we're connected to.
1753 */
1754 if (sc->sc_type < WM_T_82543) {
1755 /* We don't really know the bus characteristics here. */
1756 sc->sc_bus_speed = 33;
1757 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1758 /*
1759 * CSA (Communication Streaming Architecture) is about as fast
1760 * as a 32-bit 66MHz PCI bus.
1761 */
1762 sc->sc_flags |= WM_F_CSA;
1763 sc->sc_bus_speed = 66;
1764 aprint_verbose_dev(sc->sc_dev,
1765 "Communication Streaming Architecture\n");
1766 if (sc->sc_type == WM_T_82547) {
1767 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1768 callout_setfunc(&sc->sc_txfifo_ch,
1769 wm_82547_txfifo_stall, sc);
1770 aprint_verbose_dev(sc->sc_dev,
1771 "using 82547 Tx FIFO stall work-around\n");
1772 }
1773 } else if (sc->sc_type >= WM_T_82571) {
1774 sc->sc_flags |= WM_F_PCIE;
1775 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1776 && (sc->sc_type != WM_T_ICH10)
1777 && (sc->sc_type != WM_T_PCH)
1778 && (sc->sc_type != WM_T_PCH2)
1779 && (sc->sc_type != WM_T_PCH_LPT)) {
1780 /* ICH* and PCH* have no PCIe capability registers */
1781 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1782 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1783 NULL) == 0)
1784 aprint_error_dev(sc->sc_dev,
1785 "unable to find PCIe capability\n");
1786 }
1787 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1788 } else {
1789 reg = CSR_READ(sc, WMREG_STATUS);
1790 if (reg & STATUS_BUS64)
1791 sc->sc_flags |= WM_F_BUS64;
1792 if ((reg & STATUS_PCIX_MODE) != 0) {
1793 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1794
1795 sc->sc_flags |= WM_F_PCIX;
1796 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1797 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1798 aprint_error_dev(sc->sc_dev,
1799 "unable to find PCIX capability\n");
1800 else if (sc->sc_type != WM_T_82545_3 &&
1801 sc->sc_type != WM_T_82546_3) {
1802 /*
1803 * Work around a problem caused by the BIOS
1804 * setting the max memory read byte count
1805 * incorrectly.
1806 */
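/*
 * The byte count fields encode sizes as 512 << n: for example,
 * a BIOS-programmed bytecnt of 3 (4096 bytes) against a device
 * maxb of 1 is rewritten to 1024 bytes below.
 */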
1807 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1808 sc->sc_pcixe_capoff + PCIX_CMD);
1809 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1810 sc->sc_pcixe_capoff + PCIX_STATUS);
1811
1812 bytecnt =
1813 (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1814 PCIX_CMD_BYTECNT_SHIFT;
1815 maxb =
1816 (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1817 PCIX_STATUS_MAXB_SHIFT;
1818 if (bytecnt > maxb) {
1819 aprint_verbose_dev(sc->sc_dev,
1820 "resetting PCI-X MMRBC: %d -> %d\n",
1821 512 << bytecnt, 512 << maxb);
1822 pcix_cmd = (pcix_cmd &
1823 ~PCIX_CMD_BYTECNT_MASK) |
1824 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1825 pci_conf_write(pa->pa_pc, pa->pa_tag,
1826 sc->sc_pcixe_capoff + PCIX_CMD,
1827 pcix_cmd);
1828 }
1829 }
1830 }
1831 /*
1832 * The quad port adapter is special; it has a PCIX-PCIX
1833 * bridge on the board, and can run the secondary bus at
1834 * a higher speed.
1835 */
1836 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1837 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1838 : 66;
1839 } else if (sc->sc_flags & WM_F_PCIX) {
1840 switch (reg & STATUS_PCIXSPD_MASK) {
1841 case STATUS_PCIXSPD_50_66:
1842 sc->sc_bus_speed = 66;
1843 break;
1844 case STATUS_PCIXSPD_66_100:
1845 sc->sc_bus_speed = 100;
1846 break;
1847 case STATUS_PCIXSPD_100_133:
1848 sc->sc_bus_speed = 133;
1849 break;
1850 default:
1851 aprint_error_dev(sc->sc_dev,
1852 "unknown PCIXSPD %d; assuming 66MHz\n",
1853 reg & STATUS_PCIXSPD_MASK);
1854 sc->sc_bus_speed = 66;
1855 break;
1856 }
1857 } else
1858 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1859 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1860 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1861 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1862 }
1863
1864 error = wm_alloc_txrx_queues(sc);
1865 if (error)
1866 return;
1867
1868 /* clear interesting stat counters */
1869 CSR_READ(sc, WMREG_COLC);
1870 CSR_READ(sc, WMREG_RXERRC);
1871
1872 /* get PHY control from SMBus to PCIe */
1873 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1874 || (sc->sc_type == WM_T_PCH_LPT))
1875 wm_smbustopci(sc);
1876
1877 /* Reset the chip to a known state. */
1878 wm_reset(sc);
1879
1880 /* Get some information about the EEPROM. */
1881 switch (sc->sc_type) {
1882 case WM_T_82542_2_0:
1883 case WM_T_82542_2_1:
1884 case WM_T_82543:
1885 case WM_T_82544:
1886 /* Microwire */
1887 sc->sc_nvm_wordsize = 64;
1888 sc->sc_nvm_addrbits = 6;
1889 break;
1890 case WM_T_82540:
1891 case WM_T_82545:
1892 case WM_T_82545_3:
1893 case WM_T_82546:
1894 case WM_T_82546_3:
1895 /* Microwire */
1896 reg = CSR_READ(sc, WMREG_EECD);
1897 if (reg & EECD_EE_SIZE) {
1898 sc->sc_nvm_wordsize = 256;
1899 sc->sc_nvm_addrbits = 8;
1900 } else {
1901 sc->sc_nvm_wordsize = 64;
1902 sc->sc_nvm_addrbits = 6;
1903 }
1904 sc->sc_flags |= WM_F_LOCK_EECD;
1905 break;
1906 case WM_T_82541:
1907 case WM_T_82541_2:
1908 case WM_T_82547:
1909 case WM_T_82547_2:
1910 sc->sc_flags |= WM_F_LOCK_EECD;
1911 reg = CSR_READ(sc, WMREG_EECD);
1912 if (reg & EECD_EE_TYPE) {
1913 /* SPI */
1914 sc->sc_flags |= WM_F_EEPROM_SPI;
1915 wm_nvm_set_addrbits_size_eecd(sc);
1916 } else {
1917 /* Microwire */
1918 if ((reg & EECD_EE_ABITS) != 0) {
1919 sc->sc_nvm_wordsize = 256;
1920 sc->sc_nvm_addrbits = 8;
1921 } else {
1922 sc->sc_nvm_wordsize = 64;
1923 sc->sc_nvm_addrbits = 6;
1924 }
1925 }
1926 break;
1927 case WM_T_82571:
1928 case WM_T_82572:
1929 /* SPI */
1930 sc->sc_flags |= WM_F_EEPROM_SPI;
1931 wm_nvm_set_addrbits_size_eecd(sc);
1932 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1933 break;
1934 case WM_T_82573:
1935 sc->sc_flags |= WM_F_LOCK_SWSM;
1936 /* FALLTHROUGH */
1937 case WM_T_82574:
1938 case WM_T_82583:
1939 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1940 sc->sc_flags |= WM_F_EEPROM_FLASH;
1941 sc->sc_nvm_wordsize = 2048;
1942 } else {
1943 /* SPI */
1944 sc->sc_flags |= WM_F_EEPROM_SPI;
1945 wm_nvm_set_addrbits_size_eecd(sc);
1946 }
1947 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1948 break;
1949 case WM_T_82575:
1950 case WM_T_82576:
1951 case WM_T_82580:
1952 case WM_T_I350:
1953 case WM_T_I354:
1954 case WM_T_80003:
1955 /* SPI */
1956 sc->sc_flags |= WM_F_EEPROM_SPI;
1957 wm_nvm_set_addrbits_size_eecd(sc);
1958 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1959 | WM_F_LOCK_SWSM;
1960 break;
1961 case WM_T_ICH8:
1962 case WM_T_ICH9:
1963 case WM_T_ICH10:
1964 case WM_T_PCH:
1965 case WM_T_PCH2:
1966 case WM_T_PCH_LPT:
1967 /* FLASH */
1968 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1969 sc->sc_nvm_wordsize = 2048;
1970 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1971 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1972 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
1973 aprint_error_dev(sc->sc_dev,
1974 "can't map FLASH registers\n");
1975 goto out;
1976 }
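/*
 * GFPREG describes the NVM region in units of ICH_FLASH_SECTOR_SIZE;
 * the region holds two banks, so the arithmetic below yields the
 * size of one bank in 16-bit words (hence the final division by
 * 2 * sizeof(uint16_t)).
 */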
1977 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1978 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1979 ICH_FLASH_SECTOR_SIZE;
1980 sc->sc_ich8_flash_bank_size =
1981 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1982 sc->sc_ich8_flash_bank_size -=
1983 (reg & ICH_GFPREG_BASE_MASK);
1984 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1985 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1986 break;
1987 case WM_T_I210:
1988 case WM_T_I211:
1989 if (wm_nvm_get_flash_presence_i210(sc)) {
1990 wm_nvm_set_addrbits_size_eecd(sc);
1991 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1992 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1993 } else {
1994 sc->sc_nvm_wordsize = INVM_SIZE;
1995 sc->sc_flags |= WM_F_EEPROM_INVM;
1996 sc->sc_flags |= WM_F_LOCK_SWFW;
1997 }
1998 break;
1999 default:
2000 break;
2001 }
2002
2003 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2004 switch (sc->sc_type) {
2005 case WM_T_82571:
2006 case WM_T_82572:
2007 reg = CSR_READ(sc, WMREG_SWSM2);
2008 if ((reg & SWSM2_LOCK) == 0) {
2009 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2010 force_clear_smbi = true;
2011 } else
2012 force_clear_smbi = false;
2013 break;
2014 case WM_T_82573:
2015 case WM_T_82574:
2016 case WM_T_82583:
2017 force_clear_smbi = true;
2018 break;
2019 default:
2020 force_clear_smbi = false;
2021 break;
2022 }
2023 if (force_clear_smbi) {
2024 reg = CSR_READ(sc, WMREG_SWSM);
2025 if ((reg & SWSM_SMBI) != 0)
2026 aprint_error_dev(sc->sc_dev,
2027 "Please update the Bootagent\n");
2028 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2029 }
2030
2031 /*
2032 * Defer printing the EEPROM type until after verifying the checksum.
2033 * This allows the EEPROM type to be printed correctly in the case
2034 * that no EEPROM is attached.
2035 */
2036 /*
2037 * Validate the EEPROM checksum. If the checksum fails, flag
2038 * this for later, so we can fail future reads from the EEPROM.
2039 */
2040 if (wm_nvm_validate_checksum(sc)) {
2041 /*
2042 * Check again, because some PCI-e parts fail the
2043 * first check due to the link being in a sleep state.
2044 */
2045 if (wm_nvm_validate_checksum(sc))
2046 sc->sc_flags |= WM_F_EEPROM_INVALID;
2047 }
2048
2049 /* Set device properties (macflags) */
2050 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2051
2052 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2053 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2054 else {
2055 aprint_verbose_dev(sc->sc_dev, "%u words ",
2056 sc->sc_nvm_wordsize);
2057 if (sc->sc_flags & WM_F_EEPROM_INVM)
2058 aprint_verbose("iNVM");
2059 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2060 aprint_verbose("FLASH(HW)");
2061 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2062 aprint_verbose("FLASH");
2063 else {
2064 if (sc->sc_flags & WM_F_EEPROM_SPI)
2065 eetype = "SPI";
2066 else
2067 eetype = "MicroWire";
2068 aprint_verbose("(%d address bits) %s EEPROM",
2069 sc->sc_nvm_addrbits, eetype);
2070 }
2071 }
2072 wm_nvm_version(sc);
2073 aprint_verbose("\n");
2074
2075 /* Check for I21[01] PLL workaround */
2076 if (sc->sc_type == WM_T_I210)
2077 sc->sc_flags |= WM_F_PLL_WA_I210;
2078 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2079 /* NVM image release 3.25 has a workaround */
2080 if ((sc->sc_nvm_ver_major < 3)
2081 || ((sc->sc_nvm_ver_major == 3)
2082 && (sc->sc_nvm_ver_minor < 25))) {
2083 aprint_verbose_dev(sc->sc_dev,
2084 "ROM image version %d.%d is older than 3.25\n",
2085 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2086 sc->sc_flags |= WM_F_PLL_WA_I210;
2087 }
2088 }
2089 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2090 wm_pll_workaround_i210(sc);
2091
2092 switch (sc->sc_type) {
2093 case WM_T_82571:
2094 case WM_T_82572:
2095 case WM_T_82573:
2096 case WM_T_82574:
2097 case WM_T_82583:
2098 case WM_T_80003:
2099 case WM_T_ICH8:
2100 case WM_T_ICH9:
2101 case WM_T_ICH10:
2102 case WM_T_PCH:
2103 case WM_T_PCH2:
2104 case WM_T_PCH_LPT:
2105 if (wm_check_mng_mode(sc) != 0)
2106 wm_get_hw_control(sc);
2107 break;
2108 default:
2109 break;
2110 }
2111 wm_get_wakeup(sc);
2112 /*
2113 * Read the Ethernet address from the EEPROM, unless it was
2114 * already found in the device properties.
2115 */
2116 ea = prop_dictionary_get(dict, "mac-address");
2117 if (ea != NULL) {
2118 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2119 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2120 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2121 } else {
2122 if (wm_read_mac_addr(sc, enaddr) != 0) {
2123 aprint_error_dev(sc->sc_dev,
2124 "unable to read Ethernet address\n");
2125 goto out;
2126 }
2127 }
2128
2129 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2130 ether_sprintf(enaddr));
2131
2132 /*
2133 * Read the config info from the EEPROM, and set up various
2134 * bits in the control registers based on their contents.
2135 */
2136 pn = prop_dictionary_get(dict, "i82543-cfg1");
2137 if (pn != NULL) {
2138 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2139 cfg1 = (uint16_t) prop_number_integer_value(pn);
2140 } else {
2141 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2142 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2143 goto out;
2144 }
2145 }
2146
2147 pn = prop_dictionary_get(dict, "i82543-cfg2");
2148 if (pn != NULL) {
2149 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2150 cfg2 = (uint16_t) prop_number_integer_value(pn);
2151 } else {
2152 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2153 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2154 goto out;
2155 }
2156 }
2157
2158 /* check for WM_F_WOL */
2159 switch (sc->sc_type) {
2160 case WM_T_82542_2_0:
2161 case WM_T_82542_2_1:
2162 case WM_T_82543:
2163 /* dummy? */
2164 eeprom_data = 0;
2165 apme_mask = NVM_CFG3_APME;
2166 break;
2167 case WM_T_82544:
2168 apme_mask = NVM_CFG2_82544_APM_EN;
2169 eeprom_data = cfg2;
2170 break;
2171 case WM_T_82546:
2172 case WM_T_82546_3:
2173 case WM_T_82571:
2174 case WM_T_82572:
2175 case WM_T_82573:
2176 case WM_T_82574:
2177 case WM_T_82583:
2178 case WM_T_80003:
2179 default:
2180 apme_mask = NVM_CFG3_APME;
2181 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2182 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2183 break;
2184 case WM_T_82575:
2185 case WM_T_82576:
2186 case WM_T_82580:
2187 case WM_T_I350:
2188 case WM_T_I354: /* XXX ok? */
2189 case WM_T_ICH8:
2190 case WM_T_ICH9:
2191 case WM_T_ICH10:
2192 case WM_T_PCH:
2193 case WM_T_PCH2:
2194 case WM_T_PCH_LPT:
2195 /* XXX The funcid should be checked on some devices */
2196 apme_mask = WUC_APME;
2197 eeprom_data = CSR_READ(sc, WMREG_WUC);
2198 break;
2199 }
2200
2201 /* Set WM_F_WOL based on the APME bit in the value selected above */
2202 if ((eeprom_data & apme_mask) != 0)
2203 sc->sc_flags |= WM_F_WOL;
2204 #ifdef WM_DEBUG
2205 if ((sc->sc_flags & WM_F_WOL) != 0)
2206 printf("WOL\n");
2207 #endif
2208
2209 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2210 /* Check NVM for autonegotiation */
2211 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2212 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2213 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2214 }
2215 }
2216
2217 /*
2218 * XXX need special handling for some multiple-port cards
2219 * to disable a particular port.
2220 */
2221
2222 if (sc->sc_type >= WM_T_82544) {
2223 pn = prop_dictionary_get(dict, "i82543-swdpin");
2224 if (pn != NULL) {
2225 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2226 swdpin = (uint16_t) prop_number_integer_value(pn);
2227 } else {
2228 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2229 aprint_error_dev(sc->sc_dev,
2230 "unable to read SWDPIN\n");
2231 goto out;
2232 }
2233 }
2234 }
2235
2236 if (cfg1 & NVM_CFG1_ILOS)
2237 sc->sc_ctrl |= CTRL_ILOS;
2238
2239 /*
2240 * XXX
2241 * This code isn't correct because pins 2 and 3 are located
2242 * at different positions on newer chips. Check all the datasheets.
2243 *
2244 * Until this is resolved, only handle chips up to the 82580.
2245 */
2246 if (sc->sc_type <= WM_T_82580) {
2247 if (sc->sc_type >= WM_T_82544) {
2248 sc->sc_ctrl |=
2249 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2250 CTRL_SWDPIO_SHIFT;
2251 sc->sc_ctrl |=
2252 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2253 CTRL_SWDPINS_SHIFT;
2254 } else {
2255 sc->sc_ctrl |=
2256 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2257 CTRL_SWDPIO_SHIFT;
2258 }
2259 }
2260
2261 /* XXX For other than 82580? */
2262 if (sc->sc_type == WM_T_82580) {
2263 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2264 printf("CFG3 = %08x\n", (uint32_t)nvmword);
2265 if (nvmword & __BIT(13)) {
2266 printf("SET ILOS\n");
2267 sc->sc_ctrl |= CTRL_ILOS;
2268 }
2269 }
2270
2271 #if 0
2272 if (sc->sc_type >= WM_T_82544) {
2273 if (cfg1 & NVM_CFG1_IPS0)
2274 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2275 if (cfg1 & NVM_CFG1_IPS1)
2276 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2277 sc->sc_ctrl_ext |=
2278 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2279 CTRL_EXT_SWDPIO_SHIFT;
2280 sc->sc_ctrl_ext |=
2281 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2282 CTRL_EXT_SWDPINS_SHIFT;
2283 } else {
2284 sc->sc_ctrl_ext |=
2285 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2286 CTRL_EXT_SWDPIO_SHIFT;
2287 }
2288 #endif
2289
2290 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2291 #if 0
2292 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2293 #endif
2294
2295 if (sc->sc_type == WM_T_PCH) {
2296 uint16_t val;
2297
2298 /* Save the NVM K1 bit setting */
2299 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2300
2301 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2302 sc->sc_nvm_k1_enabled = 1;
2303 else
2304 sc->sc_nvm_k1_enabled = 0;
2305 }
2306
2307 /*
2308 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
2309 * media structures accordingly.
2310 */
2311 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2312 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2313 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2314 || sc->sc_type == WM_T_82573
2315 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2316 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2317 wm_gmii_mediainit(sc, wmp->wmp_product);
2318 } else if (sc->sc_type < WM_T_82543 ||
2319 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2320 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2321 aprint_error_dev(sc->sc_dev,
2322 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2323 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2324 }
2325 wm_tbi_mediainit(sc);
2326 } else {
2327 switch (sc->sc_type) {
2328 case WM_T_82575:
2329 case WM_T_82576:
2330 case WM_T_82580:
2331 case WM_T_I350:
2332 case WM_T_I354:
2333 case WM_T_I210:
2334 case WM_T_I211:
2335 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2336 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2337 switch (link_mode) {
2338 case CTRL_EXT_LINK_MODE_1000KX:
2339 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2340 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2341 break;
2342 case CTRL_EXT_LINK_MODE_SGMII:
2343 if (wm_sgmii_uses_mdio(sc)) {
2344 aprint_verbose_dev(sc->sc_dev,
2345 "SGMII(MDIO)\n");
2346 sc->sc_flags |= WM_F_SGMII;
2347 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2348 break;
2349 }
2350 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2351 /*FALLTHROUGH*/
2352 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2353 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2354 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2355 if (link_mode
2356 == CTRL_EXT_LINK_MODE_SGMII) {
2357 sc->sc_mediatype
2358 = WM_MEDIATYPE_COPPER;
2359 sc->sc_flags |= WM_F_SGMII;
2360 } else {
2361 sc->sc_mediatype
2362 = WM_MEDIATYPE_SERDES;
2363 aprint_verbose_dev(sc->sc_dev,
2364 "SERDES\n");
2365 }
2366 break;
2367 }
2368 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2369 aprint_verbose_dev(sc->sc_dev,
2370 "SERDES\n");
2371
2372 /* Change current link mode setting */
2373 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2374 switch (sc->sc_mediatype) {
2375 case WM_MEDIATYPE_COPPER:
2376 reg |= CTRL_EXT_LINK_MODE_SGMII;
2377 break;
2378 case WM_MEDIATYPE_SERDES:
2379 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2380 break;
2381 default:
2382 break;
2383 }
2384 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2385 break;
2386 case CTRL_EXT_LINK_MODE_GMII:
2387 default:
2388 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2389 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2390 break;
2391 }
2392
2393 /* The I2C interface is used only in SGMII mode */
2394 if ((sc->sc_flags & WM_F_SGMII) != 0)
2395 reg |= CTRL_EXT_I2C_ENA;
2396 else
2397 reg &= ~CTRL_EXT_I2C_ENA;
2398 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2399
2400 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2401 wm_gmii_mediainit(sc, wmp->wmp_product);
2402 else
2403 wm_tbi_mediainit(sc);
2404 break;
2405 default:
2406 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2407 aprint_error_dev(sc->sc_dev,
2408 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2409 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2410 wm_gmii_mediainit(sc, wmp->wmp_product);
2411 }
2412 }
2413
2414 ifp = &sc->sc_ethercom.ec_if;
2415 xname = device_xname(sc->sc_dev);
2416 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2417 ifp->if_softc = sc;
2418 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2419 ifp->if_ioctl = wm_ioctl;
2420 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2421 ifp->if_start = wm_nq_start;
2422 else
2423 ifp->if_start = wm_start;
2424 ifp->if_watchdog = wm_watchdog;
2425 ifp->if_init = wm_init;
2426 ifp->if_stop = wm_stop;
2427 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2428 IFQ_SET_READY(&ifp->if_snd);
2429
2430 /* Check for jumbo frame */
2431 switch (sc->sc_type) {
2432 case WM_T_82573:
2433 /* XXX limited to 9234 if ASPM is disabled */
2434 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2435 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2436 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2437 break;
2438 case WM_T_82571:
2439 case WM_T_82572:
2440 case WM_T_82574:
2441 case WM_T_82575:
2442 case WM_T_82576:
2443 case WM_T_82580:
2444 case WM_T_I350:
2445 case WM_T_I354: /* XXXX ok? */
2446 case WM_T_I210:
2447 case WM_T_I211:
2448 case WM_T_80003:
2449 case WM_T_ICH9:
2450 case WM_T_ICH10:
2451 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2452 case WM_T_PCH_LPT:
2453 /* XXX limited to 9234 */
2454 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2455 break;
2456 case WM_T_PCH:
2457 /* XXX limited to 4096 */
2458 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2459 break;
2460 case WM_T_82542_2_0:
2461 case WM_T_82542_2_1:
2462 case WM_T_82583:
2463 case WM_T_ICH8:
2464 /* No support for jumbo frame */
2465 break;
2466 default:
2467 /* ETHER_MAX_LEN_JUMBO */
2468 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2469 break;
2470 }
2471
2472 /* If we're an i82543 or greater, we can support VLANs. */
2473 if (sc->sc_type >= WM_T_82543)
2474 sc->sc_ethercom.ec_capabilities |=
2475 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2476
2477 /*
2478 * We can perform TCPv4 and UDPv4 checksums inbound. Only
2479 * on i82543 and later.
2480 */
2481 if (sc->sc_type >= WM_T_82543) {
2482 ifp->if_capabilities |=
2483 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2484 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2485 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2486 IFCAP_CSUM_TCPv6_Tx |
2487 IFCAP_CSUM_UDPv6_Tx;
2488 }
2489
2490 /*
2491 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2492 *
2493 * 82541GI (8086:1076) ... no
2494 * 82572EI (8086:10b9) ... yes
2495 */
2496 if (sc->sc_type >= WM_T_82571) {
2497 ifp->if_capabilities |=
2498 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2499 }
2500
2501 /*
2502 * If we're an i82544 or greater (except i82547), we can do
2503 * TCP segmentation offload.
2504 */
2505 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2506 ifp->if_capabilities |= IFCAP_TSOv4;
2507 }
2508
2509 if (sc->sc_type >= WM_T_82571) {
2510 ifp->if_capabilities |= IFCAP_TSOv6;
2511 }
2512
2513 #ifdef WM_MPSAFE
2514 sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2515 sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2516 #else
2517 sc->sc_tx_lock = NULL;
2518 sc->sc_rx_lock = NULL;
2519 #endif
2520
2521 /* Attach the interface. */
2522 if_attach(ifp);
2523 ether_ifattach(ifp, enaddr);
2524 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2525 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2526 RND_FLAG_DEFAULT);
2527
2528 #ifdef WM_EVENT_COUNTERS
2529 /* Attach event counters. */
2530 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2531 NULL, xname, "txsstall");
2532 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2533 NULL, xname, "txdstall");
2534 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2535 NULL, xname, "txfifo_stall");
2536 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2537 NULL, xname, "txdw");
2538 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2539 NULL, xname, "txqe");
2540 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2541 NULL, xname, "rxintr");
2542 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2543 NULL, xname, "linkintr");
2544
2545 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2546 NULL, xname, "rxipsum");
2547 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2548 NULL, xname, "rxtusum");
2549 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2550 NULL, xname, "txipsum");
2551 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2552 NULL, xname, "txtusum");
2553 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2554 NULL, xname, "txtusum6");
2555
2556 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2557 NULL, xname, "txtso");
2558 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2559 NULL, xname, "txtso6");
2560 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2561 NULL, xname, "txtsopain");
2562
2563 for (i = 0; i < WM_NTXSEGS; i++) {
2564 snprintf(wm_txseg_evcnt_names[i],
2565 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2566 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2567 NULL, xname, wm_txseg_evcnt_names[i]);
2568 }
2569
2570 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2571 NULL, xname, "txdrop");
2572
2573 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2574 NULL, xname, "tu");
2575
2576 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2577 NULL, xname, "tx_xoff");
2578 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2579 NULL, xname, "tx_xon");
2580 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2581 NULL, xname, "rx_xoff");
2582 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2583 NULL, xname, "rx_xon");
2584 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2585 NULL, xname, "rx_macctl");
2586 #endif /* WM_EVENT_COUNTERS */
2587
2588 if (pmf_device_register(self, wm_suspend, wm_resume))
2589 pmf_class_network_register(self, ifp);
2590 else
2591 aprint_error_dev(self, "couldn't establish power handler\n");
2592
2593 sc->sc_flags |= WM_F_ATTACHED;
2594 out:
2595 return;
2596 }
2597
2598 /* The detach function (ca_detach) */
2599 static int
2600 wm_detach(device_t self, int flags __unused)
2601 {
2602 struct wm_softc *sc = device_private(self);
2603 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2604 int i;
2605 #ifndef WM_MPSAFE
2606 int s;
2607 #endif
2608
2609 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2610 return 0;
2611
2612 #ifndef WM_MPSAFE
2613 s = splnet();
2614 #endif
2615 /* Stop the interface. Callouts are stopped in it. */
2616 wm_stop(ifp, 1);
2617
2618 #ifndef WM_MPSAFE
2619 splx(s);
2620 #endif
2621
2622 pmf_device_deregister(self);
2623
2624 /* Tell the firmware about the release */
2625 WM_BOTH_LOCK(sc);
2626 wm_release_manageability(sc);
2627 wm_release_hw_control(sc);
2628 WM_BOTH_UNLOCK(sc);
2629
2630 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2631
2632 /* Delete all remaining media. */
2633 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2634
2635 ether_ifdetach(ifp);
2636 if_detach(ifp);
2637
2639 /* Unload RX dmamaps and free mbufs */
2640 WM_RX_LOCK(sc);
2641 wm_rxdrain(sc);
2642 WM_RX_UNLOCK(sc);
2643 /* Must unlock here */
2644
2645 wm_free_txrx_queues(sc);
2646
2647 /* Disestablish the interrupt handler */
2648 for (i = 0; i < sc->sc_nintrs; i++) {
2649 if (sc->sc_ihs[i] != NULL) {
2650 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2651 sc->sc_ihs[i] = NULL;
2652 }
2653 }
2654 #ifdef WM_MSI_MSIX
2655 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2656 #endif /* WM_MSI_MSIX */
2657
2658 /* Unmap the registers */
2659 if (sc->sc_ss) {
2660 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2661 sc->sc_ss = 0;
2662 }
2663 if (sc->sc_ios) {
2664 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2665 sc->sc_ios = 0;
2666 }
2667 if (sc->sc_flashs) {
2668 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2669 sc->sc_flashs = 0;
2670 }
2671
2672 if (sc->sc_tx_lock)
2673 mutex_obj_free(sc->sc_tx_lock);
2674 if (sc->sc_rx_lock)
2675 mutex_obj_free(sc->sc_rx_lock);
2676
2677 return 0;
2678 }
2679
2680 static bool
2681 wm_suspend(device_t self, const pmf_qual_t *qual)
2682 {
2683 struct wm_softc *sc = device_private(self);
2684
2685 wm_release_manageability(sc);
2686 wm_release_hw_control(sc);
2687 #ifdef WM_WOL
2688 wm_enable_wakeup(sc);
2689 #endif
2690
2691 return true;
2692 }
2693
2694 static bool
2695 wm_resume(device_t self, const pmf_qual_t *qual)
2696 {
2697 struct wm_softc *sc = device_private(self);
2698
2699 wm_init_manageability(sc);
2700
2701 return true;
2702 }
2703
2704 /*
2705 * wm_watchdog: [ifnet interface function]
2706 *
2707 * Watchdog timer handler.
2708 */
2709 static void
2710 wm_watchdog(struct ifnet *ifp)
2711 {
2712 struct wm_softc *sc = ifp->if_softc;
2713
2714 /*
2715 * Since we're using delayed interrupts, sweep up
2716 * before we report an error.
2717 */
2718 WM_TX_LOCK(sc);
2719 wm_txeof(sc);
2720 WM_TX_UNLOCK(sc);
2721
2722 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2723 #ifdef WM_DEBUG
2724 int i, j;
2725 struct wm_txsoft *txs;
2726 #endif
2727 log(LOG_ERR,
2728 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2729 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2730 sc->sc_txnext);
2731 ifp->if_oerrors++;
2732 #ifdef WM_DEBUG
2733 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2734 i = WM_NEXTTXS(sc, i)) {
2735 txs = &sc->sc_txsoft[i];
2736 printf("txs %d tx %d -> %d\n",
2737 i, txs->txs_firstdesc, txs->txs_lastdesc);
2738 for (j = txs->txs_firstdesc; ;
2739 j = WM_NEXTTX(sc, j)) {
2740 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2741 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2742 printf("\t %#08x%08x\n",
2743 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2744 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2745 if (j == txs->txs_lastdesc)
2746 break;
2747 }
2748 }
2749 #endif
2750 /* Reset the interface. */
2751 (void) wm_init(ifp);
2752 }
2753
2754 /* Try to get more packets going. */
2755 ifp->if_start(ifp);
2756 }
2757
2758 /*
2759 * wm_tick:
2760 *
2761 * One second timer, used to check link status, sweep up
2762 * completed transmit jobs, etc.
2763 */
2764 static void
2765 wm_tick(void *arg)
2766 {
2767 struct wm_softc *sc = arg;
2768 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2769 #ifndef WM_MPSAFE
2770 int s;
2771
2772 s = splnet();
2773 #endif
2774
2775 WM_TX_LOCK(sc);
2776
2777 if (sc->sc_stopping)
2778 goto out;
2779
2780 if (sc->sc_type >= WM_T_82542_2_1) {
2781 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2782 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2783 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2784 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2785 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2786 }
2787
2788 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2789 ifp->if_ierrors += 0ULL /* ensure quad_t */
2790 + CSR_READ(sc, WMREG_CRCERRS)
2791 + CSR_READ(sc, WMREG_ALGNERRC)
2792 + CSR_READ(sc, WMREG_SYMERRC)
2793 + CSR_READ(sc, WMREG_RXERRC)
2794 + CSR_READ(sc, WMREG_SEC)
2795 + CSR_READ(sc, WMREG_CEXTERR)
2796 + CSR_READ(sc, WMREG_RLEC);
2797 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
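/* MPC counts packets missed for lack of Rx resources; RNBC counts receive-no-buffer events */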
2798
2799 if (sc->sc_flags & WM_F_HAS_MII)
2800 mii_tick(&sc->sc_mii);
2801 else if ((sc->sc_type >= WM_T_82575)
2802 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2803 wm_serdes_tick(sc);
2804 else
2805 wm_tbi_tick(sc);
2806
2807 out:
2808 WM_TX_UNLOCK(sc);
2809 #ifndef WM_MPSAFE
2810 splx(s);
2811 #endif
2812
2813 if (!sc->sc_stopping)
2814 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2815 }
2816
2817 static int
2818 wm_ifflags_cb(struct ethercom *ec)
2819 {
2820 struct ifnet *ifp = &ec->ec_if;
2821 struct wm_softc *sc = ifp->if_softc;
2822 int change = ifp->if_flags ^ sc->sc_if_flags;
2823 int rc = 0;
2824
2825 WM_BOTH_LOCK(sc);
2826
2827 if (change != 0)
2828 sc->sc_if_flags = ifp->if_flags;
2829
2830 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2831 rc = ENETRESET;
2832 goto out;
2833 }
2834
2835 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2836 wm_set_filter(sc);
2837
2838 wm_set_vlan(sc);
2839
2840 out:
2841 WM_BOTH_UNLOCK(sc);
2842
2843 return rc;
2844 }
2845
2846 /*
2847 * wm_ioctl: [ifnet interface function]
2848 *
2849 * Handle control requests from the operator.
2850 */
2851 static int
2852 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2853 {
2854 struct wm_softc *sc = ifp->if_softc;
2855 struct ifreq *ifr = (struct ifreq *) data;
2856 struct ifaddr *ifa = (struct ifaddr *)data;
2857 struct sockaddr_dl *sdl;
2858 int s, error;
2859
2860 #ifndef WM_MPSAFE
2861 s = splnet();
2862 #endif
2863 switch (cmd) {
2864 case SIOCSIFMEDIA:
2865 case SIOCGIFMEDIA:
2866 WM_BOTH_LOCK(sc);
2867 /* Flow control requires full-duplex mode. */
2868 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2869 (ifr->ifr_media & IFM_FDX) == 0)
2870 ifr->ifr_media &= ~IFM_ETH_FMASK;
2871 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2872 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2873 /* We can do both TXPAUSE and RXPAUSE. */
2874 ifr->ifr_media |=
2875 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2876 }
2877 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2878 }
2879 WM_BOTH_UNLOCK(sc);
2880 #ifdef WM_MPSAFE
2881 s = splnet();
2882 #endif
2883 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2884 #ifdef WM_MPSAFE
2885 splx(s);
2886 #endif
2887 break;
2888 case SIOCINITIFADDR:
2889 WM_BOTH_LOCK(sc);
2890 if (ifa->ifa_addr->sa_family == AF_LINK) {
2891 sdl = satosdl(ifp->if_dl->ifa_addr);
2892 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2893 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2894 /* unicast address is first multicast entry */
2895 wm_set_filter(sc);
2896 error = 0;
2897 WM_BOTH_UNLOCK(sc);
2898 break;
2899 }
2900 WM_BOTH_UNLOCK(sc);
2901 /*FALLTHROUGH*/
2902 default:
2903 #ifdef WM_MPSAFE
2904 s = splnet();
2905 #endif
2906 /* It may call wm_start, so unlock here */
2907 error = ether_ioctl(ifp, cmd, data);
2908 #ifdef WM_MPSAFE
2909 splx(s);
2910 #endif
2911 if (error != ENETRESET)
2912 break;
2913
2914 error = 0;
2915
2916 if (cmd == SIOCSIFCAP) {
2917 error = (*ifp->if_init)(ifp);
2918 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2919 ;
2920 else if (ifp->if_flags & IFF_RUNNING) {
2921 /*
2922 * Multicast list has changed; set the hardware filter
2923 * accordingly.
2924 */
2925 WM_BOTH_LOCK(sc);
2926 wm_set_filter(sc);
2927 WM_BOTH_UNLOCK(sc);
2928 }
2929 break;
2930 }
2931
2932 #ifndef WM_MPSAFE
2933 splx(s);
2934 #endif
2935 return error;
2936 }
2937
2938 /* MAC address related */
2939
2940 /*
2941 * Get the offset of the MAC address and return it.
2942 * If an error occurs, offset 0 is used.
2943 */
2944 static uint16_t
2945 wm_check_alt_mac_addr(struct wm_softc *sc)
2946 {
2947 uint16_t myea[ETHER_ADDR_LEN / 2];
2948 uint16_t offset = NVM_OFF_MACADDR;
2949
2950 /* Try to read alternative MAC address pointer */
2951 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2952 return 0;
2953
2954 /* Check whether the pointer is valid. */
2955 if ((offset == 0x0000) || (offset == 0xffff))
2956 return 0;
2957
2958 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
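/* Each port's alternative address occupies ETHER_ADDR_LEN / 2 == 3 NVM words */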
2959 /*
2960 * Check whether the alternative MAC address is valid.
2961 * Some cards have a non-0xffff pointer but don't actually
2962 * use an alternative MAC address.
2963 *
2964 * A valid unicast address must have the multicast (group) bit clear.
2965 */
2966 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2967 if (((myea[0] & 0xff) & 0x01) == 0)
2968 return offset; /* Found */
2969
2970 /* Not found */
2971 return 0;
2972 }
2973
2974 static int
2975 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2976 {
2977 uint16_t myea[ETHER_ADDR_LEN / 2];
2978 uint16_t offset = NVM_OFF_MACADDR;
2979 int do_invert = 0;
2980
2981 switch (sc->sc_type) {
2982 case WM_T_82580:
2983 case WM_T_I350:
2984 case WM_T_I354:
2985 /* EEPROM Top Level Partitioning */
2986 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2987 break;
2988 case WM_T_82571:
2989 case WM_T_82575:
2990 case WM_T_82576:
2991 case WM_T_80003:
2992 case WM_T_I210:
2993 case WM_T_I211:
2994 offset = wm_check_alt_mac_addr(sc);
2995 if (offset == 0)
2996 if ((sc->sc_funcid & 0x01) == 1)
2997 do_invert = 1;
2998 break;
2999 default:
3000 if ((sc->sc_funcid & 0x01) == 1)
3001 do_invert = 1;
3002 break;
3003 }
3004
3005 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
3006 myea) != 0)
3007 goto bad;
3008
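/* NVM words are little-endian: myea[0] is 0x1100 for an address starting 00:11 */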
3009 enaddr[0] = myea[0] & 0xff;
3010 enaddr[1] = myea[0] >> 8;
3011 enaddr[2] = myea[1] & 0xff;
3012 enaddr[3] = myea[1] >> 8;
3013 enaddr[4] = myea[2] & 0xff;
3014 enaddr[5] = myea[2] >> 8;
3015
3016 /*
3017 * Toggle the LSB of the MAC address on the second port
3018 * of some dual port cards.
3019 */
3020 if (do_invert != 0)
3021 enaddr[5] ^= 1;
3022
3023 return 0;
3024
3025 bad:
3026 return -1;
3027 }
3028
3029 /*
3030 * wm_set_ral:
3031 *
3032 * Set an entry in the receive address list.
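*
* For example, 00:11:22:33:44:55 is written as ral_lo 0x33221100
* and ral_hi 0x5544, with RAL_AV set to mark the entry valid.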
3033 */
3034 static void
3035 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3036 {
3037 uint32_t ral_lo, ral_hi;
3038
3039 if (enaddr != NULL) {
3040 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3041 (enaddr[3] << 24);
3042 ral_hi = enaddr[4] | (enaddr[5] << 8);
3043 ral_hi |= RAL_AV;
3044 } else {
3045 ral_lo = 0;
3046 ral_hi = 0;
3047 }
3048
3049 if (sc->sc_type >= WM_T_82544) {
3050 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3051 ral_lo);
3052 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3053 ral_hi);
3054 } else {
3055 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3056 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3057 }
3058 }
3059
3060 /*
3061 * wm_mchash:
3062 *
3063 * Compute the hash of the multicast address for the 4096-bit
3064 * multicast filter.
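*
* For example, with sc_mchash_type 0 the hash is
* (enaddr[5] << 4) | (enaddr[4] >> 4), masked to 12 bits;
* wm_set_filter() then uses bits [11:5] to select an MTA register
* and bits [4:0] to select the bit within it.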
3065 */
3066 static uint32_t
3067 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3068 {
3069 static const int lo_shift[4] = { 4, 3, 2, 0 };
3070 static const int hi_shift[4] = { 4, 5, 6, 8 };
3071 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3072 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3073 uint32_t hash;
3074
3075 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3076 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3077 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3078 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3079 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3080 return (hash & 0x3ff);
3081 }
3082 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3083 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3084
3085 return (hash & 0xfff);
3086 }
3087
3088 /*
3089 * wm_set_filter:
3090 *
3091 * Set up the receive filter.
3092 */
3093 static void
3094 wm_set_filter(struct wm_softc *sc)
3095 {
3096 struct ethercom *ec = &sc->sc_ethercom;
3097 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3098 struct ether_multi *enm;
3099 struct ether_multistep step;
3100 bus_addr_t mta_reg;
3101 uint32_t hash, reg, bit;
3102 int i, size;
3103
3104 if (sc->sc_type >= WM_T_82544)
3105 mta_reg = WMREG_CORDOVA_MTA;
3106 else
3107 mta_reg = WMREG_MTA;
3108
3109 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3110
3111 if (ifp->if_flags & IFF_BROADCAST)
3112 sc->sc_rctl |= RCTL_BAM;
3113 if (ifp->if_flags & IFF_PROMISC) {
3114 sc->sc_rctl |= RCTL_UPE;
3115 goto allmulti;
3116 }
3117
3118 /*
3119 * Set the station address in the first RAL slot, and
3120 * clear the remaining slots.
3121 */
3122 if (sc->sc_type == WM_T_ICH8)
3123 size = WM_RAL_TABSIZE_ICH8 - 1;
3124 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3125 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3126 || (sc->sc_type == WM_T_PCH_LPT))
3127 size = WM_RAL_TABSIZE_ICH8;
3128 else if (sc->sc_type == WM_T_82575)
3129 size = WM_RAL_TABSIZE_82575;
3130 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3131 size = WM_RAL_TABSIZE_82576;
3132 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3133 size = WM_RAL_TABSIZE_I350;
3134 else
3135 size = WM_RAL_TABSIZE;
3136 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3137 for (i = 1; i < size; i++)
3138 wm_set_ral(sc, NULL, i);
3139
3140 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3141 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3142 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3143 size = WM_ICH8_MC_TABSIZE;
3144 else
3145 size = WM_MC_TABSIZE;
3146 /* Clear out the multicast table. */
3147 for (i = 0; i < size; i++)
3148 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3149
3150 ETHER_FIRST_MULTI(step, ec, enm);
3151 while (enm != NULL) {
3152 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3153 /*
3154 * We must listen to a range of multicast addresses.
3155 * For now, just accept all multicasts, rather than
3156 * trying to set only those filter bits needed to match
3157 * the range. (At this time, the only use of address
3158 * ranges is for IP multicast routing, for which the
3159 * range is big enough to require all bits set.)
3160 */
3161 goto allmulti;
3162 }
3163
3164 hash = wm_mchash(sc, enm->enm_addrlo);
3165
3166 reg = (hash >> 5);
3167 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3168 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3169 || (sc->sc_type == WM_T_PCH2)
3170 || (sc->sc_type == WM_T_PCH_LPT))
3171 reg &= 0x1f;
3172 else
3173 reg &= 0x7f;
3174 bit = hash & 0x1f;
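/* The ICH/PCH chips have 32 MTA registers; the others have 128 */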
3175
3176 hash = CSR_READ(sc, mta_reg + (reg << 2));
3177 hash |= 1U << bit;
3178
3179 /* 82544 erratum: a write to an odd MTA register must also rewrite the previous one */
3180 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3181 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3182 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3183 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3184 } else
3185 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3186
3187 ETHER_NEXT_MULTI(step, enm);
3188 }
3189
3190 ifp->if_flags &= ~IFF_ALLMULTI;
3191 goto setit;
3192
3193 allmulti:
3194 ifp->if_flags |= IFF_ALLMULTI;
3195 sc->sc_rctl |= RCTL_MPE;
3196
3197 setit:
3198 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3199 }
3200
3201 /* Reset and init related */
3202
3203 static void
3204 wm_set_vlan(struct wm_softc *sc)
3205 {
3206 /* Deal with VLAN enables. */
3207 if (VLAN_ATTACHED(&sc->sc_ethercom))
3208 sc->sc_ctrl |= CTRL_VME;
3209 else
3210 sc->sc_ctrl &= ~CTRL_VME;
3211
3212 /* Write the control registers. */
3213 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3214 }
3215
3216 static void
3217 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3218 {
3219 uint32_t gcr;
3220 pcireg_t ctrl2;
3221
3222 gcr = CSR_READ(sc, WMREG_GCR);
3223
3224 /* Only take action if the timeout value is still the default of 0 */
3225 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3226 goto out;
3227
3228 if ((gcr & GCR_CAP_VER2) == 0) {
3229 gcr |= GCR_CMPL_TMOUT_10MS;
3230 goto out;
3231 }
3232
3233 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3234 sc->sc_pcixe_capoff + PCIE_DCSR2);
3235 ctrl2 |= WM_PCIE_DCSR2_16MS;
3236 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3237 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3238
3239 out:
3240 /* Disable completion timeout resend */
3241 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3242
3243 CSR_WRITE(sc, WMREG_GCR, gcr);
3244 }
3245
3246 void
3247 wm_get_auto_rd_done(struct wm_softc *sc)
3248 {
3249 int i;
3250
3251 /* wait for eeprom to reload */
3252 switch (sc->sc_type) {
3253 case WM_T_82571:
3254 case WM_T_82572:
3255 case WM_T_82573:
3256 case WM_T_82574:
3257 case WM_T_82583:
3258 case WM_T_82575:
3259 case WM_T_82576:
3260 case WM_T_82580:
3261 case WM_T_I350:
3262 case WM_T_I354:
3263 case WM_T_I210:
3264 case WM_T_I211:
3265 case WM_T_80003:
3266 case WM_T_ICH8:
3267 case WM_T_ICH9:
3268 for (i = 0; i < 10; i++) {
3269 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3270 break;
3271 delay(1000);
3272 }
3273 if (i == 10) {
3274 log(LOG_ERR, "%s: auto read from eeprom failed to "
3275 "complete\n", device_xname(sc->sc_dev));
3276 }
3277 break;
3278 default:
3279 break;
3280 }
3281 }
3282
3283 void
3284 wm_lan_init_done(struct wm_softc *sc)
3285 {
3286 uint32_t reg = 0;
3287 int i;
3288
3289 /* wait for eeprom to reload */
3290 switch (sc->sc_type) {
3291 case WM_T_ICH10:
3292 case WM_T_PCH:
3293 case WM_T_PCH2:
3294 case WM_T_PCH_LPT:
3295 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3296 reg = CSR_READ(sc, WMREG_STATUS);
3297 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3298 break;
3299 delay(100);
3300 }
3301 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3302 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3303 "complete\n", device_xname(sc->sc_dev), __func__);
3304 }
3305 break;
3306 default:
3307 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3308 __func__);
3309 break;
3310 }
3311
3312 reg &= ~STATUS_LAN_INIT_DONE;
3313 CSR_WRITE(sc, WMREG_STATUS, reg);
3314 }
3315
3316 void
3317 wm_get_cfg_done(struct wm_softc *sc)
3318 {
3319 int mask;
3320 uint32_t reg;
3321 int i;
3322
3323 /* wait for eeprom to reload */
3324 switch (sc->sc_type) {
3325 case WM_T_82542_2_0:
3326 case WM_T_82542_2_1:
3327 /* null */
3328 break;
3329 case WM_T_82543:
3330 case WM_T_82544:
3331 case WM_T_82540:
3332 case WM_T_82545:
3333 case WM_T_82545_3:
3334 case WM_T_82546:
3335 case WM_T_82546_3:
3336 case WM_T_82541:
3337 case WM_T_82541_2:
3338 case WM_T_82547:
3339 case WM_T_82547_2:
3340 case WM_T_82573:
3341 case WM_T_82574:
3342 case WM_T_82583:
3343 /* generic */
3344 delay(10*1000);
3345 break;
3346 case WM_T_80003:
3347 case WM_T_82571:
3348 case WM_T_82572:
3349 case WM_T_82575:
3350 case WM_T_82576:
3351 case WM_T_82580:
3352 case WM_T_I350:
3353 case WM_T_I354:
3354 case WM_T_I210:
3355 case WM_T_I211:
3356 if (sc->sc_type == WM_T_82571) {
3357 /* Only 82571 shares port 0 */
3358 mask = EEMNGCTL_CFGDONE_0;
3359 } else
3360 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3361 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3362 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3363 break;
3364 delay(1000);
3365 }
3366 if (i >= WM_PHY_CFG_TIMEOUT) {
3367 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3368 device_xname(sc->sc_dev), __func__));
3369 }
3370 break;
3371 case WM_T_ICH8:
3372 case WM_T_ICH9:
3373 case WM_T_ICH10:
3374 case WM_T_PCH:
3375 case WM_T_PCH2:
3376 case WM_T_PCH_LPT:
3377 delay(10*1000);
3378 if (sc->sc_type >= WM_T_ICH10)
3379 wm_lan_init_done(sc);
3380 else
3381 wm_get_auto_rd_done(sc);
3382
3383 reg = CSR_READ(sc, WMREG_STATUS);
3384 if ((reg & STATUS_PHYRA) != 0)
3385 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3386 break;
3387 default:
3388 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3389 __func__);
3390 break;
3391 }
3392 }
3393
3394 /* Init hardware bits */
3395 void
3396 wm_initialize_hardware_bits(struct wm_softc *sc)
3397 {
3398 uint32_t tarc0, tarc1, reg;
3399
3400 /* For 82571 variant, 80003 and ICHs */
3401 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3402 || (sc->sc_type >= WM_T_80003)) {
3403
3404 /* Transmit Descriptor Control 0 */
3405 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3406 reg |= TXDCTL_COUNT_DESC;
3407 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3408
3409 /* Transmit Descriptor Control 1 */
3410 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3411 reg |= TXDCTL_COUNT_DESC;
3412 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3413
3414 /* TARC0 */
3415 tarc0 = CSR_READ(sc, WMREG_TARC0);
3416 switch (sc->sc_type) {
3417 case WM_T_82571:
3418 case WM_T_82572:
3419 case WM_T_82573:
3420 case WM_T_82574:
3421 case WM_T_82583:
3422 case WM_T_80003:
3423 /* Clear bits 30..27 */
3424 tarc0 &= ~__BITS(30, 27);
3425 break;
3426 default:
3427 break;
3428 }
3429
3430 switch (sc->sc_type) {
3431 case WM_T_82571:
3432 case WM_T_82572:
3433 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3434
3435 tarc1 = CSR_READ(sc, WMREG_TARC1);
3436 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3437 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3438 /* 8257[12] Errata No.7 */
3439 tarc1 |= __BIT(22); /* TARC1 bit 22 */
3440
3441 /* TARC1 bit 28 */
3442 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3443 tarc1 &= ~__BIT(28);
3444 else
3445 tarc1 |= __BIT(28);
3446 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3447
3448 /*
3449 * 8257[12] Errata No.13
3450 * Disable Dynamic Clock Gating.
3451 */
3452 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3453 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3454 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3455 break;
3456 case WM_T_82573:
3457 case WM_T_82574:
3458 case WM_T_82583:
3459 if ((sc->sc_type == WM_T_82574)
3460 || (sc->sc_type == WM_T_82583))
3461 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3462
3463 /* Extended Device Control */
3464 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3465 reg &= ~__BIT(23); /* Clear bit 23 */
3466 reg |= __BIT(22); /* Set bit 22 */
3467 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3468
3469 /* Device Control */
3470 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3471 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3472
3473 /* PCIe Control Register */
3474 /*
3475 * 82573 Errata (unknown).
3476 *
3477 * 82574 Errata 25 and 82583 Errata 12
3478 * "Dropped Rx Packets":
3479 * NVM image version 2.1.4 and newer doesn't have this bug.
3480 */
3481 reg = CSR_READ(sc, WMREG_GCR);
3482 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3483 CSR_WRITE(sc, WMREG_GCR, reg);
3484
3485 if ((sc->sc_type == WM_T_82574)
3486 || (sc->sc_type == WM_T_82583)) {
3487 /*
3488 * Document says this bit must be set for
3489 * proper operation.
3490 */
3491 reg = CSR_READ(sc, WMREG_GCR);
3492 reg |= __BIT(22);
3493 CSR_WRITE(sc, WMREG_GCR, reg);
3494
3495 /*
3496 				 * Apply a workaround for the hardware erratum
3497 				 * documented in the errata sheets. It fixes an
3498 				 * issue where some error prone or unreliable
3499 				 * PCIe completions occur, particularly with
3500 				 * ASPM enabled. Without the fix, the issue can
3501 				 * cause Tx timeouts.
3502 */
3503 reg = CSR_READ(sc, WMREG_GCR2);
3504 reg |= __BIT(0);
3505 CSR_WRITE(sc, WMREG_GCR2, reg);
3506 }
3507 break;
3508 case WM_T_80003:
3509 /* TARC0 */
3510 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3511 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3512 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3513
3514 /* TARC1 bit 28 */
3515 tarc1 = CSR_READ(sc, WMREG_TARC1);
3516 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3517 tarc1 &= ~__BIT(28);
3518 else
3519 tarc1 |= __BIT(28);
3520 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3521 break;
3522 case WM_T_ICH8:
3523 case WM_T_ICH9:
3524 case WM_T_ICH10:
3525 case WM_T_PCH:
3526 case WM_T_PCH2:
3527 case WM_T_PCH_LPT:
3528 /* TARC 0 */
3529 if (sc->sc_type == WM_T_ICH8) {
3530 /* Set TARC0 bits 29 and 28 */
3531 tarc0 |= __BITS(29, 28);
3532 }
3533 /* Set TARC0 bits 23,24,26,27 */
3534 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3535
3536 /* CTRL_EXT */
3537 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3538 reg |= __BIT(22); /* Set bit 22 */
3539 /*
3540 * Enable PHY low-power state when MAC is at D3
3541 * w/o WoL
3542 */
3543 if (sc->sc_type >= WM_T_PCH)
3544 reg |= CTRL_EXT_PHYPDEN;
3545 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3546
3547 /* TARC1 */
3548 tarc1 = CSR_READ(sc, WMREG_TARC1);
3549 /* bit 28 */
3550 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3551 tarc1 &= ~__BIT(28);
3552 else
3553 tarc1 |= __BIT(28);
3554 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3555 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3556
3557 /* Device Status */
3558 if (sc->sc_type == WM_T_ICH8) {
3559 reg = CSR_READ(sc, WMREG_STATUS);
3560 reg &= ~__BIT(31);
3561 CSR_WRITE(sc, WMREG_STATUS, reg);
3562
3563 }
3564
3565 /*
3566 			 * To work around a descriptor data corruption issue
3567 			 * seen with NFS v2 UDP traffic, just disable the NFS
3568 			 * filtering capability.
3569 */
3570 reg = CSR_READ(sc, WMREG_RFCTL);
3571 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3572 CSR_WRITE(sc, WMREG_RFCTL, reg);
3573 break;
3574 default:
3575 break;
3576 }
3577 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3578
3579 /*
3580 * 8257[12] Errata No.52 and some others.
3581 * Avoid RSS Hash Value bug.
3582 */
3583 switch (sc->sc_type) {
3584 case WM_T_82571:
3585 case WM_T_82572:
3586 case WM_T_82573:
3587 case WM_T_80003:
3588 case WM_T_ICH8:
3589 reg = CSR_READ(sc, WMREG_RFCTL);
3590 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3591 CSR_WRITE(sc, WMREG_RFCTL, reg);
3592 break;
3593 default:
3594 break;
3595 }
3596 }
3597 }
3598
3599 static uint32_t
3600 wm_rxpbs_adjust_82580(uint32_t val)
3601 {
3602 uint32_t rv = 0;
3603
3604 if (val < __arraycount(wm_82580_rxpbs_table))
3605 rv = wm_82580_rxpbs_table[val];
3606
3607 return rv;
3608 }
3609
3610 /*
3611 * wm_reset:
3612 *
3613  *	Reset the chip to a known state.
3614 */
3615 static void
3616 wm_reset(struct wm_softc *sc)
3617 {
3618 int phy_reset = 0;
3619 int error = 0;
3620 uint32_t reg, mask;
3621
3622 /*
3623 * Allocate on-chip memory according to the MTU size.
3624 * The Packet Buffer Allocation register must be written
3625 * before the chip is reset.
3626 */
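	/*
	 * A worked example of the split, assuming the PBA_* constants
	 * encode kilobytes: on an 82547 with the default MTU, sc_pba is
	 * PBA_30K, so receive owns 30KB of the 40KB packet buffer and
	 * the Tx FIFO gets the remaining (PBA_40K - PBA_30K) = 10KB,
	 * which is what sc_txfifo_size works out to below.
	 */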
3627 switch (sc->sc_type) {
3628 case WM_T_82547:
3629 case WM_T_82547_2:
3630 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3631 PBA_22K : PBA_30K;
3632 sc->sc_txfifo_head = 0;
3633 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3634 sc->sc_txfifo_size =
3635 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3636 sc->sc_txfifo_stall = 0;
3637 break;
3638 case WM_T_82571:
3639 case WM_T_82572:
3640 	case WM_T_82575: /* XXX need special handling for jumbo frames */
3641 case WM_T_80003:
3642 sc->sc_pba = PBA_32K;
3643 break;
3644 case WM_T_82573:
3645 sc->sc_pba = PBA_12K;
3646 break;
3647 case WM_T_82574:
3648 case WM_T_82583:
3649 sc->sc_pba = PBA_20K;
3650 break;
3651 case WM_T_82576:
3652 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3653 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3654 break;
3655 case WM_T_82580:
3656 case WM_T_I350:
3657 case WM_T_I354:
3658 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3659 break;
3660 case WM_T_I210:
3661 case WM_T_I211:
3662 sc->sc_pba = PBA_34K;
3663 break;
3664 case WM_T_ICH8:
3665 /* Workaround for a bit corruption issue in FIFO memory */
3666 sc->sc_pba = PBA_8K;
3667 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3668 break;
3669 case WM_T_ICH9:
3670 case WM_T_ICH10:
3671 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3672 PBA_14K : PBA_10K;
3673 break;
3674 case WM_T_PCH:
3675 case WM_T_PCH2:
3676 case WM_T_PCH_LPT:
3677 sc->sc_pba = PBA_26K;
3678 break;
3679 default:
3680 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3681 PBA_40K : PBA_48K;
3682 break;
3683 }
3684 /*
3685 * Only old or non-multiqueue devices have the PBA register
3686 * XXX Need special handling for 82575.
3687 */
3688 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3689 || (sc->sc_type == WM_T_82575))
3690 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3691
3692 /* Prevent the PCI-E bus from sticking */
3693 if (sc->sc_flags & WM_F_PCIE) {
3694 int timeout = 800;
3695
3696 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3697 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3698
3699 while (timeout--) {
3700 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3701 == 0)
3702 break;
3703 delay(100);
3704 }
3705 }
3706
3707 	/* Set the completion timeout for the interface */
3708 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3709 || (sc->sc_type == WM_T_82580)
3710 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3711 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3712 wm_set_pcie_completion_timeout(sc);
3713
3714 /* Clear interrupt */
3715 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3716 if (sc->sc_nintrs > 1) {
3717 if (sc->sc_type != WM_T_82574) {
3718 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3719 CSR_WRITE(sc, WMREG_EIAC, 0);
3720 } else {
3721 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3722 }
3723 }
3724
3725 /* Stop the transmit and receive processes. */
3726 CSR_WRITE(sc, WMREG_RCTL, 0);
3727 sc->sc_rctl &= ~RCTL_EN;
3728 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3729 CSR_WRITE_FLUSH(sc);
3730
3731 /* XXX set_tbi_sbp_82543() */
3732
3733 delay(10*1000);
3734
3735 /* Must acquire the MDIO ownership before MAC reset */
3736 switch (sc->sc_type) {
3737 case WM_T_82573:
3738 case WM_T_82574:
3739 case WM_T_82583:
3740 error = wm_get_hw_semaphore_82573(sc);
3741 break;
3742 default:
3743 break;
3744 }
3745
3746 /*
3747 * 82541 Errata 29? & 82547 Errata 28?
3748 * See also the description about PHY_RST bit in CTRL register
3749 * in 8254x_GBe_SDM.pdf.
3750 */
3751 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3752 CSR_WRITE(sc, WMREG_CTRL,
3753 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3754 CSR_WRITE_FLUSH(sc);
3755 delay(5000);
3756 }
3757
3758 switch (sc->sc_type) {
3759 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3760 case WM_T_82541:
3761 case WM_T_82541_2:
3762 case WM_T_82547:
3763 case WM_T_82547_2:
3764 /*
3765 * On some chipsets, a reset through a memory-mapped write
3766 * cycle can cause the chip to reset before completing the
3767 		 * write cycle. This causes a major headache that can be
3768 * avoided by issuing the reset via indirect register writes
3769 * through I/O space.
3770 *
3771 * So, if we successfully mapped the I/O BAR at attach time,
3772 * use that. Otherwise, try our luck with a memory-mapped
3773 * reset.
3774 */
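		/*
		 * A sketch of the indirect path, assuming the usual
		 * IOADDR/IODATA register pair at offsets 0 and 4 of the
		 * I/O BAR; wm_io_write() is expected to be equivalent to:
		 *
		 *	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, WMREG_CTRL);
		 *	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, CTRL_RST);
		 */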
3775 if (sc->sc_flags & WM_F_IOH_VALID)
3776 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3777 else
3778 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3779 break;
3780 case WM_T_82545_3:
3781 case WM_T_82546_3:
3782 /* Use the shadow control register on these chips. */
3783 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3784 break;
3785 case WM_T_80003:
3786 mask = swfwphysem[sc->sc_funcid];
3787 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3788 wm_get_swfw_semaphore(sc, mask);
3789 CSR_WRITE(sc, WMREG_CTRL, reg);
3790 wm_put_swfw_semaphore(sc, mask);
3791 break;
3792 case WM_T_ICH8:
3793 case WM_T_ICH9:
3794 case WM_T_ICH10:
3795 case WM_T_PCH:
3796 case WM_T_PCH2:
3797 case WM_T_PCH_LPT:
3798 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3799 if (wm_check_reset_block(sc) == 0) {
3800 /*
3801 * Gate automatic PHY configuration by hardware on
3802 * non-managed 82579
3803 */
3804 if ((sc->sc_type == WM_T_PCH2)
3805 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3806 != 0))
3807 wm_gate_hw_phy_config_ich8lan(sc, 1);
3808
3809
3810 reg |= CTRL_PHY_RESET;
3811 phy_reset = 1;
3812 }
3813 wm_get_swfwhw_semaphore(sc);
3814 CSR_WRITE(sc, WMREG_CTRL, reg);
3815 		/* Don't insert a completion barrier during reset */
3816 delay(20*1000);
3817 wm_put_swfwhw_semaphore(sc);
3818 break;
3819 case WM_T_82580:
3820 case WM_T_I350:
3821 case WM_T_I354:
3822 case WM_T_I210:
3823 case WM_T_I211:
3824 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3825 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3826 CSR_WRITE_FLUSH(sc);
3827 delay(5000);
3828 break;
3829 case WM_T_82542_2_0:
3830 case WM_T_82542_2_1:
3831 case WM_T_82543:
3832 case WM_T_82540:
3833 case WM_T_82545:
3834 case WM_T_82546:
3835 case WM_T_82571:
3836 case WM_T_82572:
3837 case WM_T_82573:
3838 case WM_T_82574:
3839 case WM_T_82575:
3840 case WM_T_82576:
3841 case WM_T_82583:
3842 default:
3843 /* Everything else can safely use the documented method. */
3844 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3845 break;
3846 }
3847
3848 /* Must release the MDIO ownership after MAC reset */
3849 switch (sc->sc_type) {
3850 case WM_T_82573:
3851 case WM_T_82574:
3852 case WM_T_82583:
3853 if (error == 0)
3854 wm_put_hw_semaphore_82573(sc);
3855 break;
3856 default:
3857 break;
3858 }
3859
3860 if (phy_reset != 0)
3861 wm_get_cfg_done(sc);
3862
3863 /* reload EEPROM */
3864 switch (sc->sc_type) {
3865 case WM_T_82542_2_0:
3866 case WM_T_82542_2_1:
3867 case WM_T_82543:
3868 case WM_T_82544:
3869 delay(10);
3870 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3871 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3872 CSR_WRITE_FLUSH(sc);
3873 delay(2000);
3874 break;
3875 case WM_T_82540:
3876 case WM_T_82545:
3877 case WM_T_82545_3:
3878 case WM_T_82546:
3879 case WM_T_82546_3:
3880 delay(5*1000);
3881 /* XXX Disable HW ARPs on ASF enabled adapters */
3882 break;
3883 case WM_T_82541:
3884 case WM_T_82541_2:
3885 case WM_T_82547:
3886 case WM_T_82547_2:
3887 delay(20000);
3888 /* XXX Disable HW ARPs on ASF enabled adapters */
3889 break;
3890 case WM_T_82571:
3891 case WM_T_82572:
3892 case WM_T_82573:
3893 case WM_T_82574:
3894 case WM_T_82583:
3895 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3896 delay(10);
3897 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3898 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3899 CSR_WRITE_FLUSH(sc);
3900 }
3901 /* check EECD_EE_AUTORD */
3902 wm_get_auto_rd_done(sc);
3903 /*
3904 		 * PHY configuration from NVM starts only after EECD_AUTO_RD
3905 		 * is set.
3906 */
3907 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3908 || (sc->sc_type == WM_T_82583))
3909 delay(25*1000);
3910 break;
3911 case WM_T_82575:
3912 case WM_T_82576:
3913 case WM_T_82580:
3914 case WM_T_I350:
3915 case WM_T_I354:
3916 case WM_T_I210:
3917 case WM_T_I211:
3918 case WM_T_80003:
3919 /* check EECD_EE_AUTORD */
3920 wm_get_auto_rd_done(sc);
3921 break;
3922 case WM_T_ICH8:
3923 case WM_T_ICH9:
3924 case WM_T_ICH10:
3925 case WM_T_PCH:
3926 case WM_T_PCH2:
3927 case WM_T_PCH_LPT:
3928 break;
3929 default:
3930 panic("%s: unknown type\n", __func__);
3931 }
3932
3933 /* Check whether EEPROM is present or not */
3934 switch (sc->sc_type) {
3935 case WM_T_82575:
3936 case WM_T_82576:
3937 case WM_T_82580:
3938 case WM_T_I350:
3939 case WM_T_I354:
3940 case WM_T_ICH8:
3941 case WM_T_ICH9:
3942 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3943 /* Not found */
3944 sc->sc_flags |= WM_F_EEPROM_INVALID;
3945 if (sc->sc_type == WM_T_82575)
3946 wm_reset_init_script_82575(sc);
3947 }
3948 break;
3949 default:
3950 break;
3951 }
3952
3953 if ((sc->sc_type == WM_T_82580)
3954 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3955 /* clear global device reset status bit */
3956 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3957 }
3958
3959 /* Clear any pending interrupt events. */
3960 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3961 reg = CSR_READ(sc, WMREG_ICR);
3962 if (sc->sc_nintrs > 1) {
3963 if (sc->sc_type != WM_T_82574) {
3964 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3965 CSR_WRITE(sc, WMREG_EIAC, 0);
3966 } else
3967 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3968 }
3969
3970 /* reload sc_ctrl */
3971 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3972
3973 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
3974 wm_set_eee_i350(sc);
3975
3976 /* dummy read from WUC */
3977 if (sc->sc_type == WM_T_PCH)
3978 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3979 /*
3980 * For PCH, this write will make sure that any noise will be detected
3981 * as a CRC error and be dropped rather than show up as a bad packet
3982 * to the DMA engine
3983 */
3984 if (sc->sc_type == WM_T_PCH)
3985 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3986
3987 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3988 CSR_WRITE(sc, WMREG_WUC, 0);
3989
3990 wm_reset_mdicnfg_82580(sc);
3991
3992 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
3993 wm_pll_workaround_i210(sc);
3994 }
3995
3996 /*
3997 * wm_add_rxbuf:
3998 *
3999  *	Add a receive buffer to the indicated descriptor.
4000 */
4001 static int
4002 wm_add_rxbuf(struct wm_softc *sc, int idx)
4003 {
4004 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4005 struct mbuf *m;
4006 int error;
4007
4008 KASSERT(WM_RX_LOCKED(sc));
4009
4010 MGETHDR(m, M_DONTWAIT, MT_DATA);
4011 if (m == NULL)
4012 return ENOBUFS;
4013
4014 MCLGET(m, M_DONTWAIT);
4015 if ((m->m_flags & M_EXT) == 0) {
4016 m_freem(m);
4017 return ENOBUFS;
4018 }
4019
4020 if (rxs->rxs_mbuf != NULL)
4021 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4022
4023 rxs->rxs_mbuf = m;
4024
4025 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4026 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4027 BUS_DMA_READ|BUS_DMA_NOWAIT);
4028 if (error) {
4029 /* XXX XXX XXX */
4030 aprint_error_dev(sc->sc_dev,
4031 "unable to load rx DMA map %d, error = %d\n",
4032 idx, error);
4033 panic("wm_add_rxbuf");
4034 }
4035
4036 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4037 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4038
4039 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4040 if ((sc->sc_rctl & RCTL_EN) != 0)
4041 wm_init_rxdesc(sc, idx);
4042 } else
4043 wm_init_rxdesc(sc, idx);
4044
4045 return 0;
4046 }
4047
4048 /*
4049 * wm_rxdrain:
4050 *
4051 * Drain the receive queue.
4052 */
4053 static void
4054 wm_rxdrain(struct wm_softc *sc)
4055 {
4056 struct wm_rxsoft *rxs;
4057 int i;
4058
4059 KASSERT(WM_RX_LOCKED(sc));
4060
4061 for (i = 0; i < WM_NRXDESC; i++) {
4062 rxs = &sc->sc_rxsoft[i];
4063 if (rxs->rxs_mbuf != NULL) {
4064 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4065 m_freem(rxs->rxs_mbuf);
4066 rxs->rxs_mbuf = NULL;
4067 }
4068 }
4069 }
4070
4071 /*
4072 * wm_init: [ifnet interface function]
4073 *
4074 * Initialize the interface.
4075 */
4076 static int
4077 wm_init(struct ifnet *ifp)
4078 {
4079 struct wm_softc *sc = ifp->if_softc;
4080 int ret;
4081
4082 WM_BOTH_LOCK(sc);
4083 ret = wm_init_locked(ifp);
4084 WM_BOTH_UNLOCK(sc);
4085
4086 return ret;
4087 }
4088
4089 static int
4090 wm_init_locked(struct ifnet *ifp)
4091 {
4092 struct wm_softc *sc = ifp->if_softc;
4093 int i, j, trynum, error = 0;
4094 uint32_t reg;
4095
4096 KASSERT(WM_BOTH_LOCKED(sc));
4097 /*
4098 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4099 	 * There is a small but measurable benefit to avoiding the adjustment
4100 * of the descriptor so that the headers are aligned, for normal mtu,
4101 * on such platforms. One possibility is that the DMA itself is
4102 * slightly more efficient if the front of the entire packet (instead
4103 * of the front of the headers) is aligned.
4104 *
4105 * Note we must always set align_tweak to 0 if we are using
4106 * jumbo frames.
4107 */
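	/*
	 * Concretely: ETHER_HDR_LEN is 14, so shifting the start of the
	 * receive buffer by 2 bytes puts the IP header that follows the
	 * Ethernet header on a 4-byte boundary (2 + 14 = 16).
	 */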
4108 #ifdef __NO_STRICT_ALIGNMENT
4109 sc->sc_align_tweak = 0;
4110 #else
4111 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4112 sc->sc_align_tweak = 0;
4113 else
4114 sc->sc_align_tweak = 2;
4115 #endif /* __NO_STRICT_ALIGNMENT */
4116
4117 /* Cancel any pending I/O. */
4118 wm_stop_locked(ifp, 0);
4119
4120 /* update statistics before reset */
4121 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4122 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4123
4124 /* Reset the chip to a known state. */
4125 wm_reset(sc);
4126
4127 switch (sc->sc_type) {
4128 case WM_T_82571:
4129 case WM_T_82572:
4130 case WM_T_82573:
4131 case WM_T_82574:
4132 case WM_T_82583:
4133 case WM_T_80003:
4134 case WM_T_ICH8:
4135 case WM_T_ICH9:
4136 case WM_T_ICH10:
4137 case WM_T_PCH:
4138 case WM_T_PCH2:
4139 case WM_T_PCH_LPT:
4140 if (wm_check_mng_mode(sc) != 0)
4141 wm_get_hw_control(sc);
4142 break;
4143 default:
4144 break;
4145 }
4146
4147 /* Init hardware bits */
4148 wm_initialize_hardware_bits(sc);
4149
4150 /* Reset the PHY. */
4151 if (sc->sc_flags & WM_F_HAS_MII)
4152 wm_gmii_reset(sc);
4153
4154 /* Calculate (E)ITR value */
4155 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4156 sc->sc_itr = 450; /* For EITR */
4157 } else if (sc->sc_type >= WM_T_82543) {
4158 /*
4159 * Set up the interrupt throttling register (units of 256ns)
4160 * Note that a footnote in Intel's documentation says this
4161 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4162 * or 10Mbit mode. Empirically, it appears to be the case
4163 		 * that the same is also true for the 1024ns units of the other
4164 * interrupt-related timer registers -- so, really, we ought
4165 * to divide this value by 4 when the link speed is low.
4166 *
4167 * XXX implement this division at link speed change!
4168 */
4169
4170 /*
4171 * For N interrupts/sec, set this value to:
4172 * 1000000000 / (N * 256). Note that we set the
4173 * absolute and packet timer values to this value
4174 * divided by 4 to get "simple timer" behavior.
4175 */
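	/*
	 * Checking the arithmetic for the value used below:
	 * 1000000000 / (1500 * 256) = 1000000000 / 384000, which is
	 * roughly 2604 interrupts/sec, matching the annotation on
	 * sc_itr.
	 */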
4176
4177 sc->sc_itr = 1500; /* 2604 ints/sec */
4178 }
4179
4180 error = wm_init_txrx_queues(sc);
4181 if (error)
4182 goto out;
4183
4184 /*
4185 * Clear out the VLAN table -- we don't use it (yet).
4186 */
4187 CSR_WRITE(sc, WMREG_VET, 0);
4188 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4189 trynum = 10; /* Due to hw errata */
4190 else
4191 trynum = 1;
4192 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4193 for (j = 0; j < trynum; j++)
4194 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4195
4196 /*
4197 * Set up flow-control parameters.
4198 *
4199 * XXX Values could probably stand some tuning.
4200 */
4201 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4202 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4203 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4204 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4205 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4206 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4207 }
4208
4209 sc->sc_fcrtl = FCRTL_DFLT;
4210 if (sc->sc_type < WM_T_82543) {
4211 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4212 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4213 } else {
4214 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4215 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4216 }
4217
4218 if (sc->sc_type == WM_T_80003)
4219 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4220 else
4221 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4222
4223 /* Writes the control register. */
4224 wm_set_vlan(sc);
4225
4226 if (sc->sc_flags & WM_F_HAS_MII) {
4227 int val;
4228
4229 switch (sc->sc_type) {
4230 case WM_T_80003:
4231 case WM_T_ICH8:
4232 case WM_T_ICH9:
4233 case WM_T_ICH10:
4234 case WM_T_PCH:
4235 case WM_T_PCH2:
4236 case WM_T_PCH_LPT:
4237 /*
4238 * Set the mac to wait the maximum time between each
4239 * iteration and increase the max iterations when
4240 * polling the phy; this fixes erroneous timeouts at
4241 * 10Mbps.
4242 */
4243 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4244 0xFFFF);
4245 val = wm_kmrn_readreg(sc,
4246 KUMCTRLSTA_OFFSET_INB_PARAM);
4247 val |= 0x3F;
4248 wm_kmrn_writereg(sc,
4249 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4250 break;
4251 default:
4252 break;
4253 }
4254
4255 if (sc->sc_type == WM_T_80003) {
4256 val = CSR_READ(sc, WMREG_CTRL_EXT);
4257 val &= ~CTRL_EXT_LINK_MODE_MASK;
4258 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4259
4260 /* Bypass RX and TX FIFO's */
4261 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4262 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4263 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4264 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4265 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4266 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4267 }
4268 }
4269 #if 0
4270 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4271 #endif
4272
4273 /* Set up checksum offload parameters. */
4274 reg = CSR_READ(sc, WMREG_RXCSUM);
4275 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4276 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4277 reg |= RXCSUM_IPOFL;
4278 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4279 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4280 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4281 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4282 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4283
4284 /* Set up MSI-X */
4285 if (sc->sc_nintrs > 1) {
4286 uint32_t ivar;
4287
4288 if (sc->sc_type == WM_T_82575) {
4289 /* Interrupt control */
4290 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4291 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4292 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4293
4294 /* TX */
4295 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_TXINTR_IDX),
4296 EITR_TX_QUEUE0);
4297 /* RX */
4298 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_RXINTR_IDX),
4299 EITR_RX_QUEUE0);
4300 /* Link status */
4301 CSR_WRITE(sc, WMREG_MSIXBM(WM_MSIX_LINKINTR_IDX),
4302 EITR_OTHER);
4303 } else if (sc->sc_type == WM_T_82574) {
4304 /* Interrupt control */
4305 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4306 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4307 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4308
4309 /* TX, RX and Link status */
4310 ivar = __SHIFTIN((IVAR_VALID_82574|WM_MSIX_TXINTR_IDX),
4311 IVAR_TX_MASK_Q_82574(0));
4312 ivar |= __SHIFTIN((IVAR_VALID_82574
4313 | WM_MSIX_RXINTR_IDX),
4314 IVAR_RX_MASK_Q_82574(0));
4315 			ivar |= __SHIFTIN((IVAR_VALID_82574 | WM_MSIX_LINKINTR_IDX),
4316 IVAR_OTHER_MASK);
4317 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4318 } else {
4319 /* Interrupt control */
4320 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
4321 | GPIE_MULTI_MSIX | GPIE_EIAME
4322 | GPIE_PBA);
4323
4324 switch (sc->sc_type) {
4325 case WM_T_82580:
4326 case WM_T_I350:
4327 case WM_T_I354:
4328 case WM_T_I210:
4329 case WM_T_I211:
4330 /* TX */
4331 ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
4332 ivar &= ~IVAR_TX_MASK_Q(0);
4333 ivar |= __SHIFTIN(
4334 (WM_MSIX_TXINTR_IDX | IVAR_VALID),
4335 IVAR_TX_MASK_Q(0));
4336 CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
4337
4338 /* RX */
4339 ivar = CSR_READ(sc, WMREG_IVAR_Q(0));
4340 ivar &= ~IVAR_RX_MASK_Q(0);
4341 ivar |= __SHIFTIN(
4342 (WM_MSIX_RXINTR_IDX | IVAR_VALID),
4343 IVAR_RX_MASK_Q(0));
4344 CSR_WRITE(sc, WMREG_IVAR_Q(0), ivar);
4345 break;
4346 case WM_T_82576:
4347 /* TX */
4348 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
4349 ivar &= ~IVAR_TX_MASK_Q_82576(0);
4350 ivar |= __SHIFTIN(
4351 (WM_MSIX_TXINTR_IDX | IVAR_VALID),
4352 IVAR_TX_MASK_Q_82576(0));
4353 CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
4354
4355 /* RX */
4356 ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(0));
4357 ivar &= ~IVAR_RX_MASK_Q_82576(0);
4358 ivar |= __SHIFTIN(
4359 (WM_MSIX_RXINTR_IDX | IVAR_VALID),
4360 IVAR_RX_MASK_Q_82576(0));
4361 CSR_WRITE(sc, WMREG_IVAR_Q_82576(0), ivar);
4362 break;
4363 default:
4364 break;
4365 }
4366
4367 /* Link status */
4368 ivar = __SHIFTIN((WM_MSIX_LINKINTR_IDX | IVAR_VALID),
4369 IVAR_MISC_OTHER);
4370 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4371 }
4372 }
4373
4374 /* Set up the interrupt registers. */
4375 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4376 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4377 ICR_RXO | ICR_RXT0;
4378 if (sc->sc_nintrs > 1) {
4379 uint32_t mask;
4380 switch (sc->sc_type) {
4381 case WM_T_82574:
4382 CSR_WRITE(sc, WMREG_EIAC_82574,
4383 WMREG_EIAC_82574_MSIX_MASK);
4384 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4385 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4386 break;
4387 default:
4388 if (sc->sc_type == WM_T_82575)
4389 				mask = EITR_RX_QUEUE0 | EITR_TX_QUEUE0
4390 | EITR_OTHER;
4391 else
4392 mask = (1 << WM_MSIX_RXINTR_IDX)
4393 | (1 << WM_MSIX_TXINTR_IDX)
4394 | (1 << WM_MSIX_LINKINTR_IDX);
4395 CSR_WRITE(sc, WMREG_EIAC, mask);
4396 CSR_WRITE(sc, WMREG_EIAM, mask);
4397 CSR_WRITE(sc, WMREG_EIMS, mask);
4398 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4399 break;
4400 }
4401 } else
4402 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4403
4404 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4405 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4406 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4407 reg = CSR_READ(sc, WMREG_KABGTXD);
4408 reg |= KABGTXD_BGSQLBIAS;
4409 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4410 }
4411
4412 /* Set up the inter-packet gap. */
4413 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4414
4415 if (sc->sc_type >= WM_T_82543) {
4416 /*
4417 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
4418 * the multi queue function with MSI-X.
4419 */
4420 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4421 if (sc->sc_nintrs > 1) {
4422 CSR_WRITE(sc, WMREG_EITR(WM_MSIX_RXINTR_IDX),
4423 sc->sc_itr);
4424 CSR_WRITE(sc, WMREG_EITR(WM_MSIX_TXINTR_IDX),
4425 sc->sc_itr);
4426 /*
4427 				 * Link interrupts occur much less often than
4428 				 * TX or RX interrupts, so we don't tune the
4429 				 * EITR(WM_MSIX_LINKINTR_IDX) value like
4430 * FreeBSD's if_igb.
4431 */
4432 } else
4433 CSR_WRITE(sc, WMREG_EITR(0), sc->sc_itr);
4434 } else
4435 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4436 }
4437
4438 /* Set the VLAN ethernetype. */
4439 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4440
4441 /*
4442 * Set up the transmit control register; we start out with
4443 	 * a collision distance suitable for FDX, but update it when
4444 * we resolve the media type.
4445 */
4446 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4447 | TCTL_CT(TX_COLLISION_THRESHOLD)
4448 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4449 if (sc->sc_type >= WM_T_82571)
4450 sc->sc_tctl |= TCTL_MULR;
4451 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4452
4453 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4454 		/* Write TDT after TCTL.EN is set. See the documentation. */
4455 CSR_WRITE(sc, WMREG_TDT, 0);
4456 }
4457
4458 if (sc->sc_type == WM_T_80003) {
4459 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4460 reg &= ~TCTL_EXT_GCEX_MASK;
4461 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4462 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4463 }
4464
4465 /* Set the media. */
4466 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4467 goto out;
4468
4469 /* Configure for OS presence */
4470 wm_init_manageability(sc);
4471
4472 /*
4473 * Set up the receive control register; we actually program
4474 * the register when we set the receive filter. Use multicast
4475 * address offset type 0.
4476 *
4477 * Only the i82544 has the ability to strip the incoming
4478 * CRC, so we don't enable that feature.
4479 */
4480 sc->sc_mchash_type = 0;
4481 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4482 | RCTL_MO(sc->sc_mchash_type);
4483
4484 /*
4485 * The I350 has a bug where it always strips the CRC whether
4486 * asked to or not. So ask for stripped CRC here and cope in rxeof
4487 */
4488 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4489 || (sc->sc_type == WM_T_I210))
4490 sc->sc_rctl |= RCTL_SECRC;
4491
4492 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4493 && (ifp->if_mtu > ETHERMTU)) {
4494 sc->sc_rctl |= RCTL_LPE;
4495 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4496 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4497 }
4498
4499 if (MCLBYTES == 2048) {
4500 sc->sc_rctl |= RCTL_2k;
4501 } else {
4502 if (sc->sc_type >= WM_T_82543) {
4503 switch (MCLBYTES) {
4504 case 4096:
4505 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4506 break;
4507 case 8192:
4508 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4509 break;
4510 case 16384:
4511 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4512 break;
4513 default:
4514 panic("wm_init: MCLBYTES %d unsupported",
4515 MCLBYTES);
4516 break;
4517 }
4518 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4519 }
4520
4521 /* Set the receive filter. */
4522 wm_set_filter(sc);
4523
4524 /* Enable ECC */
4525 switch (sc->sc_type) {
4526 case WM_T_82571:
4527 reg = CSR_READ(sc, WMREG_PBA_ECC);
4528 reg |= PBA_ECC_CORR_EN;
4529 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4530 break;
4531 case WM_T_PCH_LPT:
4532 reg = CSR_READ(sc, WMREG_PBECCSTS);
4533 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4534 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4535
4536 reg = CSR_READ(sc, WMREG_CTRL);
4537 reg |= CTRL_MEHE;
4538 CSR_WRITE(sc, WMREG_CTRL, reg);
4539 break;
4540 default:
4541 break;
4542 }
4543
4544 	/* On 82575 and later, set RDT only if RX is enabled */
4545 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4546 for (i = 0; i < WM_NRXDESC; i++)
4547 wm_init_rxdesc(sc, i);
4548
4549 sc->sc_stopping = false;
4550
4551 /* Start the one second link check clock. */
4552 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4553
4554 /* ...all done! */
4555 ifp->if_flags |= IFF_RUNNING;
4556 ifp->if_flags &= ~IFF_OACTIVE;
4557
4558 out:
4559 sc->sc_if_flags = ifp->if_flags;
4560 if (error)
4561 log(LOG_ERR, "%s: interface not running\n",
4562 device_xname(sc->sc_dev));
4563 return error;
4564 }
4565
4566 /*
4567 * wm_stop: [ifnet interface function]
4568 *
4569 * Stop transmission on the interface.
4570 */
4571 static void
4572 wm_stop(struct ifnet *ifp, int disable)
4573 {
4574 struct wm_softc *sc = ifp->if_softc;
4575
4576 WM_BOTH_LOCK(sc);
4577 wm_stop_locked(ifp, disable);
4578 WM_BOTH_UNLOCK(sc);
4579 }
4580
4581 static void
4582 wm_stop_locked(struct ifnet *ifp, int disable)
4583 {
4584 struct wm_softc *sc = ifp->if_softc;
4585 struct wm_txsoft *txs;
4586 int i;
4587
4588 KASSERT(WM_BOTH_LOCKED(sc));
4589
4590 sc->sc_stopping = true;
4591
4592 /* Stop the one second clock. */
4593 callout_stop(&sc->sc_tick_ch);
4594
4595 /* Stop the 82547 Tx FIFO stall check timer. */
4596 if (sc->sc_type == WM_T_82547)
4597 callout_stop(&sc->sc_txfifo_ch);
4598
4599 if (sc->sc_flags & WM_F_HAS_MII) {
4600 /* Down the MII. */
4601 mii_down(&sc->sc_mii);
4602 } else {
4603 #if 0
4604 /* Should we clear PHY's status properly? */
4605 wm_reset(sc);
4606 #endif
4607 }
4608
4609 /* Stop the transmit and receive processes. */
4610 CSR_WRITE(sc, WMREG_TCTL, 0);
4611 CSR_WRITE(sc, WMREG_RCTL, 0);
4612 sc->sc_rctl &= ~RCTL_EN;
4613
4614 /*
4615 * Clear the interrupt mask to ensure the device cannot assert its
4616 * interrupt line.
4617 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
4618 * service any currently pending or shared interrupt.
4619 */
4620 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4621 sc->sc_icr = 0;
4622 if (sc->sc_nintrs > 1) {
4623 if (sc->sc_type != WM_T_82574) {
4624 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4625 CSR_WRITE(sc, WMREG_EIAC, 0);
4626 } else
4627 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4628 }
4629
4630 /* Release any queued transmit buffers. */
4631 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4632 txs = &sc->sc_txsoft[i];
4633 if (txs->txs_mbuf != NULL) {
4634 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4635 m_freem(txs->txs_mbuf);
4636 txs->txs_mbuf = NULL;
4637 }
4638 }
4639
4640 /* Mark the interface as down and cancel the watchdog timer. */
4641 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4642 ifp->if_timer = 0;
4643
4644 if (disable)
4645 wm_rxdrain(sc);
4646
4647 #if 0 /* notyet */
4648 if (sc->sc_type >= WM_T_82544)
4649 CSR_WRITE(sc, WMREG_WUC, 0);
4650 #endif
4651 }
4652
4653 /*
4654 * wm_tx_offload:
4655 *
4656 * Set up TCP/IP checksumming parameters for the
4657 * specified packet.
4658 */
4659 static int
4660 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4661 uint8_t *fieldsp)
4662 {
4663 struct mbuf *m0 = txs->txs_mbuf;
4664 struct livengood_tcpip_ctxdesc *t;
4665 uint32_t ipcs, tucs, cmd, cmdlen, seg;
4666 uint32_t ipcse;
4667 struct ether_header *eh;
4668 int offset, iphl;
4669 uint8_t fields;
4670
4671 /*
4672 * XXX It would be nice if the mbuf pkthdr had offset
4673 * fields for the protocol headers.
4674 */
4675
4676 eh = mtod(m0, struct ether_header *);
4677 switch (htons(eh->ether_type)) {
4678 case ETHERTYPE_IP:
4679 case ETHERTYPE_IPV6:
4680 offset = ETHER_HDR_LEN;
4681 break;
4682
4683 case ETHERTYPE_VLAN:
4684 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4685 break;
4686
4687 default:
4688 /*
4689 * Don't support this protocol or encapsulation.
4690 */
4691 *fieldsp = 0;
4692 *cmdp = 0;
4693 return 0;
4694 }
4695
4696 if ((m0->m_pkthdr.csum_flags &
4697 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4698 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4699 } else {
4700 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4701 }
4702 ipcse = offset + iphl - 1;
4703
4704 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4705 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4706 seg = 0;
4707 fields = 0;
4708
4709 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4710 int hlen = offset + iphl;
4711 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4712
4713 if (__predict_false(m0->m_len <
4714 (hlen + sizeof(struct tcphdr)))) {
4715 /*
4716 * TCP/IP headers are not in the first mbuf; we need
4717 * to do this the slow and painful way. Let's just
4718 * hope this doesn't happen very often.
4719 */
4720 struct tcphdr th;
4721
4722 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4723
4724 m_copydata(m0, hlen, sizeof(th), &th);
4725 if (v4) {
4726 struct ip ip;
4727
4728 m_copydata(m0, offset, sizeof(ip), &ip);
4729 ip.ip_len = 0;
4730 m_copyback(m0,
4731 offset + offsetof(struct ip, ip_len),
4732 sizeof(ip.ip_len), &ip.ip_len);
4733 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4734 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4735 } else {
4736 struct ip6_hdr ip6;
4737
4738 m_copydata(m0, offset, sizeof(ip6), &ip6);
4739 ip6.ip6_plen = 0;
4740 m_copyback(m0,
4741 offset + offsetof(struct ip6_hdr, ip6_plen),
4742 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4743 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4744 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4745 }
4746 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4747 sizeof(th.th_sum), &th.th_sum);
4748
4749 hlen += th.th_off << 2;
4750 } else {
4751 /*
4752 * TCP/IP headers are in the first mbuf; we can do
4753 * this the easy way.
4754 */
4755 struct tcphdr *th;
4756
4757 if (v4) {
4758 struct ip *ip =
4759 (void *)(mtod(m0, char *) + offset);
4760 th = (void *)(mtod(m0, char *) + hlen);
4761
4762 ip->ip_len = 0;
4763 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4764 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4765 } else {
4766 struct ip6_hdr *ip6 =
4767 (void *)(mtod(m0, char *) + offset);
4768 th = (void *)(mtod(m0, char *) + hlen);
4769
4770 ip6->ip6_plen = 0;
4771 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4772 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4773 }
4774 hlen += th->th_off << 2;
4775 }
4776
4777 if (v4) {
4778 WM_EVCNT_INCR(&sc->sc_ev_txtso);
4779 cmdlen |= WTX_TCPIP_CMD_IP;
4780 } else {
4781 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4782 ipcse = 0;
4783 }
4784 cmd |= WTX_TCPIP_CMD_TSE;
4785 cmdlen |= WTX_TCPIP_CMD_TSE |
4786 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4787 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4788 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4789 }
4790
4791 /*
4792 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4793 * offload feature, if we load the context descriptor, we
4794 * MUST provide valid values for IPCSS and TUCSS fields.
4795 */
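	/*
	 * A worked example for a standard untagged IPv4/TCP frame with a
	 * 20-byte IP header: offset = 14, so IPCSS = 14, IPCSO = 14 +
	 * offsetof(struct ip, ip_sum) = 24 and IPCSE = 14 + 20 - 1 = 33;
	 * after "offset += iphl" below, TUCSS = 34 and, for plain TCP
	 * checksum offload, TUCSO = 34 + 16 = 50 (the csum_data offset
	 * equals offsetof(struct tcphdr, th_sum)).
	 */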
4796
4797 ipcs = WTX_TCPIP_IPCSS(offset) |
4798 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4799 WTX_TCPIP_IPCSE(ipcse);
4800 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4801 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4802 fields |= WTX_IXSM;
4803 }
4804
4805 offset += iphl;
4806
4807 if (m0->m_pkthdr.csum_flags &
4808 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4809 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4810 fields |= WTX_TXSM;
4811 tucs = WTX_TCPIP_TUCSS(offset) |
4812 WTX_TCPIP_TUCSO(offset +
4813 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4814 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4815 } else if ((m0->m_pkthdr.csum_flags &
4816 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4817 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4818 fields |= WTX_TXSM;
4819 tucs = WTX_TCPIP_TUCSS(offset) |
4820 WTX_TCPIP_TUCSO(offset +
4821 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4822 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4823 } else {
4824 /* Just initialize it to a valid TCP context. */
4825 tucs = WTX_TCPIP_TUCSS(offset) |
4826 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4827 WTX_TCPIP_TUCSE(0) /* rest of packet */;
4828 }
4829
4830 /* Fill in the context descriptor. */
4831 t = (struct livengood_tcpip_ctxdesc *)
4832 &sc->sc_txdescs[sc->sc_txnext];
4833 t->tcpip_ipcs = htole32(ipcs);
4834 t->tcpip_tucs = htole32(tucs);
4835 t->tcpip_cmdlen = htole32(cmdlen);
4836 t->tcpip_seg = htole32(seg);
4837 wm_cdtxsync(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4838
4839 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4840 txs->txs_ndesc++;
4841
4842 *cmdp = cmd;
4843 *fieldsp = fields;
4844
4845 return 0;
4846 }
4847
4848 static void
4849 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4850 {
4851 struct mbuf *m;
4852 int i;
4853
4854 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4855 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4856 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4857 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4858 m->m_data, m->m_len, m->m_flags);
4859 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4860 i, i == 1 ? "" : "s");
4861 }
4862
4863 /*
4864 * wm_82547_txfifo_stall:
4865 *
4866 * Callout used to wait for the 82547 Tx FIFO to drain,
4867 * reset the FIFO pointers, and restart packet transmission.
4868 */
4869 static void
4870 wm_82547_txfifo_stall(void *arg)
4871 {
4872 struct wm_softc *sc = arg;
4873 #ifndef WM_MPSAFE
4874 int s;
4875
4876 s = splnet();
4877 #endif
4878 WM_TX_LOCK(sc);
4879
4880 if (sc->sc_stopping)
4881 goto out;
4882
4883 if (sc->sc_txfifo_stall) {
4884 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4885 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4886 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4887 /*
4888 * Packets have drained. Stop transmitter, reset
4889 * FIFO pointers, restart transmitter, and kick
4890 * the packet queue.
4891 */
4892 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4893 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4894 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4895 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4896 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4897 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4898 CSR_WRITE(sc, WMREG_TCTL, tctl);
4899 CSR_WRITE_FLUSH(sc);
4900
4901 sc->sc_txfifo_head = 0;
4902 sc->sc_txfifo_stall = 0;
4903 wm_start_locked(&sc->sc_ethercom.ec_if);
4904 } else {
4905 /*
4906 * Still waiting for packets to drain; try again in
4907 * another tick.
4908 */
4909 callout_schedule(&sc->sc_txfifo_ch, 1);
4910 }
4911 }
4912
4913 out:
4914 WM_TX_UNLOCK(sc);
4915 #ifndef WM_MPSAFE
4916 splx(s);
4917 #endif
4918 }
4919
4920 /*
4921 * wm_82547_txfifo_bugchk:
4922 *
4923 * Check for bug condition in the 82547 Tx FIFO. We need to
4924  * prevent enqueueing a packet that would wrap around the end
4925  * of the Tx FIFO ring buffer, otherwise the chip will croak.
4926 *
4927 * We do this by checking the amount of space before the end
4928 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
4929 * the Tx FIFO, wait for all remaining packets to drain, reset
4930 * the internal FIFO pointers to the beginning, and restart
4931 * transmission on the interface.
4932 */
4933 #define WM_FIFO_HDR 0x10
4934 #define WM_82547_PAD_LEN 0x3e0
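/*
 * A worked example of the check below: assume the FIFO is nearly full,
 * with space = 512 bytes left before the wrap point, and a 1514-byte
 * frame arrives. Then len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) =
 * 1536, and since 1536 >= WM_82547_PAD_LEN (992) + 512 = 1504, the
 * packet is held back and the stall callout is scheduled.
 */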
4935 static int
4936 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4937 {
4938 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4939 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4940
4941 /* Just return if already stalled. */
4942 if (sc->sc_txfifo_stall)
4943 return 1;
4944
4945 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4946 /* Stall only occurs in half-duplex mode. */
4947 goto send_packet;
4948 }
4949
4950 if (len >= WM_82547_PAD_LEN + space) {
4951 sc->sc_txfifo_stall = 1;
4952 callout_schedule(&sc->sc_txfifo_ch, 1);
4953 return 1;
4954 }
4955
4956 send_packet:
4957 sc->sc_txfifo_head += len;
4958 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4959 sc->sc_txfifo_head -= sc->sc_txfifo_size;
4960
4961 return 0;
4962 }
4963
4964 static int
4965 wm_alloc_tx_descs(struct wm_softc *sc)
4966 {
4967 int error;
4968
4969 /*
4970 * Allocate the control data structures, and create and load the
4971 * DMA map for it.
4972 *
4973 * NOTE: All Tx descriptors must be in the same 4G segment of
4974 * memory. So must Rx descriptors. We simplify by allocating
4975 * both sets within the same 4G segment.
4976 */
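	/*
	 * The 4G constraint is enforced by the boundary argument to
	 * bus_dmamem_alloc() below: 0x100000000ULL keeps the allocation
	 * from crossing a 4GB boundary, which matters because the chip
	 * latches the upper 32 address bits in TDBAH/RDBAH and only the
	 * low 32 bits vary across the ring.
	 */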
4977 if (sc->sc_type < WM_T_82544) {
4978 WM_NTXDESC(sc) = WM_NTXDESC_82542;
4979 sc->sc_txdesc_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(sc);
4980 } else {
4981 WM_NTXDESC(sc) = WM_NTXDESC_82544;
4982 sc->sc_txdesc_size = sizeof(txdescs_t);
4983 }
4984
4985 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_txdesc_size, PAGE_SIZE,
4986 (bus_size_t) 0x100000000ULL, &sc->sc_txdesc_seg, 1,
4987 &sc->sc_txdesc_rseg, 0)) != 0) {
4988 aprint_error_dev(sc->sc_dev,
4989 "unable to allocate TX control data, error = %d\n",
4990 error);
4991 goto fail_0;
4992 }
4993
4994 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_txdesc_seg,
4995 sc->sc_txdesc_rseg, sc->sc_txdesc_size,
4996 (void **)&sc->sc_txdescs_u, BUS_DMA_COHERENT)) != 0) {
4997 aprint_error_dev(sc->sc_dev,
4998 "unable to map TX control data, error = %d\n", error);
4999 goto fail_1;
5000 }
5001
5002 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_txdesc_size, 1,
5003 sc->sc_txdesc_size, 0, 0, &sc->sc_txdesc_dmamap)) != 0) {
5004 aprint_error_dev(sc->sc_dev,
5005 "unable to create TX control data DMA map, error = %d\n",
5006 error);
5007 goto fail_2;
5008 }
5009
5010 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_txdesc_dmamap,
5011 sc->sc_txdescs_u, sc->sc_txdesc_size, NULL, 0)) != 0) {
5012 aprint_error_dev(sc->sc_dev,
5013 "unable to load TX control data DMA map, error = %d\n",
5014 error);
5015 goto fail_3;
5016 }
5017
5018 return 0;
5019
5020 fail_3:
5021 bus_dmamap_destroy(sc->sc_dmat, sc->sc_txdesc_dmamap);
5022 fail_2:
5023 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_txdescs_u,
5024 sc->sc_txdesc_size);
5025 fail_1:
5026 bus_dmamem_free(sc->sc_dmat, &sc->sc_txdesc_seg, sc->sc_txdesc_rseg);
5027 fail_0:
5028 return error;
5029 }
5030
5031 static void
5032 wm_free_tx_descs(struct wm_softc *sc)
5033 {
5034
5035 bus_dmamap_unload(sc->sc_dmat, sc->sc_txdesc_dmamap);
5036 bus_dmamap_destroy(sc->sc_dmat, sc->sc_txdesc_dmamap);
5037 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_txdescs_u,
5038 sc->sc_txdesc_size);
5039 bus_dmamem_free(sc->sc_dmat, &sc->sc_txdesc_seg, sc->sc_txdesc_rseg);
5040 }
5041
5042 static int
5043 wm_alloc_rx_descs(struct wm_softc *sc)
5044 {
5045 int error;
5046
5047 /*
5048 * Allocate the control data structures, and create and load the
5049 * DMA map for it.
5050 *
5051 * NOTE: All Tx descriptors must be in the same 4G segment of
5052 * memory. So must Rx descriptors. We simplify by allocating
5053 * both sets within the same 4G segment.
5054 */
5055 sc->sc_rxdesc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5056 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rxdesc_size, PAGE_SIZE,
5057 (bus_size_t) 0x100000000ULL, &sc->sc_rxdesc_seg, 1,
5058 &sc->sc_rxdesc_rseg, 0)) != 0) {
5059 aprint_error_dev(sc->sc_dev,
5060 "unable to allocate RX control data, error = %d\n",
5061 error);
5062 goto fail_0;
5063 }
5064
5065 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_rxdesc_seg,
5066 sc->sc_rxdesc_rseg, sc->sc_rxdesc_size,
5067 (void **)&sc->sc_rxdescs, BUS_DMA_COHERENT)) != 0) {
5068 aprint_error_dev(sc->sc_dev,
5069 "unable to map RX control data, error = %d\n", error);
5070 goto fail_1;
5071 }
5072
5073 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_rxdesc_size, 1,
5074 sc->sc_rxdesc_size, 0, 0, &sc->sc_rxdesc_dmamap)) != 0) {
5075 aprint_error_dev(sc->sc_dev,
5076 "unable to create RX control data DMA map, error = %d\n",
5077 error);
5078 goto fail_2;
5079 }
5080
5081 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_rxdesc_dmamap,
5082 sc->sc_rxdescs, sc->sc_rxdesc_size, NULL, 0)) != 0) {
5083 aprint_error_dev(sc->sc_dev,
5084 "unable to load RX control data DMA map, error = %d\n",
5085 error);
5086 goto fail_3;
5087 }
5088
5089 return 0;
5090
5091 fail_3:
5092 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxdesc_dmamap);
5093 fail_2:
5094 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_rxdescs,
5095 sc->sc_rxdesc_size);
5096 fail_1:
5097 bus_dmamem_free(sc->sc_dmat, &sc->sc_rxdesc_seg, sc->sc_rxdesc_rseg);
5098 fail_0:
5099 return error;
5100 }
5101
5102 static void
5103 wm_free_rx_descs(struct wm_softc *sc)
5104 {
5105
5106 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxdesc_dmamap);
5107 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxdesc_dmamap);
5108 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_rxdescs,
5109 sc->sc_rxdesc_size);
5110 bus_dmamem_free(sc->sc_dmat, &sc->sc_rxdesc_seg, sc->sc_rxdesc_rseg);
5111 }
5112
5113
5114 static int
5115 wm_alloc_tx_buffer(struct wm_softc *sc)
5116 {
5117 int i, error;
5118
5119 /* Create the transmit buffer DMA maps. */
5120 WM_TXQUEUELEN(sc) =
5121 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5122 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5123 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
5124 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5125 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5126 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
5127 aprint_error_dev(sc->sc_dev,
5128 "unable to create Tx DMA map %d, error = %d\n",
5129 i, error);
5130 goto fail;
5131 }
5132 }
5133
5134 return 0;
5135
5136 fail:
5137 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
5138 if (sc->sc_txsoft[i].txs_dmamap != NULL)
5139 bus_dmamap_destroy(sc->sc_dmat,
5140 sc->sc_txsoft[i].txs_dmamap);
5141 }
5142 return error;
5143 }
5144
5145 static void
5146 wm_free_tx_buffer(struct wm_softc *sc)
5147 {
5148 int i;
5149
5150 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
5151 if (sc->sc_txsoft[i].txs_dmamap != NULL)
5152 bus_dmamap_destroy(sc->sc_dmat,
5153 sc->sc_txsoft[i].txs_dmamap);
5154 }
5155 }
5156
5157 static int
5158 wm_alloc_rx_buffer(struct wm_softc *sc)
5159 {
5160 int i, error;
5161
5162 /* Create the receive buffer DMA maps. */
5163 for (i = 0; i < WM_NRXDESC; i++) {
5164 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5165 MCLBYTES, 0, 0,
5166 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
5167 aprint_error_dev(sc->sc_dev,
5168 "unable to create Rx DMA map %d error = %d\n",
5169 i, error);
5170 goto fail;
5171 }
5172 sc->sc_rxsoft[i].rxs_mbuf = NULL;
5173 }
5174
5175 return 0;
5176
5177 fail:
5178 for (i = 0; i < WM_NRXDESC; i++) {
5179 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
5180 bus_dmamap_destroy(sc->sc_dmat,
5181 sc->sc_rxsoft[i].rxs_dmamap);
5182 }
5183 return error;
5184 }
5185
5186 static void
5187 wm_free_rx_buffer(struct wm_softc *sc)
5188 {
5189 int i;
5190
5191 for (i = 0; i < WM_NRXDESC; i++) {
5192 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
5193 bus_dmamap_destroy(sc->sc_dmat,
5194 sc->sc_rxsoft[i].rxs_dmamap);
5195 }
5196 }
5197
5198 /*
5199  * wm_alloc_txrx_queues:
5200 * Allocate {tx,rx}descs and {tx,rx} buffers
5201 */
5202 static int
5203 wm_alloc_txrx_queues(struct wm_softc *sc)
5204 {
5205 int error;
5206
5207 /*
5208 * For transmission
5209 */
5210 error = wm_alloc_tx_descs(sc);
5211 if (error)
5212 goto fail_0;
5213
5214 error = wm_alloc_tx_buffer(sc);
5215 if (error)
5216 goto fail_1;
5217
5218 /*
5219 	 * For receive
5220 */
5221 error = wm_alloc_rx_descs(sc);
5222 if (error)
5223 goto fail_2;
5224
5225 error = wm_alloc_rx_buffer(sc);
5226 if (error)
5227 goto fail_3;
5228
5229 return 0;
5230
5231 fail_3:
5232 wm_free_rx_descs(sc);
5233 fail_2:
5234 wm_free_tx_buffer(sc);
5235 fail_1:
5236 wm_free_tx_descs(sc);
5237 fail_0:
5238 return error;
5239 }
5240
5241 /*
5242  * wm_free_txrx_queues:
5243 * Free {tx,rx}descs and {tx,rx} buffers
5244 */
5245 static void
5246 wm_free_txrx_queues(struct wm_softc *sc)
5247 {
5248
5249 wm_free_rx_buffer(sc);
5250 wm_free_rx_descs(sc);
5251 wm_free_tx_buffer(sc);
5252 wm_free_tx_descs(sc);
5253 }
5254
5255 static void
5256 wm_init_tx_descs(struct wm_softc *sc)
5257 {
5258
5259 KASSERT(WM_TX_LOCKED(sc));
5260
5261 /* Initialize the transmit descriptor ring. */
5262 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
5263 wm_cdtxsync(sc, 0, WM_NTXDESC(sc),
5264 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5265 sc->sc_txfree = WM_NTXDESC(sc);
5266 sc->sc_txnext = 0;
5267
5268 if (sc->sc_type < WM_T_82543) {
5269 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
5270 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
5271 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
5272 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5273 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5274 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5275 } else {
5276 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
5277 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
5278 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
5279 CSR_WRITE(sc, WMREG_TDH, 0);
5280
5281 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5282 /*
5283 * Don't write TDT before TCTL.EN is set.
5284 			 * See the documentation.
5285 */
5286 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
5287 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5288 | TXDCTL_WTHRESH(0));
5289 else {
5290 /* ITR / 4 */
5291 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5292 if (sc->sc_type >= WM_T_82540) {
5293 /* should be same */
5294 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5295 }
5296
5297 CSR_WRITE(sc, WMREG_TDT, 0);
5298 CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
5299 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5300 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
5301 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5302 }
5303 }
5304 }
5305
5306 static void
5307 wm_init_tx_buffer(struct wm_softc *sc)
5308 {
5309 int i;
5310
5311 KASSERT(WM_TX_LOCKED(sc));
5312
5313 /* Initialize the transmit job descriptors. */
5314 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
5315 sc->sc_txsoft[i].txs_mbuf = NULL;
5316 sc->sc_txsfree = WM_TXQUEUELEN(sc);
5317 sc->sc_txsnext = 0;
5318 sc->sc_txsdirty = 0;
5319 }
5320
5321 static void
5322 wm_init_tx_queue(struct wm_softc *sc)
5323 {
5324
5325 KASSERT(WM_TX_LOCKED(sc));
5326
5327 /*
5328 * Set up some register offsets that are different between
5329 * the i82542 and the i82543 and later chips.
5330 */
5331 if (sc->sc_type < WM_T_82543) {
5332 sc->sc_tdt_reg = WMREG_OLD_TDT;
5333 } else {
5334 sc->sc_tdt_reg = WMREG_TDT;
5335 }
5336
5337 wm_init_tx_descs(sc);
5338 wm_init_tx_buffer(sc);
5339 }
5340
5341 static void
5342 wm_init_rx_descs(struct wm_softc *sc)
5343 {
5344
5345 KASSERT(WM_RX_LOCKED(sc));
5346
5347 /*
5348 * Initialize the receive descriptor and receive job
5349 * descriptor rings.
5350 */
5351 if (sc->sc_type < WM_T_82543) {
5352 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
5353 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
5354 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5355 sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5356 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5357 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5358 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5359
5360 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5361 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5362 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5363 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5364 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5365 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5366 } else {
5367 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
5368 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
5369 CSR_WRITE(sc, WMREG_RDLEN,
5370 sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5371
5372 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5373 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
5374 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
5375 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
5376 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
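			/*
			 * Assuming SRRCTL_BSIZEPKT_SHIFT is 10, the
			 * buffer size field above is in 1KB units; the
			 * standard 2048-byte MCLBYTES is an exact
			 * multiple (satisfying the panic check) and is
			 * programmed as 2048 >> 10 = 2.
			 */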
5377 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
5378 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
5379 | RXDCTL_WTHRESH(1));
5380 } else {
5381 CSR_WRITE(sc, WMREG_RDH, 0);
5382 CSR_WRITE(sc, WMREG_RDT, 0);
5383 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
5384 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
5385 }
5386 }
5387 }
5388
5389 static int
5390 wm_init_rx_buffer(struct wm_softc *sc)
5391 {
5392 struct wm_rxsoft *rxs;
5393 int error, i;
5394
5395 KASSERT(WM_RX_LOCKED(sc));
5396
5397 for (i = 0; i < WM_NRXDESC; i++) {
5398 rxs = &sc->sc_rxsoft[i];
5399 if (rxs->rxs_mbuf == NULL) {
5400 if ((error = wm_add_rxbuf(sc, i)) != 0) {
5401 log(LOG_ERR, "%s: unable to allocate or map "
5402 "rx buffer %d, error = %d\n",
5403 device_xname(sc->sc_dev), i, error);
5404 /*
5405 * XXX Should attempt to run with fewer receive
5406 * XXX buffers instead of just failing.
5407 */
5408 wm_rxdrain(sc);
5409 return ENOMEM;
5410 }
5411 } else {
5412 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5413 wm_init_rxdesc(sc, i);
5414 /*
5415 			 * For 82575 and newer devices, the RX descriptors
5416 * must be initialized after the setting of RCTL.EN in
5417 * wm_set_filter()
5418 */
5419 }
5420 }
5421 sc->sc_rxptr = 0;
5422 sc->sc_rxdiscard = 0;
5423 WM_RXCHAIN_RESET(sc);
5424
5425 return 0;
5426 }
5427
5428 static int
5429 wm_init_rx_queue(struct wm_softc *sc)
5430 {
5431
5432 KASSERT(WM_RX_LOCKED(sc));
5433
5434 /*
5435 * Set up some register offsets that are different between
5436 * the i82542 and the i82543 and later chips.
5437 */
5438 if (sc->sc_type < WM_T_82543) {
5439 sc->sc_rdt_reg = WMREG_OLD_RDT0;
5440 } else {
5441 sc->sc_rdt_reg = WMREG_RDT;
5442 }
5443
5444 wm_init_rx_descs(sc);
5445 return wm_init_rx_buffer(sc);
5446 }
5447
5448 /*
5449  * wm_init_txrx_queues:
5450 * Initialize {tx,rx}descs and {tx,rx} buffers
5451 */
5452 static int
5453 wm_init_txrx_queues(struct wm_softc *sc)
5454 {
5455 int error;
5456
5457 KASSERT(WM_BOTH_LOCKED(sc));
5458
5459 wm_init_tx_queue(sc);
5460 error = wm_init_rx_queue(sc);
5461
5462 return error;
5463 }
5464
5465 /*
5466 * wm_start: [ifnet interface function]
5467 *
5468 * Start packet transmission on the interface.
5469 */
5470 static void
5471 wm_start(struct ifnet *ifp)
5472 {
5473 struct wm_softc *sc = ifp->if_softc;
5474
5475 WM_TX_LOCK(sc);
5476 if (!sc->sc_stopping)
5477 wm_start_locked(ifp);
5478 WM_TX_UNLOCK(sc);
5479 }
5480
5481 static void
5482 wm_start_locked(struct ifnet *ifp)
5483 {
5484 struct wm_softc *sc = ifp->if_softc;
5485 struct mbuf *m0;
5486 struct m_tag *mtag;
5487 struct wm_txsoft *txs;
5488 bus_dmamap_t dmamap;
5489 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
5490 bus_addr_t curaddr;
5491 bus_size_t seglen, curlen;
5492 uint32_t cksumcmd;
5493 uint8_t cksumfields;
5494
5495 KASSERT(WM_TX_LOCKED(sc));
5496
5497 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5498 return;
5499
5500 /* Remember the previous number of free descriptors. */
5501 ofree = sc->sc_txfree;
5502
5503 /*
5504 * Loop through the send queue, setting up transmit descriptors
5505 * until we drain the queue, or use up all available transmit
5506 * descriptors.
5507 */
5508 for (;;) {
5509 m0 = NULL;
5510
5511 /* Get a work queue entry. */
5512 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5513 wm_txeof(sc);
5514 if (sc->sc_txsfree == 0) {
5515 DPRINTF(WM_DEBUG_TX,
5516 ("%s: TX: no free job descriptors\n",
5517 device_xname(sc->sc_dev)));
5518 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5519 break;
5520 }
5521 }
5522
5523 /* Grab a packet off the queue. */
5524 IFQ_DEQUEUE(&ifp->if_snd, m0);
5525 if (m0 == NULL)
5526 break;
5527
5528 DPRINTF(WM_DEBUG_TX,
5529 ("%s: TX: have packet to transmit: %p\n",
5530 device_xname(sc->sc_dev), m0));
5531
5532 txs = &sc->sc_txsoft[sc->sc_txsnext];
5533 dmamap = txs->txs_dmamap;
5534
5535 use_tso = (m0->m_pkthdr.csum_flags &
5536 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
5537
5538 /*
5539 * So says the Linux driver:
5540 * The controller does a simple calculation to make sure
5541 * there is enough room in the FIFO before initiating the
5542 * DMA for each buffer. The calc is:
5543 * 4 = ceil(buffer len / MSS)
5544 * To make sure we don't overrun the FIFO, adjust the max
5545 * buffer len if the MSS drops.
5546 */
5547 dmamap->dm_maxsegsz =
5548 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
5549 ? m0->m_pkthdr.segsz << 2
5550 : WTX_MAX_LEN;
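		/* I.e. for TSO, clamp each DMA segment to 4 * MSS. */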
5551
5552 /*
5553 * Load the DMA map. If this fails, the packet either
5554 * didn't fit in the allotted number of segments, or we
5555 * were short on resources. For the too-many-segments
5556 * case, we simply report an error and drop the packet,
5557 * since we can't sanely copy a jumbo packet to a single
5558 * buffer.
5559 */
5560 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5561 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5562 if (error) {
5563 if (error == EFBIG) {
5564 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5565 log(LOG_ERR, "%s: Tx packet consumes too many "
5566 "DMA segments, dropping...\n",
5567 device_xname(sc->sc_dev));
5568 wm_dump_mbuf_chain(sc, m0);
5569 m_freem(m0);
5570 continue;
5571 }
5572 /* Short on resources, just stop for now. */
5573 DPRINTF(WM_DEBUG_TX,
5574 ("%s: TX: dmamap load failed: %d\n",
5575 device_xname(sc->sc_dev), error));
5576 break;
5577 }
5578
5579 segs_needed = dmamap->dm_nsegs;
5580 if (use_tso) {
5581 /* For sentinel descriptor; see below. */
5582 segs_needed++;
5583 }
5584
5585 /*
5586 * Ensure we have enough descriptors free to describe
5587 * the packet. Note, we always reserve one descriptor
5588 * at the end of the ring due to the semantics of the
5589 * TDT register, plus one more in the event we need
5590 * to load offload context.
5591 */
5592 if (segs_needed > sc->sc_txfree - 2) {
5593 /*
5594 * Not enough free descriptors to transmit this
5595 * packet. We haven't committed anything yet,
5596 			 * so just unload the DMA map and punt (the
5597 			 * packet is freed below).  Notify the upper
5598 			 * layer that there are no more slots left.
5599 */
5600 DPRINTF(WM_DEBUG_TX,
5601 ("%s: TX: need %d (%d) descriptors, have %d\n",
5602 device_xname(sc->sc_dev), dmamap->dm_nsegs,
5603 segs_needed, sc->sc_txfree - 1));
5604 ifp->if_flags |= IFF_OACTIVE;
5605 bus_dmamap_unload(sc->sc_dmat, dmamap);
5606 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5607 break;
5608 }
5609
5610 /*
5611 * Check for 82547 Tx FIFO bug. We need to do this
5612 * once we know we can transmit the packet, since we
5613 * do some internal FIFO space accounting here.
5614 */
5615 if (sc->sc_type == WM_T_82547 &&
5616 wm_82547_txfifo_bugchk(sc, m0)) {
5617 DPRINTF(WM_DEBUG_TX,
5618 ("%s: TX: 82547 Tx FIFO bug detected\n",
5619 device_xname(sc->sc_dev)));
5620 ifp->if_flags |= IFF_OACTIVE;
5621 bus_dmamap_unload(sc->sc_dmat, dmamap);
5622 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
5623 break;
5624 }
5625
5626 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5627
5628 DPRINTF(WM_DEBUG_TX,
5629 ("%s: TX: packet has %d (%d) DMA segments\n",
5630 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5631
5632 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5633
5634 /*
5635 * Store a pointer to the packet so that we can free it
5636 * later.
5637 *
5638 * Initially, we consider the number of descriptors the
5639 * packet uses the number of DMA segments. This may be
5640 * incremented by 1 if we do checksum offload (a descriptor
5641 * is used to set the checksum context).
5642 */
5643 txs->txs_mbuf = m0;
5644 txs->txs_firstdesc = sc->sc_txnext;
5645 txs->txs_ndesc = segs_needed;
5646
5647 /* Set up offload parameters for this packet. */
5648 if (m0->m_pkthdr.csum_flags &
5649 (M_CSUM_TSOv4|M_CSUM_TSOv6|
5650 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5651 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5652 if (wm_tx_offload(sc, txs, &cksumcmd,
5653 &cksumfields) != 0) {
5654 /* Error message already displayed. */
5655 bus_dmamap_unload(sc->sc_dmat, dmamap);
5656 continue;
5657 }
5658 } else {
5659 cksumcmd = 0;
5660 cksumfields = 0;
5661 }
5662
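		/*
		 * Request FCS insertion on every descriptor; IDE enables
		 * the transmit interrupt delay.
		 */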
5663 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
5664
5665 /* Sync the DMA map. */
5666 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5667 BUS_DMASYNC_PREWRITE);
5668
5669 /* Initialize the transmit descriptor. */
5670 for (nexttx = sc->sc_txnext, seg = 0;
5671 seg < dmamap->dm_nsegs; seg++) {
5672 for (seglen = dmamap->dm_segs[seg].ds_len,
5673 curaddr = dmamap->dm_segs[seg].ds_addr;
5674 seglen != 0;
5675 curaddr += curlen, seglen -= curlen,
5676 nexttx = WM_NEXTTX(sc, nexttx)) {
5677 curlen = seglen;
5678
5679 /*
5680 * So says the Linux driver:
5681 * Work around for premature descriptor
5682 * write-backs in TSO mode. Append a
5683 * 4-byte sentinel descriptor.
5684 */
5685 if (use_tso &&
5686 seg == dmamap->dm_nsegs - 1 &&
5687 curlen > 8)
5688 curlen -= 4;
5689
5690 wm_set_dma_addr(
5691 &sc->sc_txdescs[nexttx].wtx_addr,
5692 curaddr);
5693 sc->sc_txdescs[nexttx].wtx_cmdlen =
5694 htole32(cksumcmd | curlen);
5695 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
5696 0;
5697 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
5698 cksumfields;
5699 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5700 lasttx = nexttx;
5701
5702 DPRINTF(WM_DEBUG_TX,
5703 ("%s: TX: desc %d: low %#" PRIx64 ", "
5704 "len %#04zx\n",
5705 device_xname(sc->sc_dev), nexttx,
5706 (uint64_t)curaddr, curlen));
5707 }
5708 }
5709
5710 KASSERT(lasttx != -1);
5711
5712 /*
5713 * Set up the command byte on the last descriptor of
5714 * the packet. If we're in the interrupt delay window,
5715 * delay the interrupt.
5716 */
5717 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5718 htole32(WTX_CMD_EOP | WTX_CMD_RS);
5719
5720 /*
5721 * If VLANs are enabled and the packet has a VLAN tag, set
5722 * up the descriptor to encapsulate the packet for us.
5723 *
5724 * This is only valid on the last descriptor of the packet.
5725 */
5726 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5727 sc->sc_txdescs[lasttx].wtx_cmdlen |=
5728 htole32(WTX_CMD_VLE);
5729 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
5730 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5731 }
5732
5733 txs->txs_lastdesc = lasttx;
5734
5735 DPRINTF(WM_DEBUG_TX,
5736 ("%s: TX: desc %d: cmdlen 0x%08x\n",
5737 device_xname(sc->sc_dev),
5738 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5739
5740 /* Sync the descriptors we're using. */
5741 wm_cdtxsync(sc, sc->sc_txnext, txs->txs_ndesc,
5742 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5743
5744 /* Give the packet to the chip. */
5745 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5746
5747 DPRINTF(WM_DEBUG_TX,
5748 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5749
5750 DPRINTF(WM_DEBUG_TX,
5751 ("%s: TX: finished transmitting packet, job %d\n",
5752 device_xname(sc->sc_dev), sc->sc_txsnext));
5753
5754 /* Advance the tx pointer. */
5755 sc->sc_txfree -= txs->txs_ndesc;
5756 sc->sc_txnext = nexttx;
5757
5758 sc->sc_txsfree--;
5759 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5760
5761 /* Pass the packet to any BPF listeners. */
5762 bpf_mtap(ifp, m0);
5763 }
5764
5765 if (m0 != NULL) {
5766 ifp->if_flags |= IFF_OACTIVE;
5767 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5768 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5769 m_freem(m0);
5770 }
5771
5772 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5773 /* No more slots; notify upper layer. */
5774 ifp->if_flags |= IFF_OACTIVE;
5775 }
5776
5777 if (sc->sc_txfree != ofree) {
5778 /* Set a watchdog timer in case the chip flakes out. */
5779 ifp->if_timer = 5;
5780 }
5781 }
5782
5783 /*
5784 * wm_nq_tx_offload:
5785 *
5786 * Set up TCP/IP checksumming parameters for the
5787 * specified packet, for NEWQUEUE devices
5788 */
5789 static int
5790 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
5791 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
5792 {
5793 struct mbuf *m0 = txs->txs_mbuf;
5794 struct m_tag *mtag;
5795 uint32_t vl_len, mssidx, cmdc;
5796 struct ether_header *eh;
5797 int offset, iphl;
5798
5799 /*
5800 * XXX It would be nice if the mbuf pkthdr had offset
5801 * fields for the protocol headers.
5802 */
5803 *cmdlenp = 0;
5804 *fieldsp = 0;
5805
5806 eh = mtod(m0, struct ether_header *);
5807 switch (htons(eh->ether_type)) {
5808 case ETHERTYPE_IP:
5809 case ETHERTYPE_IPV6:
5810 offset = ETHER_HDR_LEN;
5811 break;
5812
5813 case ETHERTYPE_VLAN:
5814 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5815 break;
5816
5817 default:
5818 /* Don't support this protocol or encapsulation. */
5819 *do_csum = false;
5820 return 0;
5821 }
5822 *do_csum = true;
5823 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
5824 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
5825
5826 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
5827 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
5828
5829 if ((m0->m_pkthdr.csum_flags &
5830 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
5831 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5832 } else {
5833 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5834 }
5835 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
5836 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
5837
5838 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5839 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
5840 << NQTXC_VLLEN_VLAN_SHIFT);
5841 *cmdlenp |= NQTX_CMD_VLE;
5842 }
5843
5844 mssidx = 0;
5845
5846 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5847 int hlen = offset + iphl;
5848 int tcp_hlen;
5849 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5850
5851 if (__predict_false(m0->m_len <
5852 (hlen + sizeof(struct tcphdr)))) {
5853 /*
5854 * TCP/IP headers are not in the first mbuf; we need
5855 * to do this the slow and painful way. Let's just
5856 * hope this doesn't happen very often.
5857 */
5858 struct tcphdr th;
5859
5860 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5861
5862 m_copydata(m0, hlen, sizeof(th), &th);
5863 if (v4) {
5864 struct ip ip;
5865
5866 m_copydata(m0, offset, sizeof(ip), &ip);
5867 ip.ip_len = 0;
5868 m_copyback(m0,
5869 offset + offsetof(struct ip, ip_len),
5870 sizeof(ip.ip_len), &ip.ip_len);
5871 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5872 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5873 } else {
5874 struct ip6_hdr ip6;
5875
5876 m_copydata(m0, offset, sizeof(ip6), &ip6);
5877 ip6.ip6_plen = 0;
5878 m_copyback(m0,
5879 offset + offsetof(struct ip6_hdr, ip6_plen),
5880 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5881 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5882 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5883 }
5884 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5885 sizeof(th.th_sum), &th.th_sum);
5886
5887 tcp_hlen = th.th_off << 2;
5888 } else {
5889 /*
5890 * TCP/IP headers are in the first mbuf; we can do
5891 * this the easy way.
5892 */
5893 struct tcphdr *th;
5894
5895 if (v4) {
5896 struct ip *ip =
5897 (void *)(mtod(m0, char *) + offset);
5898 th = (void *)(mtod(m0, char *) + hlen);
5899
5900 ip->ip_len = 0;
5901 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5902 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5903 } else {
5904 struct ip6_hdr *ip6 =
5905 (void *)(mtod(m0, char *) + offset);
5906 th = (void *)(mtod(m0, char *) + hlen);
5907
5908 ip6->ip6_plen = 0;
5909 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5910 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5911 }
5912 tcp_hlen = th->th_off << 2;
5913 }
5914 hlen += tcp_hlen;
5915 *cmdlenp |= NQTX_CMD_TSE;
5916
5917 if (v4) {
5918 WM_EVCNT_INCR(&sc->sc_ev_txtso);
5919 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5920 } else {
5921 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5922 *fieldsp |= NQTXD_FIELDS_TUXSM;
5923 }
5924 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5925 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5926 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5927 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5928 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5929 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
5930 } else {
5931 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5932 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5933 }
5934
5935 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5936 *fieldsp |= NQTXD_FIELDS_IXSM;
5937 cmdc |= NQTXC_CMD_IP4;
5938 }
5939
5940 if (m0->m_pkthdr.csum_flags &
5941 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5942 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5943 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5944 cmdc |= NQTXC_CMD_TCP;
5945 } else {
5946 cmdc |= NQTXC_CMD_UDP;
5947 }
5948 cmdc |= NQTXC_CMD_IP4;
5949 *fieldsp |= NQTXD_FIELDS_TUXSM;
5950 }
5951 if (m0->m_pkthdr.csum_flags &
5952 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5953 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5954 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5955 cmdc |= NQTXC_CMD_TCP;
5956 } else {
5957 cmdc |= NQTXC_CMD_UDP;
5958 }
5959 cmdc |= NQTXC_CMD_IP6;
5960 *fieldsp |= NQTXD_FIELDS_TUXSM;
5961 }
5962
5963 /* Fill in the context descriptor. */
5964 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5965 htole32(vl_len);
5966 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5967 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5968 htole32(cmdc);
5969 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5970 htole32(mssidx);
5971 wm_cdtxsync(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5972 DPRINTF(WM_DEBUG_TX,
5973 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5974 sc->sc_txnext, 0, vl_len));
5975 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5976 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5977 txs->txs_ndesc++;
5978 return 0;
5979 }
5980
5981 /*
5982 * wm_nq_start: [ifnet interface function]
5983 *
5984 * Start packet transmission on the interface for NEWQUEUE devices
5985 */
5986 static void
5987 wm_nq_start(struct ifnet *ifp)
5988 {
5989 struct wm_softc *sc = ifp->if_softc;
5990
5991 WM_TX_LOCK(sc);
5992 if (!sc->sc_stopping)
5993 wm_nq_start_locked(ifp);
5994 WM_TX_UNLOCK(sc);
5995 }
5996
5997 static void
5998 wm_nq_start_locked(struct ifnet *ifp)
5999 {
6000 struct wm_softc *sc = ifp->if_softc;
6001 struct mbuf *m0;
6002 struct m_tag *mtag;
6003 struct wm_txsoft *txs;
6004 bus_dmamap_t dmamap;
6005 int error, nexttx, lasttx = -1, seg, segs_needed;
6006 bool do_csum, sent;
6007
6008 KASSERT(WM_TX_LOCKED(sc));
6009
6010 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
6011 return;
6012
6013 sent = false;
6014
6015 /*
6016 * Loop through the send queue, setting up transmit descriptors
6017 * until we drain the queue, or use up all available transmit
6018 * descriptors.
6019 */
6020 for (;;) {
6021 m0 = NULL;
6022
6023 /* Get a work queue entry. */
6024 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
6025 wm_txeof(sc);
6026 if (sc->sc_txsfree == 0) {
6027 DPRINTF(WM_DEBUG_TX,
6028 ("%s: TX: no free job descriptors\n",
6029 device_xname(sc->sc_dev)));
6030 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6031 break;
6032 }
6033 }
6034
6035 /* Grab a packet off the queue. */
6036 IFQ_DEQUEUE(&ifp->if_snd, m0);
6037 if (m0 == NULL)
6038 break;
6039
6040 DPRINTF(WM_DEBUG_TX,
6041 ("%s: TX: have packet to transmit: %p\n",
6042 device_xname(sc->sc_dev), m0));
6043
6044 txs = &sc->sc_txsoft[sc->sc_txsnext];
6045 dmamap = txs->txs_dmamap;
6046
6047 /*
6048 * Load the DMA map. If this fails, the packet either
6049 * didn't fit in the allotted number of segments, or we
6050 * were short on resources. For the too-many-segments
6051 * case, we simply report an error and drop the packet,
6052 * since we can't sanely copy a jumbo packet to a single
6053 * buffer.
6054 */
6055 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6056 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
6057 if (error) {
6058 if (error == EFBIG) {
6059 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6060 log(LOG_ERR, "%s: Tx packet consumes too many "
6061 "DMA segments, dropping...\n",
6062 device_xname(sc->sc_dev));
6063 wm_dump_mbuf_chain(sc, m0);
6064 m_freem(m0);
6065 continue;
6066 }
6067 /* Short on resources, just stop for now. */
6068 DPRINTF(WM_DEBUG_TX,
6069 ("%s: TX: dmamap load failed: %d\n",
6070 device_xname(sc->sc_dev), error));
6071 break;
6072 }
6073
6074 segs_needed = dmamap->dm_nsegs;
6075
6076 /*
6077 * Ensure we have enough descriptors free to describe
6078 * the packet. Note, we always reserve one descriptor
6079 * at the end of the ring due to the semantics of the
6080 * TDT register, plus one more in the event we need
6081 * to load offload context.
6082 */
6083 if (segs_needed > sc->sc_txfree - 2) {
6084 /*
6085 * Not enough free descriptors to transmit this
6086 * packet. We haven't committed anything yet,
6087 			 * so just unload the DMA map and punt (the
6088 			 * packet is freed below).  Notify the upper
6089 			 * layer that there are no more slots left.
6090 */
6091 DPRINTF(WM_DEBUG_TX,
6092 ("%s: TX: need %d (%d) descriptors, have %d\n",
6093 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6094 segs_needed, sc->sc_txfree - 1));
6095 ifp->if_flags |= IFF_OACTIVE;
6096 bus_dmamap_unload(sc->sc_dmat, dmamap);
6097 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6098 break;
6099 }
6100
6101 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6102
6103 DPRINTF(WM_DEBUG_TX,
6104 ("%s: TX: packet has %d (%d) DMA segments\n",
6105 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6106
6107 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6108
6109 /*
6110 * Store a pointer to the packet so that we can free it
6111 * later.
6112 *
6113 * Initially, we consider the number of descriptors the
6114 * packet uses the number of DMA segments. This may be
6115 * incremented by 1 if we do checksum offload (a descriptor
6116 * is used to set the checksum context).
6117 */
6118 txs->txs_mbuf = m0;
6119 txs->txs_firstdesc = sc->sc_txnext;
6120 txs->txs_ndesc = segs_needed;
6121
6122 /* Set up offload parameters for this packet. */
6123 uint32_t cmdlen, fields, dcmdlen;
6124 if (m0->m_pkthdr.csum_flags &
6125 (M_CSUM_TSOv4|M_CSUM_TSOv6|
6126 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
6127 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
6128 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
6129 &do_csum) != 0) {
6130 /* Error message already displayed. */
6131 bus_dmamap_unload(sc->sc_dmat, dmamap);
6132 continue;
6133 }
6134 } else {
6135 do_csum = false;
6136 cmdlen = 0;
6137 fields = 0;
6138 }
6139
6140 /* Sync the DMA map. */
6141 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6142 BUS_DMASYNC_PREWRITE);
6143
6144 /* Initialize the first transmit descriptor. */
6145 nexttx = sc->sc_txnext;
6146 if (!do_csum) {
6147 /* setup a legacy descriptor */
6148 wm_set_dma_addr(
6149 &sc->sc_txdescs[nexttx].wtx_addr,
6150 dmamap->dm_segs[0].ds_addr);
6151 sc->sc_txdescs[nexttx].wtx_cmdlen =
6152 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
6153 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
6154 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
6155 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
6156 NULL) {
6157 sc->sc_txdescs[nexttx].wtx_cmdlen |=
6158 htole32(WTX_CMD_VLE);
6159 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
6160 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6161 } else {
6162 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
6163 }
6164 dcmdlen = 0;
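			/*
			 * Subsequent descriptors for this packet carry
			 * no extended-format command bits.
			 */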
6165 } else {
6166 /* setup an advanced data descriptor */
6167 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
6168 htole64(dmamap->dm_segs[0].ds_addr);
6169 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
6170 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
6171 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
6172 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
6173 htole32(fields);
6174 DPRINTF(WM_DEBUG_TX,
6175 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
6176 device_xname(sc->sc_dev), nexttx,
6177 (uint64_t)dmamap->dm_segs[0].ds_addr));
6178 DPRINTF(WM_DEBUG_TX,
6179 ("\t 0x%08x%08x\n", fields,
6180 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
6181 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
6182 }
6183
6184 lasttx = nexttx;
6185 nexttx = WM_NEXTTX(sc, nexttx);
6186 		/*
6187 		 * Fill in the next descriptors.  The legacy and advanced
6188 		 * formats are identical from here on.
6189 		 */
6190 for (seg = 1; seg < dmamap->dm_nsegs;
6191 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
6192 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
6193 htole64(dmamap->dm_segs[seg].ds_addr);
6194 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
6195 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
6196 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
6197 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
6198 lasttx = nexttx;
6199
6200 DPRINTF(WM_DEBUG_TX,
6201 ("%s: TX: desc %d: %#" PRIx64 ", "
6202 "len %#04zx\n",
6203 device_xname(sc->sc_dev), nexttx,
6204 (uint64_t)dmamap->dm_segs[seg].ds_addr,
6205 dmamap->dm_segs[seg].ds_len));
6206 }
6207
6208 KASSERT(lasttx != -1);
6209
6210 /*
6211 * Set up the command byte on the last descriptor of
6212 * the packet. If we're in the interrupt delay window,
6213 * delay the interrupt.
6214 */
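		/*
		 * The legacy and advanced descriptor formats use the same
		 * bit positions for EOP and RS (asserted below), so the
		 * legacy OR works for both.
		 */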
6215 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
6216 (NQTX_CMD_EOP | NQTX_CMD_RS));
6217 sc->sc_txdescs[lasttx].wtx_cmdlen |=
6218 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6219
6220 txs->txs_lastdesc = lasttx;
6221
6222 DPRINTF(WM_DEBUG_TX,
6223 ("%s: TX: desc %d: cmdlen 0x%08x\n",
6224 device_xname(sc->sc_dev),
6225 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
6226
6227 /* Sync the descriptors we're using. */
6228 wm_cdtxsync(sc, sc->sc_txnext, txs->txs_ndesc,
6229 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
6230
6231 /* Give the packet to the chip. */
6232 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
6233 sent = true;
6234
6235 DPRINTF(WM_DEBUG_TX,
6236 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6237
6238 DPRINTF(WM_DEBUG_TX,
6239 ("%s: TX: finished transmitting packet, job %d\n",
6240 device_xname(sc->sc_dev), sc->sc_txsnext));
6241
6242 /* Advance the tx pointer. */
6243 sc->sc_txfree -= txs->txs_ndesc;
6244 sc->sc_txnext = nexttx;
6245
6246 sc->sc_txsfree--;
6247 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
6248
6249 /* Pass the packet to any BPF listeners. */
6250 bpf_mtap(ifp, m0);
6251 }
6252
6253 if (m0 != NULL) {
6254 ifp->if_flags |= IFF_OACTIVE;
6255 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6256 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
6257 m_freem(m0);
6258 }
6259
6260 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
6261 /* No more slots; notify upper layer. */
6262 ifp->if_flags |= IFF_OACTIVE;
6263 }
6264
6265 if (sent) {
6266 /* Set a watchdog timer in case the chip flakes out. */
6267 ifp->if_timer = 5;
6268 }
6269 }
6270
6271 /* Interrupt */
6272
6273 /*
6274 * wm_txeof:
6275 *
6276 * Helper; handle transmit interrupts.
6277 */
6278 static int
6279 wm_txeof(struct wm_softc *sc)
6280 {
6281 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6282 struct wm_txsoft *txs;
6283 bool processed = false;
6284 int count = 0;
6285 int i;
6286 uint8_t status;
6287
6288 if (sc->sc_stopping)
6289 return 0;
6290
6291 ifp->if_flags &= ~IFF_OACTIVE;
6292
6293 /*
6294 * Go through the Tx list and free mbufs for those
6295 * frames which have been transmitted.
6296 */
6297 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
6298 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
6299 txs = &sc->sc_txsoft[i];
6300
6301 DPRINTF(WM_DEBUG_TX,
6302 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
6303
6304 wm_cdtxsync(sc, txs->txs_firstdesc, txs->txs_ndesc,
6305 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6306
6307 status =
6308 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
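		/*
		 * The chip sets the DD bit when it has written the
		 * descriptor back; jobs complete in order, so stop at
		 * the first one still in flight.
		 */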
6309 if ((status & WTX_ST_DD) == 0) {
6310 wm_cdtxsync(sc, txs->txs_lastdesc, 1,
6311 BUS_DMASYNC_PREREAD);
6312 break;
6313 }
6314
6315 processed = true;
6316 count++;
6317 DPRINTF(WM_DEBUG_TX,
6318 ("%s: TX: job %d done: descs %d..%d\n",
6319 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
6320 txs->txs_lastdesc));
6321
6322 /*
6323 * XXX We should probably be using the statistics
6324 * XXX registers, but I don't know if they exist
6325 * XXX on chips before the i82544.
6326 */
6327
6328 #ifdef WM_EVENT_COUNTERS
6329 if (status & WTX_ST_TU)
6330 WM_EVCNT_INCR(&sc->sc_ev_tu);
6331 #endif /* WM_EVENT_COUNTERS */
6332
6333 if (status & (WTX_ST_EC|WTX_ST_LC)) {
6334 ifp->if_oerrors++;
6335 if (status & WTX_ST_LC)
6336 log(LOG_WARNING, "%s: late collision\n",
6337 device_xname(sc->sc_dev));
6338 else if (status & WTX_ST_EC) {
6339 ifp->if_collisions += 16;
6340 log(LOG_WARNING, "%s: excessive collisions\n",
6341 device_xname(sc->sc_dev));
6342 }
6343 } else
6344 ifp->if_opackets++;
6345
6346 sc->sc_txfree += txs->txs_ndesc;
6347 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
6348 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6349 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
6350 m_freem(txs->txs_mbuf);
6351 txs->txs_mbuf = NULL;
6352 }
6353
6354 /* Update the dirty transmit buffer pointer. */
6355 sc->sc_txsdirty = i;
6356 DPRINTF(WM_DEBUG_TX,
6357 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
6358
6359 if (count != 0)
6360 rnd_add_uint32(&sc->rnd_source, count);
6361
6362 /*
6363 * If there are no more pending transmissions, cancel the watchdog
6364 * timer.
6365 */
6366 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
6367 ifp->if_timer = 0;
6368
6369 return processed;
6370 }
6371
6372 /*
6373 * wm_rxeof:
6374 *
6375 * Helper; handle receive interrupts.
6376 */
6377 static void
6378 wm_rxeof(struct wm_softc *sc)
6379 {
6380 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6381 struct wm_rxsoft *rxs;
6382 struct mbuf *m;
6383 int i, len;
6384 int count = 0;
6385 uint8_t status, errors;
6386 uint16_t vlantag;
6387
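	/*
	 * Walk the ring from where we left off until we reach a
	 * descriptor the chip hasn't completed yet.
	 */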
6388 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
6389 rxs = &sc->sc_rxsoft[i];
6390
6391 DPRINTF(WM_DEBUG_RX,
6392 ("%s: RX: checking descriptor %d\n",
6393 device_xname(sc->sc_dev), i));
6394
6395 wm_cdrxsync(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6396
6397 status = sc->sc_rxdescs[i].wrx_status;
6398 errors = sc->sc_rxdescs[i].wrx_errors;
6399 len = le16toh(sc->sc_rxdescs[i].wrx_len);
6400 vlantag = sc->sc_rxdescs[i].wrx_special;
6401
6402 if ((status & WRX_ST_DD) == 0) {
6403 /* We have processed all of the receive descriptors. */
6404 wm_cdrxsync(sc, i, BUS_DMASYNC_PREREAD);
6405 break;
6406 }
6407
6408 count++;
6409 if (__predict_false(sc->sc_rxdiscard)) {
6410 DPRINTF(WM_DEBUG_RX,
6411 ("%s: RX: discarding contents of descriptor %d\n",
6412 device_xname(sc->sc_dev), i));
6413 wm_init_rxdesc(sc, i);
6414 if (status & WRX_ST_EOP) {
6415 /* Reset our state. */
6416 DPRINTF(WM_DEBUG_RX,
6417 ("%s: RX: resetting rxdiscard -> 0\n",
6418 device_xname(sc->sc_dev)));
6419 sc->sc_rxdiscard = 0;
6420 }
6421 continue;
6422 }
6423
6424 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6425 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
6426
6427 m = rxs->rxs_mbuf;
6428
6429 /*
6430 * Add a new receive buffer to the ring, unless of
6431 * course the length is zero. Treat the latter as a
6432 * failed mapping.
6433 */
6434 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
6435 /*
6436 * Failed, throw away what we've done so
6437 * far, and discard the rest of the packet.
6438 */
6439 ifp->if_ierrors++;
6440 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6441 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
6442 wm_init_rxdesc(sc, i);
6443 if ((status & WRX_ST_EOP) == 0)
6444 sc->sc_rxdiscard = 1;
6445 if (sc->sc_rxhead != NULL)
6446 m_freem(sc->sc_rxhead);
6447 WM_RXCHAIN_RESET(sc);
6448 DPRINTF(WM_DEBUG_RX,
6449 ("%s: RX: Rx buffer allocation failed, "
6450 "dropping packet%s\n", device_xname(sc->sc_dev),
6451 sc->sc_rxdiscard ? " (discard)" : ""));
6452 continue;
6453 }
6454
6455 m->m_len = len;
6456 sc->sc_rxlen += len;
6457 DPRINTF(WM_DEBUG_RX,
6458 ("%s: RX: buffer at %p len %d\n",
6459 device_xname(sc->sc_dev), m->m_data, len));
6460
6461 /* If this is not the end of the packet, keep looking. */
6462 if ((status & WRX_ST_EOP) == 0) {
6463 WM_RXCHAIN_LINK(sc, m);
6464 DPRINTF(WM_DEBUG_RX,
6465 ("%s: RX: not yet EOP, rxlen -> %d\n",
6466 device_xname(sc->sc_dev), sc->sc_rxlen));
6467 continue;
6468 }
6469
6470 		/*
6471 		 * Okay, we have the entire packet now.  The chip is
6472 		 * configured to include the FCS except on the I350, I354
6473 		 * and I21[01] (not all chips can be configured to strip
6474 		 * it), so we need to trim it.  We may also need to adjust
6475 		 * the length of the previous mbuf in the chain if the
6476 		 * current mbuf is too short.  Due to an erratum, the
6477 		 * RCTL_SECRC bit is always set on the I350, so the FCS
6478 		 * is already stripped there and we don't trim it.
6479 		 */
6480 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
6481 && (sc->sc_type != WM_T_I210)
6482 && (sc->sc_type != WM_T_I211)) {
6483 if (m->m_len < ETHER_CRC_LEN) {
6484 sc->sc_rxtail->m_len
6485 -= (ETHER_CRC_LEN - m->m_len);
6486 m->m_len = 0;
6487 } else
6488 m->m_len -= ETHER_CRC_LEN;
6489 len = sc->sc_rxlen - ETHER_CRC_LEN;
6490 } else
6491 len = sc->sc_rxlen;
6492
6493 WM_RXCHAIN_LINK(sc, m);
6494
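		/*
		 * Terminate and detach the completed mbuf chain, and
		 * reset the chain pointers for the next packet.
		 */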
6495 *sc->sc_rxtailp = NULL;
6496 m = sc->sc_rxhead;
6497
6498 WM_RXCHAIN_RESET(sc);
6499
6500 DPRINTF(WM_DEBUG_RX,
6501 ("%s: RX: have entire packet, len -> %d\n",
6502 device_xname(sc->sc_dev), len));
6503
6504 /* If an error occurred, update stats and drop the packet. */
6505 if (errors &
6506 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
6507 if (errors & WRX_ER_SE)
6508 log(LOG_WARNING, "%s: symbol error\n",
6509 device_xname(sc->sc_dev));
6510 else if (errors & WRX_ER_SEQ)
6511 log(LOG_WARNING, "%s: receive sequence error\n",
6512 device_xname(sc->sc_dev));
6513 else if (errors & WRX_ER_CE)
6514 log(LOG_WARNING, "%s: CRC error\n",
6515 device_xname(sc->sc_dev));
6516 m_freem(m);
6517 continue;
6518 }
6519
6520 /* No errors. Receive the packet. */
6521 m->m_pkthdr.rcvif = ifp;
6522 m->m_pkthdr.len = len;
6523
6524 /*
6525 * If VLANs are enabled, VLAN packets have been unwrapped
6526 * for us. Associate the tag with the packet.
6527 */
6528 		/* XXX should check for i350 and i354 */
6529 if ((status & WRX_ST_VP) != 0) {
6530 VLAN_INPUT_TAG(ifp, m,
6531 le16toh(vlantag),
6532 continue);
6533 }
6534
6535 /* Set up checksum info for this packet. */
6536 if ((status & WRX_ST_IXSM) == 0) {
6537 if (status & WRX_ST_IPCS) {
6538 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
6539 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
6540 if (errors & WRX_ER_IPE)
6541 m->m_pkthdr.csum_flags |=
6542 M_CSUM_IPv4_BAD;
6543 }
6544 if (status & WRX_ST_TCPCS) {
6545 /*
6546 * Note: we don't know if this was TCP or UDP,
6547 * so we just set both bits, and expect the
6548 * upper layers to deal.
6549 */
6550 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
6551 m->m_pkthdr.csum_flags |=
6552 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6553 M_CSUM_TCPv6 | M_CSUM_UDPv6;
6554 if (errors & WRX_ER_TCPE)
6555 m->m_pkthdr.csum_flags |=
6556 M_CSUM_TCP_UDP_BAD;
6557 }
6558 }
6559
6560 ifp->if_ipackets++;
6561
6562 WM_RX_UNLOCK(sc);
6563
6564 /* Pass this up to any BPF listeners. */
6565 bpf_mtap(ifp, m);
6566
6567 /* Pass it on. */
6568 (*ifp->if_input)(ifp, m);
6569
6570 WM_RX_LOCK(sc);
6571
6572 if (sc->sc_stopping)
6573 break;
6574 }
6575
6576 /* Update the receive pointer. */
6577 sc->sc_rxptr = i;
6578 if (count != 0)
6579 rnd_add_uint32(&sc->rnd_source, count);
6580
6581 DPRINTF(WM_DEBUG_RX,
6582 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
6583 }
6584
6585 /*
6586 * wm_linkintr_gmii:
6587 *
6588 * Helper; handle link interrupts for GMII.
6589 */
6590 static void
6591 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
6592 {
6593
6594 KASSERT(WM_TX_LOCKED(sc));
6595
6596 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6597 __func__));
6598
6599 if (icr & ICR_LSC) {
6600 DPRINTF(WM_DEBUG_LINK,
6601 ("%s: LINK: LSC -> mii_pollstat\n",
6602 device_xname(sc->sc_dev)));
6603 mii_pollstat(&sc->sc_mii);
6604 if (sc->sc_type == WM_T_82543) {
6605 int miistatus, active;
6606
6607 /*
6608 * With 82543, we need to force speed and
6609 * duplex on the MAC equal to what the PHY
6610 * speed and duplex configuration is.
6611 */
6612 miistatus = sc->sc_mii.mii_media_status;
6613
6614 if (miistatus & IFM_ACTIVE) {
6615 active = sc->sc_mii.mii_media_active;
6616 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6617 switch (IFM_SUBTYPE(active)) {
6618 case IFM_10_T:
6619 sc->sc_ctrl |= CTRL_SPEED_10;
6620 break;
6621 case IFM_100_TX:
6622 sc->sc_ctrl |= CTRL_SPEED_100;
6623 break;
6624 case IFM_1000_T:
6625 sc->sc_ctrl |= CTRL_SPEED_1000;
6626 break;
6627 default:
6628 /*
6629 					 * Fiber?
6630 					 * Should not enter here.
6631 */
6632 printf("unknown media (%x)\n",
6633 active);
6634 break;
6635 }
6636 if (active & IFM_FDX)
6637 sc->sc_ctrl |= CTRL_FD;
6638 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6639 }
6640 } else if ((sc->sc_type == WM_T_ICH8)
6641 && (sc->sc_phytype == WMPHY_IGP_3)) {
6642 wm_kmrn_lock_loss_workaround_ich8lan(sc);
6643 } else if (sc->sc_type == WM_T_PCH) {
6644 wm_k1_gig_workaround_hv(sc,
6645 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
6646 }
6647
6648 if ((sc->sc_phytype == WMPHY_82578)
6649 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
6650 == IFM_1000_T)) {
6651
6652 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
6653 delay(200*1000); /* XXX too big */
6654
6655 /* Link stall fix for link up */
6656 wm_gmii_hv_writereg(sc->sc_dev, 1,
6657 HV_MUX_DATA_CTRL,
6658 HV_MUX_DATA_CTRL_GEN_TO_MAC
6659 | HV_MUX_DATA_CTRL_FORCE_SPEED);
6660 wm_gmii_hv_writereg(sc->sc_dev, 1,
6661 HV_MUX_DATA_CTRL,
6662 HV_MUX_DATA_CTRL_GEN_TO_MAC);
6663 }
6664 }
6665 } else if (icr & ICR_RXSEQ) {
6666 DPRINTF(WM_DEBUG_LINK,
6667 		    ("%s: LINK: Receive sequence error\n",
6668 device_xname(sc->sc_dev)));
6669 }
6670 }
6671
6672 /*
6673 * wm_linkintr_tbi:
6674 *
6675 * Helper; handle link interrupts for TBI mode.
6676 */
6677 static void
6678 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
6679 {
6680 uint32_t status;
6681
6682 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6683 __func__));
6684
6685 status = CSR_READ(sc, WMREG_STATUS);
6686 if (icr & ICR_LSC) {
6687 if (status & STATUS_LU) {
6688 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
6689 device_xname(sc->sc_dev),
6690 (status & STATUS_FD) ? "FDX" : "HDX"));
6691 /*
6692 * NOTE: CTRL will update TFCE and RFCE automatically,
6693 * so we should update sc->sc_ctrl
6694 */
6695
6696 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6697 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6698 sc->sc_fcrtl &= ~FCRTL_XONE;
6699 if (status & STATUS_FD)
6700 sc->sc_tctl |=
6701 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6702 else
6703 sc->sc_tctl |=
6704 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6705 if (sc->sc_ctrl & CTRL_TFCE)
6706 sc->sc_fcrtl |= FCRTL_XONE;
6707 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6708 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6709 WMREG_OLD_FCRTL : WMREG_FCRTL,
6710 sc->sc_fcrtl);
6711 sc->sc_tbi_linkup = 1;
6712 } else {
6713 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
6714 device_xname(sc->sc_dev)));
6715 sc->sc_tbi_linkup = 0;
6716 }
6717 /* Update LED */
6718 wm_tbi_serdes_set_linkled(sc);
6719 } else if (icr & ICR_RXSEQ) {
6720 DPRINTF(WM_DEBUG_LINK,
6721 ("%s: LINK: Receive sequence error\n",
6722 device_xname(sc->sc_dev)));
6723 }
6724 }
6725
6726 /*
6727 * wm_linkintr_serdes:
6728 *
6729  *	Helper; handle link interrupts for SERDES mode.
6730 */
6731 static void
6732 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
6733 {
6734 struct mii_data *mii = &sc->sc_mii;
6735 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6736 uint32_t pcs_adv, pcs_lpab, reg;
6737
6738 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
6739 __func__));
6740
6741 if (icr & ICR_LSC) {
6742 /* Check PCS */
6743 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6744 if ((reg & PCS_LSTS_LINKOK) != 0) {
6745 mii->mii_media_status |= IFM_ACTIVE;
6746 sc->sc_tbi_linkup = 1;
6747 } else {
6748 mii->mii_media_status |= IFM_NONE;
6749 sc->sc_tbi_linkup = 0;
6750 wm_tbi_serdes_set_linkled(sc);
6751 return;
6752 }
6753 mii->mii_media_active |= IFM_1000_SX;
6754 if ((reg & PCS_LSTS_FDX) != 0)
6755 mii->mii_media_active |= IFM_FDX;
6756 else
6757 mii->mii_media_active |= IFM_HDX;
6758 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6759 /* Check flow */
6760 reg = CSR_READ(sc, WMREG_PCS_LSTS);
6761 if ((reg & PCS_LSTS_AN_COMP) == 0) {
6762 DPRINTF(WM_DEBUG_LINK,
6763 ("XXX LINKOK but not ACOMP\n"));
6764 return;
6765 }
6766 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
6767 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
6768 DPRINTF(WM_DEBUG_LINK,
6769 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
6770 if ((pcs_adv & TXCW_SYM_PAUSE)
6771 && (pcs_lpab & TXCW_SYM_PAUSE)) {
6772 mii->mii_media_active |= IFM_FLOW
6773 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
6774 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
6775 && (pcs_adv & TXCW_ASYM_PAUSE)
6776 && (pcs_lpab & TXCW_SYM_PAUSE)
6777 && (pcs_lpab & TXCW_ASYM_PAUSE))
6778 mii->mii_media_active |= IFM_FLOW
6779 | IFM_ETH_TXPAUSE;
6780 else if ((pcs_adv & TXCW_SYM_PAUSE)
6781 && (pcs_adv & TXCW_ASYM_PAUSE)
6782 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
6783 && (pcs_lpab & TXCW_ASYM_PAUSE))
6784 mii->mii_media_active |= IFM_FLOW
6785 | IFM_ETH_RXPAUSE;
6786 }
6787 /* Update LED */
6788 wm_tbi_serdes_set_linkled(sc);
6789 } else {
6790 DPRINTF(WM_DEBUG_LINK,
6791 ("%s: LINK: Receive sequence error\n",
6792 device_xname(sc->sc_dev)));
6793 }
6794 }
6795
6796 /*
6797 * wm_linkintr:
6798 *
6799 * Helper; handle link interrupts.
6800 */
6801 static void
6802 wm_linkintr(struct wm_softc *sc, uint32_t icr)
6803 {
6804
6805 if (sc->sc_flags & WM_F_HAS_MII)
6806 wm_linkintr_gmii(sc, icr);
6807 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
6808 && (sc->sc_type >= WM_T_82575))
6809 wm_linkintr_serdes(sc, icr);
6810 else
6811 wm_linkintr_tbi(sc, icr);
6812 }
6813
6814 /*
6815 * wm_intr_legacy:
6816 *
6817 * Interrupt service routine for INTx and MSI.
6818 */
6819 static int
6820 wm_intr_legacy(void *arg)
6821 {
6822 struct wm_softc *sc = arg;
6823 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6824 uint32_t icr, rndval = 0;
6825 int handled = 0;
6826
6827 DPRINTF(WM_DEBUG_TX,
6828 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
6829 while (1 /* CONSTCOND */) {
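		/*
		 * Reading ICR also acknowledges (clears) the asserted
		 * interrupt causes.
		 */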
6830 icr = CSR_READ(sc, WMREG_ICR);
6831 if ((icr & sc->sc_icr) == 0)
6832 break;
6833 if (rndval == 0)
6834 rndval = icr;
6835
6836 WM_RX_LOCK(sc);
6837
6838 if (sc->sc_stopping) {
6839 WM_RX_UNLOCK(sc);
6840 break;
6841 }
6842
6843 handled = 1;
6844
6845 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6846 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
6847 DPRINTF(WM_DEBUG_RX,
6848 ("%s: RX: got Rx intr 0x%08x\n",
6849 device_xname(sc->sc_dev),
6850 icr & (ICR_RXDMT0|ICR_RXT0)));
6851 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6852 }
6853 #endif
6854 wm_rxeof(sc);
6855
6856 WM_RX_UNLOCK(sc);
6857 WM_TX_LOCK(sc);
6858
6859 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6860 if (icr & ICR_TXDW) {
6861 DPRINTF(WM_DEBUG_TX,
6862 ("%s: TX: got TXDW interrupt\n",
6863 device_xname(sc->sc_dev)));
6864 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6865 }
6866 #endif
6867 wm_txeof(sc);
6868
6869 if (icr & (ICR_LSC|ICR_RXSEQ)) {
6870 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6871 wm_linkintr(sc, icr);
6872 }
6873
6874 WM_TX_UNLOCK(sc);
6875
6876 if (icr & ICR_RXO) {
6877 #if defined(WM_DEBUG)
6878 log(LOG_WARNING, "%s: Receive overrun\n",
6879 device_xname(sc->sc_dev));
6880 #endif /* defined(WM_DEBUG) */
6881 }
6882 }
6883
6884 rnd_add_uint32(&sc->rnd_source, rndval);
6885
6886 if (handled) {
6887 /* Try to get more packets going. */
6888 ifp->if_start(ifp);
6889 }
6890
6891 return handled;
6892 }
6893
6894 #ifdef WM_MSI_MSIX
6895 /*
6896 * wm_txintr_msix:
6897 *
6898 * Interrupt service routine for TX complete interrupt for MSI-X.
6899 */
6900 static int
6901 wm_txintr_msix(void *arg)
6902 {
6903 struct wm_softc *sc = arg;
6904 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6905 int handled = 0;
6906
6907 DPRINTF(WM_DEBUG_TX,
6908 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
6909
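	/* Mask our Tx queue interrupt while servicing it; re-enabled below. */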
6910 if (sc->sc_type == WM_T_82574)
6911 CSR_WRITE(sc, WMREG_IMC, ICR_TXQ0); /* 82574 only */
6912 else if (sc->sc_type == WM_T_82575)
6913 CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE0);
6914 else
6915 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_TXINTR_IDX);
6916
6917 WM_TX_LOCK(sc);
6918
6919 if (sc->sc_stopping)
6920 goto out;
6921
6922 WM_EVCNT_INCR(&sc->sc_ev_txdw);
6923 handled = wm_txeof(sc);
6924
6925 out:
6926 WM_TX_UNLOCK(sc);
6927
6928 if (sc->sc_type == WM_T_82574)
6929 CSR_WRITE(sc, WMREG_IMS, ICR_TXQ0); /* 82574 only */
6930 else if (sc->sc_type == WM_T_82575)
6931 CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE0);
6932 else
6933 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_TXINTR_IDX);
6934
6935 if (handled) {
6936 /* Try to get more packets going. */
6937 ifp->if_start(ifp);
6938 }
6939
6940 return handled;
6941 }
6942
6943 /*
6944 * wm_rxintr_msix:
6945 *
6946 * Interrupt service routine for RX interrupt for MSI-X.
6947 */
6948 static int
6949 wm_rxintr_msix(void *arg)
6950 {
6951 struct wm_softc *sc = arg;
6952
6953 	DPRINTF(WM_DEBUG_RX,
6954 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
6955
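	/* As in wm_txintr_msix(), mask our Rx queue interrupt while we run. */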
6956 if (sc->sc_type == WM_T_82574)
6957 CSR_WRITE(sc, WMREG_IMC, ICR_RXQ0); /* 82574 only */
6958 else if (sc->sc_type == WM_T_82575)
6959 CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE0);
6960 else
6961 CSR_WRITE(sc, WMREG_EIMC, 1 << WM_MSIX_RXINTR_IDX);
6962
6963 WM_RX_LOCK(sc);
6964
6965 if (sc->sc_stopping)
6966 goto out;
6967
6968 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6969 wm_rxeof(sc);
6970
6971 out:
6972 WM_RX_UNLOCK(sc);
6973
6974 if (sc->sc_type == WM_T_82574)
6975 CSR_WRITE(sc, WMREG_IMS, ICR_RXQ0);
6976 else if (sc->sc_type == WM_T_82575)
6977 CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE0);
6978 else
6979 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_RXINTR_IDX);
6980
6981 return 1;
6982 }
6983
6984 /*
6985 * wm_linkintr_msix:
6986 *
6987 * Interrupt service routine for link status change for MSI-X.
6988 */
6989 static int
6990 wm_linkintr_msix(void *arg)
6991 {
6992 struct wm_softc *sc = arg;
6993 uint32_t reg;
6994
6995 	DPRINTF(WM_DEBUG_LINK,
6996 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
6997
6998 reg = CSR_READ(sc, WMREG_ICR);
6999 WM_TX_LOCK(sc);
7000 if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
7001 goto out;
7002
7003 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7004 wm_linkintr(sc, ICR_LSC);
7005
7006 out:
7007 WM_TX_UNLOCK(sc);
7008
7009 if (sc->sc_type == WM_T_82574)
7010 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
7011 else if (sc->sc_type == WM_T_82575)
7012 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
7013 else
7014 CSR_WRITE(sc, WMREG_EIMS, 1 << WM_MSIX_LINKINTR_IDX);
7015
7016 return 1;
7017 }
7018 #endif /* WM_MSI_MSIX */
7019
7020 /*
7021 * Media related.
7022 * GMII, SGMII, TBI (and SERDES)
7023 */
7024
7025 /* Common */
7026
7027 /*
7028 * wm_tbi_serdes_set_linkled:
7029 *
7030 * Update the link LED on TBI and SERDES devices.
7031 */
7032 static void
7033 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
7034 {
7035
7036 if (sc->sc_tbi_linkup)
7037 sc->sc_ctrl |= CTRL_SWDPIN(0);
7038 else
7039 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7040
7041 /* 82540 or newer devices are active low */
7042 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7043
7044 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7045 }
7046
7047 /* GMII related */
7048
7049 /*
7050 * wm_gmii_reset:
7051 *
7052 * Reset the PHY.
7053 */
7054 static void
7055 wm_gmii_reset(struct wm_softc *sc)
7056 {
7057 uint32_t reg;
7058 int rv;
7059
7060 /* get phy semaphore */
7061 switch (sc->sc_type) {
7062 case WM_T_82571:
7063 case WM_T_82572:
7064 case WM_T_82573:
7065 case WM_T_82574:
7066 case WM_T_82583:
7067 /* XXX should get sw semaphore, too */
7068 rv = wm_get_swsm_semaphore(sc);
7069 break;
7070 case WM_T_82575:
7071 case WM_T_82576:
7072 case WM_T_82580:
7073 case WM_T_I350:
7074 case WM_T_I354:
7075 case WM_T_I210:
7076 case WM_T_I211:
7077 case WM_T_80003:
7078 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7079 break;
7080 case WM_T_ICH8:
7081 case WM_T_ICH9:
7082 case WM_T_ICH10:
7083 case WM_T_PCH:
7084 case WM_T_PCH2:
7085 case WM_T_PCH_LPT:
7086 rv = wm_get_swfwhw_semaphore(sc);
7087 break;
7088 default:
7089 		/* nothing to do */
7090 rv = 0;
7091 break;
7092 }
7093 if (rv != 0) {
7094 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7095 __func__);
7096 return;
7097 }
7098
7099 switch (sc->sc_type) {
7100 case WM_T_82542_2_0:
7101 case WM_T_82542_2_1:
7102 /* null */
7103 break;
7104 case WM_T_82543:
7105 /*
7106 * With 82543, we need to force speed and duplex on the MAC
7107 * equal to what the PHY speed and duplex configuration is.
7108 * In addition, we need to perform a hardware reset on the PHY
7109 * to take it out of reset.
7110 */
7111 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7112 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7113
7114 /* The PHY reset pin is active-low. */
7115 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7116 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
7117 CTRL_EXT_SWDPIN(4));
7118 reg |= CTRL_EXT_SWDPIO(4);
7119
7120 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7121 CSR_WRITE_FLUSH(sc);
7122 delay(10*1000);
7123
7124 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
7125 CSR_WRITE_FLUSH(sc);
7126 delay(150);
7127 #if 0
7128 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
7129 #endif
7130 delay(20*1000); /* XXX extra delay to get PHY ID? */
7131 break;
7132 case WM_T_82544: /* reset 10000us */
7133 case WM_T_82540:
7134 case WM_T_82545:
7135 case WM_T_82545_3:
7136 case WM_T_82546:
7137 case WM_T_82546_3:
7138 case WM_T_82541:
7139 case WM_T_82541_2:
7140 case WM_T_82547:
7141 case WM_T_82547_2:
7142 case WM_T_82571: /* reset 100us */
7143 case WM_T_82572:
7144 case WM_T_82573:
7145 case WM_T_82574:
7146 case WM_T_82575:
7147 case WM_T_82576:
7148 case WM_T_82580:
7149 case WM_T_I350:
7150 case WM_T_I354:
7151 case WM_T_I210:
7152 case WM_T_I211:
7153 case WM_T_82583:
7154 case WM_T_80003:
7155 /* generic reset */
7156 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7157 CSR_WRITE_FLUSH(sc);
7158 delay(20000);
7159 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7160 CSR_WRITE_FLUSH(sc);
7161 delay(20000);
7162
7163 if ((sc->sc_type == WM_T_82541)
7164 || (sc->sc_type == WM_T_82541_2)
7165 || (sc->sc_type == WM_T_82547)
7166 || (sc->sc_type == WM_T_82547_2)) {
7167 			/* workarounds for igp are done in igp_reset() */
7168 /* XXX add code to set LED after phy reset */
7169 }
7170 break;
7171 case WM_T_ICH8:
7172 case WM_T_ICH9:
7173 case WM_T_ICH10:
7174 case WM_T_PCH:
7175 case WM_T_PCH2:
7176 case WM_T_PCH_LPT:
7177 /* generic reset */
7178 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7179 CSR_WRITE_FLUSH(sc);
7180 delay(100);
7181 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7182 CSR_WRITE_FLUSH(sc);
7183 delay(150);
7184 break;
7185 default:
7186 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
7187 __func__);
7188 break;
7189 }
7190
7191 /* release PHY semaphore */
7192 switch (sc->sc_type) {
7193 case WM_T_82571:
7194 case WM_T_82572:
7195 case WM_T_82573:
7196 case WM_T_82574:
7197 case WM_T_82583:
7198 /* XXX should put sw semaphore, too */
7199 wm_put_swsm_semaphore(sc);
7200 break;
7201 case WM_T_82575:
7202 case WM_T_82576:
7203 case WM_T_82580:
7204 case WM_T_I350:
7205 case WM_T_I354:
7206 case WM_T_I210:
7207 case WM_T_I211:
7208 case WM_T_80003:
7209 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7210 break;
7211 case WM_T_ICH8:
7212 case WM_T_ICH9:
7213 case WM_T_ICH10:
7214 case WM_T_PCH:
7215 case WM_T_PCH2:
7216 case WM_T_PCH_LPT:
7217 wm_put_swfwhw_semaphore(sc);
7218 break;
7219 default:
7220 		/* nothing to do */
7221 rv = 0;
7222 break;
7223 }
7224
7225 /* get_cfg_done */
7226 wm_get_cfg_done(sc);
7227
7228 /* extra setup */
7229 switch (sc->sc_type) {
7230 case WM_T_82542_2_0:
7231 case WM_T_82542_2_1:
7232 case WM_T_82543:
7233 case WM_T_82544:
7234 case WM_T_82540:
7235 case WM_T_82545:
7236 case WM_T_82545_3:
7237 case WM_T_82546:
7238 case WM_T_82546_3:
7239 case WM_T_82541_2:
7240 case WM_T_82547_2:
7241 case WM_T_82571:
7242 case WM_T_82572:
7243 case WM_T_82573:
7244 case WM_T_82574:
7245 case WM_T_82575:
7246 case WM_T_82576:
7247 case WM_T_82580:
7248 case WM_T_I350:
7249 case WM_T_I354:
7250 case WM_T_I210:
7251 case WM_T_I211:
7252 case WM_T_82583:
7253 case WM_T_80003:
7254 /* null */
7255 break;
7256 case WM_T_82541:
7257 case WM_T_82547:
7258 		/* XXX Actively configure the LED after PHY reset */
7259 break;
7260 case WM_T_ICH8:
7261 case WM_T_ICH9:
7262 case WM_T_ICH10:
7263 case WM_T_PCH:
7264 case WM_T_PCH2:
7265 case WM_T_PCH_LPT:
7266 		/* Allow time for h/w to get to a quiescent state after reset */
7267 delay(10*1000);
7268
7269 if (sc->sc_type == WM_T_PCH)
7270 wm_hv_phy_workaround_ich8lan(sc);
7271
7272 if (sc->sc_type == WM_T_PCH2)
7273 wm_lv_phy_workaround_ich8lan(sc);
7274
7275 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
7276 /*
7277 * dummy read to clear the phy wakeup bit after lcd
7278 * reset
7279 */
7280 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
7281 }
7282
7283 /*
7284 		 * XXX Configure the LCD with the extended configuration region
7285 * in NVM
7286 */
7287
7288 /* Configure the LCD with the OEM bits in NVM */
7289 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
7290 || (sc->sc_type == WM_T_PCH_LPT)) {
7291 /*
7292 * Disable LPLU.
7293 * XXX It seems that 82567 has LPLU, too.
7294 */
7295 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
7296 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
7297 reg |= HV_OEM_BITS_ANEGNOW;
7298 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
7299 }
7300 break;
7301 default:
7302 panic("%s: unknown type\n", __func__);
7303 break;
7304 }
7305 }
7306
7307 /*
7308 * wm_get_phy_id_82575:
7309 *
7310 * Return PHY ID. Return -1 if it failed.
7311 */
7312 static int
7313 wm_get_phy_id_82575(struct wm_softc *sc)
7314 {
7315 uint32_t reg;
7316 int phyid = -1;
7317
7318 /* XXX */
7319 if ((sc->sc_flags & WM_F_SGMII) == 0)
7320 return -1;
7321
7322 if (wm_sgmii_uses_mdio(sc)) {
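		/*
		 * The configured PHY address is reflected in MDIC on the
		 * 82575/82576 and in MDICNFG on the 82580 and later.
		 */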
7323 switch (sc->sc_type) {
7324 case WM_T_82575:
7325 case WM_T_82576:
7326 reg = CSR_READ(sc, WMREG_MDIC);
7327 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
7328 break;
7329 case WM_T_82580:
7330 case WM_T_I350:
7331 case WM_T_I354:
7332 case WM_T_I210:
7333 case WM_T_I211:
7334 reg = CSR_READ(sc, WMREG_MDICNFG);
7335 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
7336 break;
7337 default:
7338 return -1;
7339 }
7340 }
7341
7342 return phyid;
7343 }
7344
7345
7346 /*
7347 * wm_gmii_mediainit:
7348 *
7349 * Initialize media for use on 1000BASE-T devices.
7350 */
7351 static void
7352 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
7353 {
7354 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7355 struct mii_data *mii = &sc->sc_mii;
7356 uint32_t reg;
7357
7358 /* We have GMII. */
7359 sc->sc_flags |= WM_F_HAS_MII;
7360
7361 if (sc->sc_type == WM_T_80003)
7362 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7363 else
7364 sc->sc_tipg = TIPG_1000T_DFLT;
7365
7366 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
7367 if ((sc->sc_type == WM_T_82580)
7368 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
7369 || (sc->sc_type == WM_T_I211)) {
7370 reg = CSR_READ(sc, WMREG_PHPM);
7371 reg &= ~PHPM_GO_LINK_D;
7372 CSR_WRITE(sc, WMREG_PHPM, reg);
7373 }
7374
7375 /*
7376 * Let the chip set speed/duplex on its own based on
7377 * signals from the PHY.
7378 * XXXbouyer - I'm not sure this is right for the 80003,
7379 * the em driver only sets CTRL_SLU here - but it seems to work.
7380 */
7381 sc->sc_ctrl |= CTRL_SLU;
7382 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7383
7384 /* Initialize our media structures and probe the GMII. */
7385 mii->mii_ifp = ifp;
7386
7387 /*
7388 * Determine the PHY access method.
7389 *
7390 * For SGMII, use SGMII specific method.
7391 *
7392 * For some devices, we can determine the PHY access method
7393 * from sc_type.
7394 *
7395 * For ICH and PCH variants, it's difficult to determine the PHY
7396 * access method by sc_type, so use the PCI product ID for some
7397 * devices.
7398  * For other ICH8 variants, try to use igp's method.  If the PHY
7399  * can't be detected that way, fall back to bm's method.
7400 */
7401 switch (prodid) {
7402 case PCI_PRODUCT_INTEL_PCH_M_LM:
7403 case PCI_PRODUCT_INTEL_PCH_M_LC:
7404 /* 82577 */
7405 sc->sc_phytype = WMPHY_82577;
7406 break;
7407 case PCI_PRODUCT_INTEL_PCH_D_DM:
7408 case PCI_PRODUCT_INTEL_PCH_D_DC:
7409 /* 82578 */
7410 sc->sc_phytype = WMPHY_82578;
7411 break;
7412 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
7413 case PCI_PRODUCT_INTEL_PCH2_LV_V:
7414 /* 82579 */
7415 sc->sc_phytype = WMPHY_82579;
7416 break;
7417 case PCI_PRODUCT_INTEL_82801I_BM:
7418 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
7419 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
7420 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
7421 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
7422 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
7423 /* 82567 */
7424 sc->sc_phytype = WMPHY_BM;
7425 mii->mii_readreg = wm_gmii_bm_readreg;
7426 mii->mii_writereg = wm_gmii_bm_writereg;
7427 break;
7428 default:
7429 if (((sc->sc_flags & WM_F_SGMII) != 0)
7430 		    && !wm_sgmii_uses_mdio(sc)) {
7431 /* SGMII */
7432 mii->mii_readreg = wm_sgmii_readreg;
7433 mii->mii_writereg = wm_sgmii_writereg;
7434 } else if (sc->sc_type >= WM_T_80003) {
7435 /* 80003 */
7436 mii->mii_readreg = wm_gmii_i80003_readreg;
7437 mii->mii_writereg = wm_gmii_i80003_writereg;
7438 } else if (sc->sc_type >= WM_T_I210) {
7439 /* I210 and I211 */
7440 mii->mii_readreg = wm_gmii_gs40g_readreg;
7441 mii->mii_writereg = wm_gmii_gs40g_writereg;
7442 } else if (sc->sc_type >= WM_T_82580) {
7443 /* 82580, I350 and I354 */
7444 sc->sc_phytype = WMPHY_82580;
7445 mii->mii_readreg = wm_gmii_82580_readreg;
7446 mii->mii_writereg = wm_gmii_82580_writereg;
7447 } else if (sc->sc_type >= WM_T_82544) {
7448 			/* 82544, 8254[0], 8254[56], 8254[17], 8257[1234] and 82583 */
7449 mii->mii_readreg = wm_gmii_i82544_readreg;
7450 mii->mii_writereg = wm_gmii_i82544_writereg;
7451 } else {
7452 mii->mii_readreg = wm_gmii_i82543_readreg;
7453 mii->mii_writereg = wm_gmii_i82543_writereg;
7454 }
7455 break;
7456 }
7457 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
7458 /* All PCH* use _hv_ */
7459 mii->mii_readreg = wm_gmii_hv_readreg;
7460 mii->mii_writereg = wm_gmii_hv_writereg;
7461 }
7462 mii->mii_statchg = wm_gmii_statchg;
7463
7464 wm_gmii_reset(sc);
7465
7466 sc->sc_ethercom.ec_mii = &sc->sc_mii;
7467 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
7468 wm_gmii_mediastatus);
7469
7470 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
7471 || (sc->sc_type == WM_T_82580)
7472 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
7473 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
7474 if ((sc->sc_flags & WM_F_SGMII) == 0) {
7475 /* Attach only one port */
7476 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
7477 MII_OFFSET_ANY, MIIF_DOPAUSE);
7478 } else {
7479 int i, id;
7480 uint32_t ctrl_ext;
7481
7482 id = wm_get_phy_id_82575(sc);
7483 if (id != -1) {
7484 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
7485 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
7486 }
7487 if ((id == -1)
7488 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
7489 /* Power on sgmii phy if it is disabled */
7490 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7491 CSR_WRITE(sc, WMREG_CTRL_EXT,
7492 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
7493 CSR_WRITE_FLUSH(sc);
7494 delay(300*1000); /* XXX too long */
7495
7496 			/* try PHY addresses 1 through 7 */
7497 for (i = 1; i < 8; i++)
7498 mii_attach(sc->sc_dev, &sc->sc_mii,
7499 0xffffffff, i, MII_OFFSET_ANY,
7500 MIIF_DOPAUSE);
7501
7502 /* restore previous sfp cage power state */
7503 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7504 }
7505 }
7506 } else {
7507 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7508 MII_OFFSET_ANY, MIIF_DOPAUSE);
7509 }
7510
7511 /*
7512 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
7513 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
7514 */
7515 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
7516 (LIST_FIRST(&mii->mii_phys) == NULL)) {
7517 wm_set_mdio_slow_mode_hv(sc);
7518 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7519 MII_OFFSET_ANY, MIIF_DOPAUSE);
7520 }
7521
7522 /*
7523 * (For ICH8 variants)
7524 * If PHY detection failed, use BM's r/w function and retry.
7525 */
7526 if (LIST_FIRST(&mii->mii_phys) == NULL) {
7527 /* if failed, retry with *_bm_* */
7528 mii->mii_readreg = wm_gmii_bm_readreg;
7529 mii->mii_writereg = wm_gmii_bm_writereg;
7530
7531 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
7532 MII_OFFSET_ANY, MIIF_DOPAUSE);
7533 }
7534
7535 if (LIST_FIRST(&mii->mii_phys) == NULL) {
7536 		/* No PHY was found */
7537 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
7538 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
7539 sc->sc_phytype = WMPHY_NONE;
7540 } else {
7541 /*
7542 * PHY Found!
7543 * Check PHY type.
7544 */
7545 uint32_t model;
7546 struct mii_softc *child;
7547
7548 child = LIST_FIRST(&mii->mii_phys);
7549 if (device_is_a(child->mii_dev, "igphy")) {
7550 struct igphy_softc *isc = (struct igphy_softc *)child;
7551
7552 model = isc->sc_mii.mii_mpd_model;
7553 if (model == MII_MODEL_yyINTEL_I82566)
7554 sc->sc_phytype = WMPHY_IGP_3;
7555 }
7556
7557 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
7558 }
7559 }
7560
7561 /*
7562 * wm_gmii_mediachange: [ifmedia interface function]
7563 *
7564 * Set hardware to newly-selected media on a 1000BASE-T device.
7565 */
7566 static int
7567 wm_gmii_mediachange(struct ifnet *ifp)
7568 {
7569 struct wm_softc *sc = ifp->if_softc;
7570 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7571 int rc;
7572
7573 if ((ifp->if_flags & IFF_UP) == 0)
7574 return 0;
7575
7576 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7577 sc->sc_ctrl |= CTRL_SLU;
7578 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7579 || (sc->sc_type > WM_T_82543)) {
7580 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
7581 } else {
7582 sc->sc_ctrl &= ~CTRL_ASDE;
7583 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7584 if (ife->ifm_media & IFM_FDX)
7585 sc->sc_ctrl |= CTRL_FD;
7586 switch (IFM_SUBTYPE(ife->ifm_media)) {
7587 case IFM_10_T:
7588 sc->sc_ctrl |= CTRL_SPEED_10;
7589 break;
7590 case IFM_100_TX:
7591 sc->sc_ctrl |= CTRL_SPEED_100;
7592 break;
7593 case IFM_1000_T:
7594 sc->sc_ctrl |= CTRL_SPEED_1000;
7595 break;
7596 default:
7597 panic("wm_gmii_mediachange: bad media 0x%x",
7598 ife->ifm_media);
7599 }
7600 }
7601 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7602 if (sc->sc_type <= WM_T_82543)
7603 wm_gmii_reset(sc);
7604
7605 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
7606 return 0;
7607 return rc;
7608 }
7609
7610 /*
7611 * wm_gmii_mediastatus: [ifmedia interface function]
7612 *
7613 * Get the current interface media status on a 1000BASE-T device.
7614 */
7615 static void
7616 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7617 {
7618 struct wm_softc *sc = ifp->if_softc;
7619
7620 ether_mediastatus(ifp, ifmr);
7621 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
7622 | sc->sc_flowflags;
7623 }
7624
7625 #define MDI_IO CTRL_SWDPIN(2)
7626 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
7627 #define MDI_CLK CTRL_SWDPIN(3)
7628
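/*
 * On the i82543 the MII management interface is bit-banged through the
 * software-definable pins above: MDI_DIR drives the data-pin direction,
 * MDI_IO carries the data bit and MDI_CLK supplies the management clock.
 */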
7629 static void
7630 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
7631 {
7632 uint32_t i, v;
7633
7634 v = CSR_READ(sc, WMREG_CTRL);
7635 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
7636 v |= MDI_DIR | CTRL_SWDPIO(3);
7637
7638 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
7639 if (data & i)
7640 v |= MDI_IO;
7641 else
7642 v &= ~MDI_IO;
7643 CSR_WRITE(sc, WMREG_CTRL, v);
7644 CSR_WRITE_FLUSH(sc);
7645 delay(10);
7646 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7647 CSR_WRITE_FLUSH(sc);
7648 delay(10);
7649 CSR_WRITE(sc, WMREG_CTRL, v);
7650 CSR_WRITE_FLUSH(sc);
7651 delay(10);
7652 }
7653 }
7654
7655 static uint32_t
7656 wm_i82543_mii_recvbits(struct wm_softc *sc)
7657 {
7658 uint32_t v, i, data = 0;
7659
7660 v = CSR_READ(sc, WMREG_CTRL);
7661 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
7662 v |= CTRL_SWDPIO(3);
7663
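	/*
	 * Clock the turnaround cycle so the PHY can take over the data
	 * line before we start sampling the 16 result bits below.
	 */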
7664 CSR_WRITE(sc, WMREG_CTRL, v);
7665 CSR_WRITE_FLUSH(sc);
7666 delay(10);
7667 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7668 CSR_WRITE_FLUSH(sc);
7669 delay(10);
7670 CSR_WRITE(sc, WMREG_CTRL, v);
7671 CSR_WRITE_FLUSH(sc);
7672 delay(10);
7673
7674 for (i = 0; i < 16; i++) {
7675 data <<= 1;
7676 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7677 CSR_WRITE_FLUSH(sc);
7678 delay(10);
7679 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
7680 data |= 1;
7681 CSR_WRITE(sc, WMREG_CTRL, v);
7682 CSR_WRITE_FLUSH(sc);
7683 delay(10);
7684 }
7685
7686 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7687 CSR_WRITE_FLUSH(sc);
7688 delay(10);
7689 CSR_WRITE(sc, WMREG_CTRL, v);
7690 CSR_WRITE_FLUSH(sc);
7691 delay(10);
7692
7693 return data;
7694 }
7695
7696 #undef MDI_IO
7697 #undef MDI_DIR
7698 #undef MDI_CLK
7699
7700 /*
7701 * wm_gmii_i82543_readreg: [mii interface function]
7702 *
7703 * Read a PHY register on the GMII (i82543 version).
7704 */
7705 static int
7706 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
7707 {
7708 struct wm_softc *sc = device_private(self);
7709 int rv;
7710
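	/*
	 * Bit-bang an IEEE 802.3 clause 22 read frame: a 32-bit preamble
	 * of ones, then the start, opcode, PHY address and register
	 * address fields (14 bits), after which the PHY shifts back the
	 * 16 data bits.
	 */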
7711 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
7712 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
7713 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
7714 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
7715
7716 DPRINTF(WM_DEBUG_GMII,
7717 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
7718 device_xname(sc->sc_dev), phy, reg, rv));
7719
7720 return rv;
7721 }
7722
7723 /*
7724 * wm_gmii_i82543_writereg: [mii interface function]
7725 *
7726 * Write a PHY register on the GMII (i82543 version).
7727 */
7728 static void
7729 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
7730 {
7731 struct wm_softc *sc = device_private(self);
7732
7733 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
7734 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
7735 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
7736 (MII_COMMAND_START << 30), 32);
7737 }
7738
7739 /*
7740 * wm_gmii_i82544_readreg: [mii interface function]
7741 *
7742 * Read a PHY register on the GMII.
7743 */
7744 static int
7745 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
7746 {
7747 struct wm_softc *sc = device_private(self);
7748 uint32_t mdic = 0;
7749 int i, rv;
7750
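	/*
	 * On the i82544 and later the MII frame is run by the MDIC
	 * register: program the opcode and the PHY/register addresses,
	 * then poll the READY bit and pick the data out of the register.
	 */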
7751 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
7752 MDIC_REGADD(reg));
7753
7754 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7755 mdic = CSR_READ(sc, WMREG_MDIC);
7756 if (mdic & MDIC_READY)
7757 break;
7758 delay(50);
7759 }
7760
7761 if ((mdic & MDIC_READY) == 0) {
7762 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
7763 device_xname(sc->sc_dev), phy, reg);
7764 rv = 0;
7765 } else if (mdic & MDIC_E) {
7766 #if 0 /* This is normal if no PHY is present. */
7767 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
7768 device_xname(sc->sc_dev), phy, reg);
7769 #endif
7770 rv = 0;
7771 } else {
7772 rv = MDIC_DATA(mdic);
7773 if (rv == 0xffff)
7774 rv = 0;
7775 }
7776
7777 return rv;
7778 }
7779
7780 /*
7781 * wm_gmii_i82544_writereg: [mii interface function]
7782 *
7783 * Write a PHY register on the GMII.
7784 */
7785 static void
7786 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
7787 {
7788 struct wm_softc *sc = device_private(self);
7789 uint32_t mdic = 0;
7790 int i;
7791
7792 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
7793 MDIC_REGADD(reg) | MDIC_DATA(val));
7794
7795 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7796 mdic = CSR_READ(sc, WMREG_MDIC);
7797 if (mdic & MDIC_READY)
7798 break;
7799 delay(50);
7800 }
7801
7802 if ((mdic & MDIC_READY) == 0)
7803 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
7804 device_xname(sc->sc_dev), phy, reg);
7805 else if (mdic & MDIC_E)
7806 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
7807 device_xname(sc->sc_dev), phy, reg);
7808 }
7809
7810 /*
7811 * wm_gmii_i80003_readreg: [mii interface function]
7812 *
7813  * Read a PHY register on the Kumeran interface.
7814  * This could be handled by the PHY layer if we didn't have to lock the
7815  * resource ...
7816 */
7817 static int
7818 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
7819 {
7820 struct wm_softc *sc = device_private(self);
7821 int sem;
7822 int rv;
7823
7824 if (phy != 1) /* only one PHY on kumeran bus */
7825 return 0;
7826
7827 sem = swfwphysem[sc->sc_funcid];
7828 if (wm_get_swfw_semaphore(sc, sem)) {
7829 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7830 __func__);
7831 return 0;
7832 }
7833
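	/*
	 * GG82563 registers are paged: the page lives in the upper bits
	 * of 'reg' and is selected through the page select register (or
	 * the alternate one for the high register range).
	 */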
7834 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7835 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7836 reg >> GG82563_PAGE_SHIFT);
7837 } else {
7838 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7839 reg >> GG82563_PAGE_SHIFT);
7840 }
7841 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
7842 delay(200);
7843 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7844 delay(200);
7845
7846 wm_put_swfw_semaphore(sc, sem);
7847 return rv;
7848 }
7849
7850 /*
7851 * wm_gmii_i80003_writereg: [mii interface function]
7852 *
7853  * Write a PHY register on the Kumeran interface.
7854  * This could be handled by the PHY layer if we didn't have to lock the
7855  * resource ...
7856 */
7857 static void
7858 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
7859 {
7860 struct wm_softc *sc = device_private(self);
7861 int sem;
7862
7863 if (phy != 1) /* only one PHY on kumeran bus */
7864 return;
7865
7866 sem = swfwphysem[sc->sc_funcid];
7867 if (wm_get_swfw_semaphore(sc, sem)) {
7868 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7869 __func__);
7870 return;
7871 }
7872
7873 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7874 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7875 reg >> GG82563_PAGE_SHIFT);
7876 } else {
7877 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7878 reg >> GG82563_PAGE_SHIFT);
7879 }
7880 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
7881 delay(200);
7882 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7883 delay(200);
7884
7885 wm_put_swfw_semaphore(sc, sem);
7886 }
7887
7888 /*
7889 * wm_gmii_bm_readreg: [mii interface function]
7890 *
7891  * Read a PHY register on the BM PHY.
7892  * This could be handled by the PHY layer if we didn't have to lock the
7893  * resource ...
7894 */
7895 static int
7896 wm_gmii_bm_readreg(device_t self, int phy, int reg)
7897 {
7898 struct wm_softc *sc = device_private(self);
7899 int sem;
7900 int rv;
7901
7902 sem = swfwphysem[sc->sc_funcid];
7903 if (wm_get_swfw_semaphore(sc, sem)) {
7904 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7905 __func__);
7906 return 0;
7907 }
7908
7909 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7910 if (phy == 1)
7911 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7912 reg);
7913 else
7914 wm_gmii_i82544_writereg(self, phy,
7915 GG82563_PHY_PAGE_SELECT,
7916 reg >> GG82563_PAGE_SHIFT);
7917 }
7918
7919 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7920 wm_put_swfw_semaphore(sc, sem);
7921 return rv;
7922 }
7923
7924 /*
7925 * wm_gmii_bm_writereg: [mii interface function]
7926 *
7927  * Write a PHY register on the BM PHY.
7928  * This could be handled by the PHY layer if we didn't have to lock the
7929  * resource ...
7930 */
7931 static void
7932 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7933 {
7934 struct wm_softc *sc = device_private(self);
7935 int sem;
7936
7937 sem = swfwphysem[sc->sc_funcid];
7938 if (wm_get_swfw_semaphore(sc, sem)) {
7939 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7940 __func__);
7941 return;
7942 }
7943
7944 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7945 if (phy == 1)
7946 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7947 reg);
7948 else
7949 wm_gmii_i82544_writereg(self, phy,
7950 GG82563_PHY_PAGE_SELECT,
7951 reg >> GG82563_PAGE_SHIFT);
7952 }
7953
7954 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7955 wm_put_swfw_semaphore(sc, sem);
7956 }
7957
7958 static void
7959 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7960 {
7961 struct wm_softc *sc = device_private(self);
7962 uint16_t regnum = BM_PHY_REG_NUM(offset);
7963 uint16_t wuce;
7964
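	/*
	 * Accessing a wakeup (page 800) register takes several steps:
	 * enable the wakeup register window from page 769, select page
	 * 800, write the register address, read or write the data, and
	 * finally restore the original WUC enable bits.
	 */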
7965 /* XXX Gig must be disabled for MDIO accesses to page 800 */
7966 if (sc->sc_type == WM_T_PCH) {
7967 		/* XXX The e1000 driver does nothing here... why? */
7968 }
7969
7970 /* Set page 769 */
7971 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7972 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7973
7974 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7975
7976 wuce &= ~BM_WUC_HOST_WU_BIT;
7977 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7978 wuce | BM_WUC_ENABLE_BIT);
7979
7980 /* Select page 800 */
7981 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7982 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7983
7984 /* Write page 800 */
7985 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7986
7987 if (rd)
7988 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7989 else
7990 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7991
7992 /* Set page 769 */
7993 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7994 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7995
7996 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7997 }
7998
7999 /*
8000 * wm_gmii_hv_readreg: [mii interface function]
8001 *
8002  * Read a PHY register on the HV PHY (PCH and newer).
8003  * This could be handled by the PHY layer if we didn't have to lock the
8004  * resource ...
8005 */
8006 static int
8007 wm_gmii_hv_readreg(device_t self, int phy, int reg)
8008 {
8009 struct wm_softc *sc = device_private(self);
8010 uint16_t page = BM_PHY_REG_PAGE(reg);
8011 uint16_t regnum = BM_PHY_REG_NUM(reg);
8012 uint16_t val;
8013 int rv;
8014
8015 if (wm_get_swfwhw_semaphore(sc)) {
8016 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8017 __func__);
8018 return 0;
8019 }
8020
8021 /* XXX Workaround failure in MDIO access while cable is disconnected */
8022 if (sc->sc_phytype == WMPHY_82577) {
8023 /* XXX must write */
8024 }
8025
8026 /* Page 800 works differently than the rest so it has its own func */
8027 if (page == BM_WUC_PAGE) {
8028 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);
8029 		return val;
8030 }
8031
8032 /*
8033  * Pages between 1 and 767 work differently than the rest and are
8034  * not handled here.
8035  */
8036 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8037 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);
8038 		return 0;
8039 }
8040
8041 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8042 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8043 page << BME1000_PAGE_SHIFT);
8044 }
8045
8046 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
8047 wm_put_swfwhw_semaphore(sc);
8048 return rv;
8049 }
8050
8051 /*
8052 * wm_gmii_hv_writereg: [mii interface function]
8053 *
8054  * Write a PHY register on the HV PHY (PCH and newer).
8055  * This could be handled by the PHY layer if we didn't have to lock the
8056  * resource ...
8057 */
8058 static void
8059 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
8060 {
8061 struct wm_softc *sc = device_private(self);
8062 uint16_t page = BM_PHY_REG_PAGE(reg);
8063 uint16_t regnum = BM_PHY_REG_NUM(reg);
8064
8065 if (wm_get_swfwhw_semaphore(sc)) {
8066 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8067 __func__);
8068 return;
8069 }
8070
8071 /* XXX Workaround failure in MDIO access while cable is disconnected */
8072
8073 /* Page 800 works differently than the rest so it has its own func */
8074 if (page == BM_WUC_PAGE) {
8075 uint16_t tmp;
8076
8077 tmp = val;
8078 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);
8079 		return;
8080 }
8081
8082 /*
8083  * Pages between 1 and 767 work differently than the rest and are
8084  * not handled here.
8085  */
8086 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8087 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);
8088 		return;
8089 }
8090
8091 /*
8092 * XXX Workaround MDIO accesses being disabled after entering IEEE
8093 * Power Down (whenever bit 11 of the PHY control register is set)
8094 */
8095
8096 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8097 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8098 page << BME1000_PAGE_SHIFT);
8099 }
8100
8101 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
8102 wm_put_swfwhw_semaphore(sc);
8103 }
8104
8105 /*
8106 * wm_gmii_82580_readreg: [mii interface function]
8107 *
8108 * Read a PHY register on the 82580 and I350.
8109 * This could be handled by the PHY layer if we didn't have to lock the
8110  * resource ...
8111 */
8112 static int
8113 wm_gmii_82580_readreg(device_t self, int phy, int reg)
8114 {
8115 struct wm_softc *sc = device_private(self);
8116 int sem;
8117 int rv;
8118
8119 sem = swfwphysem[sc->sc_funcid];
8120 if (wm_get_swfw_semaphore(sc, sem)) {
8121 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8122 __func__);
8123 return 0;
8124 }
8125
8126 rv = wm_gmii_i82544_readreg(self, phy, reg);
8127
8128 wm_put_swfw_semaphore(sc, sem);
8129 return rv;
8130 }
8131
8132 /*
8133 * wm_gmii_82580_writereg: [mii interface function]
8134 *
8135 * Write a PHY register on the 82580 and I350.
8136 * This could be handled by the PHY layer if we didn't have to lock the
8137  * resource ...
8138 */
8139 static void
8140 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
8141 {
8142 struct wm_softc *sc = device_private(self);
8143 int sem;
8144
8145 sem = swfwphysem[sc->sc_funcid];
8146 if (wm_get_swfw_semaphore(sc, sem)) {
8147 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8148 __func__);
8149 return;
8150 }
8151
8152 wm_gmii_i82544_writereg(self, phy, reg, val);
8153
8154 wm_put_swfw_semaphore(sc, sem);
8155 }
8156
8157 /*
8158 * wm_gmii_gs40g_readreg: [mii interface function]
8159 *
8160  * Read a PHY register on the I210 and I211.
8161  * This could be handled by the PHY layer if we didn't have to lock the
8162  * resource ...
8163 */
8164 static int
8165 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
8166 {
8167 struct wm_softc *sc = device_private(self);
8168 int sem;
8169 int page, offset;
8170 int rv;
8171
8172 /* Acquire semaphore */
8173 sem = swfwphysem[sc->sc_funcid];
8174 if (wm_get_swfw_semaphore(sc, sem)) {
8175 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8176 __func__);
8177 return 0;
8178 }
8179
8180 /* Page select */
8181 page = reg >> GS40G_PAGE_SHIFT;
8182 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8183
8184 /* Read reg */
8185 offset = reg & GS40G_OFFSET_MASK;
8186 rv = wm_gmii_i82544_readreg(self, phy, offset);
8187
8188 wm_put_swfw_semaphore(sc, sem);
8189 return rv;
8190 }
8191
8192 /*
8193 * wm_gmii_gs40g_writereg: [mii interface function]
8194 *
8195 * Write a PHY register on the I210 and I211.
8196 * This could be handled by the PHY layer if we didn't have to lock the
8197  * resource ...
8198 */
8199 static void
8200 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
8201 {
8202 struct wm_softc *sc = device_private(self);
8203 int sem;
8204 int page, offset;
8205
8206 /* Acquire semaphore */
8207 sem = swfwphysem[sc->sc_funcid];
8208 if (wm_get_swfw_semaphore(sc, sem)) {
8209 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8210 __func__);
8211 return;
8212 }
8213
8214 /* Page select */
8215 page = reg >> GS40G_PAGE_SHIFT;
8216 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8217
8218 /* Write reg */
8219 offset = reg & GS40G_OFFSET_MASK;
8220 wm_gmii_i82544_writereg(self, phy, offset, val);
8221
8222 /* Release semaphore */
8223 wm_put_swfw_semaphore(sc, sem);
8224 }
8225
8226 /*
8227 * wm_gmii_statchg: [mii interface function]
8228 *
8229 * Callback from MII layer when media changes.
8230 */
8231 static void
8232 wm_gmii_statchg(struct ifnet *ifp)
8233 {
8234 struct wm_softc *sc = ifp->if_softc;
8235 struct mii_data *mii = &sc->sc_mii;
8236
8237 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
8238 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8239 sc->sc_fcrtl &= ~FCRTL_XONE;
8240
8241 /*
8242 * Get flow control negotiation result.
8243 */
8244 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
8245 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
8246 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
8247 mii->mii_media_active &= ~IFM_ETH_FMASK;
8248 }
8249
8250 if (sc->sc_flowflags & IFM_FLOW) {
8251 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
8252 sc->sc_ctrl |= CTRL_TFCE;
8253 sc->sc_fcrtl |= FCRTL_XONE;
8254 }
8255 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
8256 sc->sc_ctrl |= CTRL_RFCE;
8257 }
8258
8259 if (sc->sc_mii.mii_media_active & IFM_FDX) {
8260 DPRINTF(WM_DEBUG_LINK,
8261 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
8262 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8263 } else {
8264 DPRINTF(WM_DEBUG_LINK,
8265 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
8266 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8267 }
8268
8269 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8270 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8271 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
8272 : WMREG_FCRTL, sc->sc_fcrtl);
8273 if (sc->sc_type == WM_T_80003) {
8274 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
8275 case IFM_1000_T:
8276 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8277 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
8278 sc->sc_tipg = TIPG_1000T_80003_DFLT;
8279 break;
8280 default:
8281 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8282 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
8283 sc->sc_tipg = TIPG_10_100_80003_DFLT;
8284 break;
8285 }
8286 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
8287 }
8288 }
8289
8290 /*
8291 * wm_kmrn_readreg:
8292 *
8293  * Read a Kumeran register
8294 */
8295 static int
8296 wm_kmrn_readreg(struct wm_softc *sc, int reg)
8297 {
8298 int rv;
8299
8300 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8301 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8302 aprint_error_dev(sc->sc_dev,
8303 "%s: failed to get semaphore\n", __func__);
8304 return 0;
8305 }
8306 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8307 if (wm_get_swfwhw_semaphore(sc)) {
8308 aprint_error_dev(sc->sc_dev,
8309 "%s: failed to get semaphore\n", __func__);
8310 return 0;
8311 }
8312 }
8313
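	/*
	 * A Kumeran read is started by writing the register offset
	 * together with the REN bit; the data appears in the low 16 bits
	 * of KUMCTRLSTA shortly afterwards.
	 */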
8314 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8315 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8316 KUMCTRLSTA_REN);
8317 CSR_WRITE_FLUSH(sc);
8318 delay(2);
8319
8320 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
8321
8322 if (sc->sc_flags & WM_F_LOCK_SWFW)
8323 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8324 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8325 wm_put_swfwhw_semaphore(sc);
8326
8327 return rv;
8328 }
8329
8330 /*
8331 * wm_kmrn_writereg:
8332 *
8333  * Write a Kumeran register
8334 */
8335 static void
8336 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
8337 {
8338
8339 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8340 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8341 aprint_error_dev(sc->sc_dev,
8342 "%s: failed to get semaphore\n", __func__);
8343 return;
8344 }
8345 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8346 if (wm_get_swfwhw_semaphore(sc)) {
8347 aprint_error_dev(sc->sc_dev,
8348 "%s: failed to get semaphore\n", __func__);
8349 return;
8350 }
8351 }
8352
8353 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8354 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8355 (val & KUMCTRLSTA_MASK));
8356
8357 if (sc->sc_flags & WM_F_LOCK_SWFW)
8358 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8359 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8360 wm_put_swfwhw_semaphore(sc);
8361 }
8362
8363 /* SGMII related */
8364
8365 /*
8366 * wm_sgmii_uses_mdio
8367 *
8368 * Check whether the transaction is to the internal PHY or the external
8369 * MDIO interface. Return true if it's MDIO.
8370 */
8371 static bool
8372 wm_sgmii_uses_mdio(struct wm_softc *sc)
8373 {
8374 uint32_t reg;
8375 bool ismdio = false;
8376
8377 switch (sc->sc_type) {
8378 case WM_T_82575:
8379 case WM_T_82576:
8380 reg = CSR_READ(sc, WMREG_MDIC);
8381 ismdio = ((reg & MDIC_DEST) != 0);
8382 break;
8383 case WM_T_82580:
8384 case WM_T_I350:
8385 case WM_T_I354:
8386 case WM_T_I210:
8387 case WM_T_I211:
8388 reg = CSR_READ(sc, WMREG_MDICNFG);
8389 ismdio = ((reg & MDICNFG_DEST) != 0);
8390 break;
8391 default:
8392 break;
8393 }
8394
8395 return ismdio;
8396 }
8397
8398 /*
8399 * wm_sgmii_readreg: [mii interface function]
8400 *
8401 * Read a PHY register on the SGMII
8402 * This could be handled by the PHY layer if we didn't have to lock the
8403  * resource ...
8404 */
8405 static int
8406 wm_sgmii_readreg(device_t self, int phy, int reg)
8407 {
8408 struct wm_softc *sc = device_private(self);
8409 uint32_t i2ccmd;
8410 int i, rv;
8411
8412 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8413 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8414 __func__);
8415 return 0;
8416 }
8417
8418 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8419 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8420 | I2CCMD_OPCODE_READ;
8421 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8422
8423 /* Poll the ready bit */
8424 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8425 delay(50);
8426 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8427 if (i2ccmd & I2CCMD_READY)
8428 break;
8429 }
8430 if ((i2ccmd & I2CCMD_READY) == 0)
8431 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
8432 if ((i2ccmd & I2CCMD_ERROR) != 0)
8433 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8434
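	/* The I2C interface returns the two data bytes swapped, so swap
	 * them back before handing the value up. */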
8435 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
8436
8437 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8438 return rv;
8439 }
8440
8441 /*
8442 * wm_sgmii_writereg: [mii interface function]
8443 *
8444 * Write a PHY register on the SGMII.
8445 * This could be handled by the PHY layer if we didn't have to lock the
8446  * resource ...
8447 */
8448 static void
8449 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
8450 {
8451 struct wm_softc *sc = device_private(self);
8452 uint32_t i2ccmd;
8453 int i;
8454 int val_swapped;
8455
8456 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8457 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8458 __func__);
8459 return;
8460 }
8461 /* Swap the data bytes for the I2C interface */
8462 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
8463 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8464 | (phy << I2CCMD_PHY_ADDR_SHIFT)
8465 | I2CCMD_OPCODE_WRITE | val_swapped;
8466 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8467
8468 /* Poll the ready bit */
8469 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8470 delay(50);
8471 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8472 if (i2ccmd & I2CCMD_READY)
8473 break;
8474 }
8475 if ((i2ccmd & I2CCMD_READY) == 0)
8476 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
8477 if ((i2ccmd & I2CCMD_ERROR) != 0)
8478 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8479
8480 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8481 }
8482
8483 /* TBI related */
8484
8485 /*
8486 * wm_tbi_mediainit:
8487 *
8488 * Initialize media for use on 1000BASE-X devices.
8489 */
8490 static void
8491 wm_tbi_mediainit(struct wm_softc *sc)
8492 {
8493 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8494 const char *sep = "";
8495
8496 if (sc->sc_type < WM_T_82543)
8497 sc->sc_tipg = TIPG_WM_DFLT;
8498 else
8499 sc->sc_tipg = TIPG_LG_DFLT;
8500
8501 sc->sc_tbi_serdes_anegticks = 5;
8502
8503 /* Initialize our media structures */
8504 sc->sc_mii.mii_ifp = ifp;
8505 sc->sc_ethercom.ec_mii = &sc->sc_mii;
8506
8507 if ((sc->sc_type >= WM_T_82575)
8508 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
8509 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
8510 wm_serdes_mediachange, wm_serdes_mediastatus);
8511 else
8512 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
8513 wm_tbi_mediachange, wm_tbi_mediastatus);
8514
8515 /*
8516 * SWD Pins:
8517 *
8518 * 0 = Link LED (output)
8519 * 1 = Loss Of Signal (input)
8520 */
8521 sc->sc_ctrl |= CTRL_SWDPIO(0);
8522
8523 /* XXX Perhaps this is only for TBI */
8524 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
8525 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
8526
8527 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
8528 sc->sc_ctrl &= ~CTRL_LRST;
8529
8530 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8531
8532 #define ADD(ss, mm, dd) \
8533 do { \
8534 aprint_normal("%s%s", sep, ss); \
8535 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
8536 sep = ", "; \
8537 } while (/*CONSTCOND*/0)
8538
8539 aprint_normal_dev(sc->sc_dev, "");
8540
8541 /* Only 82545 is LX */
8542 if (sc->sc_type == WM_T_82545) {
8543 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
8544 ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
8545 } else {
8546 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
8547 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
8548 }
8549 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
8550 aprint_normal("\n");
8551
8552 #undef ADD
8553
8554 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
8555 }
8556
8557 /*
8558 * wm_tbi_mediachange: [ifmedia interface function]
8559 *
8560 * Set hardware to newly-selected media on a 1000BASE-X device.
8561 */
8562 static int
8563 wm_tbi_mediachange(struct ifnet *ifp)
8564 {
8565 struct wm_softc *sc = ifp->if_softc;
8566 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8567 uint32_t status;
8568 int i;
8569
8570 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
8571 /* XXX need some work for >= 82571 and < 82575 */
8572 if (sc->sc_type < WM_T_82575)
8573 return 0;
8574 }
8575
8576 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8577 || (sc->sc_type >= WM_T_82575))
8578 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8579
8580 sc->sc_ctrl &= ~CTRL_LRST;
8581 sc->sc_txcw = TXCW_ANE;
8582 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8583 sc->sc_txcw |= TXCW_FD | TXCW_HD;
8584 else if (ife->ifm_media & IFM_FDX)
8585 sc->sc_txcw |= TXCW_FD;
8586 else
8587 sc->sc_txcw |= TXCW_HD;
8588
8589 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
8590 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
8591
8592 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
8593 device_xname(sc->sc_dev), sc->sc_txcw));
8594 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8595 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8596 CSR_WRITE_FLUSH(sc);
8597 delay(1000);
8598
8599 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
8600 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
8601
8602 /*
8603  * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set if
8604  * the optics detect a signal, and clear if they don't.
8605 */
8606 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
8607 /* Have signal; wait for the link to come up. */
8608 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
8609 delay(10000);
8610 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
8611 break;
8612 }
8613
8614 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
8615 device_xname(sc->sc_dev),i));
8616
8617 status = CSR_READ(sc, WMREG_STATUS);
8618 DPRINTF(WM_DEBUG_LINK,
8619 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
8620 device_xname(sc->sc_dev),status, STATUS_LU));
8621 if (status & STATUS_LU) {
8622 /* Link is up. */
8623 DPRINTF(WM_DEBUG_LINK,
8624 ("%s: LINK: set media -> link up %s\n",
8625 device_xname(sc->sc_dev),
8626 (status & STATUS_FD) ? "FDX" : "HDX"));
8627
8628 /*
8629 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
8630 			 * automatically, so re-read it into sc->sc_ctrl.
8631 */
8632 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
8633 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8634 sc->sc_fcrtl &= ~FCRTL_XONE;
8635 if (status & STATUS_FD)
8636 sc->sc_tctl |=
8637 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8638 else
8639 sc->sc_tctl |=
8640 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8641 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
8642 sc->sc_fcrtl |= FCRTL_XONE;
8643 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8644 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
8645 WMREG_OLD_FCRTL : WMREG_FCRTL,
8646 sc->sc_fcrtl);
8647 sc->sc_tbi_linkup = 1;
8648 } else {
8649 if (i == WM_LINKUP_TIMEOUT)
8650 wm_check_for_link(sc);
8651 /* Link is down. */
8652 DPRINTF(WM_DEBUG_LINK,
8653 ("%s: LINK: set media -> link down\n",
8654 device_xname(sc->sc_dev)));
8655 sc->sc_tbi_linkup = 0;
8656 }
8657 } else {
8658 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
8659 device_xname(sc->sc_dev)));
8660 sc->sc_tbi_linkup = 0;
8661 }
8662
8663 wm_tbi_serdes_set_linkled(sc);
8664
8665 return 0;
8666 }
8667
8668 /*
8669 * wm_tbi_mediastatus: [ifmedia interface function]
8670 *
8671 * Get the current interface media status on a 1000BASE-X device.
8672 */
8673 static void
8674 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8675 {
8676 struct wm_softc *sc = ifp->if_softc;
8677 uint32_t ctrl, status;
8678
8679 ifmr->ifm_status = IFM_AVALID;
8680 ifmr->ifm_active = IFM_ETHER;
8681
8682 status = CSR_READ(sc, WMREG_STATUS);
8683 if ((status & STATUS_LU) == 0) {
8684 ifmr->ifm_active |= IFM_NONE;
8685 return;
8686 }
8687
8688 ifmr->ifm_status |= IFM_ACTIVE;
8689 /* Only 82545 is LX */
8690 if (sc->sc_type == WM_T_82545)
8691 ifmr->ifm_active |= IFM_1000_LX;
8692 else
8693 ifmr->ifm_active |= IFM_1000_SX;
8694 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
8695 ifmr->ifm_active |= IFM_FDX;
8696 else
8697 ifmr->ifm_active |= IFM_HDX;
8698 ctrl = CSR_READ(sc, WMREG_CTRL);
8699 if (ctrl & CTRL_RFCE)
8700 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
8701 if (ctrl & CTRL_TFCE)
8702 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
8703 }
8704
8705 /* XXX TBI only */
8706 static int
8707 wm_check_for_link(struct wm_softc *sc)
8708 {
8709 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8710 uint32_t rxcw;
8711 uint32_t ctrl;
8712 uint32_t status;
8713 uint32_t sig;
8714
8715 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
8716 /* XXX need some work for >= 82571 */
8717 if (sc->sc_type >= WM_T_82571) {
8718 sc->sc_tbi_linkup = 1;
8719 return 0;
8720 }
8721 }
8722
8723 rxcw = CSR_READ(sc, WMREG_RXCW);
8724 ctrl = CSR_READ(sc, WMREG_CTRL);
8725 status = CSR_READ(sc, WMREG_STATUS);
8726
8727 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8728
8729 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8730 device_xname(sc->sc_dev), __func__,
8731 ((ctrl & CTRL_SWDPIN(1)) == sig),
8732 ((status & STATUS_LU) != 0),
8733 ((rxcw & RXCW_C) != 0)
8734 ));
8735
8736 /*
8737 * SWDPIN LU RXCW
8738 * 0 0 0
8739 * 0 0 1 (should not happen)
8740 * 0 1 0 (should not happen)
8741 * 0 1 1 (should not happen)
8742 * 1 0 0 Disable autonego and force linkup
8743 * 1 0 1 got /C/ but not linkup yet
8744 * 1 1 0 (linkup)
8745 * 1 1 1 If IFM_AUTO, back to autonego
8746 *
8747 */
8748 if (((ctrl & CTRL_SWDPIN(1)) == sig)
8749 && ((status & STATUS_LU) == 0)
8750 && ((rxcw & RXCW_C) == 0)) {
8751 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8752 __func__));
8753 sc->sc_tbi_linkup = 0;
8754 /* Disable auto-negotiation in the TXCW register */
8755 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8756
8757 /*
8758 * Force link-up and also force full-duplex.
8759 *
8760 		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
8761 		 * so base sc->sc_ctrl on the value just read.
8762 */
8763 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8764 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8765 } else if (((status & STATUS_LU) != 0)
8766 && ((rxcw & RXCW_C) != 0)
8767 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8768 sc->sc_tbi_linkup = 1;
8769 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8770 __func__));
8771 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8772 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8773 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8774 && ((rxcw & RXCW_C) != 0)) {
8775 DPRINTF(WM_DEBUG_LINK, ("/C/"));
8776 } else {
8777 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8778 status));
8779 }
8780
8781 return 0;
8782 }
8783
8784 /*
8785 * wm_tbi_tick:
8786 *
8787 * Check the link on TBI devices.
8788 * This function acts as mii_tick().
8789 */
8790 static void
8791 wm_tbi_tick(struct wm_softc *sc)
8792 {
8793 struct mii_data *mii = &sc->sc_mii;
8794 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
8795 uint32_t status;
8796
8797 KASSERT(WM_TX_LOCKED(sc));
8798
8799 status = CSR_READ(sc, WMREG_STATUS);
8800
8801 /* XXX is this needed? */
8802 (void)CSR_READ(sc, WMREG_RXCW);
8803 (void)CSR_READ(sc, WMREG_CTRL);
8804
8805 /* set link status */
8806 if ((status & STATUS_LU) == 0) {
8807 DPRINTF(WM_DEBUG_LINK,
8808 ("%s: LINK: checklink -> down\n",
8809 device_xname(sc->sc_dev)));
8810 sc->sc_tbi_linkup = 0;
8811 } else if (sc->sc_tbi_linkup == 0) {
8812 DPRINTF(WM_DEBUG_LINK,
8813 ("%s: LINK: checklink -> up %s\n",
8814 device_xname(sc->sc_dev),
8815 (status & STATUS_FD) ? "FDX" : "HDX"));
8816 sc->sc_tbi_linkup = 1;
8817 sc->sc_tbi_serdes_ticks = 0;
8818 }
8819
8820 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
8821 goto setled;
8822
8823 if ((status & STATUS_LU) == 0) {
8824 sc->sc_tbi_linkup = 0;
8825 /* If the timer expired, retry autonegotiation */
8826 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8827 && (++sc->sc_tbi_serdes_ticks
8828 >= sc->sc_tbi_serdes_anegticks)) {
8829 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
8830 sc->sc_tbi_serdes_ticks = 0;
8831 /*
8832 * Reset the link, and let autonegotiation do
8833 * its thing
8834 */
8835 sc->sc_ctrl |= CTRL_LRST;
8836 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8837 CSR_WRITE_FLUSH(sc);
8838 delay(1000);
8839 sc->sc_ctrl &= ~CTRL_LRST;
8840 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8841 CSR_WRITE_FLUSH(sc);
8842 delay(1000);
8843 CSR_WRITE(sc, WMREG_TXCW,
8844 sc->sc_txcw & ~TXCW_ANE);
8845 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8846 }
8847 }
8848
8849 setled:
8850 wm_tbi_serdes_set_linkled(sc);
8851 }
8852
8853 /* SERDES related */
8854 static void
8855 wm_serdes_power_up_link_82575(struct wm_softc *sc)
8856 {
8857 uint32_t reg;
8858
8859 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
8860 && ((sc->sc_flags & WM_F_SGMII) == 0))
8861 return;
8862
8863 reg = CSR_READ(sc, WMREG_PCS_CFG);
8864 reg |= PCS_CFG_PCS_EN;
8865 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
8866
8867 reg = CSR_READ(sc, WMREG_CTRL_EXT);
8868 reg &= ~CTRL_EXT_SWDPIN(3);
8869 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8870 CSR_WRITE_FLUSH(sc);
8871 }
8872
8873 static int
8874 wm_serdes_mediachange(struct ifnet *ifp)
8875 {
8876 struct wm_softc *sc = ifp->if_softc;
8877 bool pcs_autoneg = true; /* XXX */
8878 uint32_t ctrl_ext, pcs_lctl, reg;
8879
8880 /* XXX Currently, this function is not called on 8257[12] */
8881 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
8882 || (sc->sc_type >= WM_T_82575))
8883 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
8884
8885 wm_serdes_power_up_link_82575(sc);
8886
8887 sc->sc_ctrl |= CTRL_SLU;
8888
8889 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
8890 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
8891
8892 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8893 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
8894 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
8895 case CTRL_EXT_LINK_MODE_SGMII:
8896 pcs_autoneg = true;
8897 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
8898 break;
8899 case CTRL_EXT_LINK_MODE_1000KX:
8900 pcs_autoneg = false;
8901 /* FALLTHROUGH */
8902 default:
8903 		if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
8904 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
8905 pcs_autoneg = false;
8906 }
8907 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
8908 | CTRL_FRCFDX;
8909 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
8910 }
8911 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8912
8913 if (pcs_autoneg) {
8914 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
8915 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
8916
8917 reg = CSR_READ(sc, WMREG_PCS_ANADV);
8918 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
8919 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
8920 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
8921 } else
8922 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
8923
8924 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
8925
8927 return 0;
8928 }
8929
8930 static void
8931 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8932 {
8933 struct wm_softc *sc = ifp->if_softc;
8934 struct mii_data *mii = &sc->sc_mii;
8935 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8936 uint32_t pcs_adv, pcs_lpab, reg;
8937
8938 ifmr->ifm_status = IFM_AVALID;
8939 ifmr->ifm_active = IFM_ETHER;
8940
8941 /* Check PCS */
8942 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8943 if ((reg & PCS_LSTS_LINKOK) == 0) {
8944 ifmr->ifm_active |= IFM_NONE;
8945 sc->sc_tbi_linkup = 0;
8946 goto setled;
8947 }
8948
8949 sc->sc_tbi_linkup = 1;
8950 ifmr->ifm_status |= IFM_ACTIVE;
8951 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
8952 if ((reg & PCS_LSTS_FDX) != 0)
8953 ifmr->ifm_active |= IFM_FDX;
8954 else
8955 ifmr->ifm_active |= IFM_HDX;
8956 mii->mii_media_active &= ~IFM_ETH_FMASK;
8957 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
8958 /* Check flow */
8959 reg = CSR_READ(sc, WMREG_PCS_LSTS);
8960 if ((reg & PCS_LSTS_AN_COMP) == 0) {
8961 printf("XXX LINKOK but not ACOMP\n");
8962 goto setled;
8963 }
8964 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
8965 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
8966 printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
8967 if ((pcs_adv & TXCW_SYM_PAUSE)
8968 && (pcs_lpab & TXCW_SYM_PAUSE)) {
8969 mii->mii_media_active |= IFM_FLOW
8970 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
8971 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
8972 && (pcs_adv & TXCW_ASYM_PAUSE)
8973 && (pcs_lpab & TXCW_SYM_PAUSE)
8974 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8975 mii->mii_media_active |= IFM_FLOW
8976 | IFM_ETH_TXPAUSE;
8977 } else if ((pcs_adv & TXCW_SYM_PAUSE)
8978 && (pcs_adv & TXCW_ASYM_PAUSE)
8979 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
8980 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
8981 mii->mii_media_active |= IFM_FLOW
8982 | IFM_ETH_RXPAUSE;
8983 } else {
8984 }
8985 }
8986 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8987 | (mii->mii_media_active & IFM_ETH_FMASK);
8988 setled:
8989 wm_tbi_serdes_set_linkled(sc);
8990 }
8991
8992 /*
8993 * wm_serdes_tick:
8994 *
8995 * Check the link on serdes devices.
8996 */
8997 static void
8998 wm_serdes_tick(struct wm_softc *sc)
8999 {
9000 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9001 struct mii_data *mii = &sc->sc_mii;
9002 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9003 uint32_t reg;
9004
9005 KASSERT(WM_TX_LOCKED(sc));
9006
9007 mii->mii_media_status = IFM_AVALID;
9008 mii->mii_media_active = IFM_ETHER;
9009
9010 /* Check PCS */
9011 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9012 if ((reg & PCS_LSTS_LINKOK) != 0) {
9013 mii->mii_media_status |= IFM_ACTIVE;
9014 sc->sc_tbi_linkup = 1;
9015 sc->sc_tbi_serdes_ticks = 0;
9016 mii->mii_media_active |= IFM_1000_SX; /* XXX */
9017 if ((reg & PCS_LSTS_FDX) != 0)
9018 mii->mii_media_active |= IFM_FDX;
9019 else
9020 mii->mii_media_active |= IFM_HDX;
9021 } else {
9022 mii->mii_media_status |= IFM_NONE;
9023 sc->sc_tbi_linkup = 0;
9024 /* If the timer expired, retry autonegotiation */
9025 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9026 && (++sc->sc_tbi_serdes_ticks
9027 >= sc->sc_tbi_serdes_anegticks)) {
9028 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9029 sc->sc_tbi_serdes_ticks = 0;
9030 /* XXX */
9031 wm_serdes_mediachange(ifp);
9032 }
9033 }
9034
9035 wm_tbi_serdes_set_linkled(sc);
9036 }
9037
9038 /* SFP related */
9039
9040 static int
9041 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
9042 {
9043 uint32_t i2ccmd;
9044 int i;
9045
9046 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
9047 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9048
9049 /* Poll the ready bit */
9050 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9051 delay(50);
9052 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9053 if (i2ccmd & I2CCMD_READY)
9054 break;
9055 }
9056 if ((i2ccmd & I2CCMD_READY) == 0)
9057 return -1;
9058 if ((i2ccmd & I2CCMD_ERROR) != 0)
9059 return -1;
9060
9061 *data = i2ccmd & 0x00ff;
9062
9063 return 0;
9064 }
9065
9066 static uint32_t
9067 wm_sfp_get_media_type(struct wm_softc *sc)
9068 {
9069 uint32_t ctrl_ext;
9070 uint8_t val = 0;
9071 int timeout = 3;
9072 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
9073 int rv = -1;
9074
9075 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9076 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
9077 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
9078 CSR_WRITE_FLUSH(sc);
9079
9080 /* Read SFP module data */
9081 while (timeout) {
9082 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
9083 if (rv == 0)
9084 break;
9085 delay(100*1000); /* XXX too big */
9086 timeout--;
9087 }
9088 if (rv != 0)
9089 goto out;
9090 switch (val) {
9091 case SFF_SFP_ID_SFF:
9092 aprint_normal_dev(sc->sc_dev,
9093 "Module/Connector soldered to board\n");
9094 break;
9095 case SFF_SFP_ID_SFP:
9096 aprint_normal_dev(sc->sc_dev, "SFP\n");
9097 break;
9098 case SFF_SFP_ID_UNKNOWN:
9099 goto out;
9100 default:
9101 break;
9102 }
9103
9104 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
9105 if (rv != 0) {
9106 goto out;
9107 }
9108
9109 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
9110 mediatype = WM_MEDIATYPE_SERDES;
9111 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
9112 		sc->sc_flags |= WM_F_SGMII;
9113 		mediatype = WM_MEDIATYPE_COPPER;
9114 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
9115 sc->sc_flags |= WM_F_SGMII;
9116 mediatype = WM_MEDIATYPE_SERDES;
9117 }
9118
9119 out:
9120 /* Restore I2C interface setting */
9121 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9122
9123 return mediatype;
9124 }
9125 /*
9126 * NVM related.
9127 * Microwire, SPI (w/wo EERD) and Flash.
9128 */
9129
9130 /* Both spi and uwire */
9131
9132 /*
9133 * wm_eeprom_sendbits:
9134 *
9135 * Send a series of bits to the EEPROM.
9136 */
9137 static void
9138 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
9139 {
9140 uint32_t reg;
9141 int x;
9142
9143 reg = CSR_READ(sc, WMREG_EECD);
9144
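	/*
	 * Present each bit on DI, most significant bit first, and latch
	 * it into the part with a low-high-low pulse on SK.
	 */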
9145 for (x = nbits; x > 0; x--) {
9146 if (bits & (1U << (x - 1)))
9147 reg |= EECD_DI;
9148 else
9149 reg &= ~EECD_DI;
9150 CSR_WRITE(sc, WMREG_EECD, reg);
9151 CSR_WRITE_FLUSH(sc);
9152 delay(2);
9153 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9154 CSR_WRITE_FLUSH(sc);
9155 delay(2);
9156 CSR_WRITE(sc, WMREG_EECD, reg);
9157 CSR_WRITE_FLUSH(sc);
9158 delay(2);
9159 }
9160 }
9161
9162 /*
9163 * wm_eeprom_recvbits:
9164 *
9165 * Receive a series of bits from the EEPROM.
9166 */
9167 static void
9168 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
9169 {
9170 uint32_t reg, val;
9171 int x;
9172
9173 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
9174
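	/*
	 * For each bit, raise SK, sample DO while the clock is high,
	 * then drop SK again; bits arrive most significant first.
	 */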
9175 val = 0;
9176 for (x = nbits; x > 0; x--) {
9177 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9178 CSR_WRITE_FLUSH(sc);
9179 delay(2);
9180 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
9181 val |= (1U << (x - 1));
9182 CSR_WRITE(sc, WMREG_EECD, reg);
9183 CSR_WRITE_FLUSH(sc);
9184 delay(2);
9185 }
9186 *valp = val;
9187 }
9188
9189 /* Microwire */
9190
9191 /*
9192 * wm_nvm_read_uwire:
9193 *
9194 * Read a word from the EEPROM using the MicroWire protocol.
9195 */
9196 static int
9197 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9198 {
9199 uint32_t reg, val;
9200 int i;
9201
9202 for (i = 0; i < wordcnt; i++) {
9203 /* Clear SK and DI. */
9204 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
9205 CSR_WRITE(sc, WMREG_EECD, reg);
9206
9207 /*
9208 * XXX: workaround for a bug in qemu-0.12.x and prior
9209 * and Xen.
9210 *
9211 		 * We use this workaround only for the 82540 because qemu's
9212 		 * e1000 acts as an 82540.
9213 */
9214 if (sc->sc_type == WM_T_82540) {
9215 reg |= EECD_SK;
9216 CSR_WRITE(sc, WMREG_EECD, reg);
9217 reg &= ~EECD_SK;
9218 CSR_WRITE(sc, WMREG_EECD, reg);
9219 CSR_WRITE_FLUSH(sc);
9220 delay(2);
9221 }
9222 /* XXX: end of workaround */
9223
9224 /* Set CHIP SELECT. */
9225 reg |= EECD_CS;
9226 CSR_WRITE(sc, WMREG_EECD, reg);
9227 CSR_WRITE_FLUSH(sc);
9228 delay(2);
9229
9230 /* Shift in the READ command. */
9231 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
9232
9233 /* Shift in address. */
9234 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
9235
9236 /* Shift out the data. */
9237 wm_eeprom_recvbits(sc, &val, 16);
9238 data[i] = val & 0xffff;
9239
9240 /* Clear CHIP SELECT. */
9241 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
9242 CSR_WRITE(sc, WMREG_EECD, reg);
9243 CSR_WRITE_FLUSH(sc);
9244 delay(2);
9245 }
9246
9247 return 0;
9248 }
9249
9250 /* SPI */
9251
9252 /*
9253 * Set SPI and FLASH related information from the EECD register.
9254 * For 82541 and 82547, the word size is taken from EEPROM.
9255 */
9256 static int
9257 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
9258 {
9259 int size;
9260 uint32_t reg;
9261 uint16_t data;
9262
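	/*
	 * EECD's size field encodes the NVM word count as a power of
	 * two; the per-chip cases below adjust or clamp the exponent
	 * before it is expanded into sc_nvm_wordsize.
	 */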
9263 reg = CSR_READ(sc, WMREG_EECD);
9264 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
9265
9266 /* Read the size of NVM from EECD by default */
9267 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9268 switch (sc->sc_type) {
9269 case WM_T_82541:
9270 case WM_T_82541_2:
9271 case WM_T_82547:
9272 case WM_T_82547_2:
9273 /* Set dummy value to access EEPROM */
9274 sc->sc_nvm_wordsize = 64;
9275 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
9276 reg = data;
9277 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9278 if (size == 0)
9279 size = 6; /* 64 word size */
9280 else
9281 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
9282 break;
9283 case WM_T_80003:
9284 case WM_T_82571:
9285 case WM_T_82572:
9286 case WM_T_82573: /* SPI case */
9287 case WM_T_82574: /* SPI case */
9288 case WM_T_82583: /* SPI case */
9289 size += NVM_WORD_SIZE_BASE_SHIFT;
9290 if (size > 14)
9291 size = 14;
9292 break;
9293 case WM_T_82575:
9294 case WM_T_82576:
9295 case WM_T_82580:
9296 case WM_T_I350:
9297 case WM_T_I354:
9298 case WM_T_I210:
9299 case WM_T_I211:
9300 size += NVM_WORD_SIZE_BASE_SHIFT;
9301 if (size > 15)
9302 size = 15;
9303 break;
9304 default:
9305 aprint_error_dev(sc->sc_dev,
9306 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
9307 		return -1;
9309 }
9310
9311 sc->sc_nvm_wordsize = 1 << size;
9312
9313 return 0;
9314 }
9315
9316 /*
9317 * wm_nvm_ready_spi:
9318 *
9319 * Wait for a SPI EEPROM to be ready for commands.
9320 */
9321 static int
9322 wm_nvm_ready_spi(struct wm_softc *sc)
9323 {
9324 uint32_t val;
9325 int usec;
9326
9327 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
9328 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
9329 wm_eeprom_recvbits(sc, &val, 8);
9330 if ((val & SPI_SR_RDY) == 0)
9331 break;
9332 }
9333 if (usec >= SPI_MAX_RETRIES) {
9334 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
9335 return 1;
9336 }
9337 return 0;
9338 }
9339
9340 /*
9341 * wm_nvm_read_spi:
9342 *
9343  * Read a word from the EEPROM using the SPI protocol.
9344 */
9345 static int
9346 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9347 {
9348 uint32_t reg, val;
9349 int i;
9350 uint8_t opc;
9351
9352 /* Clear SK and CS. */
9353 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
9354 CSR_WRITE(sc, WMREG_EECD, reg);
9355 CSR_WRITE_FLUSH(sc);
9356 delay(2);
9357
9358 if (wm_nvm_ready_spi(sc))
9359 return 1;
9360
9361 /* Toggle CS to flush commands. */
9362 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
9363 CSR_WRITE_FLUSH(sc);
9364 delay(2);
9365 CSR_WRITE(sc, WMREG_EECD, reg);
9366 CSR_WRITE_FLUSH(sc);
9367 delay(2);
9368
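	/*
	 * Shift out the READ opcode (with the A8 address bit folded in
	 * for 8-bit-address parts reading above word 127) followed by
	 * the byte address: word << 1, since the part is byte-addressed.
	 */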
9369 opc = SPI_OPC_READ;
9370 if (sc->sc_nvm_addrbits == 8 && word >= 128)
9371 opc |= SPI_OPC_A8;
9372
9373 wm_eeprom_sendbits(sc, opc, 8);
9374 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
9375
9376 for (i = 0; i < wordcnt; i++) {
9377 wm_eeprom_recvbits(sc, &val, 16);
9378 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
9379 }
9380
9381 /* Raise CS and clear SK. */
9382 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
9383 CSR_WRITE(sc, WMREG_EECD, reg);
9384 CSR_WRITE_FLUSH(sc);
9385 delay(2);
9386
9387 return 0;
9388 }
9389
9390 /* Using with EERD */
9391
9392 static int
9393 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
9394 {
9395 uint32_t attempts = 100000;
9396 uint32_t i, reg = 0;
9397 int32_t done = -1;
9398
9399 for (i = 0; i < attempts; i++) {
9400 reg = CSR_READ(sc, rw);
9401
9402 if (reg & EERD_DONE) {
9403 done = 0;
9404 break;
9405 }
9406 delay(5);
9407 }
9408
9409 return done;
9410 }
9411
9412 static int
9413 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
9414 uint16_t *data)
9415 {
9416 int i, eerd = 0;
9417 int error = 0;
9418
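	/*
	 * The EERD register runs the NVM read in hardware: write the
	 * word address together with the START bit, poll for DONE and
	 * then take the 16 data bits from the top of the register.
	 */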
9419 for (i = 0; i < wordcnt; i++) {
9420 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
9421
9422 CSR_WRITE(sc, WMREG_EERD, eerd);
9423 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
9424 if (error != 0)
9425 break;
9426
9427 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
9428 }
9429
9430 return error;
9431 }
9432
9433 /* Flash */
9434
9435 static int
9436 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
9437 {
9438 uint32_t eecd;
9439 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
9440 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
9441 uint8_t sig_byte = 0;
9442
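	/*
	 * Each flash bank carries a signature byte at a fixed offset;
	 * the bank whose signature matches ICH_NVM_SIG_VALUE holds the
	 * valid NVM image.
	 */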
9443 switch (sc->sc_type) {
9444 case WM_T_ICH8:
9445 case WM_T_ICH9:
9446 eecd = CSR_READ(sc, WMREG_EECD);
9447 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
9448 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
9449 return 0;
9450 }
9451 /* FALLTHROUGH */
9452 default:
9453 /* Default to 0 */
9454 *bank = 0;
9455
9456 /* Check bank 0 */
9457 wm_read_ich8_byte(sc, act_offset, &sig_byte);
9458 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9459 *bank = 0;
9460 return 0;
9461 }
9462
9463 /* Check bank 1 */
9464 wm_read_ich8_byte(sc, act_offset + bank1_offset,
9465 &sig_byte);
9466 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9467 *bank = 1;
9468 return 0;
9469 }
9470 }
9471
9472 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
9473 device_xname(sc->sc_dev)));
9474 return -1;
9475 }
9476
9477 /******************************************************************************
9478 * This function does initial flash setup so that a new read/write/erase cycle
9479 * can be started.
9480 *
9481 * sc - The pointer to the hw structure
9482 ****************************************************************************/
9483 static int32_t
9484 wm_ich8_cycle_init(struct wm_softc *sc)
9485 {
9486 uint16_t hsfsts;
9487 int32_t error = 1;
9488 int32_t i = 0;
9489
9490 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9491
9492 	/* Maybe we should check the Flash Descriptor Valid bit in HW status */
9493 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
9494 return error;
9495 }
9496
9497 	/* Clear FCERR and DAEL in HW status by writing 1s */
9499 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
9500
9501 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9502
9503 /*
9504 	 * Either we should have a hardware SPI cycle-in-progress bit to
9505 	 * check against in order to start a new cycle, or the FDONE bit
9506 	 * should be changed in the hardware so that it is 1 after a hardware
9507 	 * reset, which can then be used as an indication of whether a cycle
9508 	 * is in progress or has been completed.  We should also have some
9509 	 * software semaphore mechanism to guard FDONE or the cycle-in-
9510 	 * progress bit so that two threads' accesses to those bits are
9511 	 * serialized, and so that two threads don't start a cycle at once.
9512 */
9513
9514 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
9515 /*
9516 * There is no cycle running at present, so we can start a
9517 * cycle
9518 */
9519
9520 /* Begin by setting Flash Cycle Done. */
9521 hsfsts |= HSFSTS_DONE;
9522 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9523 error = 0;
9524 } else {
9525 /*
9526 		 * Otherwise poll for some time so the current cycle has a
9527 * chance to end before giving up.
9528 */
9529 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
9530 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9531 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
9532 error = 0;
9533 break;
9534 }
9535 delay(1);
9536 }
9537 if (error == 0) {
9538 /*
9539 			 * The previous cycle completed within the timeout;
9540 			 * now set the Flash Cycle Done bit.
9541 */
9542 hsfsts |= HSFSTS_DONE;
9543 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
9544 }
9545 }
9546 return error;
9547 }
9548
9549 /******************************************************************************
9550 * This function starts a flash cycle and waits for its completion
9551 *
9552 * sc - The pointer to the hw structure
9553 ****************************************************************************/
9554 static int32_t
9555 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
9556 {
9557 uint16_t hsflctl;
9558 uint16_t hsfsts;
9559 int32_t error = 1;
9560 uint32_t i = 0;
9561
9562 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
9563 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
9564 hsflctl |= HSFCTL_GO;
9565 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
9566
9567 /* Wait till FDONE bit is set to 1 */
9568 do {
9569 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9570 if (hsfsts & HSFSTS_DONE)
9571 break;
9572 delay(1);
9573 i++;
9574 } while (i < timeout);
9575 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
9576 error = 0;
9577
9578 return error;
9579 }
9580
9581 /******************************************************************************
9582 * Reads a byte or word from the NVM using the ICH8 flash access registers.
9583 *
9584 * sc - The pointer to the hw structure
9585 * index - The index of the byte or word to read.
9586 * size - Size of data to read, 1=byte 2=word
9587 * data - Pointer to the word to store the value read.
9588 *****************************************************************************/
9589 static int32_t
9590 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
9591 uint32_t size, uint16_t *data)
9592 {
9593 uint16_t hsfsts;
9594 uint16_t hsflctl;
9595 uint32_t flash_linear_address;
9596 uint32_t flash_data = 0;
9597 int32_t error = 1;
9598 int32_t count = 0;
9599
	if (size < 1 || size > 2 || data == NULL ||
9601 index > ICH_FLASH_LINEAR_ADDR_MASK)
9602 return error;
9603
9604 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
9605 sc->sc_ich8_flash_base;
9606
9607 do {
9608 delay(1);
9609 /* Steps */
9610 error = wm_ich8_cycle_init(sc);
9611 if (error)
9612 break;
9613
9614 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
9615 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
9616 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
9617 & HSFCTL_BCOUNT_MASK;
9618 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
9619 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
9620
9621 /*
9622 * Write the last 24 bits of index into Flash Linear address
9623 * field in Flash Address
9624 */
		/* TODO: maybe check the index against the size of the flash */
9626
9627 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
9628
9629 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
9630
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read the result from Flash
		 * Data0, least significant byte first.
		 */
9637 if (error == 0) {
9638 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
9639 if (size == 1)
9640 *data = (uint8_t)(flash_data & 0x000000FF);
9641 else if (size == 2)
9642 *data = (uint16_t)(flash_data & 0x0000FFFF);
9643 break;
9644 } else {
			/*
			 * If we've gotten here, things are probably
			 * completely hosed, but if the error condition is
			 * detected it won't hurt to retry, up to
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
9651 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
9652 if (hsfsts & HSFSTS_ERR) {
9653 /* Repeat for some time before giving up. */
9654 continue;
9655 } else if ((hsfsts & HSFSTS_DONE) == 0)
9656 break;
9657 }
9658 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
9659
9660 return error;
9661 }
9662
9663 /******************************************************************************
9664 * Reads a single byte from the NVM using the ICH8 flash access registers.
9665 *
9666 * sc - pointer to wm_hw structure
9667 * index - The index of the byte to read.
9668 * data - Pointer to a byte to store the value read.
9669 *****************************************************************************/
9670 static int32_t
9671 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
9672 {
9673 int32_t status;
9674 uint16_t word = 0;
9675
9676 status = wm_read_ich8_data(sc, index, 1, &word);
9677 if (status == 0)
9678 *data = (uint8_t)word;
9679 else
9680 *data = 0;
9681
9682 return status;
9683 }
9684
9685 /******************************************************************************
9686 * Reads a word from the NVM using the ICH8 flash access registers.
9687 *
9688 * sc - pointer to wm_hw structure
9689 * index - The starting byte index of the word to read.
9690 * data - Pointer to a word to store the value read.
9691 *****************************************************************************/
9692 static int32_t
9693 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
9694 {
9695 int32_t status;
9696
9697 status = wm_read_ich8_data(sc, index, 2, data);
9698 return status;
9699 }
9700
9701 /******************************************************************************
9702 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
9703 * register.
9704 *
9705 * sc - Struct containing variables accessed by shared code
9706 * offset - offset of word in the EEPROM to read
9707 * data - word read from the EEPROM
9708 * words - number of words to read
9709 *****************************************************************************/
9710 static int
9711 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
9712 {
9713 int32_t error = 0;
9714 uint32_t flash_bank = 0;
9715 uint32_t act_offset = 0;
9716 uint32_t bank_offset = 0;
9717 uint16_t word = 0;
9718 uint16_t i = 0;
9719
	/*
	 * We need to know which flash bank is valid.  In the event
	 * that we didn't allocate eeprom_shadow_ram, we may not be
	 * managing flash_bank, so it cannot be trusted and needs
	 * to be updated with each read.
	 */
9726 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
9727 if (error) {
9728 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
9729 device_xname(sc->sc_dev)));
9730 flash_bank = 0;
9731 }
9732
	/*
	 * Adjust the offset appropriately if we're on bank 1, accounting
	 * for the word size.
	 */
9737 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
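	/*
	 * Worked example (hypothetical numbers): with a bank size of
	 * 0x1000 words, bank 1 starts at byte offset 0x1000 * 2 = 0x2000,
	 * and word 0x10 within it is read from byte 0x2000 + 0x10 * 2.
	 */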
9738
9739 error = wm_get_swfwhw_semaphore(sc);
9740 if (error) {
9741 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9742 __func__);
9743 return error;
9744 }
9745
9746 for (i = 0; i < words; i++) {
9747 /* The NVM part needs a byte offset, hence * 2 */
9748 act_offset = bank_offset + ((offset + i) * 2);
9749 error = wm_read_ich8_word(sc, act_offset, &word);
9750 if (error) {
9751 aprint_error_dev(sc->sc_dev,
9752 "%s: failed to read NVM\n", __func__);
9753 break;
9754 }
9755 data[i] = word;
9756 }
9757
9758 wm_put_swfwhw_semaphore(sc);
9759 return error;
9760 }
9761
9762 /* iNVM */
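
/*
 * Each iNVM dword packs a record type in its low bits and, for word
 * autoload records, a word address plus a 16-bit payload; the
 * INVM_DWORD_TO_* macros extract those fields.  CSR and RSA key
 * autoload records are skipped over using their fixed sizes.
 */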
9763
9764 static int
9765 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
9766 {
	int32_t rv = -1;	/* Assume the word won't be found */
9768 uint32_t invm_dword;
9769 uint16_t i;
9770 uint8_t record_type, word_address;
9771
9772 for (i = 0; i < INVM_SIZE; i++) {
9773 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
9774 /* Get record type */
9775 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
9776 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
9777 break;
9778 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
9779 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
9780 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
9781 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
9782 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
9783 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
9784 if (word_address == address) {
9785 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
9786 rv = 0;
9787 break;
9788 }
9789 }
9790 }
9791
9792 return rv;
9793 }
9794
9795 static int
9796 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
9797 {
9798 int rv = 0;
9799 int i;
9800
9801 for (i = 0; i < words; i++) {
9802 switch (offset + i) {
9803 case NVM_OFF_MACADDR:
9804 case NVM_OFF_MACADDR1:
9805 case NVM_OFF_MACADDR2:
9806 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
9807 if (rv != 0) {
9808 data[i] = 0xffff;
9809 rv = -1;
9810 }
9811 break;
9812 case NVM_OFF_CFG2:
9813 rv = wm_nvm_read_word_invm(sc, offset, data);
9814 if (rv != 0) {
9815 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
9816 rv = 0;
9817 }
9818 break;
9819 case NVM_OFF_CFG4:
9820 rv = wm_nvm_read_word_invm(sc, offset, data);
9821 if (rv != 0) {
9822 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
9823 rv = 0;
9824 }
9825 break;
9826 case NVM_OFF_LED_1_CFG:
9827 rv = wm_nvm_read_word_invm(sc, offset, data);
9828 if (rv != 0) {
9829 *data = NVM_LED_1_CFG_DEFAULT_I211;
9830 rv = 0;
9831 }
9832 break;
9833 case NVM_OFF_LED_0_2_CFG:
9834 rv = wm_nvm_read_word_invm(sc, offset, data);
9835 if (rv != 0) {
9836 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
9837 rv = 0;
9838 }
9839 break;
9840 case NVM_OFF_ID_LED_SETTINGS:
9841 rv = wm_nvm_read_word_invm(sc, offset, data);
9842 if (rv != 0) {
9843 *data = ID_LED_RESERVED_FFFF;
9844 rv = 0;
9845 }
9846 break;
9847 default:
9848 DPRINTF(WM_DEBUG_NVM,
			    ("NVM word 0x%02x is not mapped.\n", offset + i));
9850 *data = NVM_RESERVED_WORD;
9851 break;
9852 }
9853 }
9854
9855 return rv;
9856 }
9857
/* Locking, NVM type detection, checksum validation, version and read */
9859
9860 /*
9861 * wm_nvm_acquire:
9862 *
9863 * Perform the EEPROM handshake required on some chips.
9864 */
9865 static int
9866 wm_nvm_acquire(struct wm_softc *sc)
9867 {
9868 uint32_t reg;
9869 int x;
9870 int ret = 0;
9871
	/* Flash-based parts need no handshake here; always succeed. */
9873 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9874 return 0;
9875
9876 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
9877 ret = wm_get_swfwhw_semaphore(sc);
9878 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
9879 /* This will also do wm_get_swsm_semaphore() if needed */
9880 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
9881 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
9882 ret = wm_get_swsm_semaphore(sc);
9883 }
9884
9885 if (ret) {
9886 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9887 __func__);
9888 return 1;
9889 }
9890
9891 if (sc->sc_flags & WM_F_LOCK_EECD) {
9892 reg = CSR_READ(sc, WMREG_EECD);
9893
9894 /* Request EEPROM access. */
9895 reg |= EECD_EE_REQ;
9896 CSR_WRITE(sc, WMREG_EECD, reg);
9897
9898 /* ..and wait for it to be granted. */
9899 for (x = 0; x < 1000; x++) {
9900 reg = CSR_READ(sc, WMREG_EECD);
9901 if (reg & EECD_EE_GNT)
9902 break;
9903 delay(5);
9904 }
9905 if ((reg & EECD_EE_GNT) == 0) {
9906 aprint_error_dev(sc->sc_dev,
9907 "could not acquire EEPROM GNT\n");
9908 reg &= ~EECD_EE_REQ;
9909 CSR_WRITE(sc, WMREG_EECD, reg);
9910 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9911 wm_put_swfwhw_semaphore(sc);
9912 if (sc->sc_flags & WM_F_LOCK_SWFW)
9913 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9914 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9915 wm_put_swsm_semaphore(sc);
9916 return 1;
9917 }
9918 }
9919
9920 return 0;
9921 }
9922
9923 /*
9924 * wm_nvm_release:
9925 *
9926 * Release the EEPROM mutex.
9927 */
9928 static void
9929 wm_nvm_release(struct wm_softc *sc)
9930 {
9931 uint32_t reg;
9932
	/* Flash-based parts need no handshake here; nothing to release. */
9934 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
9935 return;
9936
9937 if (sc->sc_flags & WM_F_LOCK_EECD) {
9938 reg = CSR_READ(sc, WMREG_EECD);
9939 reg &= ~EECD_EE_REQ;
9940 CSR_WRITE(sc, WMREG_EECD, reg);
9941 }
9942
9943 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9944 wm_put_swfwhw_semaphore(sc);
9945 if (sc->sc_flags & WM_F_LOCK_SWFW)
9946 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
9947 else if (sc->sc_flags & WM_F_LOCK_SWSM)
9948 wm_put_swsm_semaphore(sc);
9949 }
9950
9951 static int
9952 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
9953 {
9954 uint32_t eecd = 0;
9955
9956 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
9957 || sc->sc_type == WM_T_82583) {
9958 eecd = CSR_READ(sc, WMREG_EECD);
9959
9960 /* Isolate bits 15 & 16 */
9961 eecd = ((eecd >> 15) & 0x03);
9962
9963 /* If both bits are set, device is Flash type */
9964 if (eecd == 0x03)
9965 return 0;
9966 }
9967 return 1;
9968 }
9969
9970 static int
9971 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
9972 {
9973 uint32_t eec;
9974
9975 eec = CSR_READ(sc, WMREG_EEC);
9976 if ((eec & EEC_FLASH_DETECTED) != 0)
9977 return 1;
9978
9979 return 0;
9980 }
9981
/*
 * wm_nvm_validate_checksum
 *
 * The checksum is defined such that the sum of the first 64 (16 bit)
 * words equals NVM_CHECKSUM (0xBABA).
 */
9987 static int
9988 wm_nvm_validate_checksum(struct wm_softc *sc)
9989 {
9990 uint16_t checksum;
9991 uint16_t eeprom_data;
9992 #ifdef WM_DEBUG
9993 uint16_t csum_wordaddr, valid_checksum;
9994 #endif
9995 int i;
9996
9997 checksum = 0;
9998
9999 /* Don't check for I211 */
10000 if (sc->sc_type == WM_T_I211)
10001 return 0;
10002
10003 #ifdef WM_DEBUG
10004 if (sc->sc_type == WM_T_PCH_LPT) {
10005 csum_wordaddr = NVM_OFF_COMPAT;
10006 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
10007 } else {
10008 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
10009 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
10010 }
10011
10012 /* Dump EEPROM image for debug */
10013 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10014 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10015 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
10016 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
10017 if ((eeprom_data & valid_checksum) == 0) {
10018 DPRINTF(WM_DEBUG_NVM,
			    ("%s: NVM needs to be updated (%04x != %04x)\n",
10020 device_xname(sc->sc_dev), eeprom_data,
10021 valid_checksum));
10022 }
10023 }
10024
10025 if ((wm_debug & WM_DEBUG_NVM) != 0) {
10026 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
10027 for (i = 0; i < NVM_SIZE; i++) {
10028 if (wm_nvm_read(sc, i, 1, &eeprom_data))
10029 printf("XXXX ");
10030 else
10031 printf("%04hx ", eeprom_data);
10032 if (i % 8 == 7)
10033 printf("\n");
10034 }
10035 }
10036
10037 #endif /* WM_DEBUG */
10038
10039 for (i = 0; i < NVM_SIZE; i++) {
10040 if (wm_nvm_read(sc, i, 1, &eeprom_data))
10041 return 1;
10042 checksum += eeprom_data;
10043 }
10044
10045 if (checksum != (uint16_t) NVM_CHECKSUM) {
10046 #ifdef WM_DEBUG
10047 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
10048 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
10049 #endif
10050 }
10051
10052 return 0;
10053 }
10054
10055 static void
10056 wm_nvm_version_invm(struct wm_softc *sc)
10057 {
10058 uint32_t dword;
10059
10060 /*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and instead just use word 61 as the
	 * document describes.  Perhaps it's not perfect, though...
10064 *
10065 * Example:
10066 *
10067 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
10068 */
10069 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
10070 dword = __SHIFTOUT(dword, INVM_VER_1);
10071 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
10072 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
10073 }
10074
10075 static void
10076 wm_nvm_version(struct wm_softc *sc)
10077 {
10078 uint16_t major, minor, build, patch;
10079 uint16_t uid0, uid1;
10080 uint16_t nvm_data;
10081 uint16_t off;
10082 bool check_version = false;
10083 bool check_optionrom = false;
10084 bool have_build = false;
10085
10086 /*
10087 * Version format:
10088 *
10089 * XYYZ
10090 * X0YZ
10091 * X0YY
10092 *
10093 * Example:
10094 *
10095 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
10096 * 82571 0x50a6 5.10.6?
10097 * 82572 0x506a 5.6.10?
10098 * 82572EI 0x5069 5.6.9?
10099 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
10100 * 0x2013 2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
10102 */
10103 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
10104 switch (sc->sc_type) {
10105 case WM_T_82571:
10106 case WM_T_82572:
10107 case WM_T_82574:
10108 case WM_T_82583:
10109 check_version = true;
10110 check_optionrom = true;
10111 have_build = true;
10112 break;
10113 case WM_T_82575:
10114 case WM_T_82576:
10115 case WM_T_82580:
10116 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
10117 check_version = true;
10118 break;
10119 case WM_T_I211:
10120 wm_nvm_version_invm(sc);
10121 goto printver;
10122 case WM_T_I210:
10123 if (!wm_nvm_get_flash_presence_i210(sc)) {
10124 wm_nvm_version_invm(sc);
10125 goto printver;
10126 }
10127 /* FALLTHROUGH */
10128 case WM_T_I350:
10129 case WM_T_I354:
10130 check_version = true;
10131 check_optionrom = true;
10132 break;
10133 default:
10134 return;
10135 }
10136 if (check_version) {
10137 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
10138 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
10139 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
10140 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
10141 build = nvm_data & NVM_BUILD_MASK;
10142 have_build = true;
10143 } else
10144 minor = nvm_data & 0x00ff;
10145
		/* Convert BCD-coded minor to decimal (e.g. 0x10 -> 10) */
10147 minor = (minor / 16) * 10 + (minor % 16);
10148 sc->sc_nvm_ver_major = major;
10149 sc->sc_nvm_ver_minor = minor;
10150
10151 printver:
10152 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
10153 sc->sc_nvm_ver_minor);
10154 if (have_build) {
10155 sc->sc_nvm_ver_build = build;
10156 aprint_verbose(".%d", build);
10157 }
10158 }
10159 if (check_optionrom) {
10160 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
10161 /* Option ROM Version */
10162 if ((off != 0x0000) && (off != 0xffff)) {
10163 off += NVM_COMBO_VER_OFF;
10164 wm_nvm_read(sc, off + 1, 1, &uid1);
10165 wm_nvm_read(sc, off, 1, &uid0);
10166 if ((uid0 != 0) && (uid0 != 0xffff)
10167 && (uid1 != 0) && (uid1 != 0xffff)) {
10168 /* 16bits */
10169 major = uid0 >> 8;
10170 build = (uid0 << 8) | (uid1 >> 8);
10171 patch = uid1 & 0x00ff;
10172 aprint_verbose(", option ROM Version %d.%d.%d",
10173 major, build, patch);
10174 }
10175 }
10176 }
10177
	/* Re-read UID1; the option ROM code above may have clobbered it */
	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
10179 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
10180 }
10181
10182 /*
10183 * wm_nvm_read:
10184 *
10185 * Read data from the serial EEPROM.
10186 */
10187 static int
10188 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10189 {
10190 int rv;
10191
10192 if (sc->sc_flags & WM_F_EEPROM_INVALID)
10193 return 1;
10194
10195 if (wm_nvm_acquire(sc))
10196 return 1;
10197
10198 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10199 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10200 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
10201 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
10202 else if (sc->sc_flags & WM_F_EEPROM_INVM)
10203 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
10204 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
10205 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
10206 else if (sc->sc_flags & WM_F_EEPROM_SPI)
10207 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
10208 else
10209 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
10210
10211 wm_nvm_release(sc);
10212 return rv;
10213 }
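
/*
 * Illustrative (non-compiled) caller: fetching the three NVM words
 * that hold the station address:
 */
#if 0
	uint16_t myea[3];

	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) == 0) {
		/* myea[0..2] now hold the Ethernet address, word by word */
	}
#endif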
10214
10215 /*
10216 * Hardware semaphores.
 * Very complex...
10218 */
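
/*
 * Per the Intel datasheets, SWSM implements a two-stage handshake:
 * SMBI arbitrates among software agents and SWESMBI arbitrates between
 * software and firmware.  wm_get_swsm_semaphore() takes both, in that
 * order, and wm_put_swsm_semaphore() drops them again.
 */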
10219
10220 static int
10221 wm_get_swsm_semaphore(struct wm_softc *sc)
10222 {
10223 int32_t timeout;
10224 uint32_t swsm;
10225
10226 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10227 /* Get the SW semaphore. */
10228 timeout = sc->sc_nvm_wordsize + 1;
10229 while (timeout) {
10230 swsm = CSR_READ(sc, WMREG_SWSM);
10231
10232 if ((swsm & SWSM_SMBI) == 0)
10233 break;
10234
10235 delay(50);
10236 timeout--;
10237 }
10238
10239 if (timeout == 0) {
10240 aprint_error_dev(sc->sc_dev,
10241 "could not acquire SWSM SMBI\n");
10242 return 1;
10243 }
10244 }
10245
10246 /* Get the FW semaphore. */
10247 timeout = sc->sc_nvm_wordsize + 1;
10248 while (timeout) {
10249 swsm = CSR_READ(sc, WMREG_SWSM);
10250 swsm |= SWSM_SWESMBI;
10251 CSR_WRITE(sc, WMREG_SWSM, swsm);
10252 /* If we managed to set the bit we got the semaphore. */
10253 swsm = CSR_READ(sc, WMREG_SWSM);
10254 if (swsm & SWSM_SWESMBI)
10255 break;
10256
10257 delay(50);
10258 timeout--;
10259 }
10260
10261 if (timeout == 0) {
10262 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
10263 /* Release semaphores */
10264 wm_put_swsm_semaphore(sc);
10265 return 1;
10266 }
10267 return 0;
10268 }
10269
10270 static void
10271 wm_put_swsm_semaphore(struct wm_softc *sc)
10272 {
10273 uint32_t swsm;
10274
10275 swsm = CSR_READ(sc, WMREG_SWSM);
10276 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
10277 CSR_WRITE(sc, WMREG_SWSM, swsm);
10278 }
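
/*
 * SW_FW_SYNC holds paired per-resource ownership bits: for a given
 * resource mask, the software bit is (mask << SWFW_SOFT_SHIFT) and
 * the firmware bit is (mask << SWFW_FIRM_SHIFT).  The resource may
 * only be taken when neither bit is set.
 */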
10279
10280 static int
10281 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10282 {
10283 uint32_t swfw_sync;
10284 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
10285 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
10287
10288 for (timeout = 0; timeout < 200; timeout++) {
10289 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10290 if (wm_get_swsm_semaphore(sc)) {
10291 aprint_error_dev(sc->sc_dev,
10292 "%s: failed to get semaphore\n",
10293 __func__);
10294 return 1;
10295 }
10296 }
10297 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10298 if ((swfw_sync & (swmask | fwmask)) == 0) {
10299 swfw_sync |= swmask;
10300 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10301 if (sc->sc_flags & WM_F_LOCK_SWSM)
10302 wm_put_swsm_semaphore(sc);
10303 return 0;
10304 }
10305 if (sc->sc_flags & WM_F_LOCK_SWSM)
10306 wm_put_swsm_semaphore(sc);
10307 delay(5000);
10308 }
10309 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
10310 device_xname(sc->sc_dev), mask, swfw_sync);
10311 return 1;
10312 }
10313
10314 static void
10315 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10316 {
10317 uint32_t swfw_sync;
10318
10319 if (sc->sc_flags & WM_F_LOCK_SWSM) {
10320 while (wm_get_swsm_semaphore(sc) != 0)
10321 continue;
10322 }
10323 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10324 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
10325 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10326 if (sc->sc_flags & WM_F_LOCK_SWSM)
10327 wm_put_swsm_semaphore(sc);
10328 }
10329
10330 static int
10331 wm_get_swfwhw_semaphore(struct wm_softc *sc)
10332 {
10333 uint32_t ext_ctrl;
	int timeout;
10335
10336 for (timeout = 0; timeout < 200; timeout++) {
10337 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10338 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
10339 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10340
10341 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10342 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
10343 return 0;
10344 delay(5000);
10345 }
10346 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
10347 device_xname(sc->sc_dev), ext_ctrl);
10348 return 1;
10349 }
10350
10351 static void
10352 wm_put_swfwhw_semaphore(struct wm_softc *sc)
10353 {
10354 uint32_t ext_ctrl;
10355 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10356 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10357 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10358 }
10359
10360 static int
10361 wm_get_hw_semaphore_82573(struct wm_softc *sc)
10362 {
10363 int i = 0;
10364 uint32_t reg;
10365
10366 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10367 do {
10368 CSR_WRITE(sc, WMREG_EXTCNFCTR,
10369 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
10370 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10371 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
10372 break;
10373 delay(2*1000);
10374 i++;
10375 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
10376
10377 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
10378 wm_put_hw_semaphore_82573(sc);
10379 log(LOG_ERR, "%s: Driver can't access the PHY\n",
10380 device_xname(sc->sc_dev));
10381 return -1;
10382 }
10383
10384 return 0;
10385 }
10386
10387 static void
10388 wm_put_hw_semaphore_82573(struct wm_softc *sc)
10389 {
10390 uint32_t reg;
10391
10392 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10393 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10394 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10395 }
10396
10397 /*
10398 * Management mode and power management related subroutines.
10399 * BMC, AMT, suspend/resume and EEE.
10400 */
10401
10402 static int
10403 wm_check_mng_mode(struct wm_softc *sc)
10404 {
10405 int rv;
10406
10407 switch (sc->sc_type) {
10408 case WM_T_ICH8:
10409 case WM_T_ICH9:
10410 case WM_T_ICH10:
10411 case WM_T_PCH:
10412 case WM_T_PCH2:
10413 case WM_T_PCH_LPT:
10414 rv = wm_check_mng_mode_ich8lan(sc);
10415 break;
10416 case WM_T_82574:
10417 case WM_T_82583:
10418 rv = wm_check_mng_mode_82574(sc);
10419 break;
10420 case WM_T_82571:
10421 case WM_T_82572:
10422 case WM_T_82573:
10423 case WM_T_80003:
10424 rv = wm_check_mng_mode_generic(sc);
10425 break;
10426 default:
		/* nothing to do */
10428 rv = 0;
10429 break;
10430 }
10431
10432 return rv;
10433 }
10434
10435 static int
10436 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
10437 {
10438 uint32_t fwsm;
10439
10440 fwsm = CSR_READ(sc, WMREG_FWSM);
10441
10442 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
10443 return 1;
10444
10445 return 0;
10446 }
10447
10448 static int
10449 wm_check_mng_mode_82574(struct wm_softc *sc)
10450 {
10451 uint16_t data;
10452
10453 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
10454
10455 if ((data & NVM_CFG2_MNGM_MASK) != 0)
10456 return 1;
10457
10458 return 0;
10459 }
10460
10461 static int
10462 wm_check_mng_mode_generic(struct wm_softc *sc)
10463 {
10464 uint32_t fwsm;
10465
10466 fwsm = CSR_READ(sc, WMREG_FWSM);
10467
10468 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
10469 return 1;
10470
10471 return 0;
10472 }
10473
10474 static int
10475 wm_enable_mng_pass_thru(struct wm_softc *sc)
10476 {
10477 uint32_t manc, fwsm, factps;
10478
10479 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
10480 return 0;
10481
10482 manc = CSR_READ(sc, WMREG_MANC);
10483
10484 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
10485 device_xname(sc->sc_dev), manc));
10486 if ((manc & MANC_RECV_TCO_EN) == 0)
10487 return 0;
10488
10489 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
10490 fwsm = CSR_READ(sc, WMREG_FWSM);
10491 factps = CSR_READ(sc, WMREG_FACTPS);
10492 if (((factps & FACTPS_MNGCG) == 0)
10493 && ((fwsm & FWSM_MODE_MASK)
10494 == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
10495 return 1;
10496 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
10497 uint16_t data;
10498
10499 factps = CSR_READ(sc, WMREG_FACTPS);
10500 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
10501 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
10502 device_xname(sc->sc_dev), factps, data));
10503 if (((factps & FACTPS_MNGCG) == 0)
10504 && ((data & NVM_CFG2_MNGM_MASK)
10505 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
10506 return 1;
10507 } else if (((manc & MANC_SMBUS_EN) != 0)
10508 && ((manc & MANC_ASF_EN) == 0))
10509 return 1;
10510
10511 return 0;
10512 }
10513
10514 static int
10515 wm_check_reset_block(struct wm_softc *sc)
10516 {
10517 uint32_t reg;
10518
10519 switch (sc->sc_type) {
10520 case WM_T_ICH8:
10521 case WM_T_ICH9:
10522 case WM_T_ICH10:
10523 case WM_T_PCH:
10524 case WM_T_PCH2:
10525 case WM_T_PCH_LPT:
10526 reg = CSR_READ(sc, WMREG_FWSM);
10527 if ((reg & FWSM_RSPCIPHY) != 0)
10528 return 0;
10529 else
10530 return -1;
10531 break;
10532 case WM_T_82571:
10533 case WM_T_82572:
10534 case WM_T_82573:
10535 case WM_T_82574:
10536 case WM_T_82583:
10537 case WM_T_80003:
10538 reg = CSR_READ(sc, WMREG_MANC);
10539 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
10540 return -1;
10541 else
10542 return 0;
10543 break;
10544 default:
10545 /* no problem */
10546 break;
10547 }
10548
10549 return 0;
10550 }
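
/*
 * The DRV_LOAD bit (in SWSM on the 82573, in CTRL_EXT elsewhere) tells
 * the management firmware whether a driver currently owns the device;
 * wm_get_hw_control() asserts it and wm_release_hw_control() clears it
 * again.
 */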
10551
10552 static void
10553 wm_get_hw_control(struct wm_softc *sc)
10554 {
10555 uint32_t reg;
10556
10557 switch (sc->sc_type) {
10558 case WM_T_82573:
10559 reg = CSR_READ(sc, WMREG_SWSM);
10560 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
10561 break;
10562 case WM_T_82571:
10563 case WM_T_82572:
10564 case WM_T_82574:
10565 case WM_T_82583:
10566 case WM_T_80003:
10567 case WM_T_ICH8:
10568 case WM_T_ICH9:
10569 case WM_T_ICH10:
10570 case WM_T_PCH:
10571 case WM_T_PCH2:
10572 case WM_T_PCH_LPT:
10573 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10574 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
10575 break;
10576 default:
10577 break;
10578 }
10579 }
10580
10581 static void
10582 wm_release_hw_control(struct wm_softc *sc)
10583 {
10584 uint32_t reg;
10585
10586 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
10587 return;
10588
10589 if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
10593 } else {
10594 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10595 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
10596 }
10597 }
10598
10599 static void
10600 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
10601 {
10602 uint32_t reg;
10603
10604 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10605
10606 if (on != 0)
10607 reg |= EXTCNFCTR_GATE_PHY_CFG;
10608 else
10609 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
10610
10611 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10612 }
10613
10614 static void
10615 wm_smbustopci(struct wm_softc *sc)
10616 {
10617 uint32_t fwsm;
10618
10619 fwsm = CSR_READ(sc, WMREG_FWSM);
10620 if (((fwsm & FWSM_FW_VALID) == 0)
10621 && ((wm_check_reset_block(sc) == 0))) {
10622 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
10623 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
10624 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10625 CSR_WRITE_FLUSH(sc);
10626 delay(10);
10627 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
10628 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10629 CSR_WRITE_FLUSH(sc);
10630 delay(50*1000);
10631
10632 /*
10633 * Gate automatic PHY configuration by hardware on non-managed
10634 * 82579
10635 */
10636 if (sc->sc_type == WM_T_PCH2)
10637 wm_gate_hw_phy_config_ich8lan(sc, 1);
10638 }
10639 }
10640
10641 static void
10642 wm_init_manageability(struct wm_softc *sc)
10643 {
10644
10645 if (sc->sc_flags & WM_F_HAS_MANAGE) {
10646 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
10647 uint32_t manc = CSR_READ(sc, WMREG_MANC);
10648
10649 /* Disable hardware interception of ARP */
10650 manc &= ~MANC_ARP_EN;
10651
10652 /* Enable receiving management packets to the host */
10653 if (sc->sc_type >= WM_T_82571) {
10654 manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
10656 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
10657 }
10658
10659 CSR_WRITE(sc, WMREG_MANC, manc);
10660 }
10661 }
10662
10663 static void
10664 wm_release_manageability(struct wm_softc *sc)
10665 {
10666
10667 if (sc->sc_flags & WM_F_HAS_MANAGE) {
10668 uint32_t manc = CSR_READ(sc, WMREG_MANC);
10669
10670 manc |= MANC_ARP_EN;
10671 if (sc->sc_type >= WM_T_82571)
10672 manc &= ~MANC_EN_MNG2HOST;
10673
10674 CSR_WRITE(sc, WMREG_MANC, manc);
10675 }
10676 }
10677
10678 static void
10679 wm_get_wakeup(struct wm_softc *sc)
10680 {
10681
10682 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
10683 switch (sc->sc_type) {
10684 case WM_T_82573:
10685 case WM_T_82583:
10686 sc->sc_flags |= WM_F_HAS_AMT;
10687 /* FALLTHROUGH */
10688 case WM_T_80003:
10689 case WM_T_82541:
10690 case WM_T_82547:
10691 case WM_T_82571:
10692 case WM_T_82572:
10693 case WM_T_82574:
10694 case WM_T_82575:
10695 case WM_T_82576:
10696 case WM_T_82580:
10697 case WM_T_I350:
10698 case WM_T_I354:
10699 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
10700 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
10701 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
10702 break;
10703 case WM_T_ICH8:
10704 case WM_T_ICH9:
10705 case WM_T_ICH10:
10706 case WM_T_PCH:
10707 case WM_T_PCH2:
10708 case WM_T_PCH_LPT:
10709 sc->sc_flags |= WM_F_HAS_AMT;
10710 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
10711 break;
10712 default:
10713 break;
10714 }
10715
10716 /* 1: HAS_MANAGE */
10717 if (wm_enable_mng_pass_thru(sc) != 0)
10718 sc->sc_flags |= WM_F_HAS_MANAGE;
10719
10720 #ifdef WM_DEBUG
10721 printf("\n");
10722 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
10723 printf("HAS_AMT,");
10724 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
10725 printf("ARC_SUBSYS_VALID,");
10726 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
10727 printf("ASF_FIRMWARE_PRES,");
10728 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
10729 printf("HAS_MANAGE,");
10730 printf("\n");
10731 #endif
	/*
	 * Note that the WOL flags are set later, after the EEPROM
	 * machinery has been reset.
	 */
10736 }
10737
10738 #ifdef WM_WOL
10739 /* WOL in the newer chipset interfaces (pchlan) */
10740 static void
10741 wm_enable_phy_wakeup(struct wm_softc *sc)
10742 {
10743 #if 0
10744 uint16_t preg;
10745
10746 /* Copy MAC RARs to PHY RARs */
10747
10748 /* Copy MAC MTA to PHY MTA */
10749
10750 /* Configure PHY Rx Control register */
10751
10752 /* Enable PHY wakeup in MAC register */
10753
10754 /* Configure and enable PHY wakeup in PHY registers */
10755
10756 /* Activate PHY wakeup */
10757
10758 /* XXX */
10759 #endif
10760 }
10761
10762 /* Power down workaround on D3 */
10763 static void
10764 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
10765 {
10766 uint32_t reg;
10767 int i;
10768
10769 for (i = 0; i < 2; i++) {
10770 /* Disable link */
10771 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10772 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
10773 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10774
10775 /*
10776 * Call gig speed drop workaround on Gig disable before
10777 * accessing any PHY registers
10778 */
10779 if (sc->sc_type == WM_T_ICH8)
10780 wm_gig_downshift_workaround_ich8lan(sc);
10781
10782 /* Write VR power-down enable */
10783 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
10784 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
10785 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
10786 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
10787
10788 /* Read it back and test */
10789 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
10790 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
10791 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
10792 break;
10793
10794 /* Issue PHY reset and repeat at most one more time */
10795 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10796 }
10797 }
10798
10799 static void
10800 wm_enable_wakeup(struct wm_softc *sc)
10801 {
10802 uint32_t reg, pmreg;
10803 pcireg_t pmode;
10804
10805 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
10806 &pmreg, NULL) == 0)
10807 return;
10808
10809 /* Advertise the wakeup capability */
10810 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
10811 | CTRL_SWDPIN(3));
10812 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
10813
10814 /* ICH workaround */
10815 switch (sc->sc_type) {
10816 case WM_T_ICH8:
10817 case WM_T_ICH9:
10818 case WM_T_ICH10:
10819 case WM_T_PCH:
10820 case WM_T_PCH2:
10821 case WM_T_PCH_LPT:
10822 /* Disable gig during WOL */
10823 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10824 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
10825 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10826 if (sc->sc_type == WM_T_PCH)
10827 wm_gmii_reset(sc);
10828
10829 /* Power down workaround */
10830 if (sc->sc_phytype == WMPHY_82577) {
10831 struct mii_softc *child;
10832
10833 /* Assume that the PHY is copper */
10834 child = LIST_FIRST(&sc->sc_mii.mii_phys);
10835 if (child->mii_mpd_rev <= 2)
10836 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
10837 (768 << 5) | 25, 0x0444); /* magic num */
10838 }
10839 break;
10840 default:
10841 break;
10842 }
10843
10844 /* Keep the laser running on fiber adapters */
10845 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
10846 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
10847 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10848 reg |= CTRL_EXT_SWDPIN(3);
10849 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10850 }
10851
10852 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
10853 #if 0 /* for the multicast packet */
10854 reg |= WUFC_MC;
10855 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
10856 #endif
10857
10858 if (sc->sc_type == WM_T_PCH) {
10859 wm_enable_phy_wakeup(sc);
10860 } else {
10861 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
10862 CSR_WRITE(sc, WMREG_WUFC, reg);
10863 }
10864
10865 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10866 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10867 || (sc->sc_type == WM_T_PCH2))
10868 && (sc->sc_phytype == WMPHY_IGP_3))
10869 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
10870
10871 /* Request PME */
10872 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
10873 #if 0
10874 /* Disable WOL */
10875 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
10876 #else
10877 /* For WOL */
10878 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
10879 #endif
10880 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
10881 }
10882 #endif /* WM_WOL */
10883
10884 /* EEE */
10885
10886 static void
10887 wm_set_eee_i350(struct wm_softc *sc)
10888 {
10889 uint32_t ipcnfg, eeer;
10890
10891 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
10892 eeer = CSR_READ(sc, WMREG_EEER);
10893
10894 if ((sc->sc_flags & WM_F_EEE) != 0) {
10895 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
10896 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
10897 | EEER_LPI_FC);
10898 } else {
10899 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
10900 ipcnfg &= ~IPCNFG_10BASE_TE;
10901 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
10902 | EEER_LPI_FC);
10903 }
10904
10905 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
10906 CSR_WRITE(sc, WMREG_EEER, eeer);
10907 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
10908 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
10909 }
10910
10911 /*
10912 * Workarounds (mainly PHY related).
10913 * Basically, PHY's workarounds are in the PHY drivers.
10914 */
10915
10916 /* Work-around for 82566 Kumeran PCS lock loss */
10917 static void
10918 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
10919 {
10920 int miistatus, active, i;
10921 int reg;
10922
10923 miistatus = sc->sc_mii.mii_media_status;
10924
	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
10927 return;
10928
10929 active = sc->sc_mii.mii_media_active;
10930
10931 /* Nothing to do if the link is other than 1Gbps */
10932 if (IFM_SUBTYPE(active) != IFM_1000_T)
10933 return;
10934
10935 for (i = 0; i < 10; i++) {
10936 /* read twice */
10937 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
10938 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out; /* GOOD! No lock loss detected. */
10941
10942 /* Reset the PHY */
10943 wm_gmii_reset(sc);
10944 delay(5*1000);
10945 }
10946
10947 /* Disable GigE link negotiation */
10948 reg = CSR_READ(sc, WMREG_PHY_CTRL);
10949 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
10950 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
10951
10952 /*
10953 * Call gig speed drop workaround on Gig disable before accessing
10954 * any PHY registers.
10955 */
10956 wm_gig_downshift_workaround_ich8lan(sc);
10957
10958 out:
10959 return;
10960 }
10961
10962 /* WOL from S5 stops working */
10963 static void
10964 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
10965 {
10966 uint16_t kmrn_reg;
10967
10968 /* Only for igp3 */
10969 if (sc->sc_phytype == WMPHY_IGP_3) {
10970 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
10971 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
10972 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
10973 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
10974 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
10975 }
10976 }
10977
10978 /*
10979 * Workaround for pch's PHYs
10980 * XXX should be moved to new PHY driver?
10981 */
10982 static void
10983 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
10984 {
10985 if (sc->sc_phytype == WMPHY_82577)
10986 wm_set_mdio_slow_mode_hv(sc);
10987
10988 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
10989
10990 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
10991
10992 /* 82578 */
10993 if (sc->sc_phytype == WMPHY_82578) {
10994 /* PCH rev. < 3 */
10995 if (sc->sc_rev < 3) {
10996 /* XXX 6 bit shift? Why? Is it page2? */
10997 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
10998 0x66c0);
10999 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
11000 0xffff);
11001 }
11002
11003 /* XXX phy rev. < 2 */
11004 }
11005
11006 /* Select page 0 */
11007
11008 /* XXX acquire semaphore */
11009 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
11010 /* XXX release semaphore */
11011
	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is a link, so that K1 is disabled if the link runs at 1Gbps.
	 */
11016 wm_k1_gig_workaround_hv(sc, 1);
11017 }
11018
11019 static void
11020 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
11021 {
11022
11023 wm_set_mdio_slow_mode_hv(sc);
11024 }
11025
11026 static void
11027 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
11028 {
11029 int k1_enable = sc->sc_nvm_k1_enabled;
11030
11031 /* XXX acquire semaphore */
11032
11033 if (link) {
11034 k1_enable = 0;
11035
11036 /* Link stall fix for link up */
11037 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
11038 } else {
11039 /* Link stall fix for link down */
11040 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
11041 }
11042
11043 wm_configure_k1_ich8lan(sc, k1_enable);
11044
11045 /* XXX release semaphore */
11046 }
11047
11048 static void
11049 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
11050 {
11051 uint32_t reg;
11052
11053 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
11054 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
11055 reg | HV_KMRN_MDIO_SLOW);
11056 }
11057
11058 static void
11059 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
11060 {
11061 uint32_t ctrl, ctrl_ext, tmp;
11062 uint16_t kmrn_reg;
11063
11064 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
11065
11066 if (k1_enable)
11067 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
11068 else
11069 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
11070
11071 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
11072
11073 delay(20);
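
	/*
	 * Bounce the MAC speed configuration: briefly force the speed
	 * (with CTRL_EXT_SPD_BYPS asserted) and then restore the original
	 * CTRL/CTRL_EXT values.  This appears to be required for the new
	 * K1 setting to take effect.
	 */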
11074
11075 ctrl = CSR_READ(sc, WMREG_CTRL);
11076 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11077
11078 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
11079 tmp |= CTRL_FRCSPD;
11080
11081 CSR_WRITE(sc, WMREG_CTRL, tmp);
11082 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
11083 CSR_WRITE_FLUSH(sc);
11084 delay(20);
11085
11086 CSR_WRITE(sc, WMREG_CTRL, ctrl);
11087 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11088 CSR_WRITE_FLUSH(sc);
11089 delay(20);
11090 }
11091
/* Special case: the 82575 needs manual init ... */
11093 static void
11094 wm_reset_init_script_82575(struct wm_softc *sc)
11095 {
	/*
	 * Remark: this is untested code - we have no board without an
	 * EEPROM.  It is the same setup as mentioned in the FreeBSD
	 * driver for the i82575.
	 */
11100
11101 /* SerDes configuration via SERDESCTRL */
11102 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
11103 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
11104 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
11105 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
11106
11107 /* CCM configuration via CCMCTL register */
11108 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
11109 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
11110
11111 /* PCIe lanes configuration */
11112 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
11113 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
11114 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
11115 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
11116
11117 /* PCIe PLL Configuration */
11118 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
11119 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
11120 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
11121 }
11122
11123 static void
11124 wm_reset_mdicnfg_82580(struct wm_softc *sc)
11125 {
11126 uint32_t reg;
11127 uint16_t nvmword;
11128 int rv;
11129
11130 if ((sc->sc_flags & WM_F_SGMII) == 0)
11131 return;
11132
11133 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
11134 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
11135 if (rv != 0) {
11136 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
11137 __func__);
11138 return;
11139 }
11140
11141 reg = CSR_READ(sc, WMREG_MDICNFG);
11142 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
11143 reg |= MDICNFG_DEST;
11144 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
11145 reg |= MDICNFG_COM_MDIO;
11146 CSR_WRITE(sc, WMREG_MDICNFG, reg);
11147 }
11148
/*
 * I210 Errata 25 and I211 Errata 10: Slow System Clock.
 *
 * If the internal PHY's PLL reads back as unconfigured, rewrite the
 * iNVM autoload word with the PLL workaround value and bounce the
 * device through D3hot so that the word is reloaded, retrying up to
 * WM_MAX_PLL_TRIES times.
 */
11153 static void
11154 wm_pll_workaround_i210(struct wm_softc *sc)
11155 {
11156 uint32_t mdicnfg, wuc;
11157 uint32_t reg;
11158 pcireg_t pcireg;
11159 uint32_t pmreg;
11160 uint16_t nvmword, tmp_nvmword;
11161 int phyval;
11162 bool wa_done = false;
11163 int i;
11164
11165 /* Save WUC and MDICNFG registers */
11166 wuc = CSR_READ(sc, WMREG_WUC);
11167 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
11168
11169 reg = mdicnfg & ~MDICNFG_DEST;
11170 CSR_WRITE(sc, WMREG_MDICNFG, reg);
11171
11172 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
11173 nvmword = INVM_DEFAULT_AL;
11174 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
11175
11176 /* Get Power Management cap offset */
11177 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
11178 &pmreg, NULL) == 0)
11179 return;
11180 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
11181 phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
11182 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
11183
11184 if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
11185 break; /* OK */
11186 }
11187
11188 wa_done = true;
11189 /* Directly reset the internal PHY */
11190 reg = CSR_READ(sc, WMREG_CTRL);
11191 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
11192
11193 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11194 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
11195 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11196
11197 CSR_WRITE(sc, WMREG_WUC, 0);
11198 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
11199 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
11200
11201 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
11202 pmreg + PCI_PMCSR);
11203 pcireg |= PCI_PMCSR_STATE_D3;
11204 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
11205 pmreg + PCI_PMCSR, pcireg);
11206 delay(1000);
11207 pcireg &= ~PCI_PMCSR_STATE_D3;
11208 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
11209 pmreg + PCI_PMCSR, pcireg);
11210
11211 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
11212 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
11213
11214 /* Restore WUC register */
11215 CSR_WRITE(sc, WMREG_WUC, wuc);
11216 }
11217
11218 /* Restore MDICNFG setting */
11219 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
11220 if (wa_done)
11221 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
11222 }
11223