/*	$NetBSD: if_wm.c,v 1.403 2016/05/19 08:20:06 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.403 2016/05/19 08:20:06 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
#define	WM_DEBUG_INIT		0x40
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT;
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
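
/*
 * Illustrative usage (editor's example, not from the original source):
 * with WM_DEBUG defined, a call such as
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 *
 * prints only when the WM_DEBUG_LINK bit is set in wm_debug, and
 * compiles away entirely when WM_DEBUG is not defined.
 */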

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts this device driver uses.
 */
#define	WM_MAX_NTXINTR		16
#define	WM_MAX_NRXINTR		16
#define	WM_MAX_NINTR		(WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
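
/*
 * Worked example (editor's note, not from the original source): because
 * WM_NTXDESC(txq) is a power of two, ring indices wrap with a simple
 * mask.  With 4096 descriptors, WM_NEXTTX(txq, 4095) evaluates to
 * (4095 + 1) & 4095 == 0, so the index wraps back to the start.
 */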

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
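
/*
 * Worked arithmetic (an approximation, editor's note): a ~9k jumbo
 * frame spread over 2k (MCLBYTES) buffers needs about five
 * descriptors, so 256 descriptors leave room for roughly 50 in-flight
 * jumbo packets, as the comment above states.
 */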

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;

	int txq_id;			/* index of transmit queues */
	int txq_intr_idx;		/* index of MSI-X tables */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking; see
	 * the illustrative sketch after this structure.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	/* XXX which event counter is required? */
};
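
/*
 * Illustrative sketch (editor's example, not part of the driver): a
 * CPU sharing a Tx queue parks an mbuf in txq_interq through the
 * lock-free pcq(9) API and lets whichever context holds txq_lock
 * drain it later:
 *
 *	if (!pcq_put(txq->txq_interq, m))
 *		m_freem(m);		// intermediate queue full: drop
 *	...
 *	while ((m = pcq_get(txq->txq_interq)) != NULL)
 *		... enqueue m to the hardware ring under txq_lock ...
 */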

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;

	int rxq_id;			/* index of receive queues */
	int rxq_intr_idx;		/* index of MSI-X tables */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	/* XXX which event counter is required? */
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_ntxqueues;
	struct wm_txqueue *sc_txq;

	int sc_nrxqueues;
	struct wm_rxqueue *sc_rxq;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_core_lock;		/* lock for softc operations */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */
};

#define	WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
#define	WM_TX_TRYLOCK(_txq)	((_txq)->txq_lock == NULL || mutex_tryenter((_txq)->txq_lock))
#define	WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
#define	WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
#define	WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
#define	WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
#define	WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
#define	WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define	WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define	WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#ifdef WM_MPSAFE
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
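
/*
 * Illustrative example (editor's note, not from the original source):
 * chaining two receive buffers into one packet with the macros above:
 *
 *	WM_RXCHAIN_RESET(rxq);		// rxq_head == NULL, rxq_len == 0
 *	WM_RXCHAIN_LINK(rxq, m1);	// rxq_head == m1, rxq_tail == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	// m1->m_next == m2, rxq_tail == m2
 */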

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
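
/*
 * Illustrative pattern (editor's sketch): reading STATUS after a
 * register write forces posted PCI writes out to the device before
 * the driver continues, e.g.:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);	// flush the posted write before delaying
 */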

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
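
/*
 * Worked example (editor's note, illustrative only): for a descriptor
 * at bus address 0x123456000, WM_CDTXADDR_LO() yields 0x23456000 and
 * WM_CDTXADDR_HI() yields 0x1 on a 64-bit bus_addr_t; with a 32-bit
 * bus_addr_t the high half is always 0.
 */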

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
	uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
	struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txintr_msix(void *);
static int	wm_rxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
	unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#ifdef WM_WOL
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
#endif
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
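
/*
 * Editor's note (an assumption based on the i8254x documentation, not
 * stated in the original source): these helpers use the I/O-mapped
 * register window, where offset 0 selects the target register and
 * offset 4 carries the data, so the two bus_space accesses above form
 * one indirect register access.
 */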

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Descriptor sync/init functions.
 */
static inline void
wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
{
	struct wm_softc *sc = txq->txq_sc;

	/* If it will wrap around, sync to the end of the ring. */
	if ((start + num) > WM_NTXDESC(txq)) {
		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
		    WM_CDTXOFF(txq, start), txq->txq_descsize *
		    (WM_NTXDESC(txq) - start), ops);
		num -= (WM_NTXDESC(txq) - start);
		start = 0;
	}

	/* Now sync whatever is left. */
	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
}
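
/*
 * Worked example (editor's note, illustrative only): with a 4096-entry
 * ring, syncing 10 descriptors starting at index 4090 becomes two
 * bus_dmamap_sync() calls: one for entries 4090-4095 and one for
 * entries 0-3.
 */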

static inline void
wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
{
	struct wm_softc *sc = rxq->rxq_sc;

	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
}

static inline void
wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
{
	struct wm_softc *sc = rxq->rxq_sc;
	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
	struct mbuf *m = rxs->rxs_mbuf;

	/*
	 * Note: We scoot the packet forward 2 bytes in the buffer
	 * so that the payload after the Ethernet header is aligned
	 * to a 4-byte boundary.
	 *
	 * XXX BRAINDAMAGE ALERT!
	 * The stupid chip uses the same size for every buffer, which
	 * is set in the Receive Control register.  We are using the 2K
	 * size option, but what we REALLY want is (2K - 2)!  For this
	 * reason, we can't "scoot" packets longer than the standard
	 * Ethernet MTU.  On strict-alignment platforms, if the total
	 * size exceeds (2K - 2) we set align_tweak to 0 and let
	 * the upper layer copy the headers.
	 */
	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;

	wm_set_dma_addr(&rxd->wrx_addr,
	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
	rxd->wrx_len = 0;
	rxd->wrx_cksum = 0;
	rxd->wrx_status = 0;
	rxd->wrx_errors = 0;
	rxd->wrx_special = 0;
	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
}
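
/*
 * Worked arithmetic (editor's note, illustrative only): with
 * sc_align_tweak == 2, the 14-byte Ethernet header starts at offset 2
 * of the cluster, so the IP header that follows begins at offset 16,
 * a 4-byte boundary.
 */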

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, nvmword;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Disable MSI for Errata:
	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
	 *
	 *  82544: Errata 25
	 *  82540: Errata  6 (easy to reproduce device timeout)
	 *  82545: Errata  4 (easy to reproduce device timeout)
	 *  82546: Errata 26 (easy to reproduce device timeout)
	 *  82541: Errata  7 (easy to reproduce device timeout)
	 *
	 * "Byte Enables 2 and 3 are not set on MSI writes"
	 *
	 *  82571 & 82572: Errata 63
	 */
	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
	    || (sc->sc_type == WM_T_82572))
		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

1589 /*
1590 	 * Map the device. All devices support memory-mapped access,
1591 * and it is really required for normal operation.
1592 */
1593 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1594 switch (memtype) {
1595 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1596 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1597 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1598 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1599 break;
1600 default:
1601 memh_valid = 0;
1602 break;
1603 }
1604
1605 if (memh_valid) {
1606 sc->sc_st = memt;
1607 sc->sc_sh = memh;
1608 sc->sc_ss = memsize;
1609 } else {
1610 aprint_error_dev(sc->sc_dev,
1611 "unable to map device registers\n");
1612 return;
1613 }
1614
1615 /*
1616 * In addition, i82544 and later support I/O mapped indirect
1617 * register access. It is not desirable (nor supported in
1618 * this driver) to use it for normal operation, though it is
1619 * required to work around bugs in some chip versions.
1620 */
1621 if (sc->sc_type >= WM_T_82544) {
1622 /* First we have to find the I/O BAR. */
1623 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1624 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1625 if (memtype == PCI_MAPREG_TYPE_IO)
1626 break;
1627 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1628 PCI_MAPREG_MEM_TYPE_64BIT)
1629 i += 4; /* skip high bits, too */
1630 }
1631 if (i < PCI_MAPREG_END) {
1632 /*
1633 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1634 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1635 			 * That's not a problem, because those newer chips
1636 			 * don't have this bug.
1637 *
1638 * The i8254x doesn't apparently respond when the
1639 * I/O BAR is 0, which looks somewhat like it's not
1640 * been configured.
1641 */
1642 preg = pci_conf_read(pc, pa->pa_tag, i);
1643 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1644 aprint_error_dev(sc->sc_dev,
1645 "WARNING: I/O BAR at zero.\n");
1646 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1647 0, &sc->sc_iot, &sc->sc_ioh,
1648 NULL, &sc->sc_ios) == 0) {
1649 sc->sc_flags |= WM_F_IOH_VALID;
1650 } else {
1651 aprint_error_dev(sc->sc_dev,
1652 "WARNING: unable to map I/O space\n");
1653 }
1654 }
1655
1656 }
1657
1658 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1659 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1660 preg |= PCI_COMMAND_MASTER_ENABLE;
1661 if (sc->sc_type < WM_T_82542_2_1)
1662 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1663 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1664
1665 /* power up chip */
1666 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1667 NULL)) && error != EOPNOTSUPP) {
1668 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1669 return;
1670 }
1671
1672 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1673
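	/*
	 * Interrupt allocation: try MSI-X first, with one vector per Tx
	 * queue, one per Rx queue and one more (presumably for link state
	 * changes), then fall back to single-vector MSI and finally to
	 * INTx, releasing the previous allocation before each retry.
	 */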
1674 /* Allocation settings */
1675 max_type = PCI_INTR_TYPE_MSIX;
1676 counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
1677 counts[PCI_INTR_TYPE_MSI] = 1;
1678 counts[PCI_INTR_TYPE_INTX] = 1;
1679
1680 alloc_retry:
1681 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1682 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1683 return;
1684 }
1685
1686 if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1687 error = wm_setup_msix(sc);
1688 if (error) {
1689 pci_intr_release(pc, sc->sc_intrs,
1690 counts[PCI_INTR_TYPE_MSIX]);
1691
1692 /* Setup for MSI: Disable MSI-X */
1693 max_type = PCI_INTR_TYPE_MSI;
1694 counts[PCI_INTR_TYPE_MSI] = 1;
1695 counts[PCI_INTR_TYPE_INTX] = 1;
1696 goto alloc_retry;
1697 }
1698 } else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1699 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1700 error = wm_setup_legacy(sc);
1701 if (error) {
1702 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1703 counts[PCI_INTR_TYPE_MSI]);
1704
1705 /* The next try is for INTx: Disable MSI */
1706 max_type = PCI_INTR_TYPE_INTX;
1707 counts[PCI_INTR_TYPE_INTX] = 1;
1708 goto alloc_retry;
1709 }
1710 } else {
1711 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1712 error = wm_setup_legacy(sc);
1713 if (error) {
1714 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1715 counts[PCI_INTR_TYPE_INTX]);
1716 return;
1717 }
1718 }
1719
1720 /*
1721 * Check the function ID (unit number of the chip).
1722 */
1723 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1724 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1725 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1726 || (sc->sc_type == WM_T_82580)
1727 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1728 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1729 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1730 else
1731 sc->sc_funcid = 0;
1732
1733 /*
1734 * Determine a few things about the bus we're connected to.
1735 */
1736 if (sc->sc_type < WM_T_82543) {
1737 /* We don't really know the bus characteristics here. */
1738 sc->sc_bus_speed = 33;
1739 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1740 /*
1741 		 * CSA (Communication Streaming Architecture) is about as
1742 		 * fast as a 32-bit 66MHz PCI bus.
1743 */
1744 sc->sc_flags |= WM_F_CSA;
1745 sc->sc_bus_speed = 66;
1746 aprint_verbose_dev(sc->sc_dev,
1747 "Communication Streaming Architecture\n");
1748 if (sc->sc_type == WM_T_82547) {
1749 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1750 callout_setfunc(&sc->sc_txfifo_ch,
1751 wm_82547_txfifo_stall, sc);
1752 aprint_verbose_dev(sc->sc_dev,
1753 "using 82547 Tx FIFO stall work-around\n");
1754 }
1755 } else if (sc->sc_type >= WM_T_82571) {
1756 sc->sc_flags |= WM_F_PCIE;
1757 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1758 && (sc->sc_type != WM_T_ICH10)
1759 && (sc->sc_type != WM_T_PCH)
1760 && (sc->sc_type != WM_T_PCH2)
1761 && (sc->sc_type != WM_T_PCH_LPT)
1762 && (sc->sc_type != WM_T_PCH_SPT)) {
1763 /* ICH* and PCH* have no PCIe capability registers */
1764 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1765 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1766 NULL) == 0)
1767 aprint_error_dev(sc->sc_dev,
1768 "unable to find PCIe capability\n");
1769 }
1770 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1771 } else {
1772 reg = CSR_READ(sc, WMREG_STATUS);
1773 if (reg & STATUS_BUS64)
1774 sc->sc_flags |= WM_F_BUS64;
1775 if ((reg & STATUS_PCIX_MODE) != 0) {
1776 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1777
1778 sc->sc_flags |= WM_F_PCIX;
1779 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1780 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1781 aprint_error_dev(sc->sc_dev,
1782 "unable to find PCIX capability\n");
1783 else if (sc->sc_type != WM_T_82545_3 &&
1784 sc->sc_type != WM_T_82546_3) {
1785 /*
1786 * Work around a problem caused by the BIOS
1787 * setting the max memory read byte count
1788 * incorrectly.
1789 */
1790 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1791 sc->sc_pcixe_capoff + PCIX_CMD);
1792 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1793 sc->sc_pcixe_capoff + PCIX_STATUS);
1794
1795 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1796 PCIX_CMD_BYTECNT_SHIFT;
1797 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1798 PCIX_STATUS_MAXB_SHIFT;
1799 if (bytecnt > maxb) {
1800 aprint_verbose_dev(sc->sc_dev,
1801 "resetting PCI-X MMRBC: %d -> %d\n",
1802 512 << bytecnt, 512 << maxb);
1803 pcix_cmd = (pcix_cmd &
1804 ~PCIX_CMD_BYTECNT_MASK) |
1805 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1806 pci_conf_write(pa->pa_pc, pa->pa_tag,
1807 sc->sc_pcixe_capoff + PCIX_CMD,
1808 pcix_cmd);
1809 }
1810 }
1811 }
1812 /*
1813 * The quad port adapter is special; it has a PCIX-PCIX
1814 * bridge on the board, and can run the secondary bus at
1815 * a higher speed.
1816 */
1817 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1818 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1819 : 66;
1820 } else if (sc->sc_flags & WM_F_PCIX) {
1821 switch (reg & STATUS_PCIXSPD_MASK) {
1822 case STATUS_PCIXSPD_50_66:
1823 sc->sc_bus_speed = 66;
1824 break;
1825 case STATUS_PCIXSPD_66_100:
1826 sc->sc_bus_speed = 100;
1827 break;
1828 case STATUS_PCIXSPD_100_133:
1829 sc->sc_bus_speed = 133;
1830 break;
1831 default:
1832 aprint_error_dev(sc->sc_dev,
1833 "unknown PCIXSPD %d; assuming 66MHz\n",
1834 reg & STATUS_PCIXSPD_MASK);
1835 sc->sc_bus_speed = 66;
1836 break;
1837 }
1838 } else
1839 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1840 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1841 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1842 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1843 }
1844
1845 /* clear interesting stat counters */
1846 CSR_READ(sc, WMREG_COLC);
1847 CSR_READ(sc, WMREG_RXERRC);
1848
1849 /* get PHY control from SMBus to PCIe */
1850 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1851 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
1852 wm_smbustopci(sc);
1853
1854 /* Reset the chip to a known state. */
1855 wm_reset(sc);
1856
1857 /* Get some information about the EEPROM. */
1858 switch (sc->sc_type) {
1859 case WM_T_82542_2_0:
1860 case WM_T_82542_2_1:
1861 case WM_T_82543:
1862 case WM_T_82544:
1863 /* Microwire */
1864 sc->sc_nvm_wordsize = 64;
1865 sc->sc_nvm_addrbits = 6;
1866 break;
1867 case WM_T_82540:
1868 case WM_T_82545:
1869 case WM_T_82545_3:
1870 case WM_T_82546:
1871 case WM_T_82546_3:
1872 /* Microwire */
1873 reg = CSR_READ(sc, WMREG_EECD);
1874 if (reg & EECD_EE_SIZE) {
1875 sc->sc_nvm_wordsize = 256;
1876 sc->sc_nvm_addrbits = 8;
1877 } else {
1878 sc->sc_nvm_wordsize = 64;
1879 sc->sc_nvm_addrbits = 6;
1880 }
1881 sc->sc_flags |= WM_F_LOCK_EECD;
1882 break;
1883 case WM_T_82541:
1884 case WM_T_82541_2:
1885 case WM_T_82547:
1886 case WM_T_82547_2:
1887 sc->sc_flags |= WM_F_LOCK_EECD;
1888 reg = CSR_READ(sc, WMREG_EECD);
1889 if (reg & EECD_EE_TYPE) {
1890 /* SPI */
1891 sc->sc_flags |= WM_F_EEPROM_SPI;
1892 wm_nvm_set_addrbits_size_eecd(sc);
1893 } else {
1894 /* Microwire */
1895 if ((reg & EECD_EE_ABITS) != 0) {
1896 sc->sc_nvm_wordsize = 256;
1897 sc->sc_nvm_addrbits = 8;
1898 } else {
1899 sc->sc_nvm_wordsize = 64;
1900 sc->sc_nvm_addrbits = 6;
1901 }
1902 }
1903 break;
1904 case WM_T_82571:
1905 case WM_T_82572:
1906 /* SPI */
1907 sc->sc_flags |= WM_F_EEPROM_SPI;
1908 wm_nvm_set_addrbits_size_eecd(sc);
1909 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1910 break;
1911 case WM_T_82573:
1912 sc->sc_flags |= WM_F_LOCK_SWSM;
1913 /* FALLTHROUGH */
1914 case WM_T_82574:
1915 case WM_T_82583:
1916 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1917 sc->sc_flags |= WM_F_EEPROM_FLASH;
1918 sc->sc_nvm_wordsize = 2048;
1919 } else {
1920 /* SPI */
1921 sc->sc_flags |= WM_F_EEPROM_SPI;
1922 wm_nvm_set_addrbits_size_eecd(sc);
1923 }
1924 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1925 break;
1926 case WM_T_82575:
1927 case WM_T_82576:
1928 case WM_T_82580:
1929 case WM_T_I350:
1930 case WM_T_I354:
1931 case WM_T_80003:
1932 /* SPI */
1933 sc->sc_flags |= WM_F_EEPROM_SPI;
1934 wm_nvm_set_addrbits_size_eecd(sc);
1935 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1936 | WM_F_LOCK_SWSM;
1937 break;
1938 case WM_T_ICH8:
1939 case WM_T_ICH9:
1940 case WM_T_ICH10:
1941 case WM_T_PCH:
1942 case WM_T_PCH2:
1943 case WM_T_PCH_LPT:
1944 /* FLASH */
1945 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1946 sc->sc_nvm_wordsize = 2048;
1947 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
1948 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1949 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
1950 aprint_error_dev(sc->sc_dev,
1951 "can't map FLASH registers\n");
1952 goto out;
1953 }
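		/*
		 * GFPREG describes the flash region reserved for the NVM:
		 * the low bits hold its first sector, bits 28:16 its last.
		 * Convert sectors to bytes, then to 16-bit words per bank,
		 * the region being assumed to hold two banks.
		 */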
1954 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1955 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1956 ICH_FLASH_SECTOR_SIZE;
1957 sc->sc_ich8_flash_bank_size =
1958 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1959 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
1960 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1961 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1962 sc->sc_flashreg_offset = 0;
1963 break;
1964 case WM_T_PCH_SPT:
1965 /* SPT has no GFPREG; flash registers mapped through BAR0 */
1966 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1967 sc->sc_flasht = sc->sc_st;
1968 sc->sc_flashh = sc->sc_sh;
1969 sc->sc_ich8_flash_base = 0;
1970 sc->sc_nvm_wordsize =
1971 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
1972 * NVM_SIZE_MULTIPLIER;
1973 		/* It is the size in bytes; we want words */
1974 sc->sc_nvm_wordsize /= 2;
1975 /* assume 2 banks */
1976 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
1977 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
1978 break;
1979 case WM_T_I210:
1980 case WM_T_I211:
1981 if (wm_nvm_get_flash_presence_i210(sc)) {
1982 wm_nvm_set_addrbits_size_eecd(sc);
1983 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1984 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1985 } else {
1986 sc->sc_nvm_wordsize = INVM_SIZE;
1987 sc->sc_flags |= WM_F_EEPROM_INVM;
1988 sc->sc_flags |= WM_F_LOCK_SWFW;
1989 }
1990 break;
1991 default:
1992 break;
1993 }
1994
1995 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1996 switch (sc->sc_type) {
1997 case WM_T_82571:
1998 case WM_T_82572:
1999 reg = CSR_READ(sc, WMREG_SWSM2);
2000 if ((reg & SWSM2_LOCK) == 0) {
2001 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2002 force_clear_smbi = true;
2003 } else
2004 force_clear_smbi = false;
2005 break;
2006 case WM_T_82573:
2007 case WM_T_82574:
2008 case WM_T_82583:
2009 force_clear_smbi = true;
2010 break;
2011 default:
2012 force_clear_smbi = false;
2013 break;
2014 }
2015 if (force_clear_smbi) {
2016 reg = CSR_READ(sc, WMREG_SWSM);
2017 if ((reg & SWSM_SMBI) != 0)
2018 aprint_error_dev(sc->sc_dev,
2019 "Please update the Bootagent\n");
2020 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2021 }
2022
2023 /*
2024 	 * Defer printing the EEPROM type until after verifying the checksum.
2025 * This allows the EEPROM type to be printed correctly in the case
2026 * that no EEPROM is attached.
2027 */
2028 /*
2029 * Validate the EEPROM checksum. If the checksum fails, flag
2030 * this for later, so we can fail future reads from the EEPROM.
2031 */
2032 if (wm_nvm_validate_checksum(sc)) {
2033 /*
2034 		 * Validate it once more, because some PCI-e parts fail the
2035 		 * first check due to the link being in a sleep state.
2036 */
2037 if (wm_nvm_validate_checksum(sc))
2038 sc->sc_flags |= WM_F_EEPROM_INVALID;
2039 }
2040
2041 /* Set device properties (macflags) */
2042 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2043
2044 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2045 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2046 else {
2047 aprint_verbose_dev(sc->sc_dev, "%u words ",
2048 sc->sc_nvm_wordsize);
2049 if (sc->sc_flags & WM_F_EEPROM_INVM)
2050 aprint_verbose("iNVM");
2051 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2052 aprint_verbose("FLASH(HW)");
2053 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2054 aprint_verbose("FLASH");
2055 else {
2056 if (sc->sc_flags & WM_F_EEPROM_SPI)
2057 eetype = "SPI";
2058 else
2059 eetype = "MicroWire";
2060 aprint_verbose("(%d address bits) %s EEPROM",
2061 sc->sc_nvm_addrbits, eetype);
2062 }
2063 }
2064 wm_nvm_version(sc);
2065 aprint_verbose("\n");
2066
2067 /* Check for I21[01] PLL workaround */
2068 if (sc->sc_type == WM_T_I210)
2069 sc->sc_flags |= WM_F_PLL_WA_I210;
2070 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2071 /* NVM image release 3.25 has a workaround */
2072 if ((sc->sc_nvm_ver_major < 3)
2073 || ((sc->sc_nvm_ver_major == 3)
2074 && (sc->sc_nvm_ver_minor < 25))) {
2075 aprint_verbose_dev(sc->sc_dev,
2076 "ROM image version %d.%d is older than 3.25\n",
2077 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2078 sc->sc_flags |= WM_F_PLL_WA_I210;
2079 }
2080 }
2081 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2082 wm_pll_workaround_i210(sc);
2083
2084 wm_get_wakeup(sc);
2085 switch (sc->sc_type) {
2086 case WM_T_82571:
2087 case WM_T_82572:
2088 case WM_T_82573:
2089 case WM_T_82574:
2090 case WM_T_82583:
2091 case WM_T_80003:
2092 case WM_T_ICH8:
2093 case WM_T_ICH9:
2094 case WM_T_ICH10:
2095 case WM_T_PCH:
2096 case WM_T_PCH2:
2097 case WM_T_PCH_LPT:
2098 case WM_T_PCH_SPT:
2099 /* Non-AMT based hardware can now take control from firmware */
2100 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2101 wm_get_hw_control(sc);
2102 break;
2103 default:
2104 break;
2105 }
2106
2107 /*
2108 	 * Read the Ethernet address from the device properties if present;
2109 	 * otherwise read it from the EEPROM.
2110 */
2111 ea = prop_dictionary_get(dict, "mac-address");
2112 if (ea != NULL) {
2113 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2114 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2115 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2116 } else {
2117 if (wm_read_mac_addr(sc, enaddr) != 0) {
2118 aprint_error_dev(sc->sc_dev,
2119 "unable to read Ethernet address\n");
2120 goto out;
2121 }
2122 }
2123
2124 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2125 ether_sprintf(enaddr));
2126
2127 /*
2128 * Read the config info from the EEPROM, and set up various
2129 * bits in the control registers based on their contents.
2130 */
2131 pn = prop_dictionary_get(dict, "i82543-cfg1");
2132 if (pn != NULL) {
2133 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2134 cfg1 = (uint16_t) prop_number_integer_value(pn);
2135 } else {
2136 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2137 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2138 goto out;
2139 }
2140 }
2141
2142 pn = prop_dictionary_get(dict, "i82543-cfg2");
2143 if (pn != NULL) {
2144 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2145 cfg2 = (uint16_t) prop_number_integer_value(pn);
2146 } else {
2147 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2148 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2149 goto out;
2150 }
2151 }
2152
2153 /* check for WM_F_WOL */
2154 switch (sc->sc_type) {
2155 case WM_T_82542_2_0:
2156 case WM_T_82542_2_1:
2157 case WM_T_82543:
2158 /* dummy? */
2159 eeprom_data = 0;
2160 apme_mask = NVM_CFG3_APME;
2161 break;
2162 case WM_T_82544:
2163 apme_mask = NVM_CFG2_82544_APM_EN;
2164 eeprom_data = cfg2;
2165 break;
2166 case WM_T_82546:
2167 case WM_T_82546_3:
2168 case WM_T_82571:
2169 case WM_T_82572:
2170 case WM_T_82573:
2171 case WM_T_82574:
2172 case WM_T_82583:
2173 case WM_T_80003:
2174 default:
2175 apme_mask = NVM_CFG3_APME;
2176 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2177 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2178 break;
2179 case WM_T_82575:
2180 case WM_T_82576:
2181 case WM_T_82580:
2182 case WM_T_I350:
2183 case WM_T_I354: /* XXX ok? */
2184 case WM_T_ICH8:
2185 case WM_T_ICH9:
2186 case WM_T_ICH10:
2187 case WM_T_PCH:
2188 case WM_T_PCH2:
2189 case WM_T_PCH_LPT:
2190 case WM_T_PCH_SPT:
2191 /* XXX The funcid should be checked on some devices */
2192 apme_mask = WUC_APME;
2193 eeprom_data = CSR_READ(sc, WMREG_WUC);
2194 break;
2195 }
2196
2197 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2198 if ((eeprom_data & apme_mask) != 0)
2199 sc->sc_flags |= WM_F_WOL;
2200 #ifdef WM_DEBUG
2201 if ((sc->sc_flags & WM_F_WOL) != 0)
2202 printf("WOL\n");
2203 #endif
2204
2205 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2206 /* Check NVM for autonegotiation */
2207 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2208 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2209 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2210 }
2211 }
2212
2213 /*
2214 	 * XXX need special handling for some multi-port cards
2215 	 * to disable a particular port.
2216 */
2217
2218 if (sc->sc_type >= WM_T_82544) {
2219 pn = prop_dictionary_get(dict, "i82543-swdpin");
2220 if (pn != NULL) {
2221 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2222 swdpin = (uint16_t) prop_number_integer_value(pn);
2223 } else {
2224 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2225 aprint_error_dev(sc->sc_dev,
2226 "unable to read SWDPIN\n");
2227 goto out;
2228 }
2229 }
2230 }
2231
2232 if (cfg1 & NVM_CFG1_ILOS)
2233 sc->sc_ctrl |= CTRL_ILOS;
2234
2235 /*
2236 * XXX
2237 	 * This code isn't correct, because pins 2 and 3 are located
2238 	 * at different positions on newer chips. Check all datasheets.
2239 	 *
2240 	 * Until this is resolved, only apply it to chips up to the 82580.
2241 */
2242 if (sc->sc_type <= WM_T_82580) {
2243 if (sc->sc_type >= WM_T_82544) {
2244 sc->sc_ctrl |=
2245 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2246 CTRL_SWDPIO_SHIFT;
2247 sc->sc_ctrl |=
2248 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2249 CTRL_SWDPINS_SHIFT;
2250 } else {
2251 sc->sc_ctrl |=
2252 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2253 CTRL_SWDPIO_SHIFT;
2254 }
2255 }
2256
2257 /* XXX For other than 82580? */
2258 if (sc->sc_type == WM_T_82580) {
2259 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2260 if (nvmword & __BIT(13))
2261 sc->sc_ctrl |= CTRL_ILOS;
2262 }
2263
2264 #if 0
2265 if (sc->sc_type >= WM_T_82544) {
2266 if (cfg1 & NVM_CFG1_IPS0)
2267 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2268 if (cfg1 & NVM_CFG1_IPS1)
2269 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2270 sc->sc_ctrl_ext |=
2271 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2272 CTRL_EXT_SWDPIO_SHIFT;
2273 sc->sc_ctrl_ext |=
2274 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2275 CTRL_EXT_SWDPINS_SHIFT;
2276 } else {
2277 sc->sc_ctrl_ext |=
2278 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2279 CTRL_EXT_SWDPIO_SHIFT;
2280 }
2281 #endif
2282
2283 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2284 #if 0
2285 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2286 #endif
2287
2288 if (sc->sc_type == WM_T_PCH) {
2289 uint16_t val;
2290
2291 /* Save the NVM K1 bit setting */
2292 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2293
2294 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2295 sc->sc_nvm_k1_enabled = 1;
2296 else
2297 sc->sc_nvm_k1_enabled = 0;
2298 }
2299
2300 /*
2301 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
2302 * media structures accordingly.
2303 */
2304 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2305 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2306 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2307 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
2308 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2309 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2310 wm_gmii_mediainit(sc, wmp->wmp_product);
2311 } else if (sc->sc_type < WM_T_82543 ||
2312 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2313 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2314 aprint_error_dev(sc->sc_dev,
2315 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2316 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2317 }
2318 wm_tbi_mediainit(sc);
2319 } else {
2320 switch (sc->sc_type) {
2321 case WM_T_82575:
2322 case WM_T_82576:
2323 case WM_T_82580:
2324 case WM_T_I350:
2325 case WM_T_I354:
2326 case WM_T_I210:
2327 case WM_T_I211:
2328 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2329 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2330 switch (link_mode) {
2331 case CTRL_EXT_LINK_MODE_1000KX:
2332 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2333 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2334 break;
2335 case CTRL_EXT_LINK_MODE_SGMII:
2336 if (wm_sgmii_uses_mdio(sc)) {
2337 aprint_verbose_dev(sc->sc_dev,
2338 "SGMII(MDIO)\n");
2339 sc->sc_flags |= WM_F_SGMII;
2340 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2341 break;
2342 }
2343 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2344 /*FALLTHROUGH*/
2345 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2346 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2347 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2348 if (link_mode
2349 == CTRL_EXT_LINK_MODE_SGMII) {
2350 sc->sc_mediatype
2351 = WM_MEDIATYPE_COPPER;
2352 sc->sc_flags |= WM_F_SGMII;
2353 } else {
2354 sc->sc_mediatype
2355 = WM_MEDIATYPE_SERDES;
2356 aprint_verbose_dev(sc->sc_dev,
2357 "SERDES\n");
2358 }
2359 break;
2360 }
2361 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2362 aprint_verbose_dev(sc->sc_dev,
2363 "SERDES\n");
2364
2365 /* Change current link mode setting */
2366 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2367 switch (sc->sc_mediatype) {
2368 case WM_MEDIATYPE_COPPER:
2369 reg |= CTRL_EXT_LINK_MODE_SGMII;
2370 break;
2371 case WM_MEDIATYPE_SERDES:
2372 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2373 break;
2374 default:
2375 break;
2376 }
2377 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2378 break;
2379 case CTRL_EXT_LINK_MODE_GMII:
2380 default:
2381 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2382 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2383 break;
2384 }
2385
2387 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2388 				reg |= CTRL_EXT_I2C_ENA;
2389 			else
2390 				reg &= ~CTRL_EXT_I2C_ENA;
2391 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2392
2393 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2394 wm_gmii_mediainit(sc, wmp->wmp_product);
2395 else
2396 wm_tbi_mediainit(sc);
2397 break;
2398 default:
2399 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2400 aprint_error_dev(sc->sc_dev,
2401 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2402 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2403 wm_gmii_mediainit(sc, wmp->wmp_product);
2404 }
2405 }
2406
2407 ifp = &sc->sc_ethercom.ec_if;
2408 xname = device_xname(sc->sc_dev);
2409 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2410 ifp->if_softc = sc;
2411 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2412 ifp->if_ioctl = wm_ioctl;
2413 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2414 ifp->if_start = wm_nq_start;
2415 if (sc->sc_ntxqueues > 1)
2416 ifp->if_transmit = wm_nq_transmit;
2417 } else
2418 ifp->if_start = wm_start;
2419 ifp->if_watchdog = wm_watchdog;
2420 ifp->if_init = wm_init;
2421 ifp->if_stop = wm_stop;
2422 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2423 IFQ_SET_READY(&ifp->if_snd);
2424
2425 /* Check for jumbo frame */
2426 switch (sc->sc_type) {
2427 case WM_T_82573:
2428 /* XXX limited to 9234 if ASPM is disabled */
2429 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2430 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2431 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2432 break;
2433 case WM_T_82571:
2434 case WM_T_82572:
2435 case WM_T_82574:
2436 case WM_T_82575:
2437 case WM_T_82576:
2438 case WM_T_82580:
2439 case WM_T_I350:
2440 case WM_T_I354: /* XXXX ok? */
2441 case WM_T_I210:
2442 case WM_T_I211:
2443 case WM_T_80003:
2444 case WM_T_ICH9:
2445 case WM_T_ICH10:
2446 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2447 case WM_T_PCH_LPT:
2448 case WM_T_PCH_SPT:
2449 /* XXX limited to 9234 */
2450 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2451 break;
2452 case WM_T_PCH:
2453 /* XXX limited to 4096 */
2454 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2455 break;
2456 case WM_T_82542_2_0:
2457 case WM_T_82542_2_1:
2458 case WM_T_82583:
2459 case WM_T_ICH8:
2460 /* No support for jumbo frame */
2461 break;
2462 default:
2463 /* ETHER_MAX_LEN_JUMBO */
2464 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2465 break;
2466 }
2467
2468 	/* If we're an i82543 or greater, we can support VLANs. */
2469 if (sc->sc_type >= WM_T_82543)
2470 sc->sc_ethercom.ec_capabilities |=
2471 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2472
2473 /*
2474 	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2475 * on i82543 and later.
2476 */
2477 if (sc->sc_type >= WM_T_82543) {
2478 ifp->if_capabilities |=
2479 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2480 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2481 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2482 IFCAP_CSUM_TCPv6_Tx |
2483 IFCAP_CSUM_UDPv6_Tx;
2484 }
2485
2486 /*
2487 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2488 *
2489 * 82541GI (8086:1076) ... no
2490 * 82572EI (8086:10b9) ... yes
2491 */
2492 if (sc->sc_type >= WM_T_82571) {
2493 ifp->if_capabilities |=
2494 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2495 }
2496
2497 /*
2498 	 * If we're an i82544 or greater (except the i82547), we can do
2499 * TCP segmentation offload.
2500 */
2501 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2502 ifp->if_capabilities |= IFCAP_TSOv4;
2503 }
2504
2505 if (sc->sc_type >= WM_T_82571) {
2506 ifp->if_capabilities |= IFCAP_TSOv6;
2507 }
2508
2509 #ifdef WM_MPSAFE
2510 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2511 #else
2512 sc->sc_core_lock = NULL;
2513 #endif
2514
2515 /* Attach the interface. */
2516 if_initialize(ifp);
2517 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2518 ether_ifattach(ifp, enaddr);
2519 if_register(ifp);
2520 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2521 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2522 RND_FLAG_DEFAULT);
2523
2524 #ifdef WM_EVENT_COUNTERS
2525 /* Attach event counters. */
2526 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2527 NULL, xname, "txsstall");
2528 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2529 NULL, xname, "txdstall");
2530 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2531 NULL, xname, "txfifo_stall");
2532 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2533 NULL, xname, "txdw");
2534 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2535 NULL, xname, "txqe");
2536 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2537 NULL, xname, "rxintr");
2538 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2539 NULL, xname, "linkintr");
2540
2541 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2542 NULL, xname, "rxipsum");
2543 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2544 NULL, xname, "rxtusum");
2545 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2546 NULL, xname, "txipsum");
2547 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2548 NULL, xname, "txtusum");
2549 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2550 NULL, xname, "txtusum6");
2551
2552 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2553 NULL, xname, "txtso");
2554 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2555 NULL, xname, "txtso6");
2556 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2557 NULL, xname, "txtsopain");
2558
2559 for (i = 0; i < WM_NTXSEGS; i++) {
2560 snprintf(wm_txseg_evcnt_names[i],
2561 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2562 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2563 NULL, xname, wm_txseg_evcnt_names[i]);
2564 }
2565
2566 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2567 NULL, xname, "txdrop");
2568
2569 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2570 NULL, xname, "tu");
2571
2572 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2573 NULL, xname, "tx_xoff");
2574 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2575 NULL, xname, "tx_xon");
2576 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2577 NULL, xname, "rx_xoff");
2578 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2579 NULL, xname, "rx_xon");
2580 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2581 NULL, xname, "rx_macctl");
2582 #endif /* WM_EVENT_COUNTERS */
2583
2584 if (pmf_device_register(self, wm_suspend, wm_resume))
2585 pmf_class_network_register(self, ifp);
2586 else
2587 aprint_error_dev(self, "couldn't establish power handler\n");
2588
2589 sc->sc_flags |= WM_F_ATTACHED;
2590 out:
2591 return;
2592 }
2593
2594 /* The detach function (ca_detach) */
2595 static int
2596 wm_detach(device_t self, int flags __unused)
2597 {
2598 struct wm_softc *sc = device_private(self);
2599 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2600 int i;
2601 #ifndef WM_MPSAFE
2602 int s;
2603 #endif
2604
2605 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2606 return 0;
2607
2608 #ifndef WM_MPSAFE
2609 s = splnet();
2610 #endif
2611 /* Stop the interface. Callouts are stopped in it. */
2612 wm_stop(ifp, 1);
2613
2614 #ifndef WM_MPSAFE
2615 splx(s);
2616 #endif
2617
2618 pmf_device_deregister(self);
2619
2620 /* Tell the firmware about the release */
2621 WM_CORE_LOCK(sc);
2622 wm_release_manageability(sc);
2623 wm_release_hw_control(sc);
2624 WM_CORE_UNLOCK(sc);
2625
2626 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2627
2628 /* Delete all remaining media. */
2629 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2630
2631 ether_ifdetach(ifp);
2632 if_detach(ifp);
2633 if_percpuq_destroy(sc->sc_ipq);
2634
2635 /* Unload RX dmamaps and free mbufs */
2636 for (i = 0; i < sc->sc_nrxqueues; i++) {
2637 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
2638 WM_RX_LOCK(rxq);
2639 wm_rxdrain(rxq);
2640 WM_RX_UNLOCK(rxq);
2641 }
2642 /* Must unlock here */
2643
2644 /* Disestablish the interrupt handler */
2645 for (i = 0; i < sc->sc_nintrs; i++) {
2646 if (sc->sc_ihs[i] != NULL) {
2647 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2648 sc->sc_ihs[i] = NULL;
2649 }
2650 }
2651 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2652
2653 wm_free_txrx_queues(sc);
2654
2655 /* Unmap the registers */
2656 if (sc->sc_ss) {
2657 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2658 sc->sc_ss = 0;
2659 }
2660 if (sc->sc_ios) {
2661 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2662 sc->sc_ios = 0;
2663 }
2664 if (sc->sc_flashs) {
2665 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2666 sc->sc_flashs = 0;
2667 }
2668
2669 if (sc->sc_core_lock)
2670 mutex_obj_free(sc->sc_core_lock);
2671
2672 return 0;
2673 }
2674
2675 static bool
2676 wm_suspend(device_t self, const pmf_qual_t *qual)
2677 {
2678 struct wm_softc *sc = device_private(self);
2679
2680 wm_release_manageability(sc);
2681 wm_release_hw_control(sc);
2682 #ifdef WM_WOL
2683 wm_enable_wakeup(sc);
2684 #endif
2685
2686 return true;
2687 }
2688
2689 static bool
2690 wm_resume(device_t self, const pmf_qual_t *qual)
2691 {
2692 struct wm_softc *sc = device_private(self);
2693
2694 wm_init_manageability(sc);
2695
2696 return true;
2697 }
2698
2699 /*
2700 * wm_watchdog: [ifnet interface function]
2701 *
2702 * Watchdog timer handler.
2703 */
2704 static void
2705 wm_watchdog(struct ifnet *ifp)
2706 {
2707 int qid;
2708 struct wm_softc *sc = ifp->if_softc;
2709
2710 for (qid = 0; qid < sc->sc_ntxqueues; qid++) {
2711 struct wm_txqueue *txq = &sc->sc_txq[qid];
2712
2713 wm_watchdog_txq(ifp, txq);
2714 }
2715
2716 /* Reset the interface. */
2717 (void) wm_init(ifp);
2718
2719 /*
2720 	 * There is still some upper-layer processing that calls
2721 	 * ifp->if_start() directly, e.g. ALTQ.
2722 */
2723 /* Try to get more packets going. */
2724 ifp->if_start(ifp);
2725 }
2726
2727 static void
2728 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
2729 {
2730 struct wm_softc *sc = ifp->if_softc;
2731
2732 /*
2733 * Since we're using delayed interrupts, sweep up
2734 * before we report an error.
2735 */
2736 WM_TX_LOCK(txq);
2737 wm_txeof(sc, txq);
2738 WM_TX_UNLOCK(txq);
2739
2740 if (txq->txq_free != WM_NTXDESC(txq)) {
2741 #ifdef WM_DEBUG
2742 int i, j;
2743 struct wm_txsoft *txs;
2744 #endif
2745 log(LOG_ERR,
2746 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2747 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2748 txq->txq_next);
2749 ifp->if_oerrors++;
2750 #ifdef WM_DEBUG
2751 for (i = txq->txq_sdirty; i != txq->txq_snext ;
2752 i = WM_NEXTTXS(txq, i)) {
2753 txs = &txq->txq_soft[i];
2754 printf("txs %d tx %d -> %d\n",
2755 i, txs->txs_firstdesc, txs->txs_lastdesc);
2756 for (j = txs->txs_firstdesc; ;
2757 j = WM_NEXTTX(txq, j)) {
2758 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2759 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2760 printf("\t %#08x%08x\n",
2761 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2762 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2763 if (j == txs->txs_lastdesc)
2764 break;
2765 }
2766 }
2767 #endif
2768 }
2769 }
2770
2771 /*
2772 * wm_tick:
2773 *
2774 * One second timer, used to check link status, sweep up
2775 * completed transmit jobs, etc.
2776 */
2777 static void
2778 wm_tick(void *arg)
2779 {
2780 struct wm_softc *sc = arg;
2781 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2782 #ifndef WM_MPSAFE
2783 int s;
2784
2785 s = splnet();
2786 #endif
2787
2788 WM_CORE_LOCK(sc);
2789
2790 if (sc->sc_stopping)
2791 goto out;
2792
2793 if (sc->sc_type >= WM_T_82542_2_1) {
2794 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2795 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2796 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2797 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2798 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2799 }
2800
2801 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2802 ifp->if_ierrors += 0ULL + /* ensure quad_t */
2803 + CSR_READ(sc, WMREG_CRCERRS)
2804 + CSR_READ(sc, WMREG_ALGNERRC)
2805 + CSR_READ(sc, WMREG_SYMERRC)
2806 + CSR_READ(sc, WMREG_RXERRC)
2807 + CSR_READ(sc, WMREG_SEC)
2808 + CSR_READ(sc, WMREG_CEXTERR)
2809 + CSR_READ(sc, WMREG_RLEC);
2810 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2811
2812 if (sc->sc_flags & WM_F_HAS_MII)
2813 mii_tick(&sc->sc_mii);
2814 else if ((sc->sc_type >= WM_T_82575)
2815 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2816 wm_serdes_tick(sc);
2817 else
2818 wm_tbi_tick(sc);
2819
2820 out:
2821 WM_CORE_UNLOCK(sc);
2822 #ifndef WM_MPSAFE
2823 splx(s);
2824 #endif
2825
2826 if (!sc->sc_stopping)
2827 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2828 }
2829
2830 static int
2831 wm_ifflags_cb(struct ethercom *ec)
2832 {
2833 struct ifnet *ifp = &ec->ec_if;
2834 struct wm_softc *sc = ifp->if_softc;
2835 int change = ifp->if_flags ^ sc->sc_if_flags;
2836 int rc = 0;
2837
2838 WM_CORE_LOCK(sc);
2839
2840 if (change != 0)
2841 sc->sc_if_flags = ifp->if_flags;
2842
2843 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2844 rc = ENETRESET;
2845 goto out;
2846 }
2847
2848 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2849 wm_set_filter(sc);
2850
2851 wm_set_vlan(sc);
2852
2853 out:
2854 WM_CORE_UNLOCK(sc);
2855
2856 return rc;
2857 }
2858
2859 /*
2860 * wm_ioctl: [ifnet interface function]
2861 *
2862 * Handle control requests from the operator.
2863 */
2864 static int
2865 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2866 {
2867 struct wm_softc *sc = ifp->if_softc;
2868 struct ifreq *ifr = (struct ifreq *) data;
2869 struct ifaddr *ifa = (struct ifaddr *)data;
2870 struct sockaddr_dl *sdl;
2871 int s, error;
2872
2873 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
2874 device_xname(sc->sc_dev), __func__));
2875 #ifndef WM_MPSAFE
2876 s = splnet();
2877 #endif
2878 switch (cmd) {
2879 case SIOCSIFMEDIA:
2880 case SIOCGIFMEDIA:
2881 WM_CORE_LOCK(sc);
2882 /* Flow control requires full-duplex mode. */
2883 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2884 (ifr->ifr_media & IFM_FDX) == 0)
2885 ifr->ifr_media &= ~IFM_ETH_FMASK;
2886 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2887 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2888 /* We can do both TXPAUSE and RXPAUSE. */
2889 ifr->ifr_media |=
2890 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2891 }
2892 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2893 }
2894 WM_CORE_UNLOCK(sc);
2895 #ifdef WM_MPSAFE
2896 s = splnet();
2897 #endif
2898 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2899 #ifdef WM_MPSAFE
2900 splx(s);
2901 #endif
2902 break;
2903 case SIOCINITIFADDR:
2904 WM_CORE_LOCK(sc);
2905 if (ifa->ifa_addr->sa_family == AF_LINK) {
2906 sdl = satosdl(ifp->if_dl->ifa_addr);
2907 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2908 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2909 /* unicast address is first multicast entry */
2910 wm_set_filter(sc);
2911 error = 0;
2912 WM_CORE_UNLOCK(sc);
2913 break;
2914 }
2915 WM_CORE_UNLOCK(sc);
2916 /*FALLTHROUGH*/
2917 default:
2918 #ifdef WM_MPSAFE
2919 s = splnet();
2920 #endif
2921 /* It may call wm_start, so unlock here */
2922 error = ether_ioctl(ifp, cmd, data);
2923 #ifdef WM_MPSAFE
2924 splx(s);
2925 #endif
2926 if (error != ENETRESET)
2927 break;
2928
2929 error = 0;
2930
2931 if (cmd == SIOCSIFCAP) {
2932 error = (*ifp->if_init)(ifp);
2933 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2934 ;
2935 else if (ifp->if_flags & IFF_RUNNING) {
2936 /*
2937 * Multicast list has changed; set the hardware filter
2938 * accordingly.
2939 */
2940 WM_CORE_LOCK(sc);
2941 wm_set_filter(sc);
2942 WM_CORE_UNLOCK(sc);
2943 }
2944 break;
2945 }
2946
2947 #ifndef WM_MPSAFE
2948 splx(s);
2949 #endif
2950 return error;
2951 }
2952
2953 /* MAC address related */
2954
2955 /*
2956  * Get the offset of the MAC address and return it.
2957  * If an error occurs, offset 0 is used.
2958 */
2959 static uint16_t
2960 wm_check_alt_mac_addr(struct wm_softc *sc)
2961 {
2962 uint16_t myea[ETHER_ADDR_LEN / 2];
2963 uint16_t offset = NVM_OFF_MACADDR;
2964
2965 /* Try to read alternative MAC address pointer */
2966 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2967 return 0;
2968
2969 	/* Check whether the pointer is valid. */
2970 if ((offset == 0x0000) || (offset == 0xffff))
2971 return 0;
2972
2973 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2974 /*
2975 	 * Check whether the alternative MAC address is valid. Some
2976 	 * cards have a non-0xffff pointer but don't actually use an
2977 	 * alternative MAC address.
2978 	 *
2979 	 * Check whether the broadcast bit is set.
2980 */
2981 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2982 if (((myea[0] & 0xff) & 0x01) == 0)
2983 return offset; /* Found */
2984
2985 /* Not found */
2986 return 0;
2987 }
2988
2989 static int
2990 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2991 {
2992 uint16_t myea[ETHER_ADDR_LEN / 2];
2993 uint16_t offset = NVM_OFF_MACADDR;
2994 int do_invert = 0;
2995
2996 switch (sc->sc_type) {
2997 case WM_T_82580:
2998 case WM_T_I350:
2999 case WM_T_I354:
3000 /* EEPROM Top Level Partitioning */
3001 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3002 break;
3003 case WM_T_82571:
3004 case WM_T_82575:
3005 case WM_T_82576:
3006 case WM_T_80003:
3007 case WM_T_I210:
3008 case WM_T_I211:
3009 offset = wm_check_alt_mac_addr(sc);
3010 if (offset == 0)
3011 if ((sc->sc_funcid & 0x01) == 1)
3012 do_invert = 1;
3013 break;
3014 default:
3015 if ((sc->sc_funcid & 0x01) == 1)
3016 do_invert = 1;
3017 break;
3018 }
3019
3020 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
3021 myea) != 0)
3022 goto bad;
3023
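	/* Each 16-bit NVM word holds two address octets, low byte first */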
3024 enaddr[0] = myea[0] & 0xff;
3025 enaddr[1] = myea[0] >> 8;
3026 enaddr[2] = myea[1] & 0xff;
3027 enaddr[3] = myea[1] >> 8;
3028 enaddr[4] = myea[2] & 0xff;
3029 enaddr[5] = myea[2] >> 8;
3030
3031 /*
3032 * Toggle the LSB of the MAC address on the second port
3033 * of some dual port cards.
3034 */
3035 if (do_invert != 0)
3036 enaddr[5] ^= 1;
3037
3038 return 0;
3039
3040 bad:
3041 return -1;
3042 }
3043
3044 /*
3045 * wm_set_ral:
3046 *
3047  * Set an entry in the receive address list.
3048 */
3049 static void
3050 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3051 {
3052 uint32_t ral_lo, ral_hi;
3053
3054 if (enaddr != NULL) {
3055 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3056 (enaddr[3] << 24);
3057 ral_hi = enaddr[4] | (enaddr[5] << 8);
3058 ral_hi |= RAL_AV;
3059 } else {
3060 ral_lo = 0;
3061 ral_hi = 0;
3062 }
3063
3064 if (sc->sc_type >= WM_T_82544) {
3065 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3066 ral_lo);
3067 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3068 ral_hi);
3069 } else {
3070 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3071 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3072 }
3073 }
3074
3075 /*
3076 * wm_mchash:
3077 *
3078 * Compute the hash of the multicast address for the 4096-bit
3079 * multicast filter.
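 *
 *	For example, with mchash_type 0 and the multicast address
 *	01:00:5e:00:00:01, hash = (0x00 >> 4) | (0x01 << 8) = 0x100,
 *	which wm_set_filter() maps to bit 0 of MTA[8].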
3080 */
3081 static uint32_t
3082 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3083 {
3084 static const int lo_shift[4] = { 4, 3, 2, 0 };
3085 static const int hi_shift[4] = { 4, 5, 6, 8 };
3086 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3087 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3088 uint32_t hash;
3089
3090 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3091 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3092 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3093 || (sc->sc_type == WM_T_PCH_SPT)) {
3094 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3095 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3096 return (hash & 0x3ff);
3097 }
3098 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3099 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3100
3101 return (hash & 0xfff);
3102 }
3103
3104 /*
3105 * wm_set_filter:
3106 *
3107 * Set up the receive filter.
3108 */
3109 static void
3110 wm_set_filter(struct wm_softc *sc)
3111 {
3112 struct ethercom *ec = &sc->sc_ethercom;
3113 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3114 struct ether_multi *enm;
3115 struct ether_multistep step;
3116 bus_addr_t mta_reg;
3117 uint32_t hash, reg, bit;
3118 int i, size, ralmax;
3119
3120 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3121 device_xname(sc->sc_dev), __func__));
3122 if (sc->sc_type >= WM_T_82544)
3123 mta_reg = WMREG_CORDOVA_MTA;
3124 else
3125 mta_reg = WMREG_MTA;
3126
3127 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3128
3129 if (ifp->if_flags & IFF_BROADCAST)
3130 sc->sc_rctl |= RCTL_BAM;
3131 if (ifp->if_flags & IFF_PROMISC) {
3132 sc->sc_rctl |= RCTL_UPE;
3133 goto allmulti;
3134 }
3135
3136 /*
3137 * Set the station address in the first RAL slot, and
3138 * clear the remaining slots.
3139 */
3140 if (sc->sc_type == WM_T_ICH8)
3141 		size = WM_RAL_TABSIZE_ICH8 - 1;
3142 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3143 || (sc->sc_type == WM_T_PCH))
3144 size = WM_RAL_TABSIZE_ICH8;
3145 else if (sc->sc_type == WM_T_PCH2)
3146 size = WM_RAL_TABSIZE_PCH2;
3147 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
3148 size = WM_RAL_TABSIZE_PCH_LPT;
3149 else if (sc->sc_type == WM_T_82575)
3150 size = WM_RAL_TABSIZE_82575;
3151 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3152 size = WM_RAL_TABSIZE_82576;
3153 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3154 size = WM_RAL_TABSIZE_I350;
3155 else
3156 size = WM_RAL_TABSIZE;
3157 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3158
3159 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
3160 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3161 switch (i) {
3162 case 0:
3163 /* We can use all entries */
3164 ralmax = size;
3165 break;
3166 case 1:
3167 /* Only RAR[0] */
3168 ralmax = 1;
3169 break;
3170 default:
3171 /* available SHRA + RAR[0] */
3172 ralmax = i + 1;
3173 }
3174 } else
3175 ralmax = size;
3176 for (i = 1; i < size; i++) {
3177 if (i < ralmax)
3178 wm_set_ral(sc, NULL, i);
3179 }
3180
3181 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3182 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3183 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3184 || (sc->sc_type == WM_T_PCH_SPT))
3185 size = WM_ICH8_MC_TABSIZE;
3186 else
3187 size = WM_MC_TABSIZE;
3188 /* Clear out the multicast table. */
3189 for (i = 0; i < size; i++)
3190 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3191
3192 ETHER_FIRST_MULTI(step, ec, enm);
3193 while (enm != NULL) {
3194 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3195 /*
3196 * We must listen to a range of multicast addresses.
3197 * For now, just accept all multicasts, rather than
3198 * trying to set only those filter bits needed to match
3199 * the range. (At this time, the only use of address
3200 * ranges is for IP multicast routing, for which the
3201 * range is big enough to require all bits set.)
3202 */
3203 goto allmulti;
3204 }
3205
3206 hash = wm_mchash(sc, enm->enm_addrlo);
3207
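		/* High hash bits select the MTA word, the low 5 the bit */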
3208 reg = (hash >> 5);
3209 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3210 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3211 || (sc->sc_type == WM_T_PCH2)
3212 || (sc->sc_type == WM_T_PCH_LPT)
3213 || (sc->sc_type == WM_T_PCH_SPT))
3214 reg &= 0x1f;
3215 else
3216 reg &= 0x7f;
3217 bit = hash & 0x1f;
3218
3219 hash = CSR_READ(sc, mta_reg + (reg << 2));
3220 hash |= 1U << bit;
3221
3222 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3223 /*
3224 * 82544 Errata 9: Certain register cannot be written
3225 * with particular alignments in PCI-X bus operation
3226 * (FCAH, MTA and VFTA).
3227 */
3228 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3229 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3230 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3231 } else
3232 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3233
3234 ETHER_NEXT_MULTI(step, enm);
3235 }
3236
3237 ifp->if_flags &= ~IFF_ALLMULTI;
3238 goto setit;
3239
3240 allmulti:
3241 ifp->if_flags |= IFF_ALLMULTI;
3242 sc->sc_rctl |= RCTL_MPE;
3243
3244 setit:
3245 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3246 }
3247
3248 /* Reset and init related */
3249
3250 static void
3251 wm_set_vlan(struct wm_softc *sc)
3252 {
3253
3254 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3255 device_xname(sc->sc_dev), __func__));
3256 /* Deal with VLAN enables. */
3257 if (VLAN_ATTACHED(&sc->sc_ethercom))
3258 sc->sc_ctrl |= CTRL_VME;
3259 else
3260 sc->sc_ctrl &= ~CTRL_VME;
3261
3262 /* Write the control registers. */
3263 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3264 }
3265
3266 static void
3267 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3268 {
3269 uint32_t gcr;
3270 pcireg_t ctrl2;
3271
3272 gcr = CSR_READ(sc, WMREG_GCR);
3273
3274 /* Only take action if timeout value is defaulted to 0 */
3275 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3276 goto out;
3277
3278 if ((gcr & GCR_CAP_VER2) == 0) {
3279 gcr |= GCR_CMPL_TMOUT_10MS;
3280 goto out;
3281 }
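	/*
	 * Capability version 2 devices take the completion timeout from
	 * the PCIe Device Control 2 register instead; program the 16ms
	 * range there.
	 */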
3282
3283 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3284 sc->sc_pcixe_capoff + PCIE_DCSR2);
3285 ctrl2 |= WM_PCIE_DCSR2_16MS;
3286 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3287 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3288
3289 out:
3290 /* Disable completion timeout resend */
3291 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3292
3293 CSR_WRITE(sc, WMREG_GCR, gcr);
3294 }
3295
3296 void
3297 wm_get_auto_rd_done(struct wm_softc *sc)
3298 {
3299 int i;
3300
3301 /* wait for eeprom to reload */
3302 switch (sc->sc_type) {
3303 case WM_T_82571:
3304 case WM_T_82572:
3305 case WM_T_82573:
3306 case WM_T_82574:
3307 case WM_T_82583:
3308 case WM_T_82575:
3309 case WM_T_82576:
3310 case WM_T_82580:
3311 case WM_T_I350:
3312 case WM_T_I354:
3313 case WM_T_I210:
3314 case WM_T_I211:
3315 case WM_T_80003:
3316 case WM_T_ICH8:
3317 case WM_T_ICH9:
3318 for (i = 0; i < 10; i++) {
3319 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3320 break;
3321 delay(1000);
3322 }
3323 if (i == 10) {
3324 log(LOG_ERR, "%s: auto read from eeprom failed to "
3325 "complete\n", device_xname(sc->sc_dev));
3326 }
3327 break;
3328 default:
3329 break;
3330 }
3331 }
3332
3333 void
3334 wm_lan_init_done(struct wm_softc *sc)
3335 {
3336 uint32_t reg = 0;
3337 int i;
3338
3339 /* wait for eeprom to reload */
3340 switch (sc->sc_type) {
3341 case WM_T_ICH10:
3342 case WM_T_PCH:
3343 case WM_T_PCH2:
3344 case WM_T_PCH_LPT:
3345 case WM_T_PCH_SPT:
3346 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3347 reg = CSR_READ(sc, WMREG_STATUS);
3348 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3349 break;
3350 delay(100);
3351 }
3352 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3353 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3354 "complete\n", device_xname(sc->sc_dev), __func__);
3355 }
3356 break;
3357 default:
3358 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3359 __func__);
3360 break;
3361 }
3362
3363 reg &= ~STATUS_LAN_INIT_DONE;
3364 CSR_WRITE(sc, WMREG_STATUS, reg);
3365 }
3366
3367 void
3368 wm_get_cfg_done(struct wm_softc *sc)
3369 {
3370 int mask;
3371 uint32_t reg;
3372 int i;
3373
3374 /* wait for eeprom to reload */
3375 switch (sc->sc_type) {
3376 case WM_T_82542_2_0:
3377 case WM_T_82542_2_1:
3378 /* null */
3379 break;
3380 case WM_T_82543:
3381 case WM_T_82544:
3382 case WM_T_82540:
3383 case WM_T_82545:
3384 case WM_T_82545_3:
3385 case WM_T_82546:
3386 case WM_T_82546_3:
3387 case WM_T_82541:
3388 case WM_T_82541_2:
3389 case WM_T_82547:
3390 case WM_T_82547_2:
3391 case WM_T_82573:
3392 case WM_T_82574:
3393 case WM_T_82583:
3394 /* generic */
3395 delay(10*1000);
3396 break;
3397 case WM_T_80003:
3398 case WM_T_82571:
3399 case WM_T_82572:
3400 case WM_T_82575:
3401 case WM_T_82576:
3402 case WM_T_82580:
3403 case WM_T_I350:
3404 case WM_T_I354:
3405 case WM_T_I210:
3406 case WM_T_I211:
3407 if (sc->sc_type == WM_T_82571) {
3408 /* Only 82571 shares port 0 */
3409 mask = EEMNGCTL_CFGDONE_0;
3410 } else
3411 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3412 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3413 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3414 break;
3415 delay(1000);
3416 }
3417 if (i >= WM_PHY_CFG_TIMEOUT) {
3418 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3419 device_xname(sc->sc_dev), __func__));
3420 }
3421 break;
3422 case WM_T_ICH8:
3423 case WM_T_ICH9:
3424 case WM_T_ICH10:
3425 case WM_T_PCH:
3426 case WM_T_PCH2:
3427 case WM_T_PCH_LPT:
3428 case WM_T_PCH_SPT:
3429 delay(10*1000);
3430 if (sc->sc_type >= WM_T_ICH10)
3431 wm_lan_init_done(sc);
3432 else
3433 wm_get_auto_rd_done(sc);
3434
3435 reg = CSR_READ(sc, WMREG_STATUS);
3436 if ((reg & STATUS_PHYRA) != 0)
3437 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3438 break;
3439 default:
3440 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3441 __func__);
3442 break;
3443 }
3444 }
3445
3446 /* Init hardware bits */
3447 void
3448 wm_initialize_hardware_bits(struct wm_softc *sc)
3449 {
3450 uint32_t tarc0, tarc1, reg;
3451
3452 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3453 device_xname(sc->sc_dev), __func__));
3454 /* For 82571 variant, 80003 and ICHs */
3455 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3456 || (sc->sc_type >= WM_T_80003)) {
3457
3458 /* Transmit Descriptor Control 0 */
3459 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3460 reg |= TXDCTL_COUNT_DESC;
3461 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3462
3463 /* Transmit Descriptor Control 1 */
3464 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3465 reg |= TXDCTL_COUNT_DESC;
3466 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3467
3468 /* TARC0 */
3469 tarc0 = CSR_READ(sc, WMREG_TARC0);
3470 switch (sc->sc_type) {
3471 case WM_T_82571:
3472 case WM_T_82572:
3473 case WM_T_82573:
3474 case WM_T_82574:
3475 case WM_T_82583:
3476 case WM_T_80003:
3477 /* Clear bits 30..27 */
3478 tarc0 &= ~__BITS(30, 27);
3479 break;
3480 default:
3481 break;
3482 }
3483
3484 switch (sc->sc_type) {
3485 case WM_T_82571:
3486 case WM_T_82572:
3487 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3488
3489 tarc1 = CSR_READ(sc, WMREG_TARC1);
3490 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3491 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3492 /* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bit 22 */
3494
3495 /* TARC1 bit 28 */
3496 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3497 tarc1 &= ~__BIT(28);
3498 else
3499 tarc1 |= __BIT(28);
3500 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3501
3502 /*
3503 * 8257[12] Errata No.13
3504 * Disable Dyamic Clock Gating.
3505 */
3506 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3507 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3508 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3509 break;
3510 case WM_T_82573:
3511 case WM_T_82574:
3512 case WM_T_82583:
3513 if ((sc->sc_type == WM_T_82574)
3514 || (sc->sc_type == WM_T_82583))
3515 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3516
3517 /* Extended Device Control */
3518 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3519 reg &= ~__BIT(23); /* Clear bit 23 */
3520 reg |= __BIT(22); /* Set bit 22 */
3521 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3522
3523 /* Device Control */
3524 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3525 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3526
3527 /* PCIe Control Register */
3528 /*
3529 * 82573 Errata (unknown).
3530 *
3531 * 82574 Errata 25 and 82583 Errata 12
3532 * "Dropped Rx Packets":
			 * NVM Image Version 2.1.4 and newer does not
			 * have this bug.
3534 */
3535 reg = CSR_READ(sc, WMREG_GCR);
3536 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3537 CSR_WRITE(sc, WMREG_GCR, reg);
3538
3539 if ((sc->sc_type == WM_T_82574)
3540 || (sc->sc_type == WM_T_82583)) {
3541 /*
3542 * Document says this bit must be set for
3543 * proper operation.
3544 */
3545 reg = CSR_READ(sc, WMREG_GCR);
3546 reg |= __BIT(22);
3547 CSR_WRITE(sc, WMREG_GCR, reg);
3548
3549 /*
3550 * Apply workaround for hardware errata
3551 * documented in errata docs Fixes issue where
3552 * some error prone or unreliable PCIe
3553 * completions are occurring, particularly
3554 * with ASPM enabled. Without fix, issue can
3555 * cause Tx timeouts.
3556 */
3557 reg = CSR_READ(sc, WMREG_GCR2);
3558 reg |= __BIT(0);
3559 CSR_WRITE(sc, WMREG_GCR2, reg);
3560 }
3561 break;
3562 case WM_T_80003:
3563 /* TARC0 */
3564 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3565 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3567
3568 /* TARC1 bit 28 */
3569 tarc1 = CSR_READ(sc, WMREG_TARC1);
3570 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3571 tarc1 &= ~__BIT(28);
3572 else
3573 tarc1 |= __BIT(28);
3574 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3575 break;
3576 case WM_T_ICH8:
3577 case WM_T_ICH9:
3578 case WM_T_ICH10:
3579 case WM_T_PCH:
3580 case WM_T_PCH2:
3581 case WM_T_PCH_LPT:
3582 case WM_T_PCH_SPT:
3583 /* TARC0 */
3584 if ((sc->sc_type == WM_T_ICH8)
3585 || (sc->sc_type == WM_T_PCH_SPT)) {
3586 /* Set TARC0 bits 29 and 28 */
3587 tarc0 |= __BITS(29, 28);
3588 }
3589 /* Set TARC0 bits 23,24,26,27 */
3590 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3591
3592 /* CTRL_EXT */
3593 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3594 reg |= __BIT(22); /* Set bit 22 */
3595 /*
3596 * Enable PHY low-power state when MAC is at D3
3597 * w/o WoL
3598 */
3599 if (sc->sc_type >= WM_T_PCH)
3600 reg |= CTRL_EXT_PHYPDEN;
3601 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3602
3603 /* TARC1 */
3604 tarc1 = CSR_READ(sc, WMREG_TARC1);
3605 /* bit 28 */
3606 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3607 tarc1 &= ~__BIT(28);
3608 else
3609 tarc1 |= __BIT(28);
3610 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3611 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3612
3613 /* Device Status */
3614 if (sc->sc_type == WM_T_ICH8) {
3615 reg = CSR_READ(sc, WMREG_STATUS);
3616 reg &= ~__BIT(31);
3617 CSR_WRITE(sc, WMREG_STATUS, reg);
3618
3619 }
3620
3621 /* IOSFPC */
3622 if (sc->sc_type == WM_T_PCH_SPT) {
3623 reg = CSR_READ(sc, WMREG_IOSFPC);
3624 reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
3625 CSR_WRITE(sc, WMREG_IOSFPC, reg);
3626 }
3627 /*
3628 * Work-around descriptor data corruption issue during
3629 * NFS v2 UDP traffic, just disable the NFS filtering
3630 * capability.
3631 */
3632 reg = CSR_READ(sc, WMREG_RFCTL);
3633 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3634 CSR_WRITE(sc, WMREG_RFCTL, reg);
3635 break;
3636 default:
3637 break;
3638 }
3639 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3640
3641 /*
3642 * 8257[12] Errata No.52 and some others.
3643 * Avoid RSS Hash Value bug.
3644 */
3645 switch (sc->sc_type) {
3646 case WM_T_82571:
3647 case WM_T_82572:
3648 case WM_T_82573:
3649 case WM_T_80003:
3650 case WM_T_ICH8:
3651 reg = CSR_READ(sc, WMREG_RFCTL);
3652 reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3653 CSR_WRITE(sc, WMREG_RFCTL, reg);
3654 break;
3655 default:
3656 break;
3657 }
3658 }
3659 }
3660
3661 static uint32_t
3662 wm_rxpbs_adjust_82580(uint32_t val)
3663 {
3664 uint32_t rv = 0;
3665
3666 if (val < __arraycount(wm_82580_rxpbs_table))
3667 rv = wm_82580_rxpbs_table[val];
3668
3669 return rv;
3670 }
3671
3672 /*
3673 * wm_reset:
3674 *
3675 * Reset the i82542 chip.
3676 */
3677 static void
3678 wm_reset(struct wm_softc *sc)
3679 {
3680 int phy_reset = 0;
3681 int i, error = 0;
3682 uint32_t reg, mask;
3683
3684 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3685 device_xname(sc->sc_dev), __func__));
3686 /*
3687 * Allocate on-chip memory according to the MTU size.
3688 * The Packet Buffer Allocation register must be written
3689 * before the chip is reset.
3690 */
3691 switch (sc->sc_type) {
3692 case WM_T_82547:
3693 case WM_T_82547_2:
3694 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3695 PBA_22K : PBA_30K;
3696 for (i = 0; i < sc->sc_ntxqueues; i++) {
3697 struct wm_txqueue *txq = &sc->sc_txq[i];
3698 txq->txq_fifo_head = 0;
3699 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3700 txq->txq_fifo_size =
3701 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3702 txq->txq_fifo_stall = 0;
3703 }
3704 break;
3705 case WM_T_82571:
3706 case WM_T_82572:
	case WM_T_82575: /* XXX need special handling for jumbo frames */
3708 case WM_T_80003:
3709 sc->sc_pba = PBA_32K;
3710 break;
3711 case WM_T_82573:
3712 sc->sc_pba = PBA_12K;
3713 break;
3714 case WM_T_82574:
3715 case WM_T_82583:
3716 sc->sc_pba = PBA_20K;
3717 break;
3718 case WM_T_82576:
3719 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3720 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3721 break;
3722 case WM_T_82580:
3723 case WM_T_I350:
3724 case WM_T_I354:
3725 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3726 break;
3727 case WM_T_I210:
3728 case WM_T_I211:
3729 sc->sc_pba = PBA_34K;
3730 break;
3731 case WM_T_ICH8:
3732 /* Workaround for a bit corruption issue in FIFO memory */
3733 sc->sc_pba = PBA_8K;
3734 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3735 break;
3736 case WM_T_ICH9:
3737 case WM_T_ICH10:
3738 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3739 PBA_14K : PBA_10K;
3740 break;
3741 case WM_T_PCH:
3742 case WM_T_PCH2:
3743 case WM_T_PCH_LPT:
3744 case WM_T_PCH_SPT:
3745 sc->sc_pba = PBA_26K;
3746 break;
3747 default:
3748 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3749 PBA_40K : PBA_48K;
3750 break;
3751 }
3752 /*
3753 * Only old or non-multiqueue devices have the PBA register
3754 * XXX Need special handling for 82575.
3755 */
3756 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3757 || (sc->sc_type == WM_T_82575))
3758 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3759
3760 /* Prevent the PCI-E bus from sticking */
3761 if (sc->sc_flags & WM_F_PCIE) {
3762 int timeout = 800;
3763
3764 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3765 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3766
3767 while (timeout--) {
3768 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3769 == 0)
3770 break;
3771 delay(100);
3772 }
3773 }
3774
3775 /* Set the completion timeout for interface */
3776 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3777 || (sc->sc_type == WM_T_82580)
3778 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3779 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3780 wm_set_pcie_completion_timeout(sc);
3781
3782 /* Clear interrupt */
3783 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3784 if (sc->sc_nintrs > 1) {
3785 if (sc->sc_type != WM_T_82574) {
3786 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3787 CSR_WRITE(sc, WMREG_EIAC, 0);
3788 } else {
3789 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3790 }
3791 }
3792
3793 /* Stop the transmit and receive processes. */
3794 CSR_WRITE(sc, WMREG_RCTL, 0);
3795 sc->sc_rctl &= ~RCTL_EN;
3796 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3797 CSR_WRITE_FLUSH(sc);
3798
3799 /* XXX set_tbi_sbp_82543() */
3800
3801 delay(10*1000);
3802
3803 /* Must acquire the MDIO ownership before MAC reset */
3804 switch (sc->sc_type) {
3805 case WM_T_82573:
3806 case WM_T_82574:
3807 case WM_T_82583:
3808 error = wm_get_hw_semaphore_82573(sc);
3809 break;
3810 default:
3811 break;
3812 }
3813
3814 /*
3815 * 82541 Errata 29? & 82547 Errata 28?
3816 * See also the description about PHY_RST bit in CTRL register
3817 * in 8254x_GBe_SDM.pdf.
3818 */
3819 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3820 CSR_WRITE(sc, WMREG_CTRL,
3821 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3822 CSR_WRITE_FLUSH(sc);
3823 delay(5000);
3824 }
3825
3826 switch (sc->sc_type) {
3827 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3828 case WM_T_82541:
3829 case WM_T_82541_2:
3830 case WM_T_82547:
3831 case WM_T_82547_2:
3832 /*
3833 * On some chipsets, a reset through a memory-mapped write
3834 * cycle can cause the chip to reset before completing the
		 * write cycle. This causes major headaches that can be
3836 * avoided by issuing the reset via indirect register writes
3837 * through I/O space.
3838 *
3839 * So, if we successfully mapped the I/O BAR at attach time,
3840 * use that. Otherwise, try our luck with a memory-mapped
3841 * reset.
3842 */
3843 if (sc->sc_flags & WM_F_IOH_VALID)
3844 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3845 else
3846 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3847 break;
3848 case WM_T_82545_3:
3849 case WM_T_82546_3:
3850 /* Use the shadow control register on these chips. */
3851 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3852 break;
3853 case WM_T_80003:
3854 mask = swfwphysem[sc->sc_funcid];
3855 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3856 wm_get_swfw_semaphore(sc, mask);
3857 CSR_WRITE(sc, WMREG_CTRL, reg);
3858 wm_put_swfw_semaphore(sc, mask);
3859 break;
3860 case WM_T_ICH8:
3861 case WM_T_ICH9:
3862 case WM_T_ICH10:
3863 case WM_T_PCH:
3864 case WM_T_PCH2:
3865 case WM_T_PCH_LPT:
3866 case WM_T_PCH_SPT:
3867 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3868 if (wm_phy_resetisblocked(sc) == false) {
3869 /*
3870 * Gate automatic PHY configuration by hardware on
3871 * non-managed 82579
3872 */
3873 if ((sc->sc_type == WM_T_PCH2)
3874 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3875 == 0))
3876 wm_gate_hw_phy_config_ich8lan(sc, true);
3877
3878 reg |= CTRL_PHY_RESET;
3879 phy_reset = 1;
3880 } else
3881 printf("XXX reset is blocked!!!\n");
3882 wm_get_swfwhw_semaphore(sc);
3883 CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier during reset */
3885 delay(20*1000);
3886 wm_put_swfwhw_semaphore(sc);
3887 break;
3888 case WM_T_82580:
3889 case WM_T_I350:
3890 case WM_T_I354:
3891 case WM_T_I210:
3892 case WM_T_I211:
3893 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3894 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3895 CSR_WRITE_FLUSH(sc);
3896 delay(5000);
3897 break;
3898 case WM_T_82542_2_0:
3899 case WM_T_82542_2_1:
3900 case WM_T_82543:
3901 case WM_T_82540:
3902 case WM_T_82545:
3903 case WM_T_82546:
3904 case WM_T_82571:
3905 case WM_T_82572:
3906 case WM_T_82573:
3907 case WM_T_82574:
3908 case WM_T_82575:
3909 case WM_T_82576:
3910 case WM_T_82583:
3911 default:
3912 /* Everything else can safely use the documented method. */
3913 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3914 break;
3915 }
3916
3917 /* Must release the MDIO ownership after MAC reset */
3918 switch (sc->sc_type) {
3919 case WM_T_82573:
3920 case WM_T_82574:
3921 case WM_T_82583:
3922 if (error == 0)
3923 wm_put_hw_semaphore_82573(sc);
3924 break;
3925 default:
3926 break;
3927 }
3928
3929 if (phy_reset != 0)
3930 wm_get_cfg_done(sc);
3931
3932 /* reload EEPROM */
3933 switch (sc->sc_type) {
3934 case WM_T_82542_2_0:
3935 case WM_T_82542_2_1:
3936 case WM_T_82543:
3937 case WM_T_82544:
3938 delay(10);
3939 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3940 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3941 CSR_WRITE_FLUSH(sc);
3942 delay(2000);
3943 break;
3944 case WM_T_82540:
3945 case WM_T_82545:
3946 case WM_T_82545_3:
3947 case WM_T_82546:
3948 case WM_T_82546_3:
3949 delay(5*1000);
3950 /* XXX Disable HW ARPs on ASF enabled adapters */
3951 break;
3952 case WM_T_82541:
3953 case WM_T_82541_2:
3954 case WM_T_82547:
3955 case WM_T_82547_2:
3956 delay(20000);
3957 /* XXX Disable HW ARPs on ASF enabled adapters */
3958 break;
3959 case WM_T_82571:
3960 case WM_T_82572:
3961 case WM_T_82573:
3962 case WM_T_82574:
3963 case WM_T_82583:
3964 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3965 delay(10);
3966 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3967 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3968 CSR_WRITE_FLUSH(sc);
3969 }
3970 /* check EECD_EE_AUTORD */
3971 wm_get_auto_rd_done(sc);
3972 /*
3973 * Phy configuration from NVM just starts after EECD_AUTO_RD
3974 * is set.
3975 */
3976 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3977 || (sc->sc_type == WM_T_82583))
3978 delay(25*1000);
3979 break;
3980 case WM_T_82575:
3981 case WM_T_82576:
3982 case WM_T_82580:
3983 case WM_T_I350:
3984 case WM_T_I354:
3985 case WM_T_I210:
3986 case WM_T_I211:
3987 case WM_T_80003:
3988 /* check EECD_EE_AUTORD */
3989 wm_get_auto_rd_done(sc);
3990 break;
3991 case WM_T_ICH8:
3992 case WM_T_ICH9:
3993 case WM_T_ICH10:
3994 case WM_T_PCH:
3995 case WM_T_PCH2:
3996 case WM_T_PCH_LPT:
3997 case WM_T_PCH_SPT:
3998 break;
3999 default:
4000 panic("%s: unknown type\n", __func__);
4001 }
4002
4003 /* Check whether EEPROM is present or not */
4004 switch (sc->sc_type) {
4005 case WM_T_82575:
4006 case WM_T_82576:
4007 case WM_T_82580:
4008 case WM_T_I350:
4009 case WM_T_I354:
4010 case WM_T_ICH8:
4011 case WM_T_ICH9:
4012 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4013 /* Not found */
4014 sc->sc_flags |= WM_F_EEPROM_INVALID;
4015 if (sc->sc_type == WM_T_82575)
4016 wm_reset_init_script_82575(sc);
4017 }
4018 break;
4019 default:
4020 break;
4021 }
4022
4023 if ((sc->sc_type == WM_T_82580)
4024 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4025 /* clear global device reset status bit */
4026 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4027 }
4028
4029 /* Clear any pending interrupt events. */
4030 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4031 reg = CSR_READ(sc, WMREG_ICR);
4032 if (sc->sc_nintrs > 1) {
4033 if (sc->sc_type != WM_T_82574) {
4034 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4035 CSR_WRITE(sc, WMREG_EIAC, 0);
4036 } else
4037 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4038 }
4039
4040 /* reload sc_ctrl */
4041 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4042
4043 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4044 wm_set_eee_i350(sc);
4045
4046 /* dummy read from WUC */
4047 if (sc->sc_type == WM_T_PCH)
4048 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4049 /*
4050 * For PCH, this write will make sure that any noise will be detected
4051 * as a CRC error and be dropped rather than show up as a bad packet
4052 * to the DMA engine
4053 */
4054 if (sc->sc_type == WM_T_PCH)
4055 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4056
4057 if (sc->sc_type >= WM_T_82544)
4058 CSR_WRITE(sc, WMREG_WUC, 0);
4059
4060 wm_reset_mdicnfg_82580(sc);
4061
4062 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4063 wm_pll_workaround_i210(sc);
4064 }
4065
4066 /*
4067 * wm_add_rxbuf:
4068 *
 *	Add a receive buffer to the indicated descriptor.
4070 */
4071 static int
4072 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4073 {
4074 struct wm_softc *sc = rxq->rxq_sc;
4075 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4076 struct mbuf *m;
4077 int error;
4078
4079 KASSERT(WM_RX_LOCKED(rxq));
4080
4081 MGETHDR(m, M_DONTWAIT, MT_DATA);
4082 if (m == NULL)
4083 return ENOBUFS;
4084
4085 MCLGET(m, M_DONTWAIT);
4086 if ((m->m_flags & M_EXT) == 0) {
4087 m_freem(m);
4088 return ENOBUFS;
4089 }
4090
4091 if (rxs->rxs_mbuf != NULL)
4092 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4093
4094 rxs->rxs_mbuf = m;
4095
4096 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4097 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4098 BUS_DMA_READ | BUS_DMA_NOWAIT);
4099 if (error) {
4100 /* XXX XXX XXX */
4101 aprint_error_dev(sc->sc_dev,
4102 "unable to load rx DMA map %d, error = %d\n",
4103 idx, error);
4104 panic("wm_add_rxbuf");
4105 }
4106
4107 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4108 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4109
4110 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4111 if ((sc->sc_rctl & RCTL_EN) != 0)
4112 wm_init_rxdesc(rxq, idx);
4113 } else
4114 wm_init_rxdesc(rxq, idx);
4115
4116 return 0;
4117 }
4118
4119 /*
4120 * wm_rxdrain:
4121 *
4122 * Drain the receive queue.
4123 */
4124 static void
4125 wm_rxdrain(struct wm_rxqueue *rxq)
4126 {
4127 struct wm_softc *sc = rxq->rxq_sc;
4128 struct wm_rxsoft *rxs;
4129 int i;
4130
4131 KASSERT(WM_RX_LOCKED(rxq));
4132
4133 for (i = 0; i < WM_NRXDESC; i++) {
4134 rxs = &rxq->rxq_soft[i];
4135 if (rxs->rxs_mbuf != NULL) {
4136 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4137 m_freem(rxs->rxs_mbuf);
4138 rxs->rxs_mbuf = NULL;
4139 }
4140 }
4141 }
4142
4143
4144 /*
 * XXX copied from FreeBSD's sys/net/rss_config.c
4146 */
4147 /*
4148 * RSS secret key, intended to prevent attacks on load-balancing. Its
4149 * effectiveness may be limited by algorithm choice and available entropy
4150 * during the boot.
4151 *
4152 * XXXRW: And that we don't randomize it yet!
4153 *
4154 * This is the default Microsoft RSS specification key which is also
4155 * the Chelsio T5 firmware default key.
4156 */
4157 #define RSS_KEYSIZE 40
4158 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4159 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4160 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4161 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4162 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4163 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4164 };
4165
4166 /*
4167 * Caller must pass an array of size sizeof(rss_key).
4168 *
4169 * XXX
 * As if_ixgbe may use this function, it should not be an
 * if_wm-specific function.
4172 */
4173 static void
4174 wm_rss_getkey(uint8_t *key)
4175 {
4176
4177 memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4178 }
4179
4180 /*
4181 * Setup registers for RSS.
4182 *
4183 * XXX not yet VMDq support
4184 */
4185 static void
4186 wm_init_rss(struct wm_softc *sc)
4187 {
4188 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4189 int i;
4190
4191 CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4192
4193 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4194 int qid, reta_ent;
4195
4196 qid = i % sc->sc_nrxqueues;
		switch (sc->sc_type) {
4198 case WM_T_82574:
4199 reta_ent = __SHIFTIN(qid,
4200 RETA_ENT_QINDEX_MASK_82574);
4201 break;
4202 case WM_T_82575:
4203 reta_ent = __SHIFTIN(qid,
4204 RETA_ENT_QINDEX1_MASK_82575);
4205 break;
4206 default:
4207 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4208 break;
4209 }
4210
4211 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4212 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4213 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4214 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4215 }
4216
4217 wm_rss_getkey((uint8_t *)rss_key);
4218 for (i = 0; i < RSSRK_NUM_REGS; i++)
4219 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4220
4221 if (sc->sc_type == WM_T_82574)
4222 mrqc = MRQC_ENABLE_RSS_MQ_82574;
4223 else
4224 mrqc = MRQC_ENABLE_RSS_MQ;
4225
	/*
	 * XXX
	 * The same as FreeBSD's igb.
	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
	 */
4230 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4231 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4232 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4233 mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4234
4235 CSR_WRITE(sc, WMREG_MRQC, mrqc);
4236 }
4237
4238 /*
 * Adjust the TX and RX queue numbers which the system actually uses.
 *
 * The numbers are affected by the following parameters:
 * - The number of hardware queues
4243 * - The number of MSI-X vectors (= "nvectors" argument)
4244 * - ncpu
4245 */
4246 static void
4247 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4248 {
4249 int hw_ntxqueues, hw_nrxqueues;
4250
4251 if (nvectors < 3) {
4252 sc->sc_ntxqueues = 1;
4253 sc->sc_nrxqueues = 1;
4254 return;
4255 }
4256
	switch (sc->sc_type) {
4258 case WM_T_82572:
4259 hw_ntxqueues = 2;
4260 hw_nrxqueues = 2;
4261 break;
4262 case WM_T_82574:
4263 hw_ntxqueues = 2;
4264 hw_nrxqueues = 2;
4265 break;
4266 case WM_T_82575:
4267 hw_ntxqueues = 4;
4268 hw_nrxqueues = 4;
4269 break;
4270 case WM_T_82576:
4271 hw_ntxqueues = 16;
4272 hw_nrxqueues = 16;
4273 break;
4274 case WM_T_82580:
4275 case WM_T_I350:
4276 case WM_T_I354:
4277 hw_ntxqueues = 8;
4278 hw_nrxqueues = 8;
4279 break;
4280 case WM_T_I210:
4281 hw_ntxqueues = 4;
4282 hw_nrxqueues = 4;
4283 break;
4284 case WM_T_I211:
4285 hw_ntxqueues = 2;
4286 hw_nrxqueues = 2;
4287 break;
4288 /*
	 * As the Ethernet controllers below do not support MSI-X,
	 * this driver does not use multiple queues on them:
4291 * - WM_T_80003
4292 * - WM_T_ICH8
4293 * - WM_T_ICH9
4294 * - WM_T_ICH10
4295 * - WM_T_PCH
4296 * - WM_T_PCH2
4297 * - WM_T_PCH_LPT
4298 */
4299 default:
4300 hw_ntxqueues = 1;
4301 hw_nrxqueues = 1;
4302 break;
4303 }
4304
4305 /*
	 * As having more queues than MSI-X vectors cannot improve scaling,
	 * we limit the number of queues actually used.
	 *
	 * XXX
	 * Currently, we separate TX queue interrupts and RX queue interrupts.
	 * However, the number of MSI-X vectors on recent controllers
	 * (such as the I354) assumes that drivers bundle a TX queue
	 * interrupt and an RX queue interrupt into one interrupt;
	 * FreeBSD's igb handles interrupts that way, for example.
4315 */
4316 if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
4317 sc->sc_ntxqueues = (nvectors - 1) / 2;
4318 sc->sc_nrxqueues = (nvectors - 1) / 2;
4319 } else {
4320 sc->sc_ntxqueues = hw_ntxqueues;
4321 sc->sc_nrxqueues = hw_nrxqueues;
4322 }
4323
4324 /*
	 * As having more queues than CPUs cannot improve scaling, we limit
	 * the number of queues actually used.
4327 */
4328 if (ncpu < sc->sc_ntxqueues)
4329 sc->sc_ntxqueues = ncpu;
4330 if (ncpu < sc->sc_nrxqueues)
4331 sc->sc_nrxqueues = ncpu;
4332 }
4333
4334 /*
4335 * Both single interrupt MSI and INTx can use this function.
4336 */
4337 static int
4338 wm_setup_legacy(struct wm_softc *sc)
4339 {
4340 pci_chipset_tag_t pc = sc->sc_pc;
4341 const char *intrstr = NULL;
4342 char intrbuf[PCI_INTRSTR_LEN];
4343 int error;
4344
4345 error = wm_alloc_txrx_queues(sc);
4346 if (error) {
4347 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4348 error);
4349 return ENOMEM;
4350 }
4351 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4352 sizeof(intrbuf));
4353 #ifdef WM_MPSAFE
4354 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4355 #endif
4356 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4357 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4358 if (sc->sc_ihs[0] == NULL) {
4359 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
4360 (pci_intr_type(sc->sc_intrs[0])
4361 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4362 return ENOMEM;
4363 }
4364
4365 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4366 sc->sc_nintrs = 1;
4367 return 0;
4368 }
4369
4370 static int
4371 wm_setup_msix(struct wm_softc *sc)
4372 {
4373 void *vih;
4374 kcpuset_t *affinity;
4375 int qidx, error, intr_idx, tx_established, rx_established;
4376 pci_chipset_tag_t pc = sc->sc_pc;
4377 const char *intrstr = NULL;
4378 char intrbuf[PCI_INTRSTR_LEN];
4379 char intr_xname[INTRDEVNAMEBUF];
4380 /*
4381 * To avoid other devices' interrupts, the affinity of Tx/Rx interrupts
	 * starts from CPU#1.
4383 */
4384 int affinity_offset = 1;
4385
4386 error = wm_alloc_txrx_queues(sc);
4387 if (error) {
4388 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4389 error);
4390 return ENOMEM;
4391 }
4392
4393 kcpuset_create(&affinity, false);
4394 intr_idx = 0;
4395
4396 /*
4397 * TX
4398 */
4399 tx_established = 0;
4400 for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
4401 struct wm_txqueue *txq = &sc->sc_txq[qidx];
4402 int affinity_to = (affinity_offset + intr_idx) % ncpu;
4403
4404 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4405 sizeof(intrbuf));
4406 #ifdef WM_MPSAFE
4407 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4408 PCI_INTR_MPSAFE, true);
4409 #endif
4410 memset(intr_xname, 0, sizeof(intr_xname));
4411 snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
4412 device_xname(sc->sc_dev), qidx);
4413 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4414 IPL_NET, wm_txintr_msix, txq, intr_xname);
4415 if (vih == NULL) {
4416 aprint_error_dev(sc->sc_dev,
4417 "unable to establish MSI-X(for TX)%s%s\n",
4418 intrstr ? " at " : "",
4419 intrstr ? intrstr : "");
4420
4421 goto fail_0;
4422 }
4423 kcpuset_zero(affinity);
4424 /* Round-robin affinity */
4425 kcpuset_set(affinity, affinity_to);
4426 error = interrupt_distribute(vih, affinity, NULL);
4427 if (error == 0) {
4428 aprint_normal_dev(sc->sc_dev,
4429 "for TX interrupting at %s affinity to %u\n",
4430 intrstr, affinity_to);
4431 } else {
4432 aprint_normal_dev(sc->sc_dev,
4433 "for TX interrupting at %s\n", intrstr);
4434 }
4435 sc->sc_ihs[intr_idx] = vih;
4436 txq->txq_id = qidx;
4437 txq->txq_intr_idx = intr_idx;
4438
4439 tx_established++;
4440 intr_idx++;
4441 }
4442
4443 /*
4444 * RX
4445 */
4446 rx_established = 0;
4447 for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4448 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4449 int affinity_to = (affinity_offset + intr_idx) % ncpu;
4450
4451 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4452 sizeof(intrbuf));
4453 #ifdef WM_MPSAFE
4454 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4455 PCI_INTR_MPSAFE, true);
4456 #endif
4457 memset(intr_xname, 0, sizeof(intr_xname));
4458 snprintf(intr_xname, sizeof(intr_xname), "%sRX%d",
4459 device_xname(sc->sc_dev), qidx);
4460 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4461 IPL_NET, wm_rxintr_msix, rxq, intr_xname);
4462 if (vih == NULL) {
4463 aprint_error_dev(sc->sc_dev,
4464 "unable to establish MSI-X(for RX)%s%s\n",
4465 intrstr ? " at " : "",
4466 intrstr ? intrstr : "");
4467
4468 goto fail_1;
4469 }
4470 kcpuset_zero(affinity);
4471 /* Round-robin affinity */
4472 kcpuset_set(affinity, affinity_to);
4473 error = interrupt_distribute(vih, affinity, NULL);
4474 if (error == 0) {
4475 aprint_normal_dev(sc->sc_dev,
4476 "for RX interrupting at %s affinity to %u\n",
4477 intrstr, affinity_to);
4478 } else {
4479 aprint_normal_dev(sc->sc_dev,
4480 "for RX interrupting at %s\n", intrstr);
4481 }
4482 sc->sc_ihs[intr_idx] = vih;
4483 rxq->rxq_id = qidx;
4484 rxq->rxq_intr_idx = intr_idx;
4485
4486 rx_established++;
4487 intr_idx++;
4488 }
4489
4490 /*
4491 * LINK
4492 */
4493 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4494 sizeof(intrbuf));
4495 #ifdef WM_MPSAFE
4496 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
4497 #endif
4498 memset(intr_xname, 0, sizeof(intr_xname));
4499 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4500 device_xname(sc->sc_dev));
4501 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4502 IPL_NET, wm_linkintr_msix, sc, intr_xname);
4503 if (vih == NULL) {
4504 aprint_error_dev(sc->sc_dev,
4505 "unable to establish MSI-X(for LINK)%s%s\n",
4506 intrstr ? " at " : "",
4507 intrstr ? intrstr : "");
4508
4509 goto fail_1;
4510 }
	/* Keep the default affinity for the LINK interrupt */
4512 aprint_normal_dev(sc->sc_dev,
4513 "for LINK interrupting at %s\n", intrstr);
4514 sc->sc_ihs[intr_idx] = vih;
4515 sc->sc_link_intr_idx = intr_idx;
4516
4517 sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
4518 kcpuset_destroy(affinity);
4519 return 0;
4520
4521 fail_1:
4522 for (qidx = 0; qidx < rx_established; qidx++) {
4523 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4524 pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[rxq->rxq_intr_idx]);
4525 sc->sc_ihs[rxq->rxq_intr_idx] = NULL;
4526 }
4527 fail_0:
4528 for (qidx = 0; qidx < tx_established; qidx++) {
4529 struct wm_txqueue *txq = &sc->sc_txq[qidx];
4530 pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[txq->txq_intr_idx]);
4531 sc->sc_ihs[txq->txq_intr_idx] = NULL;
4532 }
4533
4534 kcpuset_destroy(affinity);
4535 return ENOMEM;
4536 }
4537
4538 /*
4539 * wm_init: [ifnet interface function]
4540 *
4541 * Initialize the interface.
4542 */
4543 static int
4544 wm_init(struct ifnet *ifp)
4545 {
4546 struct wm_softc *sc = ifp->if_softc;
4547 int ret;
4548
4549 WM_CORE_LOCK(sc);
4550 ret = wm_init_locked(ifp);
4551 WM_CORE_UNLOCK(sc);
4552
4553 return ret;
4554 }
4555
4556 static int
4557 wm_init_locked(struct ifnet *ifp)
4558 {
4559 struct wm_softc *sc = ifp->if_softc;
4560 int i, j, trynum, error = 0;
4561 uint32_t reg;
4562
4563 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4564 device_xname(sc->sc_dev), __func__));
4565 KASSERT(WM_CORE_LOCKED(sc));
4566 /*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
4569 * of the descriptor so that the headers are aligned, for normal mtu,
4570 * on such platforms. One possibility is that the DMA itself is
4571 * slightly more efficient if the front of the entire packet (instead
4572 * of the front of the headers) is aligned.
4573 *
4574 * Note we must always set align_tweak to 0 if we are using
4575 * jumbo frames.
4576 */
4577 #ifdef __NO_STRICT_ALIGNMENT
4578 sc->sc_align_tweak = 0;
4579 #else
4580 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4581 sc->sc_align_tweak = 0;
4582 else
4583 sc->sc_align_tweak = 2;
4584 #endif /* __NO_STRICT_ALIGNMENT */
4585
4586 /* Cancel any pending I/O. */
4587 wm_stop_locked(ifp, 0);
4588
4589 /* update statistics before reset */
4590 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4591 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4592
4593 /* Reset the chip to a known state. */
4594 wm_reset(sc);
4595
4596 switch (sc->sc_type) {
4597 case WM_T_82571:
4598 case WM_T_82572:
4599 case WM_T_82573:
4600 case WM_T_82574:
4601 case WM_T_82583:
4602 case WM_T_80003:
4603 case WM_T_ICH8:
4604 case WM_T_ICH9:
4605 case WM_T_ICH10:
4606 case WM_T_PCH:
4607 case WM_T_PCH2:
4608 case WM_T_PCH_LPT:
4609 case WM_T_PCH_SPT:
4610 /* AMT based hardware can now take control from firmware */
4611 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
4612 wm_get_hw_control(sc);
4613 break;
4614 default:
4615 break;
4616 }
4617
4618 /* Init hardware bits */
4619 wm_initialize_hardware_bits(sc);
4620
4621 /* Reset the PHY. */
4622 if (sc->sc_flags & WM_F_HAS_MII)
4623 wm_gmii_reset(sc);
4624
4625 /* Calculate (E)ITR value */
4626 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4627 sc->sc_itr = 450; /* For EITR */
4628 } else if (sc->sc_type >= WM_T_82543) {
4629 /*
4630 * Set up the interrupt throttling register (units of 256ns)
4631 * Note that a footnote in Intel's documentation says this
4632 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4633 * or 10Mbit mode. Empirically, it appears to be the case
4634 * that that is also true for the 1024ns units of the other
4635 * interrupt-related timer registers -- so, really, we ought
4636 * to divide this value by 4 when the link speed is low.
4637 *
4638 * XXX implement this division at link speed change!
4639 */
4640
4641 /*
4642 * For N interrupts/sec, set this value to:
4643 * 1000000000 / (N * 256). Note that we set the
4644 * absolute and packet timer values to this value
4645 * divided by 4 to get "simple timer" behavior.
4646 */
4647
4648 sc->sc_itr = 1500; /* 2604 ints/sec */
4649 }
4650
4651 error = wm_init_txrx_queues(sc);
4652 if (error)
4653 goto out;
4654
4655 /*
4656 * Clear out the VLAN table -- we don't use it (yet).
4657 */
4658 CSR_WRITE(sc, WMREG_VET, 0);
4659 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4660 trynum = 10; /* Due to hw errata */
4661 else
4662 trynum = 1;
4663 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4664 for (j = 0; j < trynum; j++)
4665 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4666
4667 /*
4668 * Set up flow-control parameters.
4669 *
4670 * XXX Values could probably stand some tuning.
4671 */
4672 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4673 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4674 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
4675 && (sc->sc_type != WM_T_PCH_SPT)) {
4676 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4677 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4678 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4679 }
4680
4681 sc->sc_fcrtl = FCRTL_DFLT;
4682 if (sc->sc_type < WM_T_82543) {
4683 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4684 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4685 } else {
4686 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4687 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4688 }
4689
4690 if (sc->sc_type == WM_T_80003)
4691 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4692 else
4693 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4694
4695 /* Writes the control register. */
4696 wm_set_vlan(sc);
4697
4698 if (sc->sc_flags & WM_F_HAS_MII) {
4699 int val;
4700
4701 switch (sc->sc_type) {
4702 case WM_T_80003:
4703 case WM_T_ICH8:
4704 case WM_T_ICH9:
4705 case WM_T_ICH10:
4706 case WM_T_PCH:
4707 case WM_T_PCH2:
4708 case WM_T_PCH_LPT:
4709 case WM_T_PCH_SPT:
4710 /*
4711 * Set the mac to wait the maximum time between each
4712 * iteration and increase the max iterations when
4713 * polling the phy; this fixes erroneous timeouts at
4714 * 10Mbps.
4715 */
4716 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4717 0xFFFF);
4718 val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
4719 val |= 0x3F;
4720 wm_kmrn_writereg(sc,
4721 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4722 break;
4723 default:
4724 break;
4725 }
4726
4727 if (sc->sc_type == WM_T_80003) {
4728 val = CSR_READ(sc, WMREG_CTRL_EXT);
4729 val &= ~CTRL_EXT_LINK_MODE_MASK;
4730 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4731
4732 /* Bypass RX and TX FIFO's */
4733 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4734 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4735 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4736 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4737 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4738 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4739 }
4740 }
4741 #if 0
4742 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4743 #endif
4744
4745 /* Set up checksum offload parameters. */
4746 reg = CSR_READ(sc, WMREG_RXCSUM);
4747 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4748 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4749 reg |= RXCSUM_IPOFL;
4750 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4751 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4752 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4753 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4754 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4755
4756 /* Set up MSI-X */
4757 if (sc->sc_nintrs > 1) {
4758 uint32_t ivar;
4759 struct wm_txqueue *txq;
4760 struct wm_rxqueue *rxq;
4761 int qid;
4762
4763 if (sc->sc_type == WM_T_82575) {
4764 /* Interrupt control */
4765 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4766 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4767 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4768
4769 /* TX */
4770 for (i = 0; i < sc->sc_ntxqueues; i++) {
4771 txq = &sc->sc_txq[i];
4772 CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx),
4773 EITR_TX_QUEUE(txq->txq_id));
4774 }
4775 /* RX */
4776 for (i = 0; i < sc->sc_nrxqueues; i++) {
4777 rxq = &sc->sc_rxq[i];
4778 CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx),
4779 EITR_RX_QUEUE(rxq->rxq_id));
4780 }
4781 /* Link status */
4782 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4783 EITR_OTHER);
4784 } else if (sc->sc_type == WM_T_82574) {
4785 /* Interrupt control */
4786 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4787 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4788 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4789
4790 ivar = 0;
4791 /* TX */
4792 for (i = 0; i < sc->sc_ntxqueues; i++) {
4793 txq = &sc->sc_txq[i];
4794 ivar |= __SHIFTIN((IVAR_VALID_82574
4795 | txq->txq_intr_idx),
4796 IVAR_TX_MASK_Q_82574(txq->txq_id));
4797 }
4798 /* RX */
4799 for (i = 0; i < sc->sc_nrxqueues; i++) {
4800 rxq = &sc->sc_rxq[i];
4801 ivar |= __SHIFTIN((IVAR_VALID_82574
4802 | rxq->rxq_intr_idx),
4803 IVAR_RX_MASK_Q_82574(rxq->rxq_id));
4804 }
4805 /* Link status */
4806 ivar |= __SHIFTIN((IVAR_VALID_82574
4807 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
4808 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4809 } else {
4810 /* Interrupt control */
4811 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
4812 | GPIE_EIAME | GPIE_PBA);
4813
4814 switch (sc->sc_type) {
4815 case WM_T_82580:
4816 case WM_T_I350:
4817 case WM_T_I354:
4818 case WM_T_I210:
4819 case WM_T_I211:
4820 /* TX */
4821 for (i = 0; i < sc->sc_ntxqueues; i++) {
4822 txq = &sc->sc_txq[i];
4823 qid = txq->txq_id;
4824 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4825 ivar &= ~IVAR_TX_MASK_Q(qid);
4826 ivar |= __SHIFTIN((txq->txq_intr_idx
4827 | IVAR_VALID),
4828 IVAR_TX_MASK_Q(qid));
4829 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4830 }
4831
4832 /* RX */
4833 for (i = 0; i < sc->sc_nrxqueues; i++) {
4834 rxq = &sc->sc_rxq[i];
4835 qid = rxq->rxq_id;
4836 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4837 ivar &= ~IVAR_RX_MASK_Q(qid);
4838 ivar |= __SHIFTIN((rxq->rxq_intr_idx
4839 | IVAR_VALID),
4840 IVAR_RX_MASK_Q(qid));
4841 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4842 }
4843 break;
4844 case WM_T_82576:
4845 /* TX */
4846 for (i = 0; i < sc->sc_ntxqueues; i++) {
4847 txq = &sc->sc_txq[i];
4848 qid = txq->txq_id;
4849 ivar = CSR_READ(sc,
4850 WMREG_IVAR_Q_82576(qid));
4851 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
4852 ivar |= __SHIFTIN((txq->txq_intr_idx
4853 | IVAR_VALID),
4854 IVAR_TX_MASK_Q_82576(qid));
4855 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
4856 ivar);
4857 }
4858
4859 /* RX */
4860 for (i = 0; i < sc->sc_nrxqueues; i++) {
4861 rxq = &sc->sc_rxq[i];
4862 qid = rxq->rxq_id;
4863 ivar = CSR_READ(sc,
4864 WMREG_IVAR_Q_82576(qid));
4865 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
4866 ivar |= __SHIFTIN((rxq->rxq_intr_idx
4867 | IVAR_VALID),
4868 IVAR_RX_MASK_Q_82576(qid));
4869 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
4870 ivar);
4871 }
4872 break;
4873 default:
4874 break;
4875 }
4876
4877 /* Link status */
4878 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
4879 IVAR_MISC_OTHER);
4880 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4881 }
4882
4883 if (sc->sc_nrxqueues > 1) {
4884 wm_init_rss(sc);
4885
4886 /*
4887 ** NOTE: Receive Full-Packet Checksum Offload
4888 ** is mutually exclusive with Multiqueue. However
4889 ** this is not the same as TCP/IP checksums which
4890 ** still work.
4891 */
4892 reg = CSR_READ(sc, WMREG_RXCSUM);
4893 reg |= RXCSUM_PCSD;
4894 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4895 }
4896 }
4897
4898 /* Set up the interrupt registers. */
4899 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4900 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4901 ICR_RXO | ICR_RXT0;
4902 if (sc->sc_nintrs > 1) {
4903 uint32_t mask;
4904 struct wm_txqueue *txq;
4905 struct wm_rxqueue *rxq;
4906
4907 switch (sc->sc_type) {
4908 case WM_T_82574:
4909 CSR_WRITE(sc, WMREG_EIAC_82574,
4910 WMREG_EIAC_82574_MSIX_MASK);
4911 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4912 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4913 break;
4914 default:
4915 if (sc->sc_type == WM_T_82575) {
4916 mask = 0;
4917 for (i = 0; i < sc->sc_ntxqueues; i++) {
4918 txq = &sc->sc_txq[i];
4919 mask |= EITR_TX_QUEUE(txq->txq_id);
4920 }
4921 for (i = 0; i < sc->sc_nrxqueues; i++) {
4922 rxq = &sc->sc_rxq[i];
4923 mask |= EITR_RX_QUEUE(rxq->rxq_id);
4924 }
4925 mask |= EITR_OTHER;
4926 } else {
4927 mask = 0;
4928 for (i = 0; i < sc->sc_ntxqueues; i++) {
4929 txq = &sc->sc_txq[i];
4930 mask |= 1 << txq->txq_intr_idx;
4931 }
4932 for (i = 0; i < sc->sc_nrxqueues; i++) {
4933 rxq = &sc->sc_rxq[i];
4934 mask |= 1 << rxq->rxq_intr_idx;
4935 }
4936 mask |= 1 << sc->sc_link_intr_idx;
4937 }
4938 CSR_WRITE(sc, WMREG_EIAC, mask);
4939 CSR_WRITE(sc, WMREG_EIAM, mask);
4940 CSR_WRITE(sc, WMREG_EIMS, mask);
4941 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4942 break;
4943 }
4944 } else
4945 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4946
4947 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4948 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4949 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4950 || (sc->sc_type == WM_T_PCH_SPT)) {
4951 reg = CSR_READ(sc, WMREG_KABGTXD);
4952 reg |= KABGTXD_BGSQLBIAS;
4953 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4954 }
4955
4956 /* Set up the inter-packet gap. */
4957 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4958
4959 if (sc->sc_type >= WM_T_82543) {
4960 /*
		 * XXX 82574 has both ITR and EITR. Set EITR when we use
		 * the multiqueue function with MSI-X.
4963 */
4964 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4965 int qidx;
4966 for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
4967 struct wm_txqueue *txq = &sc->sc_txq[qidx];
4968 CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx),
4969 sc->sc_itr);
4970 }
4971 for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4972 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4973 CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx),
4974 sc->sc_itr);
4975 }
4976 /*
			 * Link interrupts occur much less frequently than
			 * TX and RX interrupts, so we don't tune the
			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
			 * FreeBSD's if_igb does.
4981 */
4982 } else
4983 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4984 }
4985
4986 /* Set the VLAN ethernetype. */
4987 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4988
4989 /*
4990 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
4992 * we resolve the media type.
4993 */
4994 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4995 | TCTL_CT(TX_COLLISION_THRESHOLD)
4996 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4997 if (sc->sc_type >= WM_T_82571)
4998 sc->sc_tctl |= TCTL_MULR;
4999 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5000
5001 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5002 /* Write TDT after TCTL.EN is set. See the document. */
5003 CSR_WRITE(sc, WMREG_TDT(0), 0);
5004 }
5005
5006 if (sc->sc_type == WM_T_80003) {
5007 reg = CSR_READ(sc, WMREG_TCTL_EXT);
5008 reg &= ~TCTL_EXT_GCEX_MASK;
5009 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
5010 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
5011 }
5012
5013 /* Set the media. */
5014 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
5015 goto out;
5016
5017 /* Configure for OS presence */
5018 wm_init_manageability(sc);
5019
5020 /*
5021 * Set up the receive control register; we actually program
5022 * the register when we set the receive filter. Use multicast
5023 * address offset type 0.
5024 *
5025 * Only the i82544 has the ability to strip the incoming
5026 * CRC, so we don't enable that feature.
5027 */
5028 sc->sc_mchash_type = 0;
5029 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
5030 | RCTL_MO(sc->sc_mchash_type);
5031
5032 /*
5033 * The I350 has a bug where it always strips the CRC whether
5034 * asked to or not. So ask for stripped CRC here and cope in rxeof
5035 */
5036 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5037 || (sc->sc_type == WM_T_I210))
5038 sc->sc_rctl |= RCTL_SECRC;
5039
5040 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
5041 && (ifp->if_mtu > ETHERMTU)) {
5042 sc->sc_rctl |= RCTL_LPE;
5043 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5044 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
5045 }
5046
5047 if (MCLBYTES == 2048) {
5048 sc->sc_rctl |= RCTL_2k;
5049 } else {
5050 if (sc->sc_type >= WM_T_82543) {
5051 switch (MCLBYTES) {
5052 case 4096:
5053 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
5054 break;
5055 case 8192:
5056 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
5057 break;
5058 case 16384:
5059 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
5060 break;
5061 default:
5062 panic("wm_init: MCLBYTES %d unsupported",
5063 MCLBYTES);
5064 break;
5065 }
5066 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
5067 }
5068
5069 /* Set the receive filter. */
5070 wm_set_filter(sc);
5071
5072 /* Enable ECC */
5073 switch (sc->sc_type) {
5074 case WM_T_82571:
5075 reg = CSR_READ(sc, WMREG_PBA_ECC);
5076 reg |= PBA_ECC_CORR_EN;
5077 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
5078 break;
5079 case WM_T_PCH_LPT:
5080 case WM_T_PCH_SPT:
5081 reg = CSR_READ(sc, WMREG_PBECCSTS);
5082 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
5083 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
5084
5085 reg = CSR_READ(sc, WMREG_CTRL);
5086 reg |= CTRL_MEHE;
5087 CSR_WRITE(sc, WMREG_CTRL, reg);
5088 break;
5089 default:
5090 break;
5091 }
5092
5093 /* On 575 and later set RDT only if RX enabled */
5094 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5095 int qidx;
5096 for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
5097 struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
5098 for (i = 0; i < WM_NRXDESC; i++) {
5099 WM_RX_LOCK(rxq);
5100 wm_init_rxdesc(rxq, i);
5101 WM_RX_UNLOCK(rxq);
5102
5103 }
5104 }
5105 }
5106
5107 sc->sc_stopping = false;
5108
5109 /* Start the one second link check clock. */
5110 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5111
5112 /* ...all done! */
5113 ifp->if_flags |= IFF_RUNNING;
5114 ifp->if_flags &= ~IFF_OACTIVE;
5115
5116 out:
5117 sc->sc_if_flags = ifp->if_flags;
5118 if (error)
5119 log(LOG_ERR, "%s: interface not running\n",
5120 device_xname(sc->sc_dev));
5121 return error;
5122 }
5123
5124 /*
5125 * wm_stop: [ifnet interface function]
5126 *
5127 * Stop transmission on the interface.
5128 */
5129 static void
5130 wm_stop(struct ifnet *ifp, int disable)
5131 {
5132 struct wm_softc *sc = ifp->if_softc;
5133
5134 WM_CORE_LOCK(sc);
5135 wm_stop_locked(ifp, disable);
5136 WM_CORE_UNLOCK(sc);
5137 }
5138
5139 static void
5140 wm_stop_locked(struct ifnet *ifp, int disable)
5141 {
5142 struct wm_softc *sc = ifp->if_softc;
5143 struct wm_txsoft *txs;
5144 int i, qidx;
5145
5146 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5147 device_xname(sc->sc_dev), __func__));
5148 KASSERT(WM_CORE_LOCKED(sc));
5149
5150 sc->sc_stopping = true;
5151
5152 /* Stop the one second clock. */
5153 callout_stop(&sc->sc_tick_ch);
5154
5155 /* Stop the 82547 Tx FIFO stall check timer. */
5156 if (sc->sc_type == WM_T_82547)
5157 callout_stop(&sc->sc_txfifo_ch);
5158
5159 if (sc->sc_flags & WM_F_HAS_MII) {
5160 /* Down the MII. */
5161 mii_down(&sc->sc_mii);
5162 } else {
5163 #if 0
5164 /* Should we clear PHY's status properly? */
5165 wm_reset(sc);
5166 #endif
5167 }
5168
5169 /* Stop the transmit and receive processes. */
5170 CSR_WRITE(sc, WMREG_TCTL, 0);
5171 CSR_WRITE(sc, WMREG_RCTL, 0);
5172 sc->sc_rctl &= ~RCTL_EN;
5173
5174 /*
5175 * Clear the interrupt mask to ensure the device cannot assert its
5176 * interrupt line.
5177 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5178 * service any currently pending or shared interrupt.
5179 */
5180 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5181 sc->sc_icr = 0;
5182 if (sc->sc_nintrs > 1) {
5183 if (sc->sc_type != WM_T_82574) {
5184 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5185 CSR_WRITE(sc, WMREG_EIAC, 0);
5186 } else
5187 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5188 }
5189
5190 /* Release any queued transmit buffers. */
5191 for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
5192 struct wm_txqueue *txq = &sc->sc_txq[qidx];
5193 WM_TX_LOCK(txq);
5194 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5195 txs = &txq->txq_soft[i];
5196 if (txs->txs_mbuf != NULL) {
5197 bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
5198 m_freem(txs->txs_mbuf);
5199 txs->txs_mbuf = NULL;
5200 }
5201 }
5202 if (sc->sc_type == WM_T_PCH_SPT) {
5203 pcireg_t preg;
5204 uint32_t reg;
5205 int nexttx;
5206
5207 /* First, disable MULR fix in FEXTNVM11 */
5208 reg = CSR_READ(sc, WMREG_FEXTNVM11);
5209 reg |= FEXTNVM11_DIS_MULRFIX;
5210 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5211
5212 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5213 WM_PCI_DESCRING_STATUS);
5214 reg = CSR_READ(sc, WMREG_TDLEN(0));
5215 printf("XXX RST: FLUSH = %08x, len = %u\n",
5216 (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
5217 if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
5218 && (reg != 0)) {
5219 /* TX */
5220 printf("XXX need TX flush (reg = %08x)\n",
5221 preg);
5222 wm_init_tx_descs(sc, txq);
5223 wm_init_tx_regs(sc, txq);
5224 nexttx = txq->txq_next;
5225 wm_set_dma_addr(
5226 &txq->txq_descs[nexttx].wtx_addr,
5227 WM_CDTXADDR(txq, nexttx));
5228 txq->txq_descs[nexttx].wtx_cmdlen
5229 = htole32(WTX_CMD_IFCS | 512);
5230 wm_cdtxsync(txq, nexttx, 1,
5231 BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
5232 CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
5233 CSR_WRITE(sc, WMREG_TDT(0), nexttx);
5234 CSR_WRITE_FLUSH(sc);
5235 delay(250);
5236 CSR_WRITE(sc, WMREG_TCTL, 0);
5237 }
5238 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5239 WM_PCI_DESCRING_STATUS);
5240 if (preg & DESCRING_STATUS_FLUSH_REQ) {
5241 /* RX */
5242 printf("XXX need RX flush\n");
5243 }
5244 }
5245 WM_TX_UNLOCK(txq);
5246 }
5247
5248 /* Mark the interface as down and cancel the watchdog timer. */
5249 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5250 ifp->if_timer = 0;
5251
5252 if (disable) {
5253 for (i = 0; i < sc->sc_nrxqueues; i++) {
5254 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5255 WM_RX_LOCK(rxq);
5256 wm_rxdrain(rxq);
5257 WM_RX_UNLOCK(rxq);
5258 }
5259 }
5260
5261 #if 0 /* notyet */
5262 if (sc->sc_type >= WM_T_82544)
5263 CSR_WRITE(sc, WMREG_WUC, 0);
5264 #endif
5265 }
5266
5267 static void
5268 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5269 {
5270 struct mbuf *m;
5271 int i;
5272
5273 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5274 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5275 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5276 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5277 m->m_data, m->m_len, m->m_flags);
5278 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5279 i, i == 1 ? "" : "s");
5280 }
5281
5282 /*
5283 * wm_82547_txfifo_stall:
5284 *
5285 * Callout used to wait for the 82547 Tx FIFO to drain,
5286 * reset the FIFO pointers, and restart packet transmission.
5287 */
5288 static void
5289 wm_82547_txfifo_stall(void *arg)
5290 {
5291 struct wm_softc *sc = arg;
5292 struct wm_txqueue *txq = sc->sc_txq;
5293 #ifndef WM_MPSAFE
5294 int s;
5295
5296 s = splnet();
5297 #endif
5298 WM_TX_LOCK(txq);
5299
5300 if (sc->sc_stopping)
5301 goto out;
5302
5303 if (txq->txq_fifo_stall) {
5304 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5305 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5306 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5307 /*
5308 * Packets have drained. Stop transmitter, reset
5309 * FIFO pointers, restart transmitter, and kick
5310 * the packet queue.
5311 */
5312 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5313 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5314 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5315 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5316 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5317 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5318 CSR_WRITE(sc, WMREG_TCTL, tctl);
5319 CSR_WRITE_FLUSH(sc);
5320
5321 txq->txq_fifo_head = 0;
5322 txq->txq_fifo_stall = 0;
5323 wm_start_locked(&sc->sc_ethercom.ec_if);
5324 } else {
5325 /*
5326 * Still waiting for packets to drain; try again in
5327 * another tick.
5328 */
5329 callout_schedule(&sc->sc_txfifo_ch, 1);
5330 }
5331 }
5332
5333 out:
5334 WM_TX_UNLOCK(txq);
5335 #ifndef WM_MPSAFE
5336 splx(s);
5337 #endif
5338 }
5339
5340 /*
5341 * wm_82547_txfifo_bugchk:
5342 *
5343 * Check for bug condition in the 82547 Tx FIFO. We need to
5344 * prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
5346 *
5347 * We do this by checking the amount of space before the end
5348 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5349 * the Tx FIFO, wait for all remaining packets to drain, reset
5350 * the internal FIFO pointers to the beginning, and restart
5351 * transmission on the interface.
5352 */
5353 #define WM_FIFO_HDR 0x10
5354 #define WM_82547_PAD_LEN 0x3e0
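/*
 * Worked example (hypothetical values): with txq_fifo_size = 0x2800 and
 * txq_fifo_head = 0x2000, space = 0x800.  A 1514-byte frame rounds up
 * to len = 0x600; since 0x600 < WM_82547_PAD_LEN + 0x800 the packet is
 * sent and the head advances to 0x2600.
 */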
5355 static int
5356 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5357 {
5358 struct wm_txqueue *txq = &sc->sc_txq[0];
5359 int space = txq->txq_fifo_size - txq->txq_fifo_head;
5360 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5361
5362 /* Just return if already stalled. */
5363 if (txq->txq_fifo_stall)
5364 return 1;
5365
5366 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5367 /* Stall only occurs in half-duplex mode. */
5368 goto send_packet;
5369 }
5370
5371 if (len >= WM_82547_PAD_LEN + space) {
5372 txq->txq_fifo_stall = 1;
5373 callout_schedule(&sc->sc_txfifo_ch, 1);
5374 return 1;
5375 }
5376
5377 send_packet:
5378 txq->txq_fifo_head += len;
5379 if (txq->txq_fifo_head >= txq->txq_fifo_size)
5380 txq->txq_fifo_head -= txq->txq_fifo_size;
5381
5382 return 0;
5383 }
5384
5385 static int
5386 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5387 {
5388 int error;
5389
5390 /*
5391 * Allocate the control data structures, and create and load the
5392 * DMA map for it.
5393 *
5394 * NOTE: All Tx descriptors must be in the same 4G segment of
5395 * memory. So must Rx descriptors. We simplify by allocating
5396 * both sets within the same 4G segment.
5397 */
5398 if (sc->sc_type < WM_T_82544)
5399 WM_NTXDESC(txq) = WM_NTXDESC_82542;
5400 else
5401 WM_NTXDESC(txq) = WM_NTXDESC_82544;
5402 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5403 txq->txq_descsize = sizeof(nq_txdesc_t);
5404 else
5405 txq->txq_descsize = sizeof(wiseman_txdesc_t);
5406
5407 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
5408 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
5409 1, &txq->txq_desc_rseg, 0)) != 0) {
5410 aprint_error_dev(sc->sc_dev,
5411 "unable to allocate TX control data, error = %d\n",
5412 error);
5413 goto fail_0;
5414 }
5415
5416 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5417 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
5418 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5419 aprint_error_dev(sc->sc_dev,
5420 "unable to map TX control data, error = %d\n", error);
5421 goto fail_1;
5422 }
5423
5424 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
5425 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
5426 aprint_error_dev(sc->sc_dev,
5427 "unable to create TX control data DMA map, error = %d\n",
5428 error);
5429 goto fail_2;
5430 }
5431
5432 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5433 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
5434 aprint_error_dev(sc->sc_dev,
5435 "unable to load TX control data DMA map, error = %d\n",
5436 error);
5437 goto fail_3;
5438 }
5439
5440 return 0;
5441
5442 fail_3:
5443 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5444 fail_2:
5445 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5446 WM_TXDESCS_SIZE(txq));
5447 fail_1:
5448 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5449 fail_0:
5450 return error;
5451 }
5452
5453 static void
5454 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5455 {
5456
5457 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5458 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5459 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5460 WM_TXDESCS_SIZE(txq));
5461 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5462 }
5463
5464 static int
5465 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5466 {
5467 int error;
5468
5469 /*
5470 * Allocate the control data structures, and create and load the
5471 * DMA map for it.
5472 *
5473 * NOTE: All Tx descriptors must be in the same 4G segment of
5474 * memory. So must Rx descriptors. We simplify by allocating
5475 * both sets within the same 4G segment.
5476 */
5477 rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5478 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
5479 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
5480 1, &rxq->rxq_desc_rseg, 0)) != 0) {
5481 aprint_error_dev(sc->sc_dev,
5482 "unable to allocate RX control data, error = %d\n",
5483 error);
5484 goto fail_0;
5485 }
5486
5487 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5488 rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5489 (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5490 aprint_error_dev(sc->sc_dev,
5491 "unable to map RX control data, error = %d\n", error);
5492 goto fail_1;
5493 }
5494
5495 if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5496 rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5497 aprint_error_dev(sc->sc_dev,
5498 "unable to create RX control data DMA map, error = %d\n",
5499 error);
5500 goto fail_2;
5501 }
5502
5503 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5504 rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5505 aprint_error_dev(sc->sc_dev,
5506 "unable to load RX control data DMA map, error = %d\n",
5507 error);
5508 goto fail_3;
5509 }
5510
5511 return 0;
5512
5513 fail_3:
5514 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5515 fail_2:
5516 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5517 rxq->rxq_desc_size);
5518 fail_1:
5519 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5520 fail_0:
5521 return error;
5522 }
5523
5524 static void
5525 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5526 {
5527
5528 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5529 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5530 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5531 rxq->rxq_desc_size);
5532 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5533 }
5534
5535
5536 static int
5537 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5538 {
5539 int i, error;
5540
5541 /* Create the transmit buffer DMA maps. */
5542 WM_TXQUEUELEN(txq) =
5543 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5544 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
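	/*
	 * The 82547 variants use a shorter job queue, presumably so
	 * that fewer packets are outstanding while the Tx FIFO bug
	 * workaround above stalls and drains the FIFO.
	 */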
5545 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5546 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5547 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5548 &txq->txq_soft[i].txs_dmamap)) != 0) {
5549 aprint_error_dev(sc->sc_dev,
5550 "unable to create Tx DMA map %d, error = %d\n",
5551 i, error);
5552 goto fail;
5553 }
5554 }
5555
5556 return 0;
5557
5558 fail:
5559 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5560 if (txq->txq_soft[i].txs_dmamap != NULL)
5561 bus_dmamap_destroy(sc->sc_dmat,
5562 txq->txq_soft[i].txs_dmamap);
5563 }
5564 return error;
5565 }
5566
5567 static void
5568 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5569 {
5570 int i;
5571
5572 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5573 if (txq->txq_soft[i].txs_dmamap != NULL)
5574 bus_dmamap_destroy(sc->sc_dmat,
5575 txq->txq_soft[i].txs_dmamap);
5576 }
5577 }
5578
5579 static int
5580 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5581 {
5582 int i, error;
5583
5584 /* Create the receive buffer DMA maps. */
5585 for (i = 0; i < WM_NRXDESC; i++) {
5586 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5587 MCLBYTES, 0, 0,
5588 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5589 aprint_error_dev(sc->sc_dev,
5590 "unable to create Rx DMA map %d error = %d\n",
5591 i, error);
5592 goto fail;
5593 }
5594 rxq->rxq_soft[i].rxs_mbuf = NULL;
5595 }
5596
5597 return 0;
5598
5599 fail:
5600 for (i = 0; i < WM_NRXDESC; i++) {
5601 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5602 bus_dmamap_destroy(sc->sc_dmat,
5603 rxq->rxq_soft[i].rxs_dmamap);
5604 }
5605 return error;
5606 }
5607
5608 static void
5609 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5610 {
5611 int i;
5612
5613 for (i = 0; i < WM_NRXDESC; i++) {
5614 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5615 bus_dmamap_destroy(sc->sc_dmat,
5616 rxq->rxq_soft[i].rxs_dmamap);
5617 }
5618 }
5619
5620 /*
5621 * wm_alloc_txrx_queues:
5622 *	Allocate {tx,rx} descriptors and {tx,rx} buffers
5623 */
5624 static int
5625 wm_alloc_txrx_queues(struct wm_softc *sc)
5626 {
5627 int i, error, tx_done, rx_done;
5628
5629 /*
5630 * For transmission
5631 */
5632 sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
5633 KM_SLEEP);
5634 if (sc->sc_txq == NULL) {
5635 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_txqueue\n");
5636 error = ENOMEM;
5637 goto fail_0;
5638 }
5639
5640 error = 0;
5641 tx_done = 0;
5642 for (i = 0; i < sc->sc_ntxqueues; i++) {
5643 struct wm_txqueue *txq = &sc->sc_txq[i];
5644 txq->txq_sc = sc;
5645 #ifdef WM_MPSAFE
5646 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5647 #else
5648 txq->txq_lock = NULL;
5649 #endif
5650 error = wm_alloc_tx_descs(sc, txq);
5651 if (error)
5652 break;
5653 error = wm_alloc_tx_buffer(sc, txq);
5654 if (error) {
5655 wm_free_tx_descs(sc, txq);
5656 break;
5657 }
5658 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
5659 if (txq->txq_interq == NULL) {
5660 wm_free_tx_descs(sc, txq);
5661 wm_free_tx_buffer(sc, txq);
5662 error = ENOMEM;
5663 break;
5664 }
5665 tx_done++;
5666 }
5667 if (error)
5668 goto fail_1;
5669
5670 /*
5671 	 * For receive
5672 */
5673 sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
5674 KM_SLEEP);
5675 if (sc->sc_rxq == NULL) {
5676 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_rxqueue\n");
5677 error = ENOMEM;
5678 goto fail_1;
5679 }
5680
5681 error = 0;
5682 rx_done = 0;
5683 for (i = 0; i < sc->sc_nrxqueues; i++) {
5684 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5685 rxq->rxq_sc = sc;
5686 #ifdef WM_MPSAFE
5687 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5688 #else
5689 rxq->rxq_lock = NULL;
5690 #endif
5691 error = wm_alloc_rx_descs(sc, rxq);
5692 if (error)
5693 break;
5694
5695 error = wm_alloc_rx_buffer(sc, rxq);
5696 if (error) {
5697 wm_free_rx_descs(sc, rxq);
5698 break;
5699 }
5700
5701 rx_done++;
5702 }
5703 if (error)
5704 goto fail_2;
5705
5706 return 0;
5707
5708 fail_2:
5709 for (i = 0; i < rx_done; i++) {
5710 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5711 wm_free_rx_buffer(sc, rxq);
5712 wm_free_rx_descs(sc, rxq);
5713 if (rxq->rxq_lock)
5714 mutex_obj_free(rxq->rxq_lock);
5715 }
5716 kmem_free(sc->sc_rxq,
5717 sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5718 fail_1:
5719 for (i = 0; i < tx_done; i++) {
5720 struct wm_txqueue *txq = &sc->sc_txq[i];
5721 pcq_destroy(txq->txq_interq);
5722 wm_free_tx_buffer(sc, txq);
5723 wm_free_tx_descs(sc, txq);
5724 if (txq->txq_lock)
5725 mutex_obj_free(txq->txq_lock);
5726 }
5727 kmem_free(sc->sc_txq,
5728 sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5729 fail_0:
5730 return error;
5731 }
5732
5733 /*
5734 * wm_free_txrx_queues:
5735 *	Free {tx,rx} descriptors and {tx,rx} buffers
5736 */
5737 static void
5738 wm_free_txrx_queues(struct wm_softc *sc)
5739 {
5740 int i;
5741
5742 for (i = 0; i < sc->sc_nrxqueues; i++) {
5743 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5744 wm_free_rx_buffer(sc, rxq);
5745 wm_free_rx_descs(sc, rxq);
5746 if (rxq->rxq_lock)
5747 mutex_obj_free(rxq->rxq_lock);
5748 }
5749 kmem_free(sc->sc_rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5750
5751 for (i = 0; i < sc->sc_ntxqueues; i++) {
5752 struct wm_txqueue *txq = &sc->sc_txq[i];
5753 wm_free_tx_buffer(sc, txq);
5754 wm_free_tx_descs(sc, txq);
5755 if (txq->txq_lock)
5756 mutex_obj_free(txq->txq_lock);
5757 }
5758 kmem_free(sc->sc_txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5759 }
5760
5761 static void
5762 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5763 {
5764
5765 KASSERT(WM_TX_LOCKED(txq));
5766
5767 /* Initialize the transmit descriptor ring. */
5768 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
5769 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5770 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5771 txq->txq_free = WM_NTXDESC(txq);
5772 txq->txq_next = 0;
5773 }
5774
5775 static void
5776 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
5777 {
5778
5779 KASSERT(WM_TX_LOCKED(txq));
5780
5781 if (sc->sc_type < WM_T_82543) {
5782 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5783 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5784 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
5785 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5786 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5787 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5788 } else {
5789 int qid = txq->txq_id;
5790
5791 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5792 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5793 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
5794 CSR_WRITE(sc, WMREG_TDH(qid), 0);
5795
5796 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5797 /*
5798 * Don't write TDT before TCTL.EN is set.
5799 			 * See the documentation.
5800 */
5801 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5802 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5803 | TXDCTL_WTHRESH(0));
5804 else {
5805 /* ITR / 4 */
5806 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5807 if (sc->sc_type >= WM_T_82540) {
5808 				/* TADV should be set to the same value as TIDV */
5809 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5810 }
5811
5812 CSR_WRITE(sc, WMREG_TDT(qid), 0);
5813 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5814 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5815 }
5816 }
5817 }
5818
5819 static void
5820 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5821 {
5822 int i;
5823
5824 KASSERT(WM_TX_LOCKED(txq));
5825
5826 /* Initialize the transmit job descriptors. */
5827 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5828 txq->txq_soft[i].txs_mbuf = NULL;
5829 txq->txq_sfree = WM_TXQUEUELEN(txq);
5830 txq->txq_snext = 0;
5831 txq->txq_sdirty = 0;
5832 }
5833
5834 static void
5835 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
5836 {
5837
5838 KASSERT(WM_TX_LOCKED(txq));
5839
5840 /*
5841 * Set up some register offsets that are different between
5842 * the i82542 and the i82543 and later chips.
5843 */
5844 if (sc->sc_type < WM_T_82543)
5845 txq->txq_tdt_reg = WMREG_OLD_TDT;
5846 else
5847 txq->txq_tdt_reg = WMREG_TDT(txq->txq_id);
5848
5849 wm_init_tx_descs(sc, txq);
5850 wm_init_tx_regs(sc, txq);
5851 wm_init_tx_buffer(sc, txq);
5852 }
5853
5854 static void
5855 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5856 {
5857
5858 KASSERT(WM_RX_LOCKED(rxq));
5859
5860 /*
5861 * Initialize the receive descriptor and receive job
5862 * descriptor rings.
5863 */
5864 if (sc->sc_type < WM_T_82543) {
5865 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5866 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5867 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5868 sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5869 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5870 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5871 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5872
5873 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5874 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5875 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5876 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5877 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5878 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5879 } else {
5880 int qid = rxq->rxq_id;
5881
5882 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
5883 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
5884 CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
5885
5886 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5887 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
5888 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
5889 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
5890 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
5891 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
5892 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
5893 | RXDCTL_WTHRESH(1));
5894 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5895 CSR_WRITE(sc, WMREG_RDT(qid), 0);
5896 } else {
5897 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5898 CSR_WRITE(sc, WMREG_RDT(qid), 0);
5899 /* ITR / 4 */
5900 CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
5901 			/* RADV MUST be set to the same value as RDTR */
5902 CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
5903 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
5904 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5905 }
5906 }
5907 }
5908
5909 static int
5910 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5911 {
5912 struct wm_rxsoft *rxs;
5913 int error, i;
5914
5915 KASSERT(WM_RX_LOCKED(rxq));
5916
5917 for (i = 0; i < WM_NRXDESC; i++) {
5918 rxs = &rxq->rxq_soft[i];
5919 if (rxs->rxs_mbuf == NULL) {
5920 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
5921 log(LOG_ERR, "%s: unable to allocate or map "
5922 "rx buffer %d, error = %d\n",
5923 device_xname(sc->sc_dev), i, error);
5924 /*
5925 * XXX Should attempt to run with fewer receive
5926 * XXX buffers instead of just failing.
5927 */
5928 wm_rxdrain(rxq);
5929 return ENOMEM;
5930 }
5931 } else {
5932 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5933 wm_init_rxdesc(rxq, i);
5934 /*
5935 			 * For 82575 and newer devices, the RX descriptors
5936 			 * must be initialized after RCTL.EN is set in
5937 			 * wm_set_filter().
5938 */
5939 }
5940 }
5941 rxq->rxq_ptr = 0;
5942 rxq->rxq_discard = 0;
5943 WM_RXCHAIN_RESET(rxq);
5944
5945 return 0;
5946 }
5947
5948 static int
5949 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
5950 {
5951
5952 KASSERT(WM_RX_LOCKED(rxq));
5953
5954 /*
5955 * Set up some register offsets that are different between
5956 * the i82542 and the i82543 and later chips.
5957 */
5958 if (sc->sc_type < WM_T_82543)
5959 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
5960 else
5961 rxq->rxq_rdt_reg = WMREG_RDT(rxq->rxq_id);
5962
5963 wm_init_rx_regs(sc, rxq);
5964 return wm_init_rx_buffer(sc, rxq);
5965 }
5966
5967 /*
5968 * wm_init_txrx_queues:
5969 *	Initialize {tx,rx} descriptors and {tx,rx} buffers
5970 */
5971 static int
5972 wm_init_txrx_queues(struct wm_softc *sc)
5973 {
5974 int i, error;
5975
5976 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5977 device_xname(sc->sc_dev), __func__));
5978 for (i = 0; i < sc->sc_ntxqueues; i++) {
5979 struct wm_txqueue *txq = &sc->sc_txq[i];
5980 WM_TX_LOCK(txq);
5981 wm_init_tx_queue(sc, txq);
5982 WM_TX_UNLOCK(txq);
5983 }
5984
5985 error = 0;
5986 for (i = 0; i < sc->sc_nrxqueues; i++) {
5987 struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5988 WM_RX_LOCK(rxq);
5989 error = wm_init_rx_queue(sc, rxq);
5990 WM_RX_UNLOCK(rxq);
5991 if (error)
5992 break;
5993 }
5994
5995 return error;
5996 }
5997
5998 /*
5999 * wm_tx_offload:
6000 *
6001 * Set up TCP/IP checksumming parameters for the
6002 * specified packet.
6003 */
6004 static int
6005 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
6006 uint8_t *fieldsp)
6007 {
6008 struct wm_txqueue *txq = &sc->sc_txq[0];
6009 struct mbuf *m0 = txs->txs_mbuf;
6010 struct livengood_tcpip_ctxdesc *t;
6011 uint32_t ipcs, tucs, cmd, cmdlen, seg;
6012 uint32_t ipcse;
6013 struct ether_header *eh;
6014 int offset, iphl;
6015 uint8_t fields;
6016
6017 /*
6018 * XXX It would be nice if the mbuf pkthdr had offset
6019 * fields for the protocol headers.
6020 */
6021
6022 eh = mtod(m0, struct ether_header *);
6023 switch (htons(eh->ether_type)) {
6024 case ETHERTYPE_IP:
6025 case ETHERTYPE_IPV6:
6026 offset = ETHER_HDR_LEN;
6027 break;
6028
6029 case ETHERTYPE_VLAN:
6030 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6031 break;
6032
6033 default:
6034 /*
6035 * Don't support this protocol or encapsulation.
6036 */
6037 *fieldsp = 0;
6038 *cmdp = 0;
6039 return 0;
6040 }
6041
6042 if ((m0->m_pkthdr.csum_flags &
6043 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
6044 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6045 } else {
6046 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6047 }
6048 ipcse = offset + iphl - 1;
6049
6050 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
6051 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
6052 seg = 0;
6053 fields = 0;
6054
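	/*
	 * For TSO the hardware segments the packet and replicates the
	 * headers for us, so below we zero the IP length field and seed
	 * th_sum with only the pseudo-header checksum (without the
	 * length); the chip fills in the per-segment lengths itself.
	 */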
6055 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6056 int hlen = offset + iphl;
6057 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6058
6059 if (__predict_false(m0->m_len <
6060 (hlen + sizeof(struct tcphdr)))) {
6061 /*
6062 * TCP/IP headers are not in the first mbuf; we need
6063 * to do this the slow and painful way. Let's just
6064 * hope this doesn't happen very often.
6065 */
6066 struct tcphdr th;
6067
6068 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
6069
6070 m_copydata(m0, hlen, sizeof(th), &th);
6071 if (v4) {
6072 struct ip ip;
6073
6074 m_copydata(m0, offset, sizeof(ip), &ip);
6075 ip.ip_len = 0;
6076 m_copyback(m0,
6077 offset + offsetof(struct ip, ip_len),
6078 sizeof(ip.ip_len), &ip.ip_len);
6079 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6080 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6081 } else {
6082 struct ip6_hdr ip6;
6083
6084 m_copydata(m0, offset, sizeof(ip6), &ip6);
6085 ip6.ip6_plen = 0;
6086 m_copyback(m0,
6087 offset + offsetof(struct ip6_hdr, ip6_plen),
6088 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6089 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6090 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6091 }
6092 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6093 sizeof(th.th_sum), &th.th_sum);
6094
6095 hlen += th.th_off << 2;
6096 } else {
6097 /*
6098 * TCP/IP headers are in the first mbuf; we can do
6099 * this the easy way.
6100 */
6101 struct tcphdr *th;
6102
6103 if (v4) {
6104 struct ip *ip =
6105 (void *)(mtod(m0, char *) + offset);
6106 th = (void *)(mtod(m0, char *) + hlen);
6107
6108 ip->ip_len = 0;
6109 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6110 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6111 } else {
6112 struct ip6_hdr *ip6 =
6113 (void *)(mtod(m0, char *) + offset);
6114 th = (void *)(mtod(m0, char *) + hlen);
6115
6116 ip6->ip6_plen = 0;
6117 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6118 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6119 }
6120 hlen += th->th_off << 2;
6121 }
6122
6123 if (v4) {
6124 WM_EVCNT_INCR(&sc->sc_ev_txtso);
6125 cmdlen |= WTX_TCPIP_CMD_IP;
6126 } else {
6127 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
6128 ipcse = 0;
6129 }
6130 cmd |= WTX_TCPIP_CMD_TSE;
6131 cmdlen |= WTX_TCPIP_CMD_TSE |
6132 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
6133 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
6134 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
6135 }
6136
6137 /*
6138 * NOTE: Even if we're not using the IP or TCP/UDP checksum
6139 * offload feature, if we load the context descriptor, we
6140 * MUST provide valid values for IPCSS and TUCSS fields.
6141 */
6142
6143 ipcs = WTX_TCPIP_IPCSS(offset) |
6144 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
6145 WTX_TCPIP_IPCSE(ipcse);
6146 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
6147 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
6148 fields |= WTX_IXSM;
6149 }
6150
6151 offset += iphl;
6152
6153 if (m0->m_pkthdr.csum_flags &
6154 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
6155 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
6156 fields |= WTX_TXSM;
6157 tucs = WTX_TCPIP_TUCSS(offset) |
6158 WTX_TCPIP_TUCSO(offset +
6159 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
6160 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6161 } else if ((m0->m_pkthdr.csum_flags &
6162 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
6163 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
6164 fields |= WTX_TXSM;
6165 tucs = WTX_TCPIP_TUCSS(offset) |
6166 WTX_TCPIP_TUCSO(offset +
6167 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
6168 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6169 } else {
6170 /* Just initialize it to a valid TCP context. */
6171 tucs = WTX_TCPIP_TUCSS(offset) |
6172 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
6173 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6174 }
6175
6176 /* Fill in the context descriptor. */
6177 t = (struct livengood_tcpip_ctxdesc *)
6178 &txq->txq_descs[txq->txq_next];
6179 t->tcpip_ipcs = htole32(ipcs);
6180 t->tcpip_tucs = htole32(tucs);
6181 t->tcpip_cmdlen = htole32(cmdlen);
6182 t->tcpip_seg = htole32(seg);
6183 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6184
6185 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6186 txs->txs_ndesc++;
6187
6188 *cmdp = cmd;
6189 *fieldsp = fields;
6190
6191 return 0;
6192 }
6193
6194 /*
6195 * wm_start: [ifnet interface function]
6196 *
6197 * Start packet transmission on the interface.
6198 */
6199 static void
6200 wm_start(struct ifnet *ifp)
6201 {
6202 struct wm_softc *sc = ifp->if_softc;
6203 struct wm_txqueue *txq = &sc->sc_txq[0];
6204
6205 WM_TX_LOCK(txq);
6206 if (!sc->sc_stopping)
6207 wm_start_locked(ifp);
6208 WM_TX_UNLOCK(txq);
6209 }
6210
6211 static void
6212 wm_start_locked(struct ifnet *ifp)
6213 {
6214 struct wm_softc *sc = ifp->if_softc;
6215 struct wm_txqueue *txq = &sc->sc_txq[0];
6216 struct mbuf *m0;
6217 struct m_tag *mtag;
6218 struct wm_txsoft *txs;
6219 bus_dmamap_t dmamap;
6220 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6221 bus_addr_t curaddr;
6222 bus_size_t seglen, curlen;
6223 uint32_t cksumcmd;
6224 uint8_t cksumfields;
6225
6226 KASSERT(WM_TX_LOCKED(txq));
6227
6228 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6229 return;
6230
6231 /* Remember the previous number of free descriptors. */
6232 ofree = txq->txq_free;
6233
6234 /*
6235 * Loop through the send queue, setting up transmit descriptors
6236 * until we drain the queue, or use up all available transmit
6237 * descriptors.
6238 */
6239 for (;;) {
6240 m0 = NULL;
6241
6242 /* Get a work queue entry. */
6243 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6244 wm_txeof(sc, txq);
6245 if (txq->txq_sfree == 0) {
6246 DPRINTF(WM_DEBUG_TX,
6247 ("%s: TX: no free job descriptors\n",
6248 device_xname(sc->sc_dev)));
6249 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6250 break;
6251 }
6252 }
6253
6254 /* Grab a packet off the queue. */
6255 IFQ_DEQUEUE(&ifp->if_snd, m0);
6256 if (m0 == NULL)
6257 break;
6258
6259 DPRINTF(WM_DEBUG_TX,
6260 ("%s: TX: have packet to transmit: %p\n",
6261 device_xname(sc->sc_dev), m0));
6262
6263 txs = &txq->txq_soft[txq->txq_snext];
6264 dmamap = txs->txs_dmamap;
6265
6266 use_tso = (m0->m_pkthdr.csum_flags &
6267 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6268
6269 /*
6270 * So says the Linux driver:
6271 * The controller does a simple calculation to make sure
6272 * there is enough room in the FIFO before initiating the
6273 * DMA for each buffer. The calc is:
6274 * 4 = ceil(buffer len / MSS)
6275 * To make sure we don't overrun the FIFO, adjust the max
6276 * buffer len if the MSS drops.
6277 */
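		/*
		 * Hence the cap below: for TSO each DMA segment is
		 * limited to 4 * MSS (segsz << 2), or to WTX_MAX_LEN if
		 * that is smaller, so that ceil(len / MSS) stays within
		 * the 4 the hardware assumes; non-TSO packets just use
		 * WTX_MAX_LEN.
		 */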
6278 dmamap->dm_maxsegsz =
6279 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6280 ? m0->m_pkthdr.segsz << 2
6281 : WTX_MAX_LEN;
6282
6283 /*
6284 * Load the DMA map. If this fails, the packet either
6285 * didn't fit in the allotted number of segments, or we
6286 * were short on resources. For the too-many-segments
6287 * case, we simply report an error and drop the packet,
6288 * since we can't sanely copy a jumbo packet to a single
6289 * buffer.
6290 */
6291 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6292 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6293 if (error) {
6294 if (error == EFBIG) {
6295 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6296 log(LOG_ERR, "%s: Tx packet consumes too many "
6297 "DMA segments, dropping...\n",
6298 device_xname(sc->sc_dev));
6299 wm_dump_mbuf_chain(sc, m0);
6300 m_freem(m0);
6301 continue;
6302 }
6303 /* Short on resources, just stop for now. */
6304 DPRINTF(WM_DEBUG_TX,
6305 ("%s: TX: dmamap load failed: %d\n",
6306 device_xname(sc->sc_dev), error));
6307 break;
6308 }
6309
6310 segs_needed = dmamap->dm_nsegs;
6311 if (use_tso) {
6312 /* For sentinel descriptor; see below. */
6313 segs_needed++;
6314 }
6315
6316 /*
6317 * Ensure we have enough descriptors free to describe
6318 * the packet. Note, we always reserve one descriptor
6319 * at the end of the ring due to the semantics of the
6320 * TDT register, plus one more in the event we need
6321 * to load offload context.
6322 */
6323 if (segs_needed > txq->txq_free - 2) {
6324 /*
6325 * Not enough free descriptors to transmit this
6326 * packet. We haven't committed anything yet,
6327 * so just unload the DMA map, put the packet
6328 			 * back on the queue, and punt. Notify the upper
6329 * layer that there are no more slots left.
6330 */
6331 DPRINTF(WM_DEBUG_TX,
6332 ("%s: TX: need %d (%d) descriptors, have %d\n",
6333 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6334 segs_needed, txq->txq_free - 1));
6335 ifp->if_flags |= IFF_OACTIVE;
6336 bus_dmamap_unload(sc->sc_dmat, dmamap);
6337 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6338 break;
6339 }
6340
6341 /*
6342 * Check for 82547 Tx FIFO bug. We need to do this
6343 * once we know we can transmit the packet, since we
6344 * do some internal FIFO space accounting here.
6345 */
6346 if (sc->sc_type == WM_T_82547 &&
6347 wm_82547_txfifo_bugchk(sc, m0)) {
6348 DPRINTF(WM_DEBUG_TX,
6349 ("%s: TX: 82547 Tx FIFO bug detected\n",
6350 device_xname(sc->sc_dev)));
6351 ifp->if_flags |= IFF_OACTIVE;
6352 bus_dmamap_unload(sc->sc_dmat, dmamap);
6353 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
6354 break;
6355 }
6356
6357 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6358
6359 DPRINTF(WM_DEBUG_TX,
6360 ("%s: TX: packet has %d (%d) DMA segments\n",
6361 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6362
6363 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6364
6365 /*
6366 * Store a pointer to the packet so that we can free it
6367 * later.
6368 *
6369 * Initially, we consider the number of descriptors the
6370 * packet uses the number of DMA segments. This may be
6371 * incremented by 1 if we do checksum offload (a descriptor
6372 * is used to set the checksum context).
6373 */
6374 txs->txs_mbuf = m0;
6375 txs->txs_firstdesc = txq->txq_next;
6376 txs->txs_ndesc = segs_needed;
6377
6378 /* Set up offload parameters for this packet. */
6379 if (m0->m_pkthdr.csum_flags &
6380 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6381 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6382 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6383 if (wm_tx_offload(sc, txs, &cksumcmd,
6384 &cksumfields) != 0) {
6385 /* Error message already displayed. */
6386 bus_dmamap_unload(sc->sc_dmat, dmamap);
6387 continue;
6388 }
6389 } else {
6390 cksumcmd = 0;
6391 cksumfields = 0;
6392 }
6393
6394 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6395
6396 /* Sync the DMA map. */
6397 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6398 BUS_DMASYNC_PREWRITE);
6399
6400 /* Initialize the transmit descriptor. */
6401 for (nexttx = txq->txq_next, seg = 0;
6402 seg < dmamap->dm_nsegs; seg++) {
6403 for (seglen = dmamap->dm_segs[seg].ds_len,
6404 curaddr = dmamap->dm_segs[seg].ds_addr;
6405 seglen != 0;
6406 curaddr += curlen, seglen -= curlen,
6407 nexttx = WM_NEXTTX(txq, nexttx)) {
6408 curlen = seglen;
6409
6410 /*
6411 * So says the Linux driver:
6412 * Work around for premature descriptor
6413 * write-backs in TSO mode. Append a
6414 * 4-byte sentinel descriptor.
6415 */
6416 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
6417 curlen > 8)
6418 curlen -= 4;
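				/*
				 * Shaving 4 bytes off the last segment
				 * leaves a 4-byte remainder, which the next
				 * pass of this loop emits as the sentinel
				 * descriptor counted in segs_needed above.
				 */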
6419
6420 wm_set_dma_addr(
6421 &txq->txq_descs[nexttx].wtx_addr, curaddr);
6422 txq->txq_descs[nexttx].wtx_cmdlen
6423 = htole32(cksumcmd | curlen);
6424 txq->txq_descs[nexttx].wtx_fields.wtxu_status
6425 = 0;
6426 txq->txq_descs[nexttx].wtx_fields.wtxu_options
6427 = cksumfields;
6428 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6429 lasttx = nexttx;
6430
6431 DPRINTF(WM_DEBUG_TX,
6432 ("%s: TX: desc %d: low %#" PRIx64 ", "
6433 "len %#04zx\n",
6434 device_xname(sc->sc_dev), nexttx,
6435 (uint64_t)curaddr, curlen));
6436 }
6437 }
6438
6439 KASSERT(lasttx != -1);
6440
6441 /*
6442 * Set up the command byte on the last descriptor of
6443 * the packet. If we're in the interrupt delay window,
6444 * delay the interrupt.
6445 */
6446 txq->txq_descs[lasttx].wtx_cmdlen |=
6447 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6448
6449 /*
6450 * If VLANs are enabled and the packet has a VLAN tag, set
6451 * up the descriptor to encapsulate the packet for us.
6452 *
6453 * This is only valid on the last descriptor of the packet.
6454 */
6455 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6456 txq->txq_descs[lasttx].wtx_cmdlen |=
6457 htole32(WTX_CMD_VLE);
6458 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6459 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6460 }
6461
6462 txs->txs_lastdesc = lasttx;
6463
6464 DPRINTF(WM_DEBUG_TX,
6465 ("%s: TX: desc %d: cmdlen 0x%08x\n",
6466 device_xname(sc->sc_dev),
6467 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6468
6469 /* Sync the descriptors we're using. */
6470 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6471 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6472
6473 /* Give the packet to the chip. */
6474 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6475
6476 DPRINTF(WM_DEBUG_TX,
6477 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6478
6479 DPRINTF(WM_DEBUG_TX,
6480 ("%s: TX: finished transmitting packet, job %d\n",
6481 device_xname(sc->sc_dev), txq->txq_snext));
6482
6483 /* Advance the tx pointer. */
6484 txq->txq_free -= txs->txs_ndesc;
6485 txq->txq_next = nexttx;
6486
6487 txq->txq_sfree--;
6488 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6489
6490 /* Pass the packet to any BPF listeners. */
6491 bpf_mtap(ifp, m0);
6492 }
6493
6494 if (m0 != NULL) {
6495 ifp->if_flags |= IFF_OACTIVE;
6496 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6497 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6498 __func__));
6499 m_freem(m0);
6500 }
6501
6502 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6503 /* No more slots; notify upper layer. */
6504 ifp->if_flags |= IFF_OACTIVE;
6505 }
6506
6507 if (txq->txq_free != ofree) {
6508 /* Set a watchdog timer in case the chip flakes out. */
6509 ifp->if_timer = 5;
6510 }
6511 }
6512
6513 /*
6514 * wm_nq_tx_offload:
6515 *
6516 * Set up TCP/IP checksumming parameters for the
6517 * specified packet, for NEWQUEUE devices
6518 */
6519 static int
6520 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6521 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6522 {
6523 struct mbuf *m0 = txs->txs_mbuf;
6524 struct m_tag *mtag;
6525 uint32_t vl_len, mssidx, cmdc;
6526 struct ether_header *eh;
6527 int offset, iphl;
6528
6529 /*
6530 * XXX It would be nice if the mbuf pkthdr had offset
6531 * fields for the protocol headers.
6532 */
6533 *cmdlenp = 0;
6534 *fieldsp = 0;
6535
6536 eh = mtod(m0, struct ether_header *);
6537 switch (htons(eh->ether_type)) {
6538 case ETHERTYPE_IP:
6539 case ETHERTYPE_IPV6:
6540 offset = ETHER_HDR_LEN;
6541 break;
6542
6543 case ETHERTYPE_VLAN:
6544 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6545 break;
6546
6547 default:
6548 /* Don't support this protocol or encapsulation. */
6549 *do_csum = false;
6550 return 0;
6551 }
6552 *do_csum = true;
6553 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6554 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6555
6556 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6557 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6558
6559 if ((m0->m_pkthdr.csum_flags &
6560 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6561 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6562 } else {
6563 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6564 }
6565 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6566 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6567
6568 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6569 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6570 << NQTXC_VLLEN_VLAN_SHIFT);
6571 *cmdlenp |= NQTX_CMD_VLE;
6572 }
6573
6574 mssidx = 0;
6575
6576 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6577 int hlen = offset + iphl;
6578 int tcp_hlen;
6579 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6580
6581 if (__predict_false(m0->m_len <
6582 (hlen + sizeof(struct tcphdr)))) {
6583 /*
6584 * TCP/IP headers are not in the first mbuf; we need
6585 * to do this the slow and painful way. Let's just
6586 * hope this doesn't happen very often.
6587 */
6588 struct tcphdr th;
6589
6590 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
6591
6592 m_copydata(m0, hlen, sizeof(th), &th);
6593 if (v4) {
6594 struct ip ip;
6595
6596 m_copydata(m0, offset, sizeof(ip), &ip);
6597 ip.ip_len = 0;
6598 m_copyback(m0,
6599 offset + offsetof(struct ip, ip_len),
6600 sizeof(ip.ip_len), &ip.ip_len);
6601 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6602 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6603 } else {
6604 struct ip6_hdr ip6;
6605
6606 m_copydata(m0, offset, sizeof(ip6), &ip6);
6607 ip6.ip6_plen = 0;
6608 m_copyback(m0,
6609 offset + offsetof(struct ip6_hdr, ip6_plen),
6610 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6611 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6612 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6613 }
6614 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6615 sizeof(th.th_sum), &th.th_sum);
6616
6617 tcp_hlen = th.th_off << 2;
6618 } else {
6619 /*
6620 * TCP/IP headers are in the first mbuf; we can do
6621 * this the easy way.
6622 */
6623 struct tcphdr *th;
6624
6625 if (v4) {
6626 struct ip *ip =
6627 (void *)(mtod(m0, char *) + offset);
6628 th = (void *)(mtod(m0, char *) + hlen);
6629
6630 ip->ip_len = 0;
6631 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6632 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6633 } else {
6634 struct ip6_hdr *ip6 =
6635 (void *)(mtod(m0, char *) + offset);
6636 th = (void *)(mtod(m0, char *) + hlen);
6637
6638 ip6->ip6_plen = 0;
6639 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6640 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6641 }
6642 tcp_hlen = th->th_off << 2;
6643 }
6644 hlen += tcp_hlen;
6645 *cmdlenp |= NQTX_CMD_TSE;
6646
6647 if (v4) {
6648 WM_EVCNT_INCR(&sc->sc_ev_txtso);
6649 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6650 } else {
6651 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
6652 *fieldsp |= NQTXD_FIELDS_TUXSM;
6653 }
6654 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6655 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6656 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6657 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6658 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6659 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6660 } else {
6661 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6662 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6663 }
6664
6665 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6666 *fieldsp |= NQTXD_FIELDS_IXSM;
6667 cmdc |= NQTXC_CMD_IP4;
6668 }
6669
6670 if (m0->m_pkthdr.csum_flags &
6671 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6672 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
6673 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6674 cmdc |= NQTXC_CMD_TCP;
6675 } else {
6676 cmdc |= NQTXC_CMD_UDP;
6677 }
6678 cmdc |= NQTXC_CMD_IP4;
6679 *fieldsp |= NQTXD_FIELDS_TUXSM;
6680 }
6681 if (m0->m_pkthdr.csum_flags &
6682 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6683 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
6684 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6685 cmdc |= NQTXC_CMD_TCP;
6686 } else {
6687 cmdc |= NQTXC_CMD_UDP;
6688 }
6689 cmdc |= NQTXC_CMD_IP6;
6690 *fieldsp |= NQTXD_FIELDS_TUXSM;
6691 }
6692
6693 /* Fill in the context descriptor. */
6694 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6695 htole32(vl_len);
6696 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6697 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6698 htole32(cmdc);
6699 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6700 htole32(mssidx);
6701 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6702 DPRINTF(WM_DEBUG_TX,
6703 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6704 txq->txq_next, 0, vl_len));
6705 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6706 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6707 txs->txs_ndesc++;
6708 return 0;
6709 }
6710
6711 /*
6712 * wm_nq_start: [ifnet interface function]
6713 *
6714 * Start packet transmission on the interface for NEWQUEUE devices
6715 */
6716 static void
6717 wm_nq_start(struct ifnet *ifp)
6718 {
6719 struct wm_softc *sc = ifp->if_softc;
6720 struct wm_txqueue *txq = &sc->sc_txq[0];
6721
6722 WM_TX_LOCK(txq);
6723 if (!sc->sc_stopping)
6724 wm_nq_start_locked(ifp);
6725 WM_TX_UNLOCK(txq);
6726 }
6727
6728 static void
6729 wm_nq_start_locked(struct ifnet *ifp)
6730 {
6731 struct wm_softc *sc = ifp->if_softc;
6732 struct wm_txqueue *txq = &sc->sc_txq[0];
6733
6734 wm_nq_send_common_locked(ifp, txq, false);
6735 }
6736
6737 static inline int
6738 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
6739 {
6740 struct wm_softc *sc = ifp->if_softc;
6741 u_int cpuid = cpu_index(curcpu());
6742
6743 /*
6744 	 * Currently, a simple distribution strategy.
6745 	 * TODO:
6746 	 * distribute by flowid (RSS hash value).
6747 */
6748
6749 return cpuid % sc->sc_ntxqueues;
6750 }
6751
6752 static int
6753 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
6754 {
6755 int qid;
6756 struct wm_softc *sc = ifp->if_softc;
6757 struct wm_txqueue *txq;
6758
6759 qid = wm_nq_select_txqueue(ifp, m);
6760 txq = &sc->sc_txq[qid];
6761
6762 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6763 m_freem(m);
6764 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6765 return ENOBUFS;
6766 }
6767
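	/*
	 * If we cannot take the Tx lock here, the packet simply stays
	 * on txq_interq and is picked up by a later call that does
	 * acquire the lock.
	 */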
6768 if (WM_TX_TRYLOCK(txq)) {
6769 /* XXXX should be per TX queue */
6770 ifp->if_obytes += m->m_pkthdr.len;
6771 if (m->m_flags & M_MCAST)
6772 ifp->if_omcasts++;
6773
6774 if (!sc->sc_stopping)
6775 wm_nq_transmit_locked(ifp, txq);
6776 WM_TX_UNLOCK(txq);
6777 }
6778
6779 return 0;
6780 }
6781
6782 static void
6783 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6784 {
6785
6786 wm_nq_send_common_locked(ifp, txq, true);
6787 }
6788
6789 static void
6790 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6791 bool is_transmit)
6792 {
6793 struct wm_softc *sc = ifp->if_softc;
6794 struct mbuf *m0;
6795 struct m_tag *mtag;
6796 struct wm_txsoft *txs;
6797 bus_dmamap_t dmamap;
6798 int error, nexttx, lasttx = -1, seg, segs_needed;
6799 bool do_csum, sent;
6800
6801 KASSERT(WM_TX_LOCKED(txq));
6802
6803 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6804 return;
6805 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
6806 return;
6807
6808 sent = false;
6809
6810 /*
6811 * Loop through the send queue, setting up transmit descriptors
6812 * until we drain the queue, or use up all available transmit
6813 * descriptors.
6814 */
6815 for (;;) {
6816 m0 = NULL;
6817
6818 /* Get a work queue entry. */
6819 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6820 wm_txeof(sc, txq);
6821 if (txq->txq_sfree == 0) {
6822 DPRINTF(WM_DEBUG_TX,
6823 ("%s: TX: no free job descriptors\n",
6824 device_xname(sc->sc_dev)));
6825 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6826 break;
6827 }
6828 }
6829
6830 /* Grab a packet off the queue. */
6831 if (is_transmit)
6832 m0 = pcq_get(txq->txq_interq);
6833 else
6834 IFQ_DEQUEUE(&ifp->if_snd, m0);
6835 if (m0 == NULL)
6836 break;
6837
6838 DPRINTF(WM_DEBUG_TX,
6839 ("%s: TX: have packet to transmit: %p\n",
6840 device_xname(sc->sc_dev), m0));
6841
6842 txs = &txq->txq_soft[txq->txq_snext];
6843 dmamap = txs->txs_dmamap;
6844
6845 /*
6846 * Load the DMA map. If this fails, the packet either
6847 * didn't fit in the allotted number of segments, or we
6848 * were short on resources. For the too-many-segments
6849 * case, we simply report an error and drop the packet,
6850 * since we can't sanely copy a jumbo packet to a single
6851 * buffer.
6852 */
6853 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6854 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6855 if (error) {
6856 if (error == EFBIG) {
6857 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6858 log(LOG_ERR, "%s: Tx packet consumes too many "
6859 "DMA segments, dropping...\n",
6860 device_xname(sc->sc_dev));
6861 wm_dump_mbuf_chain(sc, m0);
6862 m_freem(m0);
6863 continue;
6864 }
6865 /* Short on resources, just stop for now. */
6866 DPRINTF(WM_DEBUG_TX,
6867 ("%s: TX: dmamap load failed: %d\n",
6868 device_xname(sc->sc_dev), error));
6869 break;
6870 }
6871
6872 segs_needed = dmamap->dm_nsegs;
6873
6874 /*
6875 * Ensure we have enough descriptors free to describe
6876 * the packet. Note, we always reserve one descriptor
6877 * at the end of the ring due to the semantics of the
6878 * TDT register, plus one more in the event we need
6879 * to load offload context.
6880 */
6881 if (segs_needed > txq->txq_free - 2) {
6882 /*
6883 * Not enough free descriptors to transmit this
6884 * packet. We haven't committed anything yet,
6885 * so just unload the DMA map, put the packet
6886 			 * back on the queue, and punt. Notify the upper
6887 * layer that there are no more slots left.
6888 */
6889 DPRINTF(WM_DEBUG_TX,
6890 ("%s: TX: need %d (%d) descriptors, have %d\n",
6891 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6892 segs_needed, txq->txq_free - 1));
6893 txq->txq_flags |= WM_TXQ_NO_SPACE;
6894 bus_dmamap_unload(sc->sc_dmat, dmamap);
6895 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6896 break;
6897 }
6898
6899 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6900
6901 DPRINTF(WM_DEBUG_TX,
6902 ("%s: TX: packet has %d (%d) DMA segments\n",
6903 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6904
6905 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6906
6907 /*
6908 * Store a pointer to the packet so that we can free it
6909 * later.
6910 *
6911 * Initially, we consider the number of descriptors the
6912 * packet uses the number of DMA segments. This may be
6913 * incremented by 1 if we do checksum offload (a descriptor
6914 * is used to set the checksum context).
6915 */
6916 txs->txs_mbuf = m0;
6917 txs->txs_firstdesc = txq->txq_next;
6918 txs->txs_ndesc = segs_needed;
6919
6920 /* Set up offload parameters for this packet. */
6921 uint32_t cmdlen, fields, dcmdlen;
6922 if (m0->m_pkthdr.csum_flags &
6923 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6924 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6925 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6926 if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
6927 &do_csum) != 0) {
6928 /* Error message already displayed. */
6929 bus_dmamap_unload(sc->sc_dmat, dmamap);
6930 continue;
6931 }
6932 } else {
6933 do_csum = false;
6934 cmdlen = 0;
6935 fields = 0;
6936 }
6937
6938 /* Sync the DMA map. */
6939 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6940 BUS_DMASYNC_PREWRITE);
6941
6942 /* Initialize the first transmit descriptor. */
6943 nexttx = txq->txq_next;
6944 if (!do_csum) {
6945 /* setup a legacy descriptor */
6946 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
6947 dmamap->dm_segs[0].ds_addr);
6948 txq->txq_descs[nexttx].wtx_cmdlen =
6949 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
6950 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
6951 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
6952 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
6953 NULL) {
6954 txq->txq_descs[nexttx].wtx_cmdlen |=
6955 htole32(WTX_CMD_VLE);
6956 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
6957 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6958 } else {
6959 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6960 }
6961 dcmdlen = 0;
6962 } else {
6963 /* setup an advanced data descriptor */
6964 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6965 htole64(dmamap->dm_segs[0].ds_addr);
6966 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
6967 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6968 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
6969 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
6970 htole32(fields);
6971 DPRINTF(WM_DEBUG_TX,
6972 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
6973 device_xname(sc->sc_dev), nexttx,
6974 (uint64_t)dmamap->dm_segs[0].ds_addr));
6975 DPRINTF(WM_DEBUG_TX,
6976 ("\t 0x%08x%08x\n", fields,
6977 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
6978 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
6979 }
6980
6981 lasttx = nexttx;
6982 nexttx = WM_NEXTTX(txq, nexttx);
6983 /*
6984 		 * Fill in the next descriptors. The legacy and advanced
6985 		 * formats are the same from here on.
6986 */
6987 for (seg = 1; seg < dmamap->dm_nsegs;
6988 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
6989 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6990 htole64(dmamap->dm_segs[seg].ds_addr);
6991 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6992 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
6993 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
6994 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
6995 lasttx = nexttx;
6996
6997 DPRINTF(WM_DEBUG_TX,
6998 ("%s: TX: desc %d: %#" PRIx64 ", "
6999 "len %#04zx\n",
7000 device_xname(sc->sc_dev), nexttx,
7001 (uint64_t)dmamap->dm_segs[seg].ds_addr,
7002 dmamap->dm_segs[seg].ds_len));
7003 }
7004
7005 KASSERT(lasttx != -1);
7006
7007 /*
7008 * Set up the command byte on the last descriptor of
7009 * the packet. If we're in the interrupt delay window,
7010 * delay the interrupt.
7011 */
7012 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
7013 (NQTX_CMD_EOP | NQTX_CMD_RS));
7014 txq->txq_descs[lasttx].wtx_cmdlen |=
7015 htole32(WTX_CMD_EOP | WTX_CMD_RS);
7016
7017 txs->txs_lastdesc = lasttx;
7018
7019 DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
7020 device_xname(sc->sc_dev),
7021 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7022
7023 /* Sync the descriptors we're using. */
7024 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7025 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7026
7027 /* Give the packet to the chip. */
7028 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7029 sent = true;
7030
7031 DPRINTF(WM_DEBUG_TX,
7032 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7033
7034 DPRINTF(WM_DEBUG_TX,
7035 ("%s: TX: finished transmitting packet, job %d\n",
7036 device_xname(sc->sc_dev), txq->txq_snext));
7037
7038 /* Advance the tx pointer. */
7039 txq->txq_free -= txs->txs_ndesc;
7040 txq->txq_next = nexttx;
7041
7042 txq->txq_sfree--;
7043 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7044
7045 /* Pass the packet to any BPF listeners. */
7046 bpf_mtap(ifp, m0);
7047 }
7048
7049 if (m0 != NULL) {
7050 txq->txq_flags |= WM_TXQ_NO_SPACE;
7051 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
7052 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7053 __func__));
7054 m_freem(m0);
7055 }
7056
7057 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7058 /* No more slots; notify upper layer. */
7059 txq->txq_flags |= WM_TXQ_NO_SPACE;
7060 }
7061
7062 if (sent) {
7063 /* Set a watchdog timer in case the chip flakes out. */
7064 ifp->if_timer = 5;
7065 }
7066 }
7067
7068 /* Interrupt */
7069
7070 /*
7071 * wm_txeof:
7072 *
7073 * Helper; handle transmit interrupts.
7074 */
7075 static int
7076 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
7077 {
7078 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7079 struct wm_txsoft *txs;
7080 bool processed = false;
7081 int count = 0;
7082 int i;
7083 uint8_t status;
7084
7085 if (sc->sc_stopping)
7086 return 0;
7087
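	/*
	 * Clear the no-space flag up front: reclaiming descriptors
	 * below may make room again, and the send path tests this flag
	 * before queueing more work.
	 */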
7088 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
7089
7090 /*
7091 * Go through the Tx list and free mbufs for those
7092 * frames which have been transmitted.
7093 */
7094 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
7095 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
7096 txs = &txq->txq_soft[i];
7097
7098 DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
7099 device_xname(sc->sc_dev), i));
7100
7101 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
7102 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7103
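		/*
		 * The chip sets the descriptor-done (DD) bit when it has
		 * written a descriptor back; if the last descriptor of
		 * this job is not done yet, no later job is either, so
		 * stop scanning.
		 */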
7104 status =
7105 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
7106 if ((status & WTX_ST_DD) == 0) {
7107 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
7108 BUS_DMASYNC_PREREAD);
7109 break;
7110 }
7111
7112 processed = true;
7113 count++;
7114 DPRINTF(WM_DEBUG_TX,
7115 ("%s: TX: job %d done: descs %d..%d\n",
7116 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
7117 txs->txs_lastdesc));
7118
7119 /*
7120 * XXX We should probably be using the statistics
7121 * XXX registers, but I don't know if they exist
7122 * XXX on chips before the i82544.
7123 */
7124
7125 #ifdef WM_EVENT_COUNTERS
7126 if (status & WTX_ST_TU)
7127 WM_EVCNT_INCR(&sc->sc_ev_tu);
7128 #endif /* WM_EVENT_COUNTERS */
7129
7130 if (status & (WTX_ST_EC | WTX_ST_LC)) {
7131 ifp->if_oerrors++;
7132 if (status & WTX_ST_LC)
7133 log(LOG_WARNING, "%s: late collision\n",
7134 device_xname(sc->sc_dev));
7135 else if (status & WTX_ST_EC) {
7136 ifp->if_collisions += 16;
7137 log(LOG_WARNING, "%s: excessive collisions\n",
7138 device_xname(sc->sc_dev));
7139 }
7140 } else
7141 ifp->if_opackets++;
7142
7143 txq->txq_free += txs->txs_ndesc;
7144 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
7145 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
7146 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7147 m_freem(txs->txs_mbuf);
7148 txs->txs_mbuf = NULL;
7149 }
7150
7151 /* Update the dirty transmit buffer pointer. */
7152 txq->txq_sdirty = i;
7153 DPRINTF(WM_DEBUG_TX,
7154 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
7155
7156 if (count != 0)
7157 rnd_add_uint32(&sc->rnd_source, count);
7158
7159 /*
7160 * If there are no more pending transmissions, cancel the watchdog
7161 * timer.
7162 */
7163 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
7164 ifp->if_timer = 0;
7165
7166 return processed;
7167 }
7168
7169 /*
7170 * wm_rxeof:
7171 *
7172 * Helper; handle receive interrupts.
7173 */
7174 static void
7175 wm_rxeof(struct wm_rxqueue *rxq)
7176 {
7177 struct wm_softc *sc = rxq->rxq_sc;
7178 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7179 struct wm_rxsoft *rxs;
7180 struct mbuf *m;
7181 int i, len;
7182 int count = 0;
7183 uint8_t status, errors;
7184 uint16_t vlantag;
7185
7186 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
7187 rxs = &rxq->rxq_soft[i];
7188
7189 DPRINTF(WM_DEBUG_RX,
7190 ("%s: RX: checking descriptor %d\n",
7191 device_xname(sc->sc_dev), i));
7192
7193 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7194
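		/*
		 * Read the descriptor fields only after the POSTREAD
		 * sync above, so we observe the chip's write-back rather
		 * than stale data.
		 */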
7195 status = rxq->rxq_descs[i].wrx_status;
7196 errors = rxq->rxq_descs[i].wrx_errors;
7197 len = le16toh(rxq->rxq_descs[i].wrx_len);
7198 vlantag = rxq->rxq_descs[i].wrx_special;
7199
7200 if ((status & WRX_ST_DD) == 0) {
7201 /* We have processed all of the receive descriptors. */
7202 wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
7203 break;
7204 }
7205
7206 count++;
7207 if (__predict_false(rxq->rxq_discard)) {
7208 DPRINTF(WM_DEBUG_RX,
7209 ("%s: RX: discarding contents of descriptor %d\n",
7210 device_xname(sc->sc_dev), i));
7211 wm_init_rxdesc(rxq, i);
7212 if (status & WRX_ST_EOP) {
7213 /* Reset our state. */
7214 DPRINTF(WM_DEBUG_RX,
7215 ("%s: RX: resetting rxdiscard -> 0\n",
7216 device_xname(sc->sc_dev)));
7217 rxq->rxq_discard = 0;
7218 }
7219 continue;
7220 }
7221
7222 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7223 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
7224
7225 m = rxs->rxs_mbuf;
7226
7227 /*
7228 * Add a new receive buffer to the ring, unless of
7229 * course the length is zero. Treat the latter as a
7230 * failed mapping.
7231 */
7232 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
7233 /*
7234 * Failed, throw away what we've done so
7235 * far, and discard the rest of the packet.
7236 */
7237 ifp->if_ierrors++;
7238 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7239 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
7240 wm_init_rxdesc(rxq, i);
7241 if ((status & WRX_ST_EOP) == 0)
7242 rxq->rxq_discard = 1;
7243 if (rxq->rxq_head != NULL)
7244 m_freem(rxq->rxq_head);
7245 WM_RXCHAIN_RESET(rxq);
7246 DPRINTF(WM_DEBUG_RX,
7247 ("%s: RX: Rx buffer allocation failed, "
7248 "dropping packet%s\n", device_xname(sc->sc_dev),
7249 rxq->rxq_discard ? " (discard)" : ""));
7250 continue;
7251 }
7252
7253 m->m_len = len;
7254 rxq->rxq_len += len;
7255 DPRINTF(WM_DEBUG_RX,
7256 ("%s: RX: buffer at %p len %d\n",
7257 device_xname(sc->sc_dev), m->m_data, len));
7258
7259 /* If this is not the end of the packet, keep looking. */
7260 if ((status & WRX_ST_EOP) == 0) {
7261 WM_RXCHAIN_LINK(rxq, m);
7262 DPRINTF(WM_DEBUG_RX,
7263 ("%s: RX: not yet EOP, rxlen -> %d\n",
7264 device_xname(sc->sc_dev), rxq->rxq_len));
7265 continue;
7266 }
7267
7268 /*
7269 * Okay, we have the entire packet now. The chip is
7270 		 * configured to include the FCS except on I350, I354
7271 		 * and I21[01] (not all chips can be configured to
7272 		 * strip it), so we need to trim it. We may need to
7273 		 * adjust the length of the previous mbuf in the chain
7274 		 * if the current mbuf is too short. Due to an erratum,
7275 		 * the RCTL_SECRC bit in the RCTL register is always
7276 		 * set on I350, so we don't trim the FCS there.
7277 */
7278 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7279 && (sc->sc_type != WM_T_I210)
7280 && (sc->sc_type != WM_T_I211)) {
7281 if (m->m_len < ETHER_CRC_LEN) {
7282 rxq->rxq_tail->m_len
7283 -= (ETHER_CRC_LEN - m->m_len);
7284 m->m_len = 0;
7285 } else
7286 m->m_len -= ETHER_CRC_LEN;
7287 len = rxq->rxq_len - ETHER_CRC_LEN;
7288 } else
7289 len = rxq->rxq_len;
7290
7291 WM_RXCHAIN_LINK(rxq, m);
7292
7293 *rxq->rxq_tailp = NULL;
7294 m = rxq->rxq_head;
7295
7296 WM_RXCHAIN_RESET(rxq);
7297
7298 DPRINTF(WM_DEBUG_RX,
7299 ("%s: RX: have entire packet, len -> %d\n",
7300 device_xname(sc->sc_dev), len));
7301
7302 /* If an error occurred, update stats and drop the packet. */
7303 if (errors &
7304 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7305 if (errors & WRX_ER_SE)
7306 log(LOG_WARNING, "%s: symbol error\n",
7307 device_xname(sc->sc_dev));
7308 else if (errors & WRX_ER_SEQ)
7309 log(LOG_WARNING, "%s: receive sequence error\n",
7310 device_xname(sc->sc_dev));
7311 else if (errors & WRX_ER_CE)
7312 log(LOG_WARNING, "%s: CRC error\n",
7313 device_xname(sc->sc_dev));
7314 m_freem(m);
7315 continue;
7316 }
7317
7318 /* No errors. Receive the packet. */
7319 m->m_pkthdr.rcvif = ifp;
7320 m->m_pkthdr.len = len;
7321
7322 /*
7323 * If VLANs are enabled, VLAN packets have been unwrapped
7324 * for us. Associate the tag with the packet.
7325 */
7326 /* XXXX should check for i350 and i354 */
7327 if ((status & WRX_ST_VP) != 0) {
7328 VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
7329 }
7330
7331 /* Set up checksum info for this packet. */
7332 if ((status & WRX_ST_IXSM) == 0) {
7333 if (status & WRX_ST_IPCS) {
7334 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
7335 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7336 if (errors & WRX_ER_IPE)
7337 m->m_pkthdr.csum_flags |=
7338 M_CSUM_IPv4_BAD;
7339 }
7340 if (status & WRX_ST_TCPCS) {
7341 /*
7342 * Note: we don't know if this was TCP or UDP,
7343 * so we just set both bits, and expect the
7344 * upper layers to deal.
7345 */
7346 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
7347 m->m_pkthdr.csum_flags |=
7348 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7349 M_CSUM_TCPv6 | M_CSUM_UDPv6;
7350 if (errors & WRX_ER_TCPE)
7351 m->m_pkthdr.csum_flags |=
7352 M_CSUM_TCP_UDP_BAD;
7353 }
7354 }
7355
7356 ifp->if_ipackets++;
7357
7358 WM_RX_UNLOCK(rxq);
7359
7360 /* Pass this up to any BPF listeners. */
7361 bpf_mtap(ifp, m);
7362
7363 /* Pass it on. */
7364 if_percpuq_enqueue(sc->sc_ipq, m);
7365
7366 WM_RX_LOCK(rxq);
7367
7368 if (sc->sc_stopping)
7369 break;
7370 }
7371
7372 /* Update the receive pointer. */
7373 rxq->rxq_ptr = i;
7374 if (count != 0)
7375 rnd_add_uint32(&sc->rnd_source, count);
7376
7377 DPRINTF(WM_DEBUG_RX,
7378 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7379 }
7380
7381 /*
7382 * wm_linkintr_gmii:
7383 *
7384 * Helper; handle link interrupts for GMII.
7385 */
7386 static void
7387 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7388 {
7389
7390 KASSERT(WM_CORE_LOCKED(sc));
7391
7392 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7393 __func__));
7394
7395 if (icr & ICR_LSC) {
7396 uint32_t status = CSR_READ(sc, WMREG_STATUS);
7397
7398 if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7399 wm_gig_downshift_workaround_ich8lan(sc);
7400
7401 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7402 device_xname(sc->sc_dev)));
7403 mii_pollstat(&sc->sc_mii);
7404 if (sc->sc_type == WM_T_82543) {
7405 int miistatus, active;
7406
7407 /*
7408 			 * With the 82543, we need to force the MAC's
7409 			 * speed and duplex settings to match the PHY's
7410 			 * configured speed and duplex.
7411 */
7412 miistatus = sc->sc_mii.mii_media_status;
7413
7414 if (miistatus & IFM_ACTIVE) {
7415 active = sc->sc_mii.mii_media_active;
7416 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7417 switch (IFM_SUBTYPE(active)) {
7418 case IFM_10_T:
7419 sc->sc_ctrl |= CTRL_SPEED_10;
7420 break;
7421 case IFM_100_TX:
7422 sc->sc_ctrl |= CTRL_SPEED_100;
7423 break;
7424 case IFM_1000_T:
7425 sc->sc_ctrl |= CTRL_SPEED_1000;
7426 break;
7427 default:
7428 /*
7429 * fiber?
7430 					 * Should not enter here.
7431 */
7432 printf("unknown media (%x)\n", active);
7433 break;
7434 }
7435 if (active & IFM_FDX)
7436 sc->sc_ctrl |= CTRL_FD;
7437 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7438 }
7439 } else if ((sc->sc_type == WM_T_ICH8)
7440 && (sc->sc_phytype == WMPHY_IGP_3)) {
7441 wm_kmrn_lock_loss_workaround_ich8lan(sc);
7442 } else if (sc->sc_type == WM_T_PCH) {
7443 wm_k1_gig_workaround_hv(sc,
7444 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7445 }
7446
7447 if ((sc->sc_phytype == WMPHY_82578)
7448 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7449 == IFM_1000_T)) {
7450
7451 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7452 delay(200*1000); /* XXX too big */
7453
7454 /* Link stall fix for link up */
7455 wm_gmii_hv_writereg(sc->sc_dev, 1,
7456 HV_MUX_DATA_CTRL,
7457 HV_MUX_DATA_CTRL_GEN_TO_MAC
7458 | HV_MUX_DATA_CTRL_FORCE_SPEED);
7459 wm_gmii_hv_writereg(sc->sc_dev, 1,
7460 HV_MUX_DATA_CTRL,
7461 HV_MUX_DATA_CTRL_GEN_TO_MAC);
7462 }
7463 }
7464 } else if (icr & ICR_RXSEQ) {
7465 DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
7466 device_xname(sc->sc_dev)));
7467 }
7468 }
7469
7470 /*
7471 * wm_linkintr_tbi:
7472 *
7473 * Helper; handle link interrupts for TBI mode.
7474 */
7475 static void
7476 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7477 {
7478 uint32_t status;
7479
7480 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7481 __func__));
7482
7483 status = CSR_READ(sc, WMREG_STATUS);
7484 if (icr & ICR_LSC) {
7485 if (status & STATUS_LU) {
7486 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7487 device_xname(sc->sc_dev),
7488 (status & STATUS_FD) ? "FDX" : "HDX"));
7489 /*
7490 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
7491 			 * automatically, so we should re-read it into sc->sc_ctrl.
7492 */
7493
7494 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7495 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7496 sc->sc_fcrtl &= ~FCRTL_XONE;
7497 if (status & STATUS_FD)
7498 sc->sc_tctl |=
7499 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7500 else
7501 sc->sc_tctl |=
7502 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7503 if (sc->sc_ctrl & CTRL_TFCE)
7504 sc->sc_fcrtl |= FCRTL_XONE;
7505 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7506 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7507 WMREG_OLD_FCRTL : WMREG_FCRTL,
7508 sc->sc_fcrtl);
7509 sc->sc_tbi_linkup = 1;
7510 } else {
7511 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7512 device_xname(sc->sc_dev)));
7513 sc->sc_tbi_linkup = 0;
7514 }
7515 /* Update LED */
7516 wm_tbi_serdes_set_linkled(sc);
7517 } else if (icr & ICR_RXSEQ) {
7518 DPRINTF(WM_DEBUG_LINK,
7519 ("%s: LINK: Receive sequence error\n",
7520 device_xname(sc->sc_dev)));
7521 }
7522 }
7523
7524 /*
7525 * wm_linkintr_serdes:
7526 *
7527  *	Helper; handle link interrupts for SERDES mode.
7528 */
7529 static void
7530 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7531 {
7532 struct mii_data *mii = &sc->sc_mii;
7533 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7534 uint32_t pcs_adv, pcs_lpab, reg;
7535
7536 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7537 __func__));
7538
7539 if (icr & ICR_LSC) {
7540 /* Check PCS */
7541 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7542 if ((reg & PCS_LSTS_LINKOK) != 0) {
7543 mii->mii_media_status |= IFM_ACTIVE;
7544 sc->sc_tbi_linkup = 1;
7545 } else {
7546 mii->mii_media_status |= IFM_NONE;
7547 sc->sc_tbi_linkup = 0;
7548 wm_tbi_serdes_set_linkled(sc);
7549 return;
7550 }
7551 mii->mii_media_active |= IFM_1000_SX;
7552 if ((reg & PCS_LSTS_FDX) != 0)
7553 mii->mii_media_active |= IFM_FDX;
7554 else
7555 mii->mii_media_active |= IFM_HDX;
7556 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7557 /* Check flow */
7558 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7559 if ((reg & PCS_LSTS_AN_COMP) == 0) {
7560 DPRINTF(WM_DEBUG_LINK,
7561 ("XXX LINKOK but not ACOMP\n"));
7562 return;
7563 }
7564 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7565 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7566 DPRINTF(WM_DEBUG_LINK,
7567 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
7568 if ((pcs_adv & TXCW_SYM_PAUSE)
7569 && (pcs_lpab & TXCW_SYM_PAUSE)) {
7570 mii->mii_media_active |= IFM_FLOW
7571 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7572 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7573 && (pcs_adv & TXCW_ASYM_PAUSE)
7574 && (pcs_lpab & TXCW_SYM_PAUSE)
7575 && (pcs_lpab & TXCW_ASYM_PAUSE))
7576 mii->mii_media_active |= IFM_FLOW
7577 | IFM_ETH_TXPAUSE;
7578 else if ((pcs_adv & TXCW_SYM_PAUSE)
7579 && (pcs_adv & TXCW_ASYM_PAUSE)
7580 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7581 && (pcs_lpab & TXCW_ASYM_PAUSE))
7582 mii->mii_media_active |= IFM_FLOW
7583 | IFM_ETH_RXPAUSE;
7584 }
7585 /* Update LED */
7586 wm_tbi_serdes_set_linkled(sc);
7587 } else {
7588 DPRINTF(WM_DEBUG_LINK,
7589 ("%s: LINK: Receive sequence error\n",
7590 device_xname(sc->sc_dev)));
7591 }
7592 }
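
/*
 * The pause resolution above follows the IEEE 802.3 Annex 28B priority
 * rules.  A compact restatement as a hypothetical helper (illustration
 * only; "lcl" and "rem" are the local and link-partner ability words in
 * TXCW format, as read from PCS_ANADV and PCS_LPAB):
 */
#if 0
static int
wm_pcs_resolve_pause(uint32_t lcl, uint32_t rem)
{

	if ((lcl & TXCW_SYM_PAUSE) && (rem & TXCW_SYM_PAUSE))
		return IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
	if (((lcl & TXCW_SYM_PAUSE) == 0) && (lcl & TXCW_ASYM_PAUSE)
	    && (rem & TXCW_SYM_PAUSE) && (rem & TXCW_ASYM_PAUSE))
		return IFM_FLOW | IFM_ETH_TXPAUSE; /* we send pause only */
	if ((lcl & TXCW_SYM_PAUSE) && (lcl & TXCW_ASYM_PAUSE)
	    && ((rem & TXCW_SYM_PAUSE) == 0) && (rem & TXCW_ASYM_PAUSE))
		return IFM_FLOW | IFM_ETH_RXPAUSE; /* we honor pause only */
	return 0;		/* no pause in either direction */
}
#endif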
7593
7594 /*
7595 * wm_linkintr:
7596 *
7597 * Helper; handle link interrupts.
7598 */
7599 static void
7600 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7601 {
7602
7603 KASSERT(WM_CORE_LOCKED(sc));
7604
7605 if (sc->sc_flags & WM_F_HAS_MII)
7606 wm_linkintr_gmii(sc, icr);
7607 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7608 && (sc->sc_type >= WM_T_82575))
7609 wm_linkintr_serdes(sc, icr);
7610 else
7611 wm_linkintr_tbi(sc, icr);
7612 }
7613
7614 /*
7615 * wm_intr_legacy:
7616 *
7617 * Interrupt service routine for INTx and MSI.
7618 */
7619 static int
7620 wm_intr_legacy(void *arg)
7621 {
7622 struct wm_softc *sc = arg;
7623 struct wm_txqueue *txq = &sc->sc_txq[0];
7624 struct wm_rxqueue *rxq = &sc->sc_rxq[0];
7625 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7626 uint32_t icr, rndval = 0;
7627 int handled = 0;
7628
7629 DPRINTF(WM_DEBUG_TX,
7630 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
7631 while (1 /* CONSTCOND */) {
7632 icr = CSR_READ(sc, WMREG_ICR);
7633 if ((icr & sc->sc_icr) == 0)
7634 break;
7635 if (rndval == 0)
7636 rndval = icr;
7637
7638 WM_RX_LOCK(rxq);
7639
7640 if (sc->sc_stopping) {
7641 WM_RX_UNLOCK(rxq);
7642 break;
7643 }
7644
7645 handled = 1;
7646
7647 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7648 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
7649 DPRINTF(WM_DEBUG_RX,
7650 ("%s: RX: got Rx intr 0x%08x\n",
7651 device_xname(sc->sc_dev),
7652 icr & (ICR_RXDMT0 | ICR_RXT0)));
7653 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7654 }
7655 #endif
7656 wm_rxeof(rxq);
7657
7658 WM_RX_UNLOCK(rxq);
7659 WM_TX_LOCK(txq);
7660
7661 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7662 if (icr & ICR_TXDW) {
7663 DPRINTF(WM_DEBUG_TX,
7664 ("%s: TX: got TXDW interrupt\n",
7665 device_xname(sc->sc_dev)));
7666 WM_EVCNT_INCR(&sc->sc_ev_txdw);
7667 }
7668 #endif
7669 wm_txeof(sc, txq);
7670
7671 WM_TX_UNLOCK(txq);
7672 WM_CORE_LOCK(sc);
7673
7674 if (icr & (ICR_LSC | ICR_RXSEQ)) {
7675 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7676 wm_linkintr(sc, icr);
7677 }
7678
7679 WM_CORE_UNLOCK(sc);
7680
7681 if (icr & ICR_RXO) {
7682 #if defined(WM_DEBUG)
7683 log(LOG_WARNING, "%s: Receive overrun\n",
7684 device_xname(sc->sc_dev));
7685 #endif /* defined(WM_DEBUG) */
7686 }
7687 }
7688
7689 rnd_add_uint32(&sc->rnd_source, rndval);
7690
7691 if (handled) {
7692 /* Try to get more packets going. */
7693 ifp->if_start(ifp);
7694 }
7695
7696 return handled;
7697 }
7698
7699 /*
7700 * wm_txintr_msix:
7701 *
7702 * Interrupt service routine for TX complete interrupt for MSI-X.
7703 */
7704 static int
7705 wm_txintr_msix(void *arg)
7706 {
7707 struct wm_txqueue *txq = arg;
7708 struct wm_softc *sc = txq->txq_sc;
7709 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7710
7711 DPRINTF(WM_DEBUG_TX,
7712 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
7713
7714 if (sc->sc_type == WM_T_82574)
7715 CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id));
7716 else if (sc->sc_type == WM_T_82575)
7717 CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id));
7718 else
7719 CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx);
7720
7721 WM_TX_LOCK(txq);
7722
7723 if (sc->sc_stopping)
7724 goto out;
7725
7726 WM_EVCNT_INCR(&sc->sc_ev_txdw);
7727 wm_txeof(sc, txq);
7728
7729 out:
7730 WM_TX_UNLOCK(txq);
7731
7732 if (sc->sc_type == WM_T_82574)
7733 CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id));
7734 else if (sc->sc_type == WM_T_82575)
7735 CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id));
7736 else
7737 CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx);
7738
7739 /* Try to get more packets going. */
7740 if (pcq_peek(txq->txq_interq) != NULL) {
7741 WM_TX_LOCK(txq);
7742 wm_nq_transmit_locked(ifp, txq);
7743 WM_TX_UNLOCK(txq);
7744 }
7745 /*
7746 	 * Some upper-layer processing, such as ALTQ, still calls
7747 	 * ifp->if_start() directly.
7748 */
7749 if (txq->txq_id == 0) {
7750 if (!IFQ_IS_EMPTY(&ifp->if_snd))
7751 ifp->if_start(ifp);
7752 }
7753
7754 return 1;
7755 }
7756
7757 /*
7758 * wm_rxintr_msix:
7759 *
7760 * Interrupt service routine for RX interrupt for MSI-X.
7761 */
7762 static int
7763 wm_rxintr_msix(void *arg)
7764 {
7765 struct wm_rxqueue *rxq = arg;
7766 struct wm_softc *sc = rxq->rxq_sc;
7767
7768 DPRINTF(WM_DEBUG_RX,
7769 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
7770
7771 if (sc->sc_type == WM_T_82574)
7772 CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id));
7773 else if (sc->sc_type == WM_T_82575)
7774 CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id));
7775 else
7776 CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx);
7777
7778 WM_RX_LOCK(rxq);
7779
7780 if (sc->sc_stopping)
7781 goto out;
7782
7783 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7784 wm_rxeof(rxq);
7785
7786 out:
7787 WM_RX_UNLOCK(rxq);
7788
7789 if (sc->sc_type == WM_T_82574)
7790 CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id));
7791 else if (sc->sc_type == WM_T_82575)
7792 CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id));
7793 else
7794 CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx);
7795
7796 return 1;
7797 }
7798
7799 /*
7800 * wm_linkintr_msix:
7801 *
7802 * Interrupt service routine for link status change for MSI-X.
7803 */
7804 static int
7805 wm_linkintr_msix(void *arg)
7806 {
7807 struct wm_softc *sc = arg;
7808 uint32_t reg;
7809
7810 DPRINTF(WM_DEBUG_LINK,
7811 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
7812
7813 reg = CSR_READ(sc, WMREG_ICR);
7814 WM_CORE_LOCK(sc);
7815 if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
7816 goto out;
7817
7818 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7819 wm_linkintr(sc, ICR_LSC);
7820
7821 out:
7822 WM_CORE_UNLOCK(sc);
7823
7824 if (sc->sc_type == WM_T_82574)
7825 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
7826 else if (sc->sc_type == WM_T_82575)
7827 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
7828 else
7829 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
7830
7831 return 1;
7832 }
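
/*
 * All three MSI-X handlers above share the same auto-mask discipline:
 * disable their vector on entry (IMC on 82574, EIMC elsewhere), do the
 * work, then re-enable it on exit (IMS/EIMS).  Sketch of the pattern as
 * a hypothetical helper; "qbit" is the per-vector bit used above:
 */
#if 0
static void
wm_msix_vector_enable(struct wm_softc *sc, uint32_t qbit, bool enable)
{

	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, enable ? WMREG_IMS : WMREG_IMC, qbit);
	else
		CSR_WRITE(sc, enable ? WMREG_EIMS : WMREG_EIMC, qbit);
}
#endif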
7833
7834 /*
7835 * Media related.
7836 * GMII, SGMII, TBI (and SERDES)
7837 */
7838
7839 /* Common */
7840
7841 /*
7842 * wm_tbi_serdes_set_linkled:
7843 *
7844 * Update the link LED on TBI and SERDES devices.
7845 */
7846 static void
7847 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
7848 {
7849
7850 if (sc->sc_tbi_linkup)
7851 sc->sc_ctrl |= CTRL_SWDPIN(0);
7852 else
7853 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7854
7855 /* 82540 or newer devices are active low */
7856 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7857
7858 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7859 }
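
/*
 * The XOR above folds the active-low LED polarity of 82540-and-newer
 * parts into a single unconditional write; the equivalent branch form,
 * for illustration only:
 */
#if 0
	if (sc->sc_type >= WM_T_82540)
		sc->sc_ctrl ^= CTRL_SWDPIN(0);	/* LED pin is active low */
#endif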
7860
7861 /* GMII related */
7862
7863 /*
7864 * wm_gmii_reset:
7865 *
7866 * Reset the PHY.
7867 */
7868 static void
7869 wm_gmii_reset(struct wm_softc *sc)
7870 {
7871 uint32_t reg;
7872 int rv;
7873
7874 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7875 device_xname(sc->sc_dev), __func__));
7876 /* get phy semaphore */
7877 switch (sc->sc_type) {
7878 case WM_T_82571:
7879 case WM_T_82572:
7880 case WM_T_82573:
7881 case WM_T_82574:
7882 case WM_T_82583:
7883 /* XXX should get sw semaphore, too */
7884 rv = wm_get_swsm_semaphore(sc);
7885 break;
7886 case WM_T_82575:
7887 case WM_T_82576:
7888 case WM_T_82580:
7889 case WM_T_I350:
7890 case WM_T_I354:
7891 case WM_T_I210:
7892 case WM_T_I211:
7893 case WM_T_80003:
7894 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7895 break;
7896 case WM_T_ICH8:
7897 case WM_T_ICH9:
7898 case WM_T_ICH10:
7899 case WM_T_PCH:
7900 case WM_T_PCH2:
7901 case WM_T_PCH_LPT:
7902 case WM_T_PCH_SPT:
7903 rv = wm_get_swfwhw_semaphore(sc);
7904 break;
7905 default:
7906 		/* nothing to do */
7907 rv = 0;
7908 break;
7909 }
7910 if (rv != 0) {
7911 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7912 __func__);
7913 return;
7914 }
7915
7916 switch (sc->sc_type) {
7917 case WM_T_82542_2_0:
7918 case WM_T_82542_2_1:
7919 /* null */
7920 break;
7921 case WM_T_82543:
7922 /*
7923 		 * With the 82543, we need to force the MAC's speed and duplex
7924 		 * to match the PHY's current configuration.
7925 * In addition, we need to perform a hardware reset on the PHY
7926 * to take it out of reset.
7927 */
7928 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7929 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7930
7931 /* The PHY reset pin is active-low. */
7932 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7933 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
7934 CTRL_EXT_SWDPIN(4));
7935 reg |= CTRL_EXT_SWDPIO(4);
7936
7937 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7938 CSR_WRITE_FLUSH(sc);
7939 delay(10*1000);
7940
7941 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
7942 CSR_WRITE_FLUSH(sc);
7943 delay(150);
7944 #if 0
7945 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
7946 #endif
7947 delay(20*1000); /* XXX extra delay to get PHY ID? */
7948 break;
7949 case WM_T_82544: /* reset 10000us */
7950 case WM_T_82540:
7951 case WM_T_82545:
7952 case WM_T_82545_3:
7953 case WM_T_82546:
7954 case WM_T_82546_3:
7955 case WM_T_82541:
7956 case WM_T_82541_2:
7957 case WM_T_82547:
7958 case WM_T_82547_2:
7959 case WM_T_82571: /* reset 100us */
7960 case WM_T_82572:
7961 case WM_T_82573:
7962 case WM_T_82574:
7963 case WM_T_82575:
7964 case WM_T_82576:
7965 case WM_T_82580:
7966 case WM_T_I350:
7967 case WM_T_I354:
7968 case WM_T_I210:
7969 case WM_T_I211:
7970 case WM_T_82583:
7971 case WM_T_80003:
7972 /* generic reset */
7973 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7974 CSR_WRITE_FLUSH(sc);
7975 delay(20000);
7976 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7977 CSR_WRITE_FLUSH(sc);
7978 delay(20000);
7979
7980 if ((sc->sc_type == WM_T_82541)
7981 || (sc->sc_type == WM_T_82541_2)
7982 || (sc->sc_type == WM_T_82547)
7983 || (sc->sc_type == WM_T_82547_2)) {
7984 			/* workarounds for igp are done in igp_reset() */
7985 /* XXX add code to set LED after phy reset */
7986 }
7987 break;
7988 case WM_T_ICH8:
7989 case WM_T_ICH9:
7990 case WM_T_ICH10:
7991 case WM_T_PCH:
7992 case WM_T_PCH2:
7993 case WM_T_PCH_LPT:
7994 case WM_T_PCH_SPT:
7995 /* generic reset */
7996 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7997 CSR_WRITE_FLUSH(sc);
7998 delay(100);
7999 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8000 CSR_WRITE_FLUSH(sc);
8001 delay(150);
8002 break;
8003 default:
8004 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
8005 __func__);
8006 break;
8007 }
8008
8009 /* release PHY semaphore */
8010 switch (sc->sc_type) {
8011 case WM_T_82571:
8012 case WM_T_82572:
8013 case WM_T_82573:
8014 case WM_T_82574:
8015 case WM_T_82583:
8016 /* XXX should put sw semaphore, too */
8017 wm_put_swsm_semaphore(sc);
8018 break;
8019 case WM_T_82575:
8020 case WM_T_82576:
8021 case WM_T_82580:
8022 case WM_T_I350:
8023 case WM_T_I354:
8024 case WM_T_I210:
8025 case WM_T_I211:
8026 case WM_T_80003:
8027 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8028 break;
8029 case WM_T_ICH8:
8030 case WM_T_ICH9:
8031 case WM_T_ICH10:
8032 case WM_T_PCH:
8033 case WM_T_PCH2:
8034 case WM_T_PCH_LPT:
8035 case WM_T_PCH_SPT:
8036 wm_put_swfwhw_semaphore(sc);
8037 break;
8038 	default:
8039 		/* nothing to do */
8041 		break;
8042 }
8043
8044 /* get_cfg_done */
8045 wm_get_cfg_done(sc);
8046
8047 /* extra setup */
8048 switch (sc->sc_type) {
8049 case WM_T_82542_2_0:
8050 case WM_T_82542_2_1:
8051 case WM_T_82543:
8052 case WM_T_82544:
8053 case WM_T_82540:
8054 case WM_T_82545:
8055 case WM_T_82545_3:
8056 case WM_T_82546:
8057 case WM_T_82546_3:
8058 case WM_T_82541_2:
8059 case WM_T_82547_2:
8060 case WM_T_82571:
8061 case WM_T_82572:
8062 case WM_T_82573:
8063 case WM_T_82575:
8064 case WM_T_82576:
8065 case WM_T_82580:
8066 case WM_T_I350:
8067 case WM_T_I354:
8068 case WM_T_I210:
8069 case WM_T_I211:
8070 case WM_T_80003:
8071 /* null */
8072 break;
8073 case WM_T_82574:
8074 case WM_T_82583:
8075 wm_lplu_d0_disable(sc);
8076 break;
8077 case WM_T_82541:
8078 case WM_T_82547:
8079 /* XXX Configure actively LED after PHY reset */
8080 break;
8081 case WM_T_ICH8:
8082 case WM_T_ICH9:
8083 case WM_T_ICH10:
8084 case WM_T_PCH:
8085 case WM_T_PCH2:
8086 case WM_T_PCH_LPT:
8087 case WM_T_PCH_SPT:
8088 		/* Allow time for h/w to get to a quiescent state after reset */
8089 delay(10*1000);
8090
8091 if (sc->sc_type == WM_T_PCH)
8092 wm_hv_phy_workaround_ich8lan(sc);
8093
8094 if (sc->sc_type == WM_T_PCH2)
8095 wm_lv_phy_workaround_ich8lan(sc);
8096
8097 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
8098 /*
8099 * dummy read to clear the phy wakeup bit after lcd
8100 * reset
8101 */
8102 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
8103 }
8104
8105 /*
8106 		 * XXX Configure the LCD with the extended configuration region
8107 * in NVM
8108 */
8109
8110 /* Disable D0 LPLU. */
8111 if (sc->sc_type >= WM_T_PCH) /* PCH* */
8112 wm_lplu_d0_disable_pch(sc);
8113 else
8114 wm_lplu_d0_disable(sc); /* ICH* */
8115 break;
8116 default:
8117 panic("%s: unknown type\n", __func__);
8118 break;
8119 }
8120 }
8121
8122 /*
8123 * wm_get_phy_id_82575:
8124 *
8125 * Return PHY ID. Return -1 if it failed.
8126 */
8127 static int
8128 wm_get_phy_id_82575(struct wm_softc *sc)
8129 {
8130 uint32_t reg;
8131 int phyid = -1;
8132
8133 /* XXX */
8134 if ((sc->sc_flags & WM_F_SGMII) == 0)
8135 return -1;
8136
8137 if (wm_sgmii_uses_mdio(sc)) {
8138 switch (sc->sc_type) {
8139 case WM_T_82575:
8140 case WM_T_82576:
8141 reg = CSR_READ(sc, WMREG_MDIC);
8142 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
8143 break;
8144 case WM_T_82580:
8145 case WM_T_I350:
8146 case WM_T_I354:
8147 case WM_T_I210:
8148 case WM_T_I211:
8149 reg = CSR_READ(sc, WMREG_MDICNFG);
8150 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
8151 break;
8152 default:
8153 return -1;
8154 }
8155 }
8156
8157 return phyid;
8158 }
8159 
8161 /*
8162 * wm_gmii_mediainit:
8163 *
8164 * Initialize media for use on 1000BASE-T devices.
8165 */
8166 static void
8167 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
8168 {
8169 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8170 struct mii_data *mii = &sc->sc_mii;
8171 uint32_t reg;
8172
8173 /* We have GMII. */
8174 sc->sc_flags |= WM_F_HAS_MII;
8175
8176 if (sc->sc_type == WM_T_80003)
8177 sc->sc_tipg = TIPG_1000T_80003_DFLT;
8178 else
8179 sc->sc_tipg = TIPG_1000T_DFLT;
8180
8181 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
8182 if ((sc->sc_type == WM_T_82580)
8183 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
8184 || (sc->sc_type == WM_T_I211)) {
8185 reg = CSR_READ(sc, WMREG_PHPM);
8186 reg &= ~PHPM_GO_LINK_D;
8187 CSR_WRITE(sc, WMREG_PHPM, reg);
8188 }
8189
8190 /*
8191 * Let the chip set speed/duplex on its own based on
8192 * signals from the PHY.
8193 * XXXbouyer - I'm not sure this is right for the 80003,
8194 * the em driver only sets CTRL_SLU here - but it seems to work.
8195 */
8196 sc->sc_ctrl |= CTRL_SLU;
8197 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8198
8199 /* Initialize our media structures and probe the GMII. */
8200 mii->mii_ifp = ifp;
8201
8202 /*
8203 * Determine the PHY access method.
8204 *
8205 * For SGMII, use SGMII specific method.
8206 *
8207 * For some devices, we can determine the PHY access method
8208 * from sc_type.
8209 *
8210 * For ICH and PCH variants, it's difficult to determine the PHY
8211 * access method by sc_type, so use the PCI product ID for some
8212 * devices.
8213 	 * For other ICH8 variants, try the igp method first; if the PHY
8214 	 * can't be detected that way, fall back to the bm method.
8215 */
8216 switch (prodid) {
8217 case PCI_PRODUCT_INTEL_PCH_M_LM:
8218 case PCI_PRODUCT_INTEL_PCH_M_LC:
8219 /* 82577 */
8220 sc->sc_phytype = WMPHY_82577;
8221 break;
8222 case PCI_PRODUCT_INTEL_PCH_D_DM:
8223 case PCI_PRODUCT_INTEL_PCH_D_DC:
8224 /* 82578 */
8225 sc->sc_phytype = WMPHY_82578;
8226 break;
8227 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
8228 case PCI_PRODUCT_INTEL_PCH2_LV_V:
8229 /* 82579 */
8230 sc->sc_phytype = WMPHY_82579;
8231 break;
8232 case PCI_PRODUCT_INTEL_82801I_BM:
8233 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
8234 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
8235 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
8236 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
8237 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
8238 /* 82567 */
8239 sc->sc_phytype = WMPHY_BM;
8240 mii->mii_readreg = wm_gmii_bm_readreg;
8241 mii->mii_writereg = wm_gmii_bm_writereg;
8242 break;
8243 default:
8244 if (((sc->sc_flags & WM_F_SGMII) != 0)
8245 && !wm_sgmii_uses_mdio(sc)){
8246 /* SGMII */
8247 mii->mii_readreg = wm_sgmii_readreg;
8248 mii->mii_writereg = wm_sgmii_writereg;
8249 } else if (sc->sc_type >= WM_T_80003) {
8250 /* 80003 */
8251 mii->mii_readreg = wm_gmii_i80003_readreg;
8252 mii->mii_writereg = wm_gmii_i80003_writereg;
8253 } else if (sc->sc_type >= WM_T_I210) {
8254 /* I210 and I211 */
8255 mii->mii_readreg = wm_gmii_gs40g_readreg;
8256 mii->mii_writereg = wm_gmii_gs40g_writereg;
8257 } else if (sc->sc_type >= WM_T_82580) {
8258 /* 82580, I350 and I354 */
8259 sc->sc_phytype = WMPHY_82580;
8260 mii->mii_readreg = wm_gmii_82580_readreg;
8261 mii->mii_writereg = wm_gmii_82580_writereg;
8262 } else if (sc->sc_type >= WM_T_82544) {
8263 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
8264 mii->mii_readreg = wm_gmii_i82544_readreg;
8265 mii->mii_writereg = wm_gmii_i82544_writereg;
8266 } else {
8267 mii->mii_readreg = wm_gmii_i82543_readreg;
8268 mii->mii_writereg = wm_gmii_i82543_writereg;
8269 }
8270 break;
8271 }
8272 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
8273 /* All PCH* use _hv_ */
8274 mii->mii_readreg = wm_gmii_hv_readreg;
8275 mii->mii_writereg = wm_gmii_hv_writereg;
8276 }
8277 mii->mii_statchg = wm_gmii_statchg;
8278
8279 wm_gmii_reset(sc);
8280
8281 sc->sc_ethercom.ec_mii = &sc->sc_mii;
8282 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8283 wm_gmii_mediastatus);
8284
8285 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8286 || (sc->sc_type == WM_T_82580)
8287 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8288 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8289 if ((sc->sc_flags & WM_F_SGMII) == 0) {
8290 /* Attach only one port */
8291 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8292 MII_OFFSET_ANY, MIIF_DOPAUSE);
8293 } else {
8294 int i, id;
8295 uint32_t ctrl_ext;
8296
8297 id = wm_get_phy_id_82575(sc);
8298 if (id != -1) {
8299 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8300 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8301 }
8302 if ((id == -1)
8303 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8304 /* Power on sgmii phy if it is disabled */
8305 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8306 CSR_WRITE(sc, WMREG_CTRL_EXT,
8307 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
8308 CSR_WRITE_FLUSH(sc);
8309 delay(300*1000); /* XXX too long */
8310
8311 				/* try PHY addresses 1 through 7 */
8312 for (i = 1; i < 8; i++)
8313 mii_attach(sc->sc_dev, &sc->sc_mii,
8314 0xffffffff, i, MII_OFFSET_ANY,
8315 MIIF_DOPAUSE);
8316
8317 /* restore previous sfp cage power state */
8318 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8319 }
8320 }
8321 } else {
8322 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8323 MII_OFFSET_ANY, MIIF_DOPAUSE);
8324 }
8325
8326 /*
8327 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
8328 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
8329 */
8330 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8331 (LIST_FIRST(&mii->mii_phys) == NULL)) {
8332 wm_set_mdio_slow_mode_hv(sc);
8333 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8334 MII_OFFSET_ANY, MIIF_DOPAUSE);
8335 }
8336
8337 /*
8338 * (For ICH8 variants)
8339 * If PHY detection failed, use BM's r/w function and retry.
8340 */
8341 if (LIST_FIRST(&mii->mii_phys) == NULL) {
8342 /* if failed, retry with *_bm_* */
8343 mii->mii_readreg = wm_gmii_bm_readreg;
8344 mii->mii_writereg = wm_gmii_bm_writereg;
8345
8346 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8347 MII_OFFSET_ANY, MIIF_DOPAUSE);
8348 }
8349
8350 if (LIST_FIRST(&mii->mii_phys) == NULL) {
8351 		/* No PHY was found */
8352 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
8353 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
8354 sc->sc_phytype = WMPHY_NONE;
8355 } else {
8356 /*
8357 * PHY Found!
8358 * Check PHY type.
8359 */
8360 uint32_t model;
8361 struct mii_softc *child;
8362
8363 child = LIST_FIRST(&mii->mii_phys);
8364 model = child->mii_mpd_model;
8365 if (model == MII_MODEL_yyINTEL_I82566)
8366 sc->sc_phytype = WMPHY_IGP_3;
8367
8368 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8369 }
8370 }
8371
8372 /*
8373 * wm_gmii_mediachange: [ifmedia interface function]
8374 *
8375 * Set hardware to newly-selected media on a 1000BASE-T device.
8376 */
8377 static int
8378 wm_gmii_mediachange(struct ifnet *ifp)
8379 {
8380 struct wm_softc *sc = ifp->if_softc;
8381 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8382 int rc;
8383
8384 if ((ifp->if_flags & IFF_UP) == 0)
8385 return 0;
8386
8387 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8388 sc->sc_ctrl |= CTRL_SLU;
8389 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8390 || (sc->sc_type > WM_T_82543)) {
8391 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8392 } else {
8393 sc->sc_ctrl &= ~CTRL_ASDE;
8394 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8395 if (ife->ifm_media & IFM_FDX)
8396 sc->sc_ctrl |= CTRL_FD;
8397 switch (IFM_SUBTYPE(ife->ifm_media)) {
8398 case IFM_10_T:
8399 sc->sc_ctrl |= CTRL_SPEED_10;
8400 break;
8401 case IFM_100_TX:
8402 sc->sc_ctrl |= CTRL_SPEED_100;
8403 break;
8404 case IFM_1000_T:
8405 sc->sc_ctrl |= CTRL_SPEED_1000;
8406 break;
8407 default:
8408 panic("wm_gmii_mediachange: bad media 0x%x",
8409 ife->ifm_media);
8410 }
8411 }
8412 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8413 if (sc->sc_type <= WM_T_82543)
8414 wm_gmii_reset(sc);
8415
8416 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8417 return 0;
8418 return rc;
8419 }
8420
8421 /*
8422 * wm_gmii_mediastatus: [ifmedia interface function]
8423 *
8424 * Get the current interface media status on a 1000BASE-T device.
8425 */
8426 static void
8427 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8428 {
8429 struct wm_softc *sc = ifp->if_softc;
8430
8431 ether_mediastatus(ifp, ifmr);
8432 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8433 | sc->sc_flowflags;
8434 }
8435
8436 #define MDI_IO CTRL_SWDPIN(2)
8437 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
8438 #define MDI_CLK CTRL_SWDPIN(3)
8439
8440 static void
8441 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8442 {
8443 uint32_t i, v;
8444
8445 v = CSR_READ(sc, WMREG_CTRL);
8446 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8447 v |= MDI_DIR | CTRL_SWDPIO(3);
8448
8449 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8450 if (data & i)
8451 v |= MDI_IO;
8452 else
8453 v &= ~MDI_IO;
8454 CSR_WRITE(sc, WMREG_CTRL, v);
8455 CSR_WRITE_FLUSH(sc);
8456 delay(10);
8457 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8458 CSR_WRITE_FLUSH(sc);
8459 delay(10);
8460 CSR_WRITE(sc, WMREG_CTRL, v);
8461 CSR_WRITE_FLUSH(sc);
8462 delay(10);
8463 }
8464 }
8465
8466 static uint32_t
8467 wm_i82543_mii_recvbits(struct wm_softc *sc)
8468 {
8469 uint32_t v, i, data = 0;
8470
8471 v = CSR_READ(sc, WMREG_CTRL);
8472 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8473 v |= CTRL_SWDPIO(3);
8474
8475 CSR_WRITE(sc, WMREG_CTRL, v);
8476 CSR_WRITE_FLUSH(sc);
8477 delay(10);
8478 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8479 CSR_WRITE_FLUSH(sc);
8480 delay(10);
8481 CSR_WRITE(sc, WMREG_CTRL, v);
8482 CSR_WRITE_FLUSH(sc);
8483 delay(10);
8484
8485 for (i = 0; i < 16; i++) {
8486 data <<= 1;
8487 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8488 CSR_WRITE_FLUSH(sc);
8489 delay(10);
8490 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8491 data |= 1;
8492 CSR_WRITE(sc, WMREG_CTRL, v);
8493 CSR_WRITE_FLUSH(sc);
8494 delay(10);
8495 }
8496
8497 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8498 CSR_WRITE_FLUSH(sc);
8499 delay(10);
8500 CSR_WRITE(sc, WMREG_CTRL, v);
8501 CSR_WRITE_FLUSH(sc);
8502 delay(10);
8503
8504 return data;
8505 }
8506
8507 #undef MDI_IO
8508 #undef MDI_DIR
8509 #undef MDI_CLK
8510
8511 /*
8512 * wm_gmii_i82543_readreg: [mii interface function]
8513 *
8514 * Read a PHY register on the GMII (i82543 version).
8515 */
8516 static int
8517 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8518 {
8519 struct wm_softc *sc = device_private(self);
8520 int rv;
8521
8522 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8523 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8524 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8525 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8526
8527 DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8528 device_xname(sc->sc_dev), phy, reg, rv));
8529
8530 return rv;
8531 }
8532
8533 /*
8534 * wm_gmii_i82543_writereg: [mii interface function]
8535 *
8536 * Write a PHY register on the GMII (i82543 version).
8537 */
8538 static void
8539 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8540 {
8541 struct wm_softc *sc = device_private(self);
8542
8543 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8544 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8545 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8546 (MII_COMMAND_START << 30), 32);
8547 }
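
/*
 * The two bit-bang accessors above implement IEEE 802.3 clause 22 MDIO
 * frames: a 32-bit preamble of ones, then start (01), opcode (10 read /
 * 01 write), a 5-bit PHY address, a 5-bit register address, turnaround,
 * and 16 data bits.  Sketch of how the 14 command bits of a read frame
 * are packed (mirrors wm_gmii_i82543_readreg(); illustration only):
 */
#if 0
static uint32_t
mdio_c22_read_cmd(int phy, int reg)
{

	return reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12);
}
#endif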
8548
8549 /*
8550 * wm_gmii_i82544_readreg: [mii interface function]
8551 *
8552 * Read a PHY register on the GMII.
8553 */
8554 static int
8555 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8556 {
8557 struct wm_softc *sc = device_private(self);
8558 uint32_t mdic = 0;
8559 int i, rv;
8560
8561 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8562 MDIC_REGADD(reg));
8563
8564 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8565 mdic = CSR_READ(sc, WMREG_MDIC);
8566 if (mdic & MDIC_READY)
8567 break;
8568 delay(50);
8569 }
8570
8571 if ((mdic & MDIC_READY) == 0) {
8572 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8573 device_xname(sc->sc_dev), phy, reg);
8574 rv = 0;
8575 } else if (mdic & MDIC_E) {
8576 #if 0 /* This is normal if no PHY is present. */
8577 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8578 device_xname(sc->sc_dev), phy, reg);
8579 #endif
8580 rv = 0;
8581 } else {
8582 rv = MDIC_DATA(mdic);
8583 if (rv == 0xffff)
8584 rv = 0;
8585 }
8586
8587 return rv;
8588 }
8589
8590 /*
8591 * wm_gmii_i82544_writereg: [mii interface function]
8592 *
8593 * Write a PHY register on the GMII.
8594 */
8595 static void
8596 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8597 {
8598 struct wm_softc *sc = device_private(self);
8599 uint32_t mdic = 0;
8600 int i;
8601
8602 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8603 MDIC_REGADD(reg) | MDIC_DATA(val));
8604
8605 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8606 mdic = CSR_READ(sc, WMREG_MDIC);
8607 if (mdic & MDIC_READY)
8608 break;
8609 delay(50);
8610 }
8611
8612 if ((mdic & MDIC_READY) == 0)
8613 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8614 device_xname(sc->sc_dev), phy, reg);
8615 else if (mdic & MDIC_E)
8616 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8617 device_xname(sc->sc_dev), phy, reg);
8618 }
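
/*
 * Both MDIC accessors above poll MDIC_READY at 50us intervals, so the
 * worst case wait is WM_GEN_POLL_TIMEOUT * 3 * 50us.  The loop could be
 * factored out as below (hypothetical helper; the caller checks
 * MDIC_READY and MDIC_E in the returned value):
 */
#if 0
static uint32_t
wm_mdic_wait_ready(struct wm_softc *sc)
{
	uint32_t mdic = 0;
	int i;

	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(50);
	}
	return mdic;
}
#endif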
8619
8620 /*
8621 * wm_gmii_i80003_readreg: [mii interface function]
8622 *
8623  *	Read a PHY register on the Kumeran interface.
8624  * This could be handled by the PHY layer if we didn't have to lock the
8625  * resource ...
8626 */
8627 static int
8628 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8629 {
8630 struct wm_softc *sc = device_private(self);
8631 int sem;
8632 int rv;
8633
8634 if (phy != 1) /* only one PHY on kumeran bus */
8635 return 0;
8636
8637 sem = swfwphysem[sc->sc_funcid];
8638 if (wm_get_swfw_semaphore(sc, sem)) {
8639 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8640 __func__);
8641 return 0;
8642 }
8643
8644 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8645 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8646 reg >> GG82563_PAGE_SHIFT);
8647 } else {
8648 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8649 reg >> GG82563_PAGE_SHIFT);
8650 }
8651 	/* Wait another 200us to work around a bug in the MDIC ready bit */
8652 delay(200);
8653 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8654 delay(200);
8655
8656 wm_put_swfw_semaphore(sc, sem);
8657 return rv;
8658 }
8659
8660 /*
8661 * wm_gmii_i80003_writereg: [mii interface function]
8662 *
8663  *	Write a PHY register on the Kumeran interface.
8664  * This could be handled by the PHY layer if we didn't have to lock the
8665  * resource ...
8666 */
8667 static void
8668 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8669 {
8670 struct wm_softc *sc = device_private(self);
8671 int sem;
8672
8673 if (phy != 1) /* only one PHY on kumeran bus */
8674 return;
8675
8676 sem = swfwphysem[sc->sc_funcid];
8677 if (wm_get_swfw_semaphore(sc, sem)) {
8678 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8679 __func__);
8680 return;
8681 }
8682
8683 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8684 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8685 reg >> GG82563_PAGE_SHIFT);
8686 } else {
8687 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8688 reg >> GG82563_PAGE_SHIFT);
8689 }
8690 	/* Wait another 200us to work around a bug in the MDIC ready bit */
8691 delay(200);
8692 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8693 delay(200);
8694
8695 wm_put_swfw_semaphore(sc, sem);
8696 }
8697
8698 /*
8699 * wm_gmii_bm_readreg: [mii interface function]
8700 *
8701  *	Read a PHY register on the BM PHY (82567).
8702  * This could be handled by the PHY layer if we didn't have to lock the
8703  * resource ...
8704 */
8705 static int
8706 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8707 {
8708 struct wm_softc *sc = device_private(self);
8709 int sem;
8710 int rv;
8711
8712 sem = swfwphysem[sc->sc_funcid];
8713 if (wm_get_swfw_semaphore(sc, sem)) {
8714 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8715 __func__);
8716 return 0;
8717 }
8718
8719 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8720 if (phy == 1)
8721 wm_gmii_i82544_writereg(self, phy,
8722 MII_IGPHY_PAGE_SELECT, reg);
8723 else
8724 wm_gmii_i82544_writereg(self, phy,
8725 GG82563_PHY_PAGE_SELECT,
8726 reg >> GG82563_PAGE_SHIFT);
8727 }
8728
8729 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8730 wm_put_swfw_semaphore(sc, sem);
8731 return rv;
8732 }
8733
8734 /*
8735 * wm_gmii_bm_writereg: [mii interface function]
8736 *
8737  *	Write a PHY register on the BM PHY (82567).
8738  * This could be handled by the PHY layer if we didn't have to lock the
8739  * resource ...
8740 */
8741 static void
8742 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
8743 {
8744 struct wm_softc *sc = device_private(self);
8745 int sem;
8746
8747 sem = swfwphysem[sc->sc_funcid];
8748 if (wm_get_swfw_semaphore(sc, sem)) {
8749 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8750 __func__);
8751 return;
8752 }
8753
8754 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8755 if (phy == 1)
8756 wm_gmii_i82544_writereg(self, phy,
8757 MII_IGPHY_PAGE_SELECT, reg);
8758 else
8759 wm_gmii_i82544_writereg(self, phy,
8760 GG82563_PHY_PAGE_SELECT,
8761 reg >> GG82563_PAGE_SHIFT);
8762 }
8763
8764 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8765 wm_put_swfw_semaphore(sc, sem);
8766 }
8767
8768 static void
8769 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
8770 {
8771 struct wm_softc *sc = device_private(self);
8772 uint16_t regnum = BM_PHY_REG_NUM(offset);
8773 uint16_t wuce;
8774
8775 /* XXX Gig must be disabled for MDIO accesses to page 800 */
8776 if (sc->sc_type == WM_T_PCH) {
8777 		/* XXX the e1000 driver does nothing here... why? */
8778 }
8779
8780 /* Set page 769 */
8781 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8782 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8783
8784 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
8785
8786 wuce &= ~BM_WUC_HOST_WU_BIT;
8787 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
8788 wuce | BM_WUC_ENABLE_BIT);
8789
8790 /* Select page 800 */
8791 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8792 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
8793
8794 /* Write page 800 */
8795 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
8796
8797 if (rd)
8798 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
8799 else
8800 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
8801
8802 /* Set page 769 */
8803 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8804 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8805
8806 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
8807 }
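
/*
 * Usage sketch for the accessor above: reading BM_WUC through page 800,
 * which is how wm_gmii_hv_readreg() below dispatches wakeup registers
 * (code fragment for illustration only, assumes a caller with "self"):
 */
#if 0
	int16_t val;

	wm_access_phy_wakeup_reg_bm(self, BM_WUC, &val, 1);	/* rd = 1 */
#endif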
8808
8809 /*
8810 * wm_gmii_hv_readreg: [mii interface function]
8811 *
8812  *	Read a PHY register on the HV (PCH) PHY.
8813  * This could be handled by the PHY layer if we didn't have to lock the
8814  * resource ...
8815 */
8816 static int
8817 wm_gmii_hv_readreg(device_t self, int phy, int reg)
8818 {
8819 struct wm_softc *sc = device_private(self);
8820 uint16_t page = BM_PHY_REG_PAGE(reg);
8821 uint16_t regnum = BM_PHY_REG_NUM(reg);
8822 uint16_t val;
8823 int rv;
8824
8825 if (wm_get_swfwhw_semaphore(sc)) {
8826 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8827 __func__);
8828 return 0;
8829 }
8830
8831 /* XXX Workaround failure in MDIO access while cable is disconnected */
8832 if (sc->sc_phytype == WMPHY_82577) {
8833 /* XXX must write */
8834 }
8835
8836 /* Page 800 works differently than the rest so it has its own func */
8837 	if (page == BM_WUC_PAGE) {
8838 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
8839 		return val;
8840 	}
8841
8842 	/*
8843 	 * Pages lower than 768 work differently from the rest and would
8844 	 * need their own function; they are not handled here.
8845 	 */
8846 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8847 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
8848 		return 0;
8849 	}
8850
8851 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8852 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8853 page << BME1000_PAGE_SHIFT);
8854 }
8855
8856 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
8857 wm_put_swfwhw_semaphore(sc);
8858 return rv;
8859 }
8860
8861 /*
8862 * wm_gmii_hv_writereg: [mii interface function]
8863 *
8864  *	Write a PHY register on the HV (PCH) PHY.
8865  * This could be handled by the PHY layer if we didn't have to lock the
8866  * resource ...
8867 */
8868 static void
8869 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
8870 {
8871 struct wm_softc *sc = device_private(self);
8872 uint16_t page = BM_PHY_REG_PAGE(reg);
8873 uint16_t regnum = BM_PHY_REG_NUM(reg);
8874
8875 if (wm_get_swfwhw_semaphore(sc)) {
8876 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8877 __func__);
8878 return;
8879 }
8880
8881 /* XXX Workaround failure in MDIO access while cable is disconnected */
8882
8883 /* Page 800 works differently than the rest so it has its own func */
8884 	if (page == BM_WUC_PAGE) {
8885 		uint16_t tmp;
8886 
8887 		tmp = val;
8888 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
8889 		return;
8890 	}
8891
8892 	/*
8893 	 * Pages lower than 768 work differently from the rest and would
8894 	 * need their own function; they are not handled here.
8895 	 */
8896 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8897 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* don't leak the semaphore */
8898 		return;
8899 	}
8900
8901 /*
8902 * XXX Workaround MDIO accesses being disabled after entering IEEE
8903 * Power Down (whenever bit 11 of the PHY control register is set)
8904 */
8905
8906 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8907 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8908 page << BME1000_PAGE_SHIFT);
8909 }
8910
8911 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
8912 wm_put_swfwhw_semaphore(sc);
8913 }
8914
8915 /*
8916 * wm_gmii_82580_readreg: [mii interface function]
8917 *
8918 * Read a PHY register on the 82580 and I350.
8919 * This could be handled by the PHY layer if we didn't have to lock the
8920  * resource ...
8921 */
8922 static int
8923 wm_gmii_82580_readreg(device_t self, int phy, int reg)
8924 {
8925 struct wm_softc *sc = device_private(self);
8926 int sem;
8927 int rv;
8928
8929 sem = swfwphysem[sc->sc_funcid];
8930 if (wm_get_swfw_semaphore(sc, sem)) {
8931 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8932 __func__);
8933 return 0;
8934 }
8935
8936 rv = wm_gmii_i82544_readreg(self, phy, reg);
8937
8938 wm_put_swfw_semaphore(sc, sem);
8939 return rv;
8940 }
8941
8942 /*
8943 * wm_gmii_82580_writereg: [mii interface function]
8944 *
8945 * Write a PHY register on the 82580 and I350.
8946 * This could be handled by the PHY layer if we didn't have to lock the
8947  * resource ...
8948 */
8949 static void
8950 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
8951 {
8952 struct wm_softc *sc = device_private(self);
8953 int sem;
8954
8955 sem = swfwphysem[sc->sc_funcid];
8956 if (wm_get_swfw_semaphore(sc, sem)) {
8957 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8958 __func__);
8959 return;
8960 }
8961
8962 wm_gmii_i82544_writereg(self, phy, reg, val);
8963
8964 wm_put_swfw_semaphore(sc, sem);
8965 }
8966
8967 /*
8968 * wm_gmii_gs40g_readreg: [mii interface function]
8969 *
8970  *	Read a PHY register on the I210 and I211.
8971  * This could be handled by the PHY layer if we didn't have to lock the
8972  * resource ...
8973 */
8974 static int
8975 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
8976 {
8977 struct wm_softc *sc = device_private(self);
8978 int sem;
8979 int page, offset;
8980 int rv;
8981
8982 /* Acquire semaphore */
8983 sem = swfwphysem[sc->sc_funcid];
8984 if (wm_get_swfw_semaphore(sc, sem)) {
8985 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8986 __func__);
8987 return 0;
8988 }
8989
8990 /* Page select */
8991 page = reg >> GS40G_PAGE_SHIFT;
8992 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8993
8994 /* Read reg */
8995 offset = reg & GS40G_OFFSET_MASK;
8996 rv = wm_gmii_i82544_readreg(self, phy, offset);
8997
8998 wm_put_swfw_semaphore(sc, sem);
8999 return rv;
9000 }
9001
9002 /*
9003 * wm_gmii_gs40g_writereg: [mii interface function]
9004 *
9005 * Write a PHY register on the I210 and I211.
9006 * This could be handled by the PHY layer if we didn't have to lock the
9007  * resource ...
9008 */
9009 static void
9010 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
9011 {
9012 struct wm_softc *sc = device_private(self);
9013 int sem;
9014 int page, offset;
9015
9016 /* Acquire semaphore */
9017 sem = swfwphysem[sc->sc_funcid];
9018 if (wm_get_swfw_semaphore(sc, sem)) {
9019 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9020 __func__);
9021 return;
9022 }
9023
9024 /* Page select */
9025 page = reg >> GS40G_PAGE_SHIFT;
9026 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
9027
9028 /* Write reg */
9029 offset = reg & GS40G_OFFSET_MASK;
9030 wm_gmii_i82544_writereg(self, phy, offset, val);
9031
9032 /* Release semaphore */
9033 wm_put_swfw_semaphore(sc, sem);
9034 }
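
/*
 * GS40G (I210/I211) register numbers carry the page in the high bits
 * and the register offset in the low bits, as split apart above.  A
 * hypothetical composing macro (GS40G_PAGE_SHIFT and GS40G_OFFSET_MASK
 * as used by the accessors; illustration only):
 */
#if 0
#define	GS40G_REG(page, off)						\
	(((page) << GS40G_PAGE_SHIFT) | ((off) & GS40G_OFFSET_MASK))
#endif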
9035
9036 /*
9037 * wm_gmii_statchg: [mii interface function]
9038 *
9039 * Callback from MII layer when media changes.
9040 */
9041 static void
9042 wm_gmii_statchg(struct ifnet *ifp)
9043 {
9044 struct wm_softc *sc = ifp->if_softc;
9045 struct mii_data *mii = &sc->sc_mii;
9046
9047 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
9048 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9049 sc->sc_fcrtl &= ~FCRTL_XONE;
9050
9051 /*
9052 * Get flow control negotiation result.
9053 */
9054 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
9055 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
9056 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
9057 mii->mii_media_active &= ~IFM_ETH_FMASK;
9058 }
9059
9060 if (sc->sc_flowflags & IFM_FLOW) {
9061 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
9062 sc->sc_ctrl |= CTRL_TFCE;
9063 sc->sc_fcrtl |= FCRTL_XONE;
9064 }
9065 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
9066 sc->sc_ctrl |= CTRL_RFCE;
9067 }
9068
9069 if (sc->sc_mii.mii_media_active & IFM_FDX) {
9070 DPRINTF(WM_DEBUG_LINK,
9071 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
9072 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9073 } else {
9074 DPRINTF(WM_DEBUG_LINK,
9075 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
9076 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9077 }
9078
9079 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9080 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9081 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
9082 : WMREG_FCRTL, sc->sc_fcrtl);
9083 if (sc->sc_type == WM_T_80003) {
9084 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
9085 case IFM_1000_T:
9086 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9087 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
9088 sc->sc_tipg = TIPG_1000T_80003_DFLT;
9089 break;
9090 default:
9091 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9092 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
9093 sc->sc_tipg = TIPG_10_100_80003_DFLT;
9094 break;
9095 }
9096 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
9097 }
9098 }
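
/*
 * Restating the flow-control plumbing above: on autonegotiated media
 * the resolved pause bits are latched from mii_media_active into
 * sc_flowflags and then applied to the MAC (TFCE additionally enables
 * FCRTL_XONE above).  Sketch of the mapping as a hypothetical helper:
 */
#if 0
static uint32_t
wm_flowflags_to_ctrl(int flowflags)
{
	uint32_t ctrl = 0;

	if ((flowflags & IFM_FLOW) == 0)
		return 0;		/* flow control not negotiated */
	if (flowflags & IFM_ETH_TXPAUSE)
		ctrl |= CTRL_TFCE;	/* we may transmit pause frames */
	if (flowflags & IFM_ETH_RXPAUSE)
		ctrl |= CTRL_RFCE;	/* we honor received pause frames */
	return ctrl;
}
#endif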
9099
9100 /*
9101 * wm_kmrn_readreg:
9102 *
9103  *	Read a Kumeran register
9104 */
9105 static int
9106 wm_kmrn_readreg(struct wm_softc *sc, int reg)
9107 {
9108 int rv;
9109
9110 if (sc->sc_flags & WM_F_LOCK_SWFW) {
9111 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
9112 aprint_error_dev(sc->sc_dev,
9113 "%s: failed to get semaphore\n", __func__);
9114 return 0;
9115 }
9116 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
9117 if (wm_get_swfwhw_semaphore(sc)) {
9118 aprint_error_dev(sc->sc_dev,
9119 "%s: failed to get semaphore\n", __func__);
9120 return 0;
9121 }
9122 }
9123
9124 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9125 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9126 KUMCTRLSTA_REN);
9127 CSR_WRITE_FLUSH(sc);
9128 delay(2);
9129
9130 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
9131
9132 if (sc->sc_flags & WM_F_LOCK_SWFW)
9133 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9134 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9135 wm_put_swfwhw_semaphore(sc);
9136
9137 return rv;
9138 }
9139
9140 /*
9141 * wm_kmrn_writereg:
9142 *
9143  *	Write a Kumeran register
9144 */
9145 static void
9146 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
9147 {
9148
9149 if (sc->sc_flags & WM_F_LOCK_SWFW) {
9150 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
9151 aprint_error_dev(sc->sc_dev,
9152 "%s: failed to get semaphore\n", __func__);
9153 return;
9154 }
9155 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
9156 if (wm_get_swfwhw_semaphore(sc)) {
9157 aprint_error_dev(sc->sc_dev,
9158 "%s: failed to get semaphore\n", __func__);
9159 return;
9160 }
9161 }
9162
9163 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9164 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9165 (val & KUMCTRLSTA_MASK));
9166
9167 if (sc->sc_flags & WM_F_LOCK_SWFW)
9168 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9169 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9170 wm_put_swfwhw_semaphore(sc);
9171 }
9172
9173 /* SGMII related */
9174
9175 /*
9176 * wm_sgmii_uses_mdio
9177 *
9178 * Check whether the transaction is to the internal PHY or the external
9179 * MDIO interface. Return true if it's MDIO.
9180 */
9181 static bool
9182 wm_sgmii_uses_mdio(struct wm_softc *sc)
9183 {
9184 uint32_t reg;
9185 bool ismdio = false;
9186
9187 switch (sc->sc_type) {
9188 case WM_T_82575:
9189 case WM_T_82576:
9190 reg = CSR_READ(sc, WMREG_MDIC);
9191 ismdio = ((reg & MDIC_DEST) != 0);
9192 break;
9193 case WM_T_82580:
9194 case WM_T_I350:
9195 case WM_T_I354:
9196 case WM_T_I210:
9197 case WM_T_I211:
9198 reg = CSR_READ(sc, WMREG_MDICNFG);
9199 ismdio = ((reg & MDICNFG_DEST) != 0);
9200 break;
9201 default:
9202 break;
9203 }
9204
9205 return ismdio;
9206 }
9207
9208 /*
9209 * wm_sgmii_readreg: [mii interface function]
9210 *
9211 * Read a PHY register on the SGMII
9212 * This could be handled by the PHY layer if we didn't have to lock the
9213  * resource ...
9214 */
9215 static int
9216 wm_sgmii_readreg(device_t self, int phy, int reg)
9217 {
9218 struct wm_softc *sc = device_private(self);
9219 uint32_t i2ccmd;
9220 int i, rv;
9221
9222 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
9223 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9224 __func__);
9225 return 0;
9226 }
9227
9228 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9229 | (phy << I2CCMD_PHY_ADDR_SHIFT)
9230 | I2CCMD_OPCODE_READ;
9231 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9232
9233 /* Poll the ready bit */
9234 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9235 delay(50);
9236 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9237 if (i2ccmd & I2CCMD_READY)
9238 break;
9239 }
9240 if ((i2ccmd & I2CCMD_READY) == 0)
9241 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
9242 if ((i2ccmd & I2CCMD_ERROR) != 0)
9243 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9244
9245 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
9246
9247 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
9248 return rv;
9249 }
9250
9251 /*
9252 * wm_sgmii_writereg: [mii interface function]
9253 *
9254 * Write a PHY register on the SGMII.
9255 * This could be handled by the PHY layer if we didn't have to lock the
9256  * resource ...
9257 */
9258 static void
9259 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
9260 {
9261 struct wm_softc *sc = device_private(self);
9262 uint32_t i2ccmd;
9263 int i;
9264 int val_swapped;
9265
9266 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
9267 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9268 __func__);
9269 return;
9270 }
9271 /* Swap the data bytes for the I2C interface */
9272 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
9273 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9274 | (phy << I2CCMD_PHY_ADDR_SHIFT)
9275 | I2CCMD_OPCODE_WRITE | val_swapped;
9276 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9277
9278 /* Poll the ready bit */
9279 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9280 delay(50);
9281 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9282 if (i2ccmd & I2CCMD_READY)
9283 break;
9284 }
9285 if ((i2ccmd & I2CCMD_READY) == 0)
9286 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
9287 if ((i2ccmd & I2CCMD_ERROR) != 0)
9288 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9289
9290 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
9291 }
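
/*
 * Both SGMII accessors above swap the two data bytes because the
 * I2CCMD data field is transferred MSB first on the wire.  The swap,
 * factored out for illustration only:
 */
#if 0
static inline uint16_t
wm_i2c_swap16(uint16_t v)
{

	return ((v >> 8) & 0x00ff) | ((v << 8) & 0xff00);
}
#endif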
9292
9293 /* TBI related */
9294
9295 /*
9296 * wm_tbi_mediainit:
9297 *
9298 * Initialize media for use on 1000BASE-X devices.
9299 */
9300 static void
9301 wm_tbi_mediainit(struct wm_softc *sc)
9302 {
9303 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9304 const char *sep = "";
9305
9306 if (sc->sc_type < WM_T_82543)
9307 sc->sc_tipg = TIPG_WM_DFLT;
9308 else
9309 sc->sc_tipg = TIPG_LG_DFLT;
9310
9311 sc->sc_tbi_serdes_anegticks = 5;
9312
9313 /* Initialize our media structures */
9314 sc->sc_mii.mii_ifp = ifp;
9315 sc->sc_ethercom.ec_mii = &sc->sc_mii;
9316
9317 if ((sc->sc_type >= WM_T_82575)
9318 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9319 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9320 wm_serdes_mediachange, wm_serdes_mediastatus);
9321 else
9322 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9323 wm_tbi_mediachange, wm_tbi_mediastatus);
9324
9325 /*
9326 * SWD Pins:
9327 *
9328 * 0 = Link LED (output)
9329 * 1 = Loss Of Signal (input)
9330 */
9331 sc->sc_ctrl |= CTRL_SWDPIO(0);
9332
9333 /* XXX Perhaps this is only for TBI */
9334 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9335 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9336
9337 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9338 sc->sc_ctrl &= ~CTRL_LRST;
9339
9340 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9341
9342 #define ADD(ss, mm, dd) \
9343 do { \
9344 aprint_normal("%s%s", sep, ss); \
9345 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
9346 sep = ", "; \
9347 } while (/*CONSTCOND*/0)
9348
9349 aprint_normal_dev(sc->sc_dev, "");
9350
9351 /* Only 82545 is LX */
9352 if (sc->sc_type == WM_T_82545) {
9353 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9354 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
9355 } else {
9356 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9357 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
9358 }
9359 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
9360 aprint_normal("\n");
9361
9362 #undef ADD
9363
9364 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9365 }
9366
9367 /*
9368 * wm_tbi_mediachange: [ifmedia interface function]
9369 *
9370 * Set hardware to newly-selected media on a 1000BASE-X device.
9371 */
9372 static int
9373 wm_tbi_mediachange(struct ifnet *ifp)
9374 {
9375 struct wm_softc *sc = ifp->if_softc;
9376 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9377 uint32_t status;
9378 int i;
9379
9380 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9381 /* XXX need some work for >= 82571 and < 82575 */
9382 if (sc->sc_type < WM_T_82575)
9383 return 0;
9384 }
9385
9386 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9387 || (sc->sc_type >= WM_T_82575))
9388 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9389
9390 sc->sc_ctrl &= ~CTRL_LRST;
9391 sc->sc_txcw = TXCW_ANE;
9392 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9393 sc->sc_txcw |= TXCW_FD | TXCW_HD;
9394 else if (ife->ifm_media & IFM_FDX)
9395 sc->sc_txcw |= TXCW_FD;
9396 else
9397 sc->sc_txcw |= TXCW_HD;
9398
9399 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9400 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9401
9402 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
9403 device_xname(sc->sc_dev), sc->sc_txcw));
9404 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9405 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9406 CSR_WRITE_FLUSH(sc);
9407 delay(1000);
9408
9409 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9410 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
9411
9412 	/*
9413 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
9414 	 * optics detect a signal; on the 82544 and older the sense is
9415 	 * inverted, so 0 means signal present.
9416 	 */
9416 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9417 /* Have signal; wait for the link to come up. */
9418 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9419 delay(10000);
9420 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9421 break;
9422 }
9423
9424 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
9425 device_xname(sc->sc_dev),i));
9426
9427 status = CSR_READ(sc, WMREG_STATUS);
9428 DPRINTF(WM_DEBUG_LINK,
9429 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9430 device_xname(sc->sc_dev),status, STATUS_LU));
9431 if (status & STATUS_LU) {
9432 /* Link is up. */
9433 DPRINTF(WM_DEBUG_LINK,
9434 ("%s: LINK: set media -> link up %s\n",
9435 device_xname(sc->sc_dev),
9436 (status & STATUS_FD) ? "FDX" : "HDX"));
9437
9438 /*
9439 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
9440 			 * automatically, so we should re-read it into sc->sc_ctrl.
9441 */
9442 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9443 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9444 sc->sc_fcrtl &= ~FCRTL_XONE;
9445 if (status & STATUS_FD)
9446 sc->sc_tctl |=
9447 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9448 else
9449 sc->sc_tctl |=
9450 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9451 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9452 sc->sc_fcrtl |= FCRTL_XONE;
9453 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9454 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9455 WMREG_OLD_FCRTL : WMREG_FCRTL,
9456 sc->sc_fcrtl);
9457 sc->sc_tbi_linkup = 1;
9458 } else {
9459 if (i == WM_LINKUP_TIMEOUT)
9460 wm_check_for_link(sc);
9461 /* Link is down. */
9462 DPRINTF(WM_DEBUG_LINK,
9463 ("%s: LINK: set media -> link down\n",
9464 device_xname(sc->sc_dev)));
9465 sc->sc_tbi_linkup = 0;
9466 }
9467 } else {
9468 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9469 device_xname(sc->sc_dev)));
9470 sc->sc_tbi_linkup = 0;
9471 }
9472
9473 wm_tbi_serdes_set_linkled(sc);
9474
9475 return 0;
9476 }
9477
9478 /*
9479 * wm_tbi_mediastatus: [ifmedia interface function]
9480 *
9481 * Get the current interface media status on a 1000BASE-X device.
9482 */
9483 static void
9484 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9485 {
9486 struct wm_softc *sc = ifp->if_softc;
9487 uint32_t ctrl, status;
9488
9489 ifmr->ifm_status = IFM_AVALID;
9490 ifmr->ifm_active = IFM_ETHER;
9491
9492 status = CSR_READ(sc, WMREG_STATUS);
9493 if ((status & STATUS_LU) == 0) {
9494 ifmr->ifm_active |= IFM_NONE;
9495 return;
9496 }
9497
9498 ifmr->ifm_status |= IFM_ACTIVE;
9499 /* Only 82545 is LX */
9500 if (sc->sc_type == WM_T_82545)
9501 ifmr->ifm_active |= IFM_1000_LX;
9502 else
9503 ifmr->ifm_active |= IFM_1000_SX;
9504 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9505 ifmr->ifm_active |= IFM_FDX;
9506 else
9507 ifmr->ifm_active |= IFM_HDX;
9508 ctrl = CSR_READ(sc, WMREG_CTRL);
9509 if (ctrl & CTRL_RFCE)
9510 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9511 if (ctrl & CTRL_TFCE)
9512 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9513 }
9514
9515 /* XXX TBI only */
9516 static int
9517 wm_check_for_link(struct wm_softc *sc)
9518 {
9519 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9520 uint32_t rxcw;
9521 uint32_t ctrl;
9522 uint32_t status;
9523 uint32_t sig;
9524
9525 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9526 /* XXX need some work for >= 82571 */
9527 if (sc->sc_type >= WM_T_82571) {
9528 sc->sc_tbi_linkup = 1;
9529 return 0;
9530 }
9531 }
9532
9533 rxcw = CSR_READ(sc, WMREG_RXCW);
9534 ctrl = CSR_READ(sc, WMREG_CTRL);
9535 status = CSR_READ(sc, WMREG_STATUS);
9536
9537 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9538
9539 DPRINTF(WM_DEBUG_LINK,
9540 ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9541 device_xname(sc->sc_dev), __func__,
9542 ((ctrl & CTRL_SWDPIN(1)) == sig),
9543 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
9544
	/*
	 * SWDPIN LU RXCW
	 *	0    0	 0
	 *	0    0	 1	(should not happen)
	 *	0    1	 0	(should not happen)
	 *	0    1	 1	(should not happen)
	 *	1    0	 0	Disable autonegotiation and force link up
	 *	1    0	 1	got /C/ but no link yet
	 *	1    1	 0	(link up)
	 *	1    1	 1	If IFM_AUTO, restart autonegotiation
	 */
9557 if (((ctrl & CTRL_SWDPIN(1)) == sig)
9558 && ((status & STATUS_LU) == 0)
9559 && ((rxcw & RXCW_C) == 0)) {
9560 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9561 __func__));
9562 sc->sc_tbi_linkup = 0;
9563 /* Disable auto-negotiation in the TXCW register */
9564 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9565
		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: the hardware updates TFCE and RFCE in CTRL
		 * automatically, so keep sc->sc_ctrl in sync with the
		 * register.
		 */
9572 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9573 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9574 } else if (((status & STATUS_LU) != 0)
9575 && ((rxcw & RXCW_C) != 0)
9576 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9577 sc->sc_tbi_linkup = 1;
9578 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9579 __func__));
9580 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9581 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9582 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9583 && ((rxcw & RXCW_C) != 0)) {
9584 DPRINTF(WM_DEBUG_LINK, ("/C/"));
9585 } else {
9586 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9587 status));
9588 }
9589
9590 return 0;
9591 }
9592
9593 /*
9594 * wm_tbi_tick:
9595 *
9596 * Check the link on TBI devices.
9597 * This function acts as mii_tick().
9598 */
9599 static void
9600 wm_tbi_tick(struct wm_softc *sc)
9601 {
9602 struct mii_data *mii = &sc->sc_mii;
9603 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9604 uint32_t status;
9605
9606 KASSERT(WM_CORE_LOCKED(sc));
9607
9608 status = CSR_READ(sc, WMREG_STATUS);
9609
9610 /* XXX is this needed? */
9611 (void)CSR_READ(sc, WMREG_RXCW);
9612 (void)CSR_READ(sc, WMREG_CTRL);
9613
9614 /* set link status */
9615 if ((status & STATUS_LU) == 0) {
9616 DPRINTF(WM_DEBUG_LINK,
9617 ("%s: LINK: checklink -> down\n",
9618 device_xname(sc->sc_dev)));
9619 sc->sc_tbi_linkup = 0;
9620 } else if (sc->sc_tbi_linkup == 0) {
9621 DPRINTF(WM_DEBUG_LINK,
9622 ("%s: LINK: checklink -> up %s\n",
9623 device_xname(sc->sc_dev),
9624 (status & STATUS_FD) ? "FDX" : "HDX"));
9625 sc->sc_tbi_linkup = 1;
9626 sc->sc_tbi_serdes_ticks = 0;
9627 }
9628
9629 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9630 goto setled;
9631
9632 if ((status & STATUS_LU) == 0) {
9633 sc->sc_tbi_linkup = 0;
9634 /* If the timer expired, retry autonegotiation */
9635 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9636 && (++sc->sc_tbi_serdes_ticks
9637 >= sc->sc_tbi_serdes_anegticks)) {
9638 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9639 sc->sc_tbi_serdes_ticks = 0;
9640 /*
9641 * Reset the link, and let autonegotiation do
9642 * its thing
9643 */
9644 sc->sc_ctrl |= CTRL_LRST;
9645 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9646 CSR_WRITE_FLUSH(sc);
9647 delay(1000);
9648 sc->sc_ctrl &= ~CTRL_LRST;
9649 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9650 CSR_WRITE_FLUSH(sc);
9651 delay(1000);
9652 CSR_WRITE(sc, WMREG_TXCW,
9653 sc->sc_txcw & ~TXCW_ANE);
9654 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9655 }
9656 }
9657
9658 setled:
9659 wm_tbi_serdes_set_linkled(sc);
9660 }
9661
9662 /* SERDES related */
9663 static void
9664 wm_serdes_power_up_link_82575(struct wm_softc *sc)
9665 {
9666 uint32_t reg;
9667
9668 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9669 && ((sc->sc_flags & WM_F_SGMII) == 0))
9670 return;
9671
9672 reg = CSR_READ(sc, WMREG_PCS_CFG);
9673 reg |= PCS_CFG_PCS_EN;
9674 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
9675
9676 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9677 reg &= ~CTRL_EXT_SWDPIN(3);
9678 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9679 CSR_WRITE_FLUSH(sc);
9680 }
9681
9682 static int
9683 wm_serdes_mediachange(struct ifnet *ifp)
9684 {
9685 struct wm_softc *sc = ifp->if_softc;
9686 bool pcs_autoneg = true; /* XXX */
9687 uint32_t ctrl_ext, pcs_lctl, reg;
9688
9689 /* XXX Currently, this function is not called on 8257[12] */
9690 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9691 || (sc->sc_type >= WM_T_82575))
9692 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9693
9694 wm_serdes_power_up_link_82575(sc);
9695
9696 sc->sc_ctrl |= CTRL_SLU;
9697
9698 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
9699 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
9700
9701 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9702 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
9703 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
9704 case CTRL_EXT_LINK_MODE_SGMII:
9705 pcs_autoneg = true;
9706 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
9707 break;
9708 case CTRL_EXT_LINK_MODE_1000KX:
9709 pcs_autoneg = false;
9710 /* FALLTHROUGH */
9711 default:
9712 if ((sc->sc_type == WM_T_82575)
9713 || (sc->sc_type == WM_T_82576)) {
9714 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
9715 pcs_autoneg = false;
9716 }
9717 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
9718 | CTRL_FRCFDX;
9719 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
9720 }
9721 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9722
9723 if (pcs_autoneg) {
9724 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
9725 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
9726
9727 reg = CSR_READ(sc, WMREG_PCS_ANADV);
9728 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
9729 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
9730 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
9731 } else
9732 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
9733
9734 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
9735
9737 return 0;
9738 }
9739
9740 static void
9741 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9742 {
9743 struct wm_softc *sc = ifp->if_softc;
9744 struct mii_data *mii = &sc->sc_mii;
9745 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9746 uint32_t pcs_adv, pcs_lpab, reg;
9747
9748 ifmr->ifm_status = IFM_AVALID;
9749 ifmr->ifm_active = IFM_ETHER;
9750
9751 /* Check PCS */
9752 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9753 if ((reg & PCS_LSTS_LINKOK) == 0) {
9754 ifmr->ifm_active |= IFM_NONE;
9755 sc->sc_tbi_linkup = 0;
9756 goto setled;
9757 }
9758
9759 sc->sc_tbi_linkup = 1;
9760 ifmr->ifm_status |= IFM_ACTIVE;
9761 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
9762 if ((reg & PCS_LSTS_FDX) != 0)
9763 ifmr->ifm_active |= IFM_FDX;
9764 else
9765 ifmr->ifm_active |= IFM_HDX;
9766 mii->mii_media_active &= ~IFM_ETH_FMASK;
9767 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9768 /* Check flow */
9769 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9770 if ((reg & PCS_LSTS_AN_COMP) == 0) {
9771 DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
9772 goto setled;
9773 }
9774 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9775 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9776 DPRINTF(WM_DEBUG_LINK,
9777 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
9778 if ((pcs_adv & TXCW_SYM_PAUSE)
9779 && (pcs_lpab & TXCW_SYM_PAUSE)) {
9780 mii->mii_media_active |= IFM_FLOW
9781 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9782 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9783 && (pcs_adv & TXCW_ASYM_PAUSE)
9784 && (pcs_lpab & TXCW_SYM_PAUSE)
9785 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9786 mii->mii_media_active |= IFM_FLOW
9787 | IFM_ETH_TXPAUSE;
9788 } else if ((pcs_adv & TXCW_SYM_PAUSE)
9789 && (pcs_adv & TXCW_ASYM_PAUSE)
9790 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9791 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9792 mii->mii_media_active |= IFM_FLOW
9793 | IFM_ETH_RXPAUSE;
9794 } else {
9795 }
9796 }
9797 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9798 | (mii->mii_media_active & IFM_ETH_FMASK);
9799 setled:
9800 wm_tbi_serdes_set_linkled(sc);
9801 }
9802
9803 /*
9804 * wm_serdes_tick:
9805 *
9806 * Check the link on serdes devices.
9807 */
9808 static void
9809 wm_serdes_tick(struct wm_softc *sc)
9810 {
9811 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9812 struct mii_data *mii = &sc->sc_mii;
9813 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9814 uint32_t reg;
9815
9816 KASSERT(WM_CORE_LOCKED(sc));
9817
9818 mii->mii_media_status = IFM_AVALID;
9819 mii->mii_media_active = IFM_ETHER;
9820
9821 /* Check PCS */
9822 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9823 if ((reg & PCS_LSTS_LINKOK) != 0) {
9824 mii->mii_media_status |= IFM_ACTIVE;
9825 sc->sc_tbi_linkup = 1;
9826 sc->sc_tbi_serdes_ticks = 0;
9827 mii->mii_media_active |= IFM_1000_SX; /* XXX */
9828 if ((reg & PCS_LSTS_FDX) != 0)
9829 mii->mii_media_active |= IFM_FDX;
9830 else
9831 mii->mii_media_active |= IFM_HDX;
9832 } else {
9833 mii->mii_media_status |= IFM_NONE;
9834 sc->sc_tbi_linkup = 0;
9835 /* If the timer expired, retry autonegotiation */
9836 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9837 && (++sc->sc_tbi_serdes_ticks
9838 >= sc->sc_tbi_serdes_anegticks)) {
9839 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9840 sc->sc_tbi_serdes_ticks = 0;
9841 /* XXX */
9842 wm_serdes_mediachange(ifp);
9843 }
9844 }
9845
9846 wm_tbi_serdes_set_linkled(sc);
9847 }
9848
9849 /* SFP related */
9850
9851 static int
9852 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
9853 {
9854 uint32_t i2ccmd;
9855 int i;
9856
9857 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
9858 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9859
9860 /* Poll the ready bit */
9861 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9862 delay(50);
9863 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9864 if (i2ccmd & I2CCMD_READY)
9865 break;
9866 }
9867 if ((i2ccmd & I2CCMD_READY) == 0)
9868 return -1;
9869 if ((i2ccmd & I2CCMD_ERROR) != 0)
9870 return -1;
9871
9872 *data = i2ccmd & 0x00ff;
9873
9874 return 0;
9875 }
9876
9877 static uint32_t
9878 wm_sfp_get_media_type(struct wm_softc *sc)
9879 {
9880 uint32_t ctrl_ext;
9881 uint8_t val = 0;
9882 int timeout = 3;
9883 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
9884 int rv = -1;
9885
9886 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9887 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
9888 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
9889 CSR_WRITE_FLUSH(sc);
9890
9891 /* Read SFP module data */
9892 while (timeout) {
9893 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
9894 if (rv == 0)
9895 break;
9896 delay(100*1000); /* XXX too big */
9897 timeout--;
9898 }
9899 if (rv != 0)
9900 goto out;
9901 switch (val) {
9902 case SFF_SFP_ID_SFF:
9903 aprint_normal_dev(sc->sc_dev,
9904 "Module/Connector soldered to board\n");
9905 break;
9906 case SFF_SFP_ID_SFP:
9907 aprint_normal_dev(sc->sc_dev, "SFP\n");
9908 break;
9909 case SFF_SFP_ID_UNKNOWN:
9910 goto out;
9911 default:
9912 break;
9913 }
9914
9915 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
9916 if (rv != 0) {
9917 goto out;
9918 }
9919
9920 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
9921 mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
		sc->sc_flags |= WM_F_SGMII;
		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
9926 sc->sc_flags |= WM_F_SGMII;
9927 mediatype = WM_MEDIATYPE_SERDES;
9928 }
9929
9930 out:
9931 /* Restore I2C interface setting */
9932 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9933
9934 return mediatype;
9935 }
9936 /*
9937 * NVM related.
9938 * Microwire, SPI (w/wo EERD) and Flash.
9939 */
9940
9941 /* Both spi and uwire */
9942
9943 /*
9944 * wm_eeprom_sendbits:
9945 *
9946 * Send a series of bits to the EEPROM.
9947 */
9948 static void
9949 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
9950 {
9951 uint32_t reg;
9952 int x;
9953
9954 reg = CSR_READ(sc, WMREG_EECD);
9955
9956 for (x = nbits; x > 0; x--) {
9957 if (bits & (1U << (x - 1)))
9958 reg |= EECD_DI;
9959 else
9960 reg &= ~EECD_DI;
9961 CSR_WRITE(sc, WMREG_EECD, reg);
9962 CSR_WRITE_FLUSH(sc);
9963 delay(2);
9964 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9965 CSR_WRITE_FLUSH(sc);
9966 delay(2);
9967 CSR_WRITE(sc, WMREG_EECD, reg);
9968 CSR_WRITE_FLUSH(sc);
9969 delay(2);
9970 }
9971 }
9972
9973 /*
9974 * wm_eeprom_recvbits:
9975 *
9976 * Receive a series of bits from the EEPROM.
9977 */
9978 static void
9979 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
9980 {
9981 uint32_t reg, val;
9982 int x;
9983
9984 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
9985
9986 val = 0;
9987 for (x = nbits; x > 0; x--) {
9988 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9989 CSR_WRITE_FLUSH(sc);
9990 delay(2);
9991 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
9992 val |= (1U << (x - 1));
9993 CSR_WRITE(sc, WMREG_EECD, reg);
9994 CSR_WRITE_FLUSH(sc);
9995 delay(2);
9996 }
9997 *valp = val;
9998 }
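
/*
 * For illustration, a single Microwire READ built from the two helpers
 * above looks like this (a sketch; wm_nvm_read_uwire() below is the
 * real user and additionally handles chip select):
 *
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
 *	wm_eeprom_sendbits(sc, word, sc->sc_nvm_addrbits);
 *	wm_eeprom_recvbits(sc, &val, 16);
 */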
9999
10000 /* Microwire */
10001
10002 /*
10003 * wm_nvm_read_uwire:
10004 *
10005 * Read a word from the EEPROM using the MicroWire protocol.
10006 */
10007 static int
10008 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10009 {
10010 uint32_t reg, val;
10011 int i;
10012
10013 for (i = 0; i < wordcnt; i++) {
10014 /* Clear SK and DI. */
10015 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
10016 CSR_WRITE(sc, WMREG_EECD, reg);
10017
		/*
		 * XXX: workaround for a bug in qemu-0.12.x and prior,
		 * and in Xen.
		 *
		 * We apply this workaround only to the 82540 because
		 * qemu's e1000 emulation acts as an 82540.
		 */
10025 if (sc->sc_type == WM_T_82540) {
10026 reg |= EECD_SK;
10027 CSR_WRITE(sc, WMREG_EECD, reg);
10028 reg &= ~EECD_SK;
10029 CSR_WRITE(sc, WMREG_EECD, reg);
10030 CSR_WRITE_FLUSH(sc);
10031 delay(2);
10032 }
10033 /* XXX: end of workaround */
10034
10035 /* Set CHIP SELECT. */
10036 reg |= EECD_CS;
10037 CSR_WRITE(sc, WMREG_EECD, reg);
10038 CSR_WRITE_FLUSH(sc);
10039 delay(2);
10040
10041 /* Shift in the READ command. */
10042 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
10043
10044 /* Shift in address. */
10045 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
10046
10047 /* Shift out the data. */
10048 wm_eeprom_recvbits(sc, &val, 16);
10049 data[i] = val & 0xffff;
10050
10051 /* Clear CHIP SELECT. */
10052 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
10053 CSR_WRITE(sc, WMREG_EECD, reg);
10054 CSR_WRITE_FLUSH(sc);
10055 delay(2);
10056 }
10057
10058 return 0;
10059 }
10060
10061 /* SPI */
10062
10063 /*
10064 * Set SPI and FLASH related information from the EECD register.
10065 * For 82541 and 82547, the word size is taken from EEPROM.
10066 */
10067 static int
10068 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
10069 {
10070 int size;
10071 uint32_t reg;
10072 uint16_t data;
10073
10074 reg = CSR_READ(sc, WMREG_EECD);
10075 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
10076
10077 /* Read the size of NVM from EECD by default */
10078 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
10079 switch (sc->sc_type) {
10080 case WM_T_82541:
10081 case WM_T_82541_2:
10082 case WM_T_82547:
10083 case WM_T_82547_2:
10084 /* Set dummy value to access EEPROM */
10085 sc->sc_nvm_wordsize = 64;
10086 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
10087 reg = data;
10088 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
10089 if (size == 0)
10090 size = 6; /* 64 word size */
10091 else
10092 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
10093 break;
10094 case WM_T_80003:
10095 case WM_T_82571:
10096 case WM_T_82572:
10097 case WM_T_82573: /* SPI case */
10098 case WM_T_82574: /* SPI case */
10099 case WM_T_82583: /* SPI case */
10100 size += NVM_WORD_SIZE_BASE_SHIFT;
10101 if (size > 14)
10102 size = 14;
10103 break;
10104 case WM_T_82575:
10105 case WM_T_82576:
10106 case WM_T_82580:
10107 case WM_T_I350:
10108 case WM_T_I354:
10109 case WM_T_I210:
10110 case WM_T_I211:
10111 size += NVM_WORD_SIZE_BASE_SHIFT;
10112 if (size > 15)
10113 size = 15;
10114 break;
10115 default:
10116 aprint_error_dev(sc->sc_dev,
10117 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
10118 return -1;
10120 }
10121
10122 sc->sc_nvm_wordsize = 1 << size;
10123
10124 return 0;
10125 }
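
/*
 * Worked example (a sketch): with EECD_EE_SIZE_EX reading 4 on an
 * 82571, the code above computes size = 4 + NVM_WORD_SIZE_BASE_SHIFT
 * (assumed to be 6, as in Intel's shared code), so sc_nvm_wordsize
 * becomes 1 << 10 = 1024 words.
 */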
10126
10127 /*
10128 * wm_nvm_ready_spi:
10129 *
10130 * Wait for a SPI EEPROM to be ready for commands.
10131 */
10132 static int
10133 wm_nvm_ready_spi(struct wm_softc *sc)
10134 {
10135 uint32_t val;
10136 int usec;
10137
10138 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
10139 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
10140 wm_eeprom_recvbits(sc, &val, 8);
10141 if ((val & SPI_SR_RDY) == 0)
10142 break;
10143 }
10144 if (usec >= SPI_MAX_RETRIES) {
10145 aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
10146 return 1;
10147 }
10148 return 0;
10149 }
10150
10151 /*
10152 * wm_nvm_read_spi:
10153 *
 * Read a word from the EEPROM using the SPI protocol.
10155 */
10156 static int
10157 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10158 {
10159 uint32_t reg, val;
10160 int i;
10161 uint8_t opc;
10162
10163 /* Clear SK and CS. */
10164 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
10165 CSR_WRITE(sc, WMREG_EECD, reg);
10166 CSR_WRITE_FLUSH(sc);
10167 delay(2);
10168
10169 if (wm_nvm_ready_spi(sc))
10170 return 1;
10171
10172 /* Toggle CS to flush commands. */
10173 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
10174 CSR_WRITE_FLUSH(sc);
10175 delay(2);
10176 CSR_WRITE(sc, WMREG_EECD, reg);
10177 CSR_WRITE_FLUSH(sc);
10178 delay(2);
10179
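	/*
	 * Parts with 8-bit addressing need the A8 opcode bit as a ninth
	 * address bit to reach byte offsets above 255 (words >= 128).
	 */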
10180 opc = SPI_OPC_READ;
10181 if (sc->sc_nvm_addrbits == 8 && word >= 128)
10182 opc |= SPI_OPC_A8;
10183
10184 wm_eeprom_sendbits(sc, opc, 8);
10185 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
10186
10187 for (i = 0; i < wordcnt; i++) {
10188 wm_eeprom_recvbits(sc, &val, 16);
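		/* The low byte of each word is clocked out first; swap. */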
10189 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
10190 }
10191
10192 /* Raise CS and clear SK. */
10193 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
10194 CSR_WRITE(sc, WMREG_EECD, reg);
10195 CSR_WRITE_FLUSH(sc);
10196 delay(2);
10197
10198 return 0;
10199 }
10200
10201 /* Using with EERD */
10202
10203 static int
10204 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
10205 {
10206 uint32_t attempts = 100000;
10207 uint32_t i, reg = 0;
10208 int32_t done = -1;
10209
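	/* 'rw' selects WMREG_EERD or WMREG_EEWR; both use the same DONE bit */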
10210 for (i = 0; i < attempts; i++) {
10211 reg = CSR_READ(sc, rw);
10212
10213 if (reg & EERD_DONE) {
10214 done = 0;
10215 break;
10216 }
10217 delay(5);
10218 }
10219
10220 return done;
10221 }
10222
10223 static int
10224 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
10225 uint16_t *data)
10226 {
10227 int i, eerd = 0;
10228 int error = 0;
10229
10230 for (i = 0; i < wordcnt; i++) {
10231 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
10232
10233 CSR_WRITE(sc, WMREG_EERD, eerd);
10234 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
10235 if (error != 0)
10236 break;
10237
10238 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
10239 }
10240
10241 return error;
10242 }
10243
10244 /* Flash */
10245
10246 static int
10247 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
10248 {
10249 uint32_t eecd;
10250 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
10251 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
10252 uint8_t sig_byte = 0;
10253
10254 switch (sc->sc_type) {
10255 case WM_T_PCH_SPT:
10256 /*
10257 * In SPT, read from the CTRL_EXT reg instead of accessing the
10258 * sector valid bits from the NVM.
10259 */
10260 *bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
10261 if ((*bank == 0) || (*bank == 1)) {
10262 aprint_error_dev(sc->sc_dev,
10263 "%s: no valid NVM bank present\n",
10264 __func__);
10265 return -1;
10266 } else {
10267 *bank = *bank - 2;
10268 return 0;
10269 }
10270 case WM_T_ICH8:
10271 case WM_T_ICH9:
10272 eecd = CSR_READ(sc, WMREG_EECD);
10273 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
10274 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
10275 return 0;
10276 }
10277 /* FALLTHROUGH */
10278 default:
10279 /* Default to 0 */
10280 *bank = 0;
10281
10282 /* Check bank 0 */
10283 wm_read_ich8_byte(sc, act_offset, &sig_byte);
10284 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10285 *bank = 0;
10286 return 0;
10287 }
10288
10289 /* Check bank 1 */
10290 wm_read_ich8_byte(sc, act_offset + bank1_offset,
10291 &sig_byte);
10292 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10293 *bank = 1;
10294 return 0;
10295 }
10296 }
10297
10298 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
10299 device_xname(sc->sc_dev)));
10300 return -1;
10301 }
10302
10303 /******************************************************************************
10304 * This function does initial flash setup so that a new read/write/erase cycle
10305 * can be started.
10306 *
10307 * sc - The pointer to the hw structure
10308 ****************************************************************************/
10309 static int32_t
10310 wm_ich8_cycle_init(struct wm_softc *sc)
10311 {
10312 uint16_t hsfsts;
10313 int32_t error = 1;
10314 int32_t i = 0;
10315
10316 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10317
	/* Check the Flash Descriptor Valid bit in HW status */
10319 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10320 return error;
10321 }
10322
	/* Clear FCERR and DAEL in HW status by writing 1s (write-1-clear) */
10325 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10326
10327 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10328
	/*
	 * Either the hardware should provide a cycle-in-progress bit to
	 * check before starting a new cycle, or the FDONE bit should read
	 * as 1 after a hardware reset so that it can be used to tell
	 * whether a cycle is in progress or has completed.  We should
	 * also have a software semaphore guarding FDONE and the
	 * in-progress bit so that two threads cannot start a cycle at
	 * the same time.
	 */
10339
10340 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10341 /*
10342 * There is no cycle running at present, so we can start a
10343 * cycle
10344 */
10345
10346 /* Begin by setting Flash Cycle Done. */
10347 hsfsts |= HSFSTS_DONE;
10348 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10349 error = 0;
10350 } else {
		/*
		 * Otherwise poll for a while so the current cycle has a
		 * chance to end before giving up.
		 */
10355 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10356 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10357 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10358 error = 0;
10359 break;
10360 }
10361 delay(1);
10362 }
10363 if (error == 0) {
			/*
			 * The previous cycle ended in time; now set the
			 * Flash Cycle Done bit.
			 */
10368 hsfsts |= HSFSTS_DONE;
10369 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10370 }
10371 }
10372 return error;
10373 }
10374
10375 /******************************************************************************
10376 * This function starts a flash cycle and waits for its completion
10377 *
10378 * sc - The pointer to the hw structure
10379 ****************************************************************************/
10380 static int32_t
10381 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10382 {
10383 uint16_t hsflctl;
10384 uint16_t hsfsts;
10385 int32_t error = 1;
10386 uint32_t i = 0;
10387
10388 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10389 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10390 hsflctl |= HSFCTL_GO;
10391 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10392
10393 /* Wait till FDONE bit is set to 1 */
10394 do {
10395 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10396 if (hsfsts & HSFSTS_DONE)
10397 break;
10398 delay(1);
10399 i++;
10400 } while (i < timeout);
10401 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
10402 error = 0;
10403
10404 return error;
10405 }
10406
10407 /******************************************************************************
10408 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
10409 *
10410 * sc - The pointer to the hw structure
10411 * index - The index of the byte or word to read.
10412 * size - Size of data to read, 1=byte 2=word, 4=dword
10413 * data - Pointer to the word to store the value read.
10414 *****************************************************************************/
10415 static int32_t
10416 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10417 uint32_t size, uint32_t *data)
10418 {
10419 uint16_t hsfsts;
10420 uint16_t hsflctl;
10421 uint32_t flash_linear_address;
10422 uint32_t flash_data = 0;
10423 int32_t error = 1;
10424 int32_t count = 0;
10425
	if (size < 1 || size > 4 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
10428 return error;
10429
10430 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10431 sc->sc_ich8_flash_base;
10432
10433 do {
10434 delay(1);
10435 /* Steps */
10436 error = wm_ich8_cycle_init(sc);
10437 if (error)
10438 break;
10439
10440 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field holds size - 1: 0=1, 1=2, 3=4 bytes. */
10442 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10443 & HSFCTL_BCOUNT_MASK;
10444 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10445 if (sc->sc_type == WM_T_PCH_SPT) {
			/*
			 * In SPT, this register is in the LAN memory space,
			 * not flash.  Therefore, only 32-bit access is
			 * supported.
			 */
10450 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
10451 (uint32_t)hsflctl);
10452 } else
10453 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10454
10455 /*
10456 * Write the last 24 bits of index into Flash Linear address
10457 * field in Flash Address
10458 */
10459 /* TODO: TBD maybe check the index against the size of flash */
10460
10461 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10462
10463 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10464
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read the result from Flash
		 * Data0, least significant byte first.
		 */
10471 if (error == 0) {
10472 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10473 if (size == 1)
10474 *data = (uint8_t)(flash_data & 0x000000FF);
10475 else if (size == 2)
10476 *data = (uint16_t)(flash_data & 0x0000FFFF);
10477 else if (size == 4)
10478 *data = (uint32_t)flash_data;
10479 break;
10480 } else {
10481 /*
10482 * If we've gotten here, then things are probably
10483 * completely hosed, but if the error condition is
10484 * detected, it won't hurt to give it another try...
10485 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10486 */
10487 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10488 if (hsfsts & HSFSTS_ERR) {
10489 /* Repeat for some time before giving up. */
10490 continue;
10491 } else if ((hsfsts & HSFSTS_DONE) == 0)
10492 break;
10493 }
10494 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10495
10496 return error;
10497 }
10498
10499 /******************************************************************************
10500 * Reads a single byte from the NVM using the ICH8 flash access registers.
10501 *
10502 * sc - pointer to wm_hw structure
10503 * index - The index of the byte to read.
10504 * data - Pointer to a byte to store the value read.
10505 *****************************************************************************/
10506 static int32_t
10507 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
10508 {
10509 int32_t status;
10510 uint32_t word = 0;
10511
10512 status = wm_read_ich8_data(sc, index, 1, &word);
10513 if (status == 0)
10514 *data = (uint8_t)word;
10515 else
10516 *data = 0;
10517
10518 return status;
10519 }
10520
10521 /******************************************************************************
10522 * Reads a word from the NVM using the ICH8 flash access registers.
10523 *
10524 * sc - pointer to wm_hw structure
10525 * index - The starting byte index of the word to read.
10526 * data - Pointer to a word to store the value read.
10527 *****************************************************************************/
10528 static int32_t
10529 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10530 {
10531 int32_t status;
10532 uint32_t word = 0;
10533
10534 status = wm_read_ich8_data(sc, index, 2, &word);
10535 if (status == 0)
10536 *data = (uint16_t)word;
10537 else
10538 *data = 0;
10539
10540 return status;
10541 }
10542
10543 /******************************************************************************
10544 * Reads a dword from the NVM using the ICH8 flash access registers.
10545 *
10546 * sc - pointer to wm_hw structure
10547 * index - The starting byte index of the word to read.
10548 * data - Pointer to a word to store the value read.
10549 *****************************************************************************/
10550 static int32_t
10551 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
10552 {
10553 int32_t status;
10554
10555 status = wm_read_ich8_data(sc, index, 4, data);
10556 return status;
10557 }
10558
10559 /******************************************************************************
10560 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10561 * register.
10562 *
10563 * sc - Struct containing variables accessed by shared code
10564 * offset - offset of word in the EEPROM to read
10565 * data - word read from the EEPROM
10566 * words - number of words to read
10567 *****************************************************************************/
10568 static int
10569 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10570 {
10571 int32_t error = 0;
10572 uint32_t flash_bank = 0;
10573 uint32_t act_offset = 0;
10574 uint32_t bank_offset = 0;
10575 uint16_t word = 0;
10576 uint16_t i = 0;
10577
10578 /*
10579 * We need to know which is the valid flash bank. In the event
10580 * that we didn't allocate eeprom_shadow_ram, we may not be
10581 * managing flash_bank. So it cannot be trusted and needs
10582 * to be updated with each read.
10583 */
10584 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10585 if (error) {
10586 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10587 device_xname(sc->sc_dev)));
10588 flash_bank = 0;
10589 }
10590
	/*
	 * If we're on bank 1, adjust the offset by the bank size
	 * (in bytes, hence the "* 2").
	 */
10595 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10596
10597 error = wm_get_swfwhw_semaphore(sc);
10598 if (error) {
10599 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10600 __func__);
10601 return error;
10602 }
10603
10604 for (i = 0; i < words; i++) {
10605 /* The NVM part needs a byte offset, hence * 2 */
10606 act_offset = bank_offset + ((offset + i) * 2);
10607 error = wm_read_ich8_word(sc, act_offset, &word);
10608 if (error) {
10609 aprint_error_dev(sc->sc_dev,
10610 "%s: failed to read NVM\n", __func__);
10611 break;
10612 }
10613 data[i] = word;
10614 }
10615
10616 wm_put_swfwhw_semaphore(sc);
10617 return error;
10618 }
10619
10620 /******************************************************************************
10621 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
10622 * register.
10623 *
10624 * sc - Struct containing variables accessed by shared code
10625 * offset - offset of word in the EEPROM to read
10626 * data - word read from the EEPROM
10627 * words - number of words to read
10628 *****************************************************************************/
10629 static int
10630 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
10631 {
10632 int32_t error = 0;
10633 uint32_t flash_bank = 0;
10634 uint32_t act_offset = 0;
10635 uint32_t bank_offset = 0;
10636 uint32_t dword = 0;
10637 uint16_t i = 0;
10638
10639 /*
10640 * We need to know which is the valid flash bank. In the event
10641 * that we didn't allocate eeprom_shadow_ram, we may not be
10642 * managing flash_bank. So it cannot be trusted and needs
10643 * to be updated with each read.
10644 */
10645 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10646 if (error) {
10647 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10648 device_xname(sc->sc_dev)));
10649 flash_bank = 0;
10650 }
10651
	/*
	 * If we're on bank 1, adjust the offset by the bank size
	 * (in bytes, hence the "* 2").
	 */
10656 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10657
10658 error = wm_get_swfwhw_semaphore(sc);
10659 if (error) {
10660 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10661 __func__);
10662 return error;
10663 }
10664
10665 for (i = 0; i < words; i++) {
10666 /* The NVM part needs a byte offset, hence * 2 */
10667 act_offset = bank_offset + ((offset + i) * 2);
10668 /* but we must read dword aligned, so mask ... */
10669 error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
10670 if (error) {
10671 aprint_error_dev(sc->sc_dev,
10672 "%s: failed to read NVM\n", __func__);
10673 break;
10674 }
10675 /* ... and pick out low or high word */
10676 if ((act_offset & 0x2) == 0)
10677 data[i] = (uint16_t)(dword & 0xFFFF);
10678 else
10679 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
10680 }
10681
10682 wm_put_swfwhw_semaphore(sc);
10683 return error;
10684 }
10685
10686 /* iNVM */
10687
10688 static int
10689 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
10690 {
10691 int32_t rv = 0;
10692 uint32_t invm_dword;
10693 uint16_t i;
10694 uint8_t record_type, word_address;
10695
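	/*
	 * Scan the iNVM dword array for a WORD_AUTOLOAD record with a
	 * matching address, skipping over variable-length record types.
	 */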
10696 for (i = 0; i < INVM_SIZE; i++) {
10697 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
10698 /* Get record type */
10699 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
10700 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
10701 break;
10702 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
10703 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
10704 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
10705 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
10706 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
10707 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
10708 if (word_address == address) {
10709 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
10710 rv = 0;
10711 break;
10712 }
10713 }
10714 }
10715
10716 return rv;
10717 }
10718
10719 static int
10720 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
10721 {
10722 int rv = 0;
10723 int i;
10724
10725 for (i = 0; i < words; i++) {
10726 switch (offset + i) {
10727 case NVM_OFF_MACADDR:
10728 case NVM_OFF_MACADDR1:
10729 case NVM_OFF_MACADDR2:
10730 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
10731 if (rv != 0) {
10732 data[i] = 0xffff;
10733 rv = -1;
10734 }
10735 break;
10736 case NVM_OFF_CFG2:
10737 rv = wm_nvm_read_word_invm(sc, offset, data);
10738 if (rv != 0) {
10739 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
10740 rv = 0;
10741 }
10742 break;
10743 case NVM_OFF_CFG4:
10744 rv = wm_nvm_read_word_invm(sc, offset, data);
10745 if (rv != 0) {
10746 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
10747 rv = 0;
10748 }
10749 break;
10750 case NVM_OFF_LED_1_CFG:
10751 rv = wm_nvm_read_word_invm(sc, offset, data);
10752 if (rv != 0) {
10753 *data = NVM_LED_1_CFG_DEFAULT_I211;
10754 rv = 0;
10755 }
10756 break;
10757 case NVM_OFF_LED_0_2_CFG:
10758 rv = wm_nvm_read_word_invm(sc, offset, data);
10759 if (rv != 0) {
10760 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
10761 rv = 0;
10762 }
10763 break;
10764 case NVM_OFF_ID_LED_SETTINGS:
10765 rv = wm_nvm_read_word_invm(sc, offset, data);
10766 if (rv != 0) {
10767 *data = ID_LED_RESERVED_FFFF;
10768 rv = 0;
10769 }
10770 break;
10771 default:
10772 DPRINTF(WM_DEBUG_NVM,
10773 ("NVM word 0x%02x is not mapped.\n", offset));
10774 *data = NVM_RESERVED_WORD;
10775 break;
10776 }
10777 }
10778
10779 return rv;
10780 }
10781
10782 /* Lock, detecting NVM type, validate checksum, version and read */
10783
10784 /*
10785 * wm_nvm_acquire:
10786 *
10787 * Perform the EEPROM handshake required on some chips.
10788 */
10789 static int
10790 wm_nvm_acquire(struct wm_softc *sc)
10791 {
10792 uint32_t reg;
10793 int x;
10794 int ret = 0;
10795
	/* Flash-type NVM needs no handshake; always succeed. */
10797 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10798 return 0;
10799
10800 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
10801 ret = wm_get_swfwhw_semaphore(sc);
10802 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
10803 /* This will also do wm_get_swsm_semaphore() if needed */
10804 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
10805 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
10806 ret = wm_get_swsm_semaphore(sc);
10807 }
10808
10809 if (ret) {
10810 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10811 __func__);
10812 return 1;
10813 }
10814
10815 if (sc->sc_flags & WM_F_LOCK_EECD) {
10816 reg = CSR_READ(sc, WMREG_EECD);
10817
10818 /* Request EEPROM access. */
10819 reg |= EECD_EE_REQ;
10820 CSR_WRITE(sc, WMREG_EECD, reg);
10821
10822 /* ..and wait for it to be granted. */
10823 for (x = 0; x < 1000; x++) {
10824 reg = CSR_READ(sc, WMREG_EECD);
10825 if (reg & EECD_EE_GNT)
10826 break;
10827 delay(5);
10828 }
10829 if ((reg & EECD_EE_GNT) == 0) {
10830 aprint_error_dev(sc->sc_dev,
10831 "could not acquire EEPROM GNT\n");
10832 reg &= ~EECD_EE_REQ;
10833 CSR_WRITE(sc, WMREG_EECD, reg);
10834 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10835 wm_put_swfwhw_semaphore(sc);
10836 if (sc->sc_flags & WM_F_LOCK_SWFW)
10837 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10838 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10839 wm_put_swsm_semaphore(sc);
10840 return 1;
10841 }
10842 }
10843
10844 return 0;
10845 }
10846
10847 /*
10848 * wm_nvm_release:
10849 *
10850 * Release the EEPROM mutex.
10851 */
10852 static void
10853 wm_nvm_release(struct wm_softc *sc)
10854 {
10855 uint32_t reg;
10856
	/* Flash-type NVM needed no handshake; nothing to release. */
10858 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10859 return;
10860
10861 if (sc->sc_flags & WM_F_LOCK_EECD) {
10862 reg = CSR_READ(sc, WMREG_EECD);
10863 reg &= ~EECD_EE_REQ;
10864 CSR_WRITE(sc, WMREG_EECD, reg);
10865 }
10866
10867 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10868 wm_put_swfwhw_semaphore(sc);
10869 if (sc->sc_flags & WM_F_LOCK_SWFW)
10870 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10871 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10872 wm_put_swsm_semaphore(sc);
10873 }
10874
10875 static int
10876 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
10877 {
10878 uint32_t eecd = 0;
10879
10880 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
10881 || sc->sc_type == WM_T_82583) {
10882 eecd = CSR_READ(sc, WMREG_EECD);
10883
10884 /* Isolate bits 15 & 16 */
10885 eecd = ((eecd >> 15) & 0x03);
10886
10887 /* If both bits are set, device is Flash type */
10888 if (eecd == 0x03)
10889 return 0;
10890 }
10891 return 1;
10892 }
10893
10894 static int
10895 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
10896 {
10897 uint32_t eec;
10898
10899 eec = CSR_READ(sc, WMREG_EEC);
10900 if ((eec & EEC_FLASH_DETECTED) != 0)
10901 return 1;
10902
10903 return 0;
10904 }
10905
/*
 * wm_nvm_validate_checksum:
 *
 * The checksum is defined as the sum of the first 64 (16-bit) words,
 * which should equal NVM_CHECKSUM.
 */
10911 static int
10912 wm_nvm_validate_checksum(struct wm_softc *sc)
10913 {
10914 uint16_t checksum;
10915 uint16_t eeprom_data;
10916 #ifdef WM_DEBUG
10917 uint16_t csum_wordaddr, valid_checksum;
10918 #endif
10919 int i;
10920
10921 checksum = 0;
10922
10923 /* Don't check for I211 */
10924 if (sc->sc_type == WM_T_I211)
10925 return 0;
10926
10927 #ifdef WM_DEBUG
10928 if (sc->sc_type == WM_T_PCH_LPT) {
10929 csum_wordaddr = NVM_OFF_COMPAT;
10930 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
10931 } else {
10932 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
10933 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
10934 }
10935
10936 /* Dump EEPROM image for debug */
10937 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10938 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10939 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
10940 /* XXX PCH_SPT? */
10941 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
10942 if ((eeprom_data & valid_checksum) == 0) {
10943 DPRINTF(WM_DEBUG_NVM,
10944 ("%s: NVM need to be updated (%04x != %04x)\n",
10945 device_xname(sc->sc_dev), eeprom_data,
10946 valid_checksum));
10947 }
10948 }
10949
10950 if ((wm_debug & WM_DEBUG_NVM) != 0) {
10951 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
10952 for (i = 0; i < NVM_SIZE; i++) {
10953 if (wm_nvm_read(sc, i, 1, &eeprom_data))
10954 printf("XXXX ");
10955 else
10956 printf("%04hx ", eeprom_data);
10957 if (i % 8 == 7)
10958 printf("\n");
10959 }
10960 }
10961
10962 #endif /* WM_DEBUG */
10963
10964 for (i = 0; i < NVM_SIZE; i++) {
10965 if (wm_nvm_read(sc, i, 1, &eeprom_data))
10966 return 1;
10967 checksum += eeprom_data;
10968 }
10969
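	/* Note: a mismatch is only reported (under WM_DEBUG); it's not fatal */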
10970 if (checksum != (uint16_t) NVM_CHECKSUM) {
10971 #ifdef WM_DEBUG
10972 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
10973 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
10974 #endif
10975 }
10976
10977 return 0;
10978 }
10979
10980 static void
10981 wm_nvm_version_invm(struct wm_softc *sc)
10982 {
10983 uint32_t dword;
10984
	/*
	 * Linux's code to decode the version is very strange, so we
	 * don't follow that algorithm and instead decode word 61 as the
	 * documentation describes.  This may not be perfect, though...
	 *
	 * Example:
	 *
	 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
	 */
10994 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
10995 dword = __SHIFTOUT(dword, INVM_VER_1);
10996 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
10997 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
10998 }
10999
11000 static void
11001 wm_nvm_version(struct wm_softc *sc)
11002 {
11003 uint16_t major, minor, build, patch;
11004 uint16_t uid0, uid1;
11005 uint16_t nvm_data;
11006 uint16_t off;
11007 bool check_version = false;
11008 bool check_optionrom = false;
11009 bool have_build = false;
11010
11011 /*
11012 * Version format:
11013 *
11014 * XYYZ
11015 * X0YZ
11016 * X0YY
11017 *
11018 * Example:
11019 *
11020 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
11021 * 82571 0x50a6 5.10.6?
11022 * 82572 0x506a 5.6.10?
11023 * 82572EI 0x5069 5.6.9?
11024 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
11025 * 0x2013 2.1.3?
	 * 82583	0x10a0	1.10.0? (document says it's the default value)
11027 */
11028 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
11029 switch (sc->sc_type) {
11030 case WM_T_82571:
11031 case WM_T_82572:
11032 case WM_T_82574:
11033 case WM_T_82583:
11034 check_version = true;
11035 check_optionrom = true;
11036 have_build = true;
11037 break;
11038 case WM_T_82575:
11039 case WM_T_82576:
11040 case WM_T_82580:
11041 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
11042 check_version = true;
11043 break;
11044 case WM_T_I211:
11045 wm_nvm_version_invm(sc);
11046 goto printver;
11047 case WM_T_I210:
11048 if (!wm_nvm_get_flash_presence_i210(sc)) {
11049 wm_nvm_version_invm(sc);
11050 goto printver;
11051 }
11052 /* FALLTHROUGH */
11053 case WM_T_I350:
11054 case WM_T_I354:
11055 check_version = true;
11056 check_optionrom = true;
11057 break;
11058 default:
11059 return;
11060 }
11061 if (check_version) {
11062 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
11063 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
11064 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
11065 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
11066 build = nvm_data & NVM_BUILD_MASK;
11067 have_build = true;
11068 } else
11069 minor = nvm_data & 0x00ff;
11070
		/* Convert the BCD-style minor to decimal, e.g. 0x22 -> 22 */
11072 minor = (minor / 16) * 10 + (minor % 16);
11073 sc->sc_nvm_ver_major = major;
11074 sc->sc_nvm_ver_minor = minor;
11075
11076 printver:
11077 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
11078 sc->sc_nvm_ver_minor);
11079 if (have_build) {
11080 sc->sc_nvm_ver_build = build;
11081 aprint_verbose(".%d", build);
11082 }
11083 }
11084 if (check_optionrom) {
11085 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
11086 /* Option ROM Version */
11087 if ((off != 0x0000) && (off != 0xffff)) {
11088 off += NVM_COMBO_VER_OFF;
11089 wm_nvm_read(sc, off + 1, 1, &uid1);
11090 wm_nvm_read(sc, off, 1, &uid0);
11091 if ((uid0 != 0) && (uid0 != 0xffff)
11092 && (uid1 != 0) && (uid1 != 0xffff)) {
11093 /* 16bits */
11094 major = uid0 >> 8;
11095 build = (uid0 << 8) | (uid1 >> 8);
11096 patch = uid1 & 0x00ff;
11097 aprint_verbose(", option ROM Version %d.%d.%d",
11098 major, build, patch);
11099 }
11100 }
11101 }
11102
11103 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
11104 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
11105 }
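
/*
 * Worked example (a sketch, assuming the 4/8/4 major/minor/build split
 * of the NVM_MAJOR/MINOR/BUILD masks above): NVM word 0x50a2 on an
 * 82571 yields major 5, minor 0x0a -> 10 and build 2, i.e. version
 * 5.10.2 as in the table above.
 */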
11106
11107 /*
11108 * wm_nvm_read:
11109 *
11110 * Read data from the serial EEPROM.
11111 */
11112 static int
11113 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11114 {
11115 int rv;
11116
11117 if (sc->sc_flags & WM_F_EEPROM_INVALID)
11118 return 1;
11119
11120 if (wm_nvm_acquire(sc))
11121 return 1;
11122
11123 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11124 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11125 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
11126 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
11127 else if (sc->sc_type == WM_T_PCH_SPT)
11128 rv = wm_nvm_read_spt(sc, word, wordcnt, data);
11129 else if (sc->sc_flags & WM_F_EEPROM_INVM)
11130 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
11131 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
11132 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
11133 else if (sc->sc_flags & WM_F_EEPROM_SPI)
11134 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
11135 else
11136 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
11137
11138 wm_nvm_release(sc);
11139 return rv;
11140 }
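
/*
 * Example usage (a sketch): all NVM consumers in this driver go
 * through wm_nvm_read(), which hides the backend in use, e.g.:
 *
 *	uint16_t myea[3];
 *
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) != 0)
 *		... handle the error ...
 */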
11141
11142 /*
11143 * Hardware semaphores.
 * Very complex...
11145 */
11146
11147 static int
11148 wm_get_swsm_semaphore(struct wm_softc *sc)
11149 {
11150 int32_t timeout;
11151 uint32_t swsm;
11152
11153 if (sc->sc_flags & WM_F_LOCK_SWSM) {
11154 /* Get the SW semaphore. */
11155 timeout = sc->sc_nvm_wordsize + 1;
11156 while (timeout) {
11157 swsm = CSR_READ(sc, WMREG_SWSM);
11158
11159 if ((swsm & SWSM_SMBI) == 0)
11160 break;
11161
11162 delay(50);
11163 timeout--;
11164 }
11165
11166 if (timeout == 0) {
11167 aprint_error_dev(sc->sc_dev,
11168 "could not acquire SWSM SMBI\n");
11169 return 1;
11170 }
11171 }
11172
11173 /* Get the FW semaphore. */
11174 timeout = sc->sc_nvm_wordsize + 1;
11175 while (timeout) {
11176 swsm = CSR_READ(sc, WMREG_SWSM);
11177 swsm |= SWSM_SWESMBI;
11178 CSR_WRITE(sc, WMREG_SWSM, swsm);
11179 /* If we managed to set the bit we got the semaphore. */
11180 swsm = CSR_READ(sc, WMREG_SWSM);
11181 if (swsm & SWSM_SWESMBI)
11182 break;
11183
11184 delay(50);
11185 timeout--;
11186 }
11187
11188 if (timeout == 0) {
11189 aprint_error_dev(sc->sc_dev,
11190 "could not acquire SWSM SWESMBI\n");
11191 /* Release semaphores */
11192 wm_put_swsm_semaphore(sc);
11193 return 1;
11194 }
11195 return 0;
11196 }
11197
11198 static void
11199 wm_put_swsm_semaphore(struct wm_softc *sc)
11200 {
11201 uint32_t swsm;
11202
11203 swsm = CSR_READ(sc, WMREG_SWSM);
11204 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
11205 CSR_WRITE(sc, WMREG_SWSM, swsm);
11206 }
11207
11208 static int
11209 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11210 {
11211 uint32_t swfw_sync;
11212 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
11213 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
11214 int timeout = 200;
11215
11216 for (timeout = 0; timeout < 200; timeout++) {
11217 if (sc->sc_flags & WM_F_LOCK_SWSM) {
11218 if (wm_get_swsm_semaphore(sc)) {
11219 aprint_error_dev(sc->sc_dev,
11220 "%s: failed to get semaphore\n",
11221 __func__);
11222 return 1;
11223 }
11224 }
11225 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11226 if ((swfw_sync & (swmask | fwmask)) == 0) {
11227 swfw_sync |= swmask;
11228 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11229 if (sc->sc_flags & WM_F_LOCK_SWSM)
11230 wm_put_swsm_semaphore(sc);
11231 return 0;
11232 }
11233 if (sc->sc_flags & WM_F_LOCK_SWSM)
11234 wm_put_swsm_semaphore(sc);
11235 delay(5000);
11236 }
11237 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
11238 device_xname(sc->sc_dev), mask, swfw_sync);
11239 return 1;
11240 }
11241
11242 static void
11243 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11244 {
11245 uint32_t swfw_sync;
11246
11247 if (sc->sc_flags & WM_F_LOCK_SWSM) {
11248 while (wm_get_swsm_semaphore(sc) != 0)
11249 continue;
11250 }
11251 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11252 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
11253 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11254 if (sc->sc_flags & WM_F_LOCK_SWSM)
11255 wm_put_swsm_semaphore(sc);
11256 }
11257
11258 static int
11259 wm_get_swfwhw_semaphore(struct wm_softc *sc)
11260 {
11261 uint32_t ext_ctrl;
11262 int timeout = 200;
11263
11264 for (timeout = 0; timeout < 200; timeout++) {
11265 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11266 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
11267 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11268
11269 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11270 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
11271 return 0;
11272 delay(5000);
11273 }
11274 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
11275 device_xname(sc->sc_dev), ext_ctrl);
11276 return 1;
11277 }
11278
11279 static void
11280 wm_put_swfwhw_semaphore(struct wm_softc *sc)
11281 {
11282 uint32_t ext_ctrl;
11283
11284 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11285 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11286 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11287 }
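
/*
 * The get/put pairs above are used in the usual bracketed pattern
 * (a sketch):
 *
 *	if (wm_get_swfwhw_semaphore(sc))
 *		return error;
 *	... access the shared resource ...
 *	wm_put_swfwhw_semaphore(sc);
 */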
11288
11289 static int
11290 wm_get_hw_semaphore_82573(struct wm_softc *sc)
11291 {
11292 int i = 0;
11293 uint32_t reg;
11294
11295 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11296 do {
11297 CSR_WRITE(sc, WMREG_EXTCNFCTR,
11298 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
11299 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11300 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
11301 break;
11302 delay(2*1000);
11303 i++;
11304 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
11305
11306 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
11307 wm_put_hw_semaphore_82573(sc);
11308 log(LOG_ERR, "%s: Driver can't access the PHY\n",
11309 device_xname(sc->sc_dev));
11310 return -1;
11311 }
11312
11313 return 0;
11314 }
11315
11316 static void
11317 wm_put_hw_semaphore_82573(struct wm_softc *sc)
11318 {
11319 uint32_t reg;
11320
11321 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11322 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11323 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11324 }
11325
11326 /*
11327 * Management mode and power management related subroutines.
11328 * BMC, AMT, suspend/resume and EEE.
11329 */
11330
11331 #ifdef WM_WOL
11332 static int
11333 wm_check_mng_mode(struct wm_softc *sc)
11334 {
11335 int rv;
11336
11337 switch (sc->sc_type) {
11338 case WM_T_ICH8:
11339 case WM_T_ICH9:
11340 case WM_T_ICH10:
11341 case WM_T_PCH:
11342 case WM_T_PCH2:
11343 case WM_T_PCH_LPT:
11344 case WM_T_PCH_SPT:
11345 rv = wm_check_mng_mode_ich8lan(sc);
11346 break;
11347 case WM_T_82574:
11348 case WM_T_82583:
11349 rv = wm_check_mng_mode_82574(sc);
11350 break;
11351 case WM_T_82571:
11352 case WM_T_82572:
11353 case WM_T_82573:
11354 case WM_T_80003:
11355 rv = wm_check_mng_mode_generic(sc);
11356 break;
11357 default:
		/* nothing to do */
11359 rv = 0;
11360 break;
11361 }
11362
11363 return rv;
11364 }
11365
11366 static int
11367 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
11368 {
11369 uint32_t fwsm;
11370
11371 fwsm = CSR_READ(sc, WMREG_FWSM);
11372
11373 if (((fwsm & FWSM_FW_VALID) != 0)
11374 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11375 return 1;
11376
11377 return 0;
11378 }
11379
11380 static int
11381 wm_check_mng_mode_82574(struct wm_softc *sc)
11382 {
11383 uint16_t data;
11384
11385 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11386
11387 if ((data & NVM_CFG2_MNGM_MASK) != 0)
11388 return 1;
11389
11390 return 0;
11391 }
11392
11393 static int
11394 wm_check_mng_mode_generic(struct wm_softc *sc)
11395 {
11396 uint32_t fwsm;
11397
11398 fwsm = CSR_READ(sc, WMREG_FWSM);
11399
11400 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
11401 return 1;
11402
11403 return 0;
11404 }
11405 #endif /* WM_WOL */
11406
11407 static int
11408 wm_enable_mng_pass_thru(struct wm_softc *sc)
11409 {
11410 uint32_t manc, fwsm, factps;
11411
11412 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
11413 return 0;
11414
11415 manc = CSR_READ(sc, WMREG_MANC);
11416
11417 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
11418 device_xname(sc->sc_dev), manc));
11419 if ((manc & MANC_RECV_TCO_EN) == 0)
11420 return 0;
11421
11422 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11423 fwsm = CSR_READ(sc, WMREG_FWSM);
11424 factps = CSR_READ(sc, WMREG_FACTPS);
11425 if (((factps & FACTPS_MNGCG) == 0)
11426 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11427 return 1;
11428 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11429 uint16_t data;
11430
11431 factps = CSR_READ(sc, WMREG_FACTPS);
11432 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11433 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11434 device_xname(sc->sc_dev), factps, data));
11435 if (((factps & FACTPS_MNGCG) == 0)
11436 && ((data & NVM_CFG2_MNGM_MASK)
11437 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11438 return 1;
11439 } else if (((manc & MANC_SMBUS_EN) != 0)
11440 && ((manc & MANC_ASF_EN) == 0))
11441 return 1;
11442
11443 return 0;
11444 }
11445
11446 static bool
11447 wm_phy_resetisblocked(struct wm_softc *sc)
11448 {
11449 bool blocked = false;
11450 uint32_t reg;
11451 int i = 0;
11452
11453 switch (sc->sc_type) {
11454 case WM_T_ICH8:
11455 case WM_T_ICH9:
11456 case WM_T_ICH10:
11457 case WM_T_PCH:
11458 case WM_T_PCH2:
11459 case WM_T_PCH_LPT:
11460 case WM_T_PCH_SPT:
11461 do {
11462 reg = CSR_READ(sc, WMREG_FWSM);
11463 if ((reg & FWSM_RSPCIPHY) == 0) {
11464 blocked = true;
11465 delay(10*1000);
11466 continue;
11467 }
11468 blocked = false;
11469 } while (blocked && (i++ < 10));
11470 return blocked;
11472 case WM_T_82571:
11473 case WM_T_82572:
11474 case WM_T_82573:
11475 case WM_T_82574:
11476 case WM_T_82583:
11477 case WM_T_80003:
11478 reg = CSR_READ(sc, WMREG_MANC);
11479 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
11480 return true;
11481 else
11482 return false;
11484 default:
11485 /* no problem */
11486 break;
11487 }
11488
11489 return false;
11490 }
11491
11492 static void
11493 wm_get_hw_control(struct wm_softc *sc)
11494 {
11495 uint32_t reg;
11496
11497 switch (sc->sc_type) {
11498 case WM_T_82573:
11499 reg = CSR_READ(sc, WMREG_SWSM);
11500 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
11501 break;
11502 case WM_T_82571:
11503 case WM_T_82572:
11504 case WM_T_82574:
11505 case WM_T_82583:
11506 case WM_T_80003:
11507 case WM_T_ICH8:
11508 case WM_T_ICH9:
11509 case WM_T_ICH10:
11510 case WM_T_PCH:
11511 case WM_T_PCH2:
11512 case WM_T_PCH_LPT:
11513 case WM_T_PCH_SPT:
11514 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11515 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
11516 break;
11517 default:
11518 break;
11519 }
11520 }
11521
11522 static void
11523 wm_release_hw_control(struct wm_softc *sc)
11524 {
11525 uint32_t reg;
11526
11527 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
11528 return;
11529
11530 if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
11534 } else {
11535 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11536 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
11537 }
11538 }
11539
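/*
 * wm_gate_hw_phy_config_ich8lan:
 *
 *	On PCH2 (82579) and newer, gate or ungate the automatic PHY
 *	configuration done by hardware so that it does not race with
 *	driver-initiated PHY setup.
 */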
11540 static void
11541 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
11542 {
11543 uint32_t reg;
11544
11545 if (sc->sc_type < WM_T_PCH2)
11546 return;
11547
11548 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11549
11550 if (gate)
11551 reg |= EXTCNFCTR_GATE_PHY_CFG;
11552 else
11553 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
11554
11555 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11556 }
11557
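/*
 * wm_smbustopci:
 *
 *	Switch the PHY interconnect from SMBus (used by the firmware
 *	while the host is down) back to PCIe by toggling the LANPHYPC
 *	value pin; on PCH_LPT and newer, SMBus mode is forced around the
 *	toggle as the hardware requires.
 */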
11558 static void
11559 wm_smbustopci(struct wm_softc *sc)
11560 {
11561 uint32_t fwsm, reg;
11562
11563 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
11564 wm_gate_hw_phy_config_ich8lan(sc, true);
11565
11566 /* Acquire semaphore */
11567 wm_get_swfwhw_semaphore(sc);
11568
11569 fwsm = CSR_READ(sc, WMREG_FWSM);
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && (wm_phy_resetisblocked(sc) == false)) {
11572 if (sc->sc_type >= WM_T_PCH_LPT) {
11573 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11574 reg |= CTRL_EXT_FORCE_SMBUS;
11575 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11576 CSR_WRITE_FLUSH(sc);
11577 delay(50*1000);
11578 }
11579
11580 /* Toggle LANPHYPC */
11581 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
11582 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
11583 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11584 CSR_WRITE_FLUSH(sc);
11585 delay(10);
11586 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
11587 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11588 CSR_WRITE_FLUSH(sc);
11589 delay(50*1000);
11590
11591 if (sc->sc_type >= WM_T_PCH_LPT) {
11592 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11593 reg &= ~CTRL_EXT_FORCE_SMBUS;
11594 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11595 }
11596 }
11597
11598 /* Release semaphore */
11599 wm_put_swfwhw_semaphore(sc);
11600
11601 /*
11602 * Ungate automatic PHY configuration by hardware on non-managed 82579
11603 */
11604 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
11605 wm_gate_hw_phy_config_ich8lan(sc, false);
11606 }
11607
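/*
 * wm_init_manageability:
 *
 *	While the interface is up, let the host see management packets
 *	(ports 623/624) and keep the firmware from intercepting ARP;
 *	wm_release_manageability() below restores the firmware defaults.
 */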
11608 static void
11609 wm_init_manageability(struct wm_softc *sc)
11610 {
11611
11612 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11613 device_xname(sc->sc_dev), __func__));
11614 if (sc->sc_flags & WM_F_HAS_MANAGE) {
11615 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
11616 uint32_t manc = CSR_READ(sc, WMREG_MANC);
11617
11618 /* Disable hardware interception of ARP */
11619 manc &= ~MANC_ARP_EN;
11620
11621 /* Enable receiving management packets to the host */
11622 if (sc->sc_type >= WM_T_82571) {
11623 manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
11625 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
11626 }
11627
11628 CSR_WRITE(sc, WMREG_MANC, manc);
11629 }
11630 }
11631
11632 static void
11633 wm_release_manageability(struct wm_softc *sc)
11634 {
11635
11636 if (sc->sc_flags & WM_F_HAS_MANAGE) {
11637 uint32_t manc = CSR_READ(sc, WMREG_MANC);
11638
11639 manc |= MANC_ARP_EN;
11640 if (sc->sc_type >= WM_T_82571)
11641 manc &= ~MANC_EN_MNG2HOST;
11642
11643 CSR_WRITE(sc, WMREG_MANC, manc);
11644 }
11645 }
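
/*
 * A minimal usage sketch (not compiled in): on the way down, a driver
 * would typically undo manageability first and then hand the hardware
 * back to the firmware.  The wm_example_handback() function below is
 * hypothetical and only illustrates the ordering; the real driver
 * sequences these calls from its detach/suspend paths.
 */
#if 0
static void
wm_example_handback(struct wm_softc *sc)
{

	wm_release_manageability(sc);	/* restore firmware's MANC defaults */
	wm_release_hw_control(sc);	/* drop the DRV_LOAD handshake bit */
}
#endif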
11646
11647 static void
11648 wm_get_wakeup(struct wm_softc *sc)
11649 {
11650
	/* Step 0: set HAS_AMT, ARC_SUBSYS_VALID and ASF_FIRMWARE_PRES */
11652 switch (sc->sc_type) {
11653 case WM_T_82573:
11654 case WM_T_82583:
11655 sc->sc_flags |= WM_F_HAS_AMT;
11656 /* FALLTHROUGH */
11657 case WM_T_80003:
11658 case WM_T_82541:
11659 case WM_T_82547:
11660 case WM_T_82571:
11661 case WM_T_82572:
11662 case WM_T_82574:
11663 case WM_T_82575:
11664 case WM_T_82576:
11665 case WM_T_82580:
11666 case WM_T_I350:
11667 case WM_T_I354:
11668 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
11669 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
11670 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11671 break;
11672 case WM_T_ICH8:
11673 case WM_T_ICH9:
11674 case WM_T_ICH10:
11675 case WM_T_PCH:
11676 case WM_T_PCH2:
11677 case WM_T_PCH_LPT:
11678 case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
11679 sc->sc_flags |= WM_F_HAS_AMT;
11680 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11681 break;
11682 default:
11683 break;
11684 }
11685
	/* Step 1: set HAS_MANAGE */
11687 if (wm_enable_mng_pass_thru(sc) != 0)
11688 sc->sc_flags |= WM_F_HAS_MANAGE;
11689
11690 #ifdef WM_DEBUG
11691 printf("\n");
11692 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
11693 printf("HAS_AMT,");
11694 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
11695 printf("ARC_SUBSYS_VALID,");
11696 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
11697 printf("ASF_FIRMWARE_PRES,");
11698 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
11699 printf("HAS_MANAGE,");
11700 printf("\n");
11701 #endif
	/*
	 * Note that the WOL flag is set only later, after the EEPROM/NVM
	 * handling has been set up.
	 */
11706 }
11707
11708 #ifdef WM_WOL
/*
 * WOL in the newer chipset interfaces (pchlan).
 *
 * XXX Not implemented yet: the outline below still has to be filled in
 * by mirroring the MAC's receive address (RAR) and multicast (MTA)
 * tables into the PHY's wakeup register space and then arming wakeup in
 * both the MAC and PHY registers.
 */
11710 static void
11711 wm_enable_phy_wakeup(struct wm_softc *sc)
11712 {
11713 #if 0
11714 uint16_t preg;
11715
11716 /* Copy MAC RARs to PHY RARs */
11717
11718 /* Copy MAC MTA to PHY MTA */
11719
11720 /* Configure PHY Rx Control register */
11721
11722 /* Enable PHY wakeup in MAC register */
11723
11724 /* Configure and enable PHY wakeup in PHY registers */
11725
11726 /* Activate PHY wakeup */
11727
11728 /* XXX */
11729 #endif
11730 }
11731
/*
 * Power-down workaround on D3: force the IGP3 PHY's voltage regulator
 * into shutdown mode, retrying once with an extra PHY reset if the
 * first attempt does not stick.
 */
11733 static void
11734 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
11735 {
11736 uint32_t reg;
11737 int i;
11738
11739 for (i = 0; i < 2; i++) {
11740 /* Disable link */
11741 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11742 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11743 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11744
11745 /*
11746 * Call gig speed drop workaround on Gig disable before
11747 * accessing any PHY registers
11748 */
11749 if (sc->sc_type == WM_T_ICH8)
11750 wm_gig_downshift_workaround_ich8lan(sc);
11751
11752 /* Write VR power-down enable */
11753 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11754 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11755 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
11756 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
11757
11758 /* Read it back and test */
11759 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11760 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11761 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
11762 break;
11763
11764 /* Issue PHY reset and repeat at most one more time */
11765 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11766 }
11767 }
11768
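/*
 * wm_enable_wakeup:
 *
 *	Arm the device for wake-on-LAN: advertise the wakeup capability,
 *	apply the ICH/PCH GbE-disable workarounds, enable the magic
 *	packet filter and finally set PME_EN in the PCI power management
 *	registers.
 */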
11769 static void
11770 wm_enable_wakeup(struct wm_softc *sc)
11771 {
11772 uint32_t reg, pmreg;
11773 pcireg_t pmode;
11774
11775 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
11776 &pmreg, NULL) == 0)
11777 return;
11778
11779 /* Advertise the wakeup capability */
11780 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
11781 | CTRL_SWDPIN(3));
11782 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
11783
11784 /* ICH workaround */
11785 switch (sc->sc_type) {
11786 case WM_T_ICH8:
11787 case WM_T_ICH9:
11788 case WM_T_ICH10:
11789 case WM_T_PCH:
11790 case WM_T_PCH2:
11791 case WM_T_PCH_LPT:
11792 case WM_T_PCH_SPT:
11793 /* Disable gig during WOL */
11794 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11795 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
11796 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11797 if (sc->sc_type == WM_T_PCH)
11798 wm_gmii_reset(sc);
11799
11800 /* Power down workaround */
11801 if (sc->sc_phytype == WMPHY_82577) {
11802 struct mii_softc *child;
11803
11804 /* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if ((child != NULL) && (child->mii_mpd_rev <= 2))
11807 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
11808 (768 << 5) | 25, 0x0444); /* magic num */
11809 }
11810 break;
11811 default:
11812 break;
11813 }
11814
11815 /* Keep the laser running on fiber adapters */
11816 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
11817 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
11818 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11819 reg |= CTRL_EXT_SWDPIN(3);
11820 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11821 }
11822
11823 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
11824 #if 0 /* for the multicast packet */
11825 reg |= WUFC_MC;
11826 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
11827 #endif
11828
11829 if (sc->sc_type == WM_T_PCH) {
11830 wm_enable_phy_wakeup(sc);
11831 } else {
11832 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
11833 CSR_WRITE(sc, WMREG_WUFC, reg);
11834 }
11835
11836 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11837 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11838 || (sc->sc_type == WM_T_PCH2))
11839 && (sc->sc_phytype == WMPHY_IGP_3))
11840 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
11841
11842 /* Request PME */
11843 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
11844 #if 0
11845 /* Disable WOL */
11846 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
11847 #else
11848 /* For WOL */
11849 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
11850 #endif
11851 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
11852 }
11853 #endif /* WM_WOL */
11854
/* LPLU (Low Power Link Up) */
11856
11857 static void
11858 wm_lplu_d0_disable(struct wm_softc *sc)
11859 {
11860 uint32_t reg;
11861
11862 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11863 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
11864 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11865 }
11866
11867 static void
11868 wm_lplu_d0_disable_pch(struct wm_softc *sc)
11869 {
11870 uint32_t reg;
11871
11872 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
11873 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
11874 reg |= HV_OEM_BITS_ANEGNOW;
11875 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
11876 }
11877
/* EEE (Energy Efficient Ethernet, IEEE 802.3az) */
11879
11880 static void
11881 wm_set_eee_i350(struct wm_softc *sc)
11882 {
11883 uint32_t ipcnfg, eeer;
11884
11885 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
11886 eeer = CSR_READ(sc, WMREG_EEER);
11887
11888 if ((sc->sc_flags & WM_F_EEE) != 0) {
11889 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11890 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
11891 | EEER_LPI_FC);
11892 } else {
11893 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11894 ipcnfg &= ~IPCNFG_10BASE_TE;
11895 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
11896 | EEER_LPI_FC);
11897 }
11898
11899 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
11900 CSR_WRITE(sc, WMREG_EEER, eeer);
11901 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
11902 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
11903 }
11904
/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds live in the PHY drivers themselves; the routines
 * below are the MAC-side pieces.
 */
11909
/*
 * Work-around for the 82566 Kumeran PCS lock loss.
 *
 * XXX Currently compiled out: once enabled, the code below polls
 * IGP3_KMRN_DIAG after a 1Gbps link comes up, resets the PHY up to ten
 * times on lock loss and finally falls back to disabling GbE
 * negotiation.
 */
11911 static void
11912 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
11913 {
11914 #if 0
11915 int miistatus, active, i;
11916 int reg;
11917
11918 miistatus = sc->sc_mii.mii_media_status;
11919
11920 /* If the link is not up, do nothing */
11921 if ((miistatus & IFM_ACTIVE) == 0)
11922 return;
11923
11924 active = sc->sc_mii.mii_media_active;
11925
11926 /* Nothing to do if the link is other than 1Gbps */
11927 if (IFM_SUBTYPE(active) != IFM_1000_T)
11928 return;
11929
11930 for (i = 0; i < 10; i++) {
11931 /* read twice */
11932 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11933 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11934 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
11935 goto out; /* GOOD! */
11936
11937 /* Reset the PHY */
11938 wm_gmii_reset(sc);
11939 delay(5*1000);
11940 }
11941
11942 /* Disable GigE link negotiation */
11943 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11944 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11945 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11946
11947 /*
11948 * Call gig speed drop workaround on Gig disable before accessing
11949 * any PHY registers.
11950 */
11951 wm_gig_downshift_workaround_ich8lan(sc);
11952
11953 out:
11954 return;
11955 #endif
11956 }
11957
/*
 * Workaround for the "WOL from S5 stops working" issue: briefly toggle
 * Kumeran near-end loopback (IGP3 PHYs only).
 */
11959 static void
11960 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
11961 {
11962 uint16_t kmrn_reg;
11963
11964 /* Only for igp3 */
11965 if (sc->sc_phytype == WMPHY_IGP_3) {
11966 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
11967 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
11968 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11969 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
11970 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11971 }
11972 }
11973
/*
 * Workarounds for the PCH (82577/82578) PHYs.
 * XXX Should these be moved into a dedicated PHY driver?
 */
11978 static void
11979 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
11980 {
11981 if (sc->sc_phytype == WMPHY_82577)
11982 wm_set_mdio_slow_mode_hv(sc);
11983
	/* XXX TODO: (PCH rev. 2) && (82577 && (phy rev. 2 or 3)) */

	/* XXX TODO: (82577 && (phy rev. 1 or 2)) || (82578 && (phy rev. 1)) */
11987
11988 /* 82578 */
11989 if (sc->sc_phytype == WMPHY_82578) {
11990 /* PCH rev. < 3 */
11991 if (sc->sc_rev < 3) {
11992 /* XXX 6 bit shift? Why? Is it page2? */
11993 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
11994 0x66c0);
11995 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
11996 0xffff);
11997 }
11998
11999 /* XXX phy rev. < 2 */
12000 }
12001
12002 /* Select page 0 */
12003
12004 /* XXX acquire semaphore */
12005 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
12006 /* XXX release semaphore */
12007
	/*
	 * Configure the K1 Si workaround during PHY reset, assuming link is
	 * up, so that K1 is disabled while the link runs at 1Gbps.
	 */
12012 wm_k1_gig_workaround_hv(sc, 1);
12013 }
12014
12015 static void
12016 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
12017 {
12018
12019 wm_set_mdio_slow_mode_hv(sc);
12020 }
12021
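/*
 * wm_k1_gig_workaround_hv:
 *
 *	Link stall fix: K1 must be disabled while the link is up at
 *	1Gbps; otherwise the NVM default (sc_nvm_k1_enabled) applies.
 */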
12022 static void
12023 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
12024 {
12025 int k1_enable = sc->sc_nvm_k1_enabled;
12026
12027 /* XXX acquire semaphore */
12028
12029 if (link) {
12030 k1_enable = 0;
12031
12032 /* Link stall fix for link up */
12033 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
12034 } else {
12035 /* Link stall fix for link down */
12036 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
12037 }
12038
12039 wm_configure_k1_ich8lan(sc, k1_enable);
12040
12041 /* XXX release semaphore */
12042 }
12043
12044 static void
12045 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
12046 {
12047 uint32_t reg;
12048
12049 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
12050 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
12051 reg | HV_KMRN_MDIO_SLOW);
12052 }
12053
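/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable the K1 power state in the Kumeran interface's
 *	K1 config register, then briefly force the MAC speed with
 *	CTRL_FRCSPD/CTRL_EXT_SPD_BYPS so the change takes effect.
 */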
12054 static void
12055 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
12056 {
12057 uint32_t ctrl, ctrl_ext, tmp;
12058 uint16_t kmrn_reg;
12059
12060 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
12061
12062 if (k1_enable)
12063 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
12064 else
12065 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
12066
12067 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
12068
12069 delay(20);
12070
12071 ctrl = CSR_READ(sc, WMREG_CTRL);
12072 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12073
12074 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
12075 tmp |= CTRL_FRCSPD;
12076
12077 CSR_WRITE(sc, WMREG_CTRL, tmp);
12078 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
12079 CSR_WRITE_FLUSH(sc);
12080 delay(20);
12081
12082 CSR_WRITE(sc, WMREG_CTRL, ctrl);
12083 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12084 CSR_WRITE_FLUSH(sc);
12085 delay(20);
12086 }
12087
/* Special case: the 82575 needs a manual post-reset init script. */
12089 static void
12090 wm_reset_init_script_82575(struct wm_softc *sc)
12091 {
	/*
	 * Remark: this is untested code - we have no board without an
	 * EEPROM.  It is the same setup as mentioned in the FreeBSD driver
	 * for the i82575.
	 */
12096
12097 /* SerDes configuration via SERDESCTRL */
12098 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
12099 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
12100 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
12101 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
12102
12103 /* CCM configuration via CCMCTL register */
12104 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
12105 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
12106
12107 /* PCIe lanes configuration */
12108 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
12109 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
12110 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
12111 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
12112
12113 /* PCIe PLL Configuration */
12114 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
12115 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
12116 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
12117 }
12118
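/*
 * wm_reset_mdicnfg_82580:
 *
 *	After reset on an SGMII-enabled 82580, restore the MDICNFG
 *	destination and shared-MDIO bits from the per-port CFG3 word in
 *	the NVM.
 */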
12119 static void
12120 wm_reset_mdicnfg_82580(struct wm_softc *sc)
12121 {
12122 uint32_t reg;
12123 uint16_t nvmword;
12124 int rv;
12125
12126 if ((sc->sc_flags & WM_F_SGMII) == 0)
12127 return;
12128
12129 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
12130 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
12131 if (rv != 0) {
12132 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
12133 __func__);
12134 return;
12135 }
12136
12137 reg = CSR_READ(sc, WMREG_MDICNFG);
12138 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
12139 reg |= MDICNFG_DEST;
12140 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
12141 reg |= MDICNFG_COM_MDIO;
12142 CSR_WRITE(sc, WMREG_MDICNFG, reg);
12143 }
12144
/*
 * I210 Errata 25 and I211 Errata 10: Slow System Clock.
 *
 * When booted with a slow system clock, the internal PHY's PLL may fail
 * to lock.  Work around it by resetting the PHY and power-cycling the
 * device through D3hot until the PLL reports a configured state.
 */
12149 static void
12150 wm_pll_workaround_i210(struct wm_softc *sc)
12151 {
12152 uint32_t mdicnfg, wuc;
12153 uint32_t reg;
12154 pcireg_t pcireg;
12155 uint32_t pmreg;
12156 uint16_t nvmword, tmp_nvmword;
12157 int phyval;
12158 bool wa_done = false;
12159 int i;
12160
12161 /* Save WUC and MDICNFG registers */
12162 wuc = CSR_READ(sc, WMREG_WUC);
12163 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
12164
12165 reg = mdicnfg & ~MDICNFG_DEST;
12166 CSR_WRITE(sc, WMREG_MDICNFG, reg);
12167
12168 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
12169 nvmword = INVM_DEFAULT_AL;
12170 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
12171
12172 /* Get Power Management cap offset */
12173 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
12174 &pmreg, NULL) == 0)
12175 return;
12176 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
12177 phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
12178 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
12179
		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF)
			break;	/* PLL is configured; no workaround needed */
12183
12184 wa_done = true;
12185 /* Directly reset the internal PHY */
12186 reg = CSR_READ(sc, WMREG_CTRL);
12187 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
12188
12189 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12190 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
12191 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12192
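		/*
		 * Load the modified autoload word and bounce the device
		 * through D3hot so the autoload is retried with the PLL
		 * workaround value.
		 */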
12193 CSR_WRITE(sc, WMREG_WUC, 0);
12194 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
12195 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
12196
12197 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
12198 pmreg + PCI_PMCSR);
12199 pcireg |= PCI_PMCSR_STATE_D3;
12200 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
12201 pmreg + PCI_PMCSR, pcireg);
12202 delay(1000);
12203 pcireg &= ~PCI_PMCSR_STATE_D3;
12204 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
12205 pmreg + PCI_PMCSR, pcireg);
12206
12207 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
12208 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
12209
12210 /* Restore WUC register */
12211 CSR_WRITE(sc, WMREG_WUC, wuc);
12212 }
12213
12214 /* Restore MDICNFG setting */
12215 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
12216 if (wa_done)
12217 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
12218 }
12219