/* $NetBSD: if_wm.c,v 1.413 2016/06/14 09:07:22 skrll Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *      - Check XXX'ed comments
 *      - Disable D0 LPLU on 8257[12356], 82580 and I350.
 *      - TX Multi queue improvement (refine queue selection logic)
 *      - Advanced Receive Descriptor
 *      - EEE (Energy Efficient Ethernet)
 *      - Virtual Function
 *      - Set LED correctly (based on contents in EEPROM)
 *      - Rework how parameters are loaded from the EEPROM.
 *      - Image Unique ID
 *      - restructure evcnt
 */
86
87 #include <sys/cdefs.h>
88 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.413 2016/06/14 09:07:22 skrll Exp $");
89
90 #ifdef _KERNEL_OPT
91 #include "opt_net_mpsafe.h"
92 #endif
93
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/callout.h>
97 #include <sys/mbuf.h>
98 #include <sys/malloc.h>
99 #include <sys/kmem.h>
100 #include <sys/kernel.h>
101 #include <sys/socket.h>
102 #include <sys/ioctl.h>
103 #include <sys/errno.h>
104 #include <sys/device.h>
105 #include <sys/queue.h>
106 #include <sys/syslog.h>
107 #include <sys/interrupt.h>
108 #include <sys/cpu.h>
109 #include <sys/pcq.h>
110
111 #include <sys/rndsource.h>
112
113 #include <net/if.h>
114 #include <net/if_dl.h>
115 #include <net/if_media.h>
116 #include <net/if_ether.h>
117
118 #include <net/bpf.h>
119
120 #include <netinet/in.h> /* XXX for struct ip */
121 #include <netinet/in_systm.h> /* XXX for struct ip */
122 #include <netinet/ip.h> /* XXX for struct ip */
123 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */
124 #include <netinet/tcp.h> /* XXX for struct tcphdr */
125
126 #include <sys/bus.h>
127 #include <sys/intr.h>
128 #include <machine/endian.h>
129
130 #include <dev/mii/mii.h>
131 #include <dev/mii/miivar.h>
132 #include <dev/mii/miidevs.h>
133 #include <dev/mii/mii_bitbang.h>
134 #include <dev/mii/ikphyreg.h>
135 #include <dev/mii/igphyreg.h>
136 #include <dev/mii/igphyvar.h>
137 #include <dev/mii/inbmphyreg.h>
138
139 #include <dev/pci/pcireg.h>
140 #include <dev/pci/pcivar.h>
141 #include <dev/pci/pcidevs.h>
142
143 #include <dev/pci/if_wmreg.h>
144 #include <dev/pci/if_wmvar.h>
145
146 #ifdef WM_DEBUG
147 #define WM_DEBUG_LINK 0x01
148 #define WM_DEBUG_TX 0x02
149 #define WM_DEBUG_RX 0x04
150 #define WM_DEBUG_GMII 0x08
151 #define WM_DEBUG_MANAGE 0x10
152 #define WM_DEBUG_NVM 0x20
153 #define WM_DEBUG_INIT 0x40
154 int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
155 | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT;
156
157 #define DPRINTF(x, y) if (wm_debug & (x)) printf y
158 #else
159 #define DPRINTF(x, y) /* nothing */
160 #endif /* WM_DEBUG */
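
/*
 * Usage note (added; illustrative, not from the original source): the
 * second DPRINTF() argument carries its own parentheses so the macro can
 * pass a whole printf() argument list through, e.g.
 *
 *      DPRINTF(WM_DEBUG_LINK, ("%s: LINK: status changed\n",
 *          device_xname(sc->sc_dev)));
 *
 * Without WM_DEBUG the call expands to nothing, so the arguments are
 * never evaluated.
 */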

#ifdef NET_MPSAFE
#define WM_MPSAFE 1
#endif

/*
 * The maximum number of interrupts this device driver can use.
 */
#define WM_MAX_NQUEUEINTR 16
#define WM_MAX_NINTR (WM_MAX_NQUEUEINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define WM_NTXSEGS              256
#define WM_IFQUEUELEN           256
#define WM_TXQUEUELEN_MAX       64
#define WM_TXQUEUELEN_MAX_82547 16
#define WM_TXQUEUELEN(txq)      ((txq)->txq_num)
#define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1)
#define WM_TXQUEUE_GC(txq)      (WM_TXQUEUELEN(txq) / 8)
#define WM_NTXDESC_82542        256
#define WM_NTXDESC_82544        4096
#define WM_NTXDESC(txq)         ((txq)->txq_ndesc)
#define WM_NTXDESC_MASK(txq)    (WM_NTXDESC(txq) - 1)
#define WM_TXDESCS_SIZE(txq)    (WM_NTXDESC(txq) * (txq)->txq_descsize)
#define WM_NEXTTX(txq, x)       (((x) + 1) & WM_NTXDESC_MASK(txq))
#define WM_NEXTTXS(txq, x)      (((x) + 1) & WM_TXQUEUELEN_MASK(txq))
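
/*
 * Illustration (added for clarity): since the ring sizes are powers of
 * two, the "& MASK" in WM_NEXTTX()/WM_NEXTTXS() replaces a modulo when
 * advancing an index.  With 4096 descriptors, WM_NEXTTX(txq, 4095)
 * computes (4095 + 1) & 4095 == 0, wrapping back to the ring start
 * without a division.
 */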

#define WM_MAXTXDMA             (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define WM_TXINTERQSIZE         256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC              256
#define WM_NRXDESC_MASK         (WM_NRXDESC - 1)
#define WM_NEXTRX(x)            (((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)            (((x) - 1) & WM_NRXDESC_MASK)
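
/*
 * Worked example (added for clarity; the frame size is illustrative): a
 * 9018-byte jumbo frame spread over 2048-byte (MCLBYTES) buffers needs
 * ceil(9018 / 2048) = 5 Rx descriptors, which is where the "5 Rx
 * buffers" figure above comes from; 256 descriptors / 5 per frame is
 * what leaves "room for 50 jumbo packets".
 */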

typedef union txdescs {
        wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
        nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define WM_CDTXOFF(txq, x)      ((txq)->txq_descsize * (x))
#define WM_CDRXOFF(x)           (sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
        struct mbuf *txs_mbuf;          /* head of our mbuf chain */
        bus_dmamap_t txs_dmamap;        /* our DMA map */
        int txs_firstdesc;              /* first descriptor in packet */
        int txs_lastdesc;               /* last descriptor in packet */
        int txs_ndesc;                  /* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
        struct mbuf *rxs_mbuf;          /* head of our mbuf chain */
        bus_dmamap_t rxs_dmamap;        /* our DMA map */
};

#define WM_LINKUP_TIMEOUT 50

static uint16_t swfwphysem[] = {
        SWFW_PHY0_SM,
        SWFW_PHY1_SM,
        SWFW_PHY2_SM,
        SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
        36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

struct wm_txqueue {
        kmutex_t *txq_lock;             /* lock for tx operations */

        struct wm_softc *txq_sc;        /* shortcut (skip struct wm_queue) */

        /* Software state for the transmit descriptors. */
        int txq_num;                    /* must be a power of two */
        struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

        /* TX control data structures. */
        int txq_ndesc;                  /* must be a power of two */
        size_t txq_descsize;            /* size of a Tx descriptor */
        txdescs_t *txq_descs_u;
        bus_dmamap_t txq_desc_dmamap;   /* control data DMA map */
        bus_dma_segment_t txq_desc_seg; /* control data segment */
        int txq_desc_rseg;              /* real number of control segments */
#define txq_desc_dma    txq_desc_dmamap->dm_segs[0].ds_addr
#define txq_descs       txq_descs_u->sctxu_txdescs
#define txq_nq_descs    txq_descs_u->sctxu_nq_txdescs

        bus_addr_t txq_tdt_reg;         /* offset of TDT register */

        int txq_free;                   /* number of free Tx descriptors */
        int txq_next;                   /* next ready Tx descriptor */

        int txq_sfree;                  /* number of free Tx jobs */
        int txq_snext;                  /* next free Tx job */
        int txq_sdirty;                 /* dirty Tx jobs */

        /* These 4 variables are used only on the 82547. */
        int txq_fifo_size;              /* Tx FIFO size */
        int txq_fifo_head;              /* current head of FIFO */
        uint32_t txq_fifo_addr;         /* internal address of start of FIFO */
        int txq_fifo_stall;             /* Tx FIFO is stalled */

        /*
         * When ncpu > number of Tx queues, a Tx queue is shared by
         * multiple CPUs.  This queue mediates between them without
         * blocking.
         */
        pcq_t *txq_interq;

        /*
         * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
         * to manage the Tx H/W queue's busy flag.
         */
        int txq_flags;                  /* flags for H/W queue, see below */
#define WM_TXQ_NO_SPACE 0x1

        /* XXX which event counter is required? */
};

struct wm_rxqueue {
        kmutex_t *rxq_lock;             /* lock for rx operations */

        struct wm_softc *rxq_sc;        /* shortcut (skip struct wm_queue) */

        /* Software state for the receive descriptors. */
        wiseman_rxdesc_t *rxq_descs;

        /* RX control data structures. */
        struct wm_rxsoft rxq_soft[WM_NRXDESC];
        bus_dmamap_t rxq_desc_dmamap;   /* control data DMA map */
        bus_dma_segment_t rxq_desc_seg; /* control data segment */
        int rxq_desc_rseg;              /* real number of control segments */
        size_t rxq_desc_size;           /* control data size */
#define rxq_desc_dma    rxq_desc_dmamap->dm_segs[0].ds_addr

        bus_addr_t rxq_rdt_reg;         /* offset of RDT register */

        int rxq_ptr;                    /* next ready Rx desc/queue ent */
        int rxq_discard;
        int rxq_len;
        struct mbuf *rxq_head;
        struct mbuf *rxq_tail;
        struct mbuf **rxq_tailp;

        /* XXX which event counter is required? */
};

struct wm_queue {
        int wmq_id;                     /* index of transmit and receive queues */
        int wmq_intr_idx;               /* index of MSI-X tables */

        struct wm_txqueue wmq_txq;
        struct wm_rxqueue wmq_rxq;
};

/*
 * Software state per device.
 */
struct wm_softc {
        device_t sc_dev;                /* generic device information */
        bus_space_tag_t sc_st;          /* bus space tag */
        bus_space_handle_t sc_sh;       /* bus space handle */
        bus_size_t sc_ss;               /* bus space size */
        bus_space_tag_t sc_iot;         /* I/O space tag */
        bus_space_handle_t sc_ioh;      /* I/O space handle */
        bus_size_t sc_ios;              /* I/O space size */
        bus_space_tag_t sc_flasht;      /* flash registers space tag */
        bus_space_handle_t sc_flashh;   /* flash registers space handle */
        bus_size_t sc_flashs;           /* flash registers space size */
        off_t sc_flashreg_offset;       /*
                                         * offset to flash registers from
                                         * start of BAR
                                         */
        bus_dma_tag_t sc_dmat;          /* bus DMA tag */

        struct ethercom sc_ethercom;    /* ethernet common data */
        struct mii_data sc_mii;         /* MII/media information */

        pci_chipset_tag_t sc_pc;
        pcitag_t sc_pcitag;
        int sc_bus_speed;               /* PCI/PCIX bus speed */
        int sc_pcixe_capoff;            /* PCI[Xe] capability reg offset */

        uint16_t sc_pcidevid;           /* PCI device ID */
        wm_chip_type sc_type;           /* MAC type */
        int sc_rev;                     /* MAC revision */
        wm_phy_type sc_phytype;         /* PHY type */
        uint32_t sc_mediatype;          /* Media type (Copper, Fiber, SERDES)*/
#define WM_MEDIATYPE_UNKNOWN            0x00
#define WM_MEDIATYPE_FIBER              0x01
#define WM_MEDIATYPE_COPPER             0x02
#define WM_MEDIATYPE_SERDES             0x03 /* Internal SERDES */
        int sc_funcid;                  /* unit number of the chip (0 to 3) */
        int sc_flags;                   /* flags; see below */
        int sc_if_flags;                /* last if_flags */
        int sc_flowflags;               /* 802.3x flow control flags */
        int sc_align_tweak;

        void *sc_ihs[WM_MAX_NINTR];     /*
                                         * interrupt cookie.
                                         * legacy and msi use sc_ihs[0].
                                         */
        pci_intr_handle_t *sc_intrs;    /* legacy and msi use sc_intrs[0] */
        int sc_nintrs;                  /* number of interrupts */

        int sc_link_intr_idx;           /* index of MSI-X tables */

        callout_t sc_tick_ch;           /* tick callout */
        bool sc_stopping;

        int sc_nvm_ver_major;
        int sc_nvm_ver_minor;
        int sc_nvm_ver_build;
        int sc_nvm_addrbits;            /* NVM address bits */
        unsigned int sc_nvm_wordsize;   /* NVM word size */
        int sc_ich8_flash_base;
        int sc_ich8_flash_bank_size;
        int sc_nvm_k1_enabled;

        int sc_nqueues;
        struct wm_queue *sc_queue;

        int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
        /* Event counters. */
        struct evcnt sc_ev_txsstall;    /* Tx stalled due to no txs */
        struct evcnt sc_ev_txdstall;    /* Tx stalled due to no txd */
        struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
        struct evcnt sc_ev_txdw;        /* Tx descriptor interrupts */
        struct evcnt sc_ev_txqe;        /* Tx queue empty interrupts */
        struct evcnt sc_ev_rxintr;      /* Rx interrupts */
        struct evcnt sc_ev_linkintr;    /* Link interrupts */

        struct evcnt sc_ev_rxipsum;     /* IP checksums checked in-bound */
        struct evcnt sc_ev_rxtusum;     /* TCP/UDP cksums checked in-bound */
        struct evcnt sc_ev_txipsum;     /* IP checksums comp. out-bound */
        struct evcnt sc_ev_txtusum;     /* TCP/UDP cksums comp. out-bound */
        struct evcnt sc_ev_txtusum6;    /* TCP/UDP v6 cksums comp. out-bound */
        struct evcnt sc_ev_txtso;       /* TCP seg offload out-bound (IPv4) */
        struct evcnt sc_ev_txtso6;      /* TCP seg offload out-bound (IPv6) */
        struct evcnt sc_ev_txtsopain;   /* painful header manip. for TSO */

        struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
        struct evcnt sc_ev_txdrop;      /* Tx packets dropped (too many segs) */

        struct evcnt sc_ev_tu;          /* Tx underrun */

        struct evcnt sc_ev_tx_xoff;     /* Tx PAUSE (!0) frames */
        struct evcnt sc_ev_tx_xon;      /* Tx PAUSE (0) frames */
        struct evcnt sc_ev_rx_xoff;     /* Rx PAUSE (!0) frames */
        struct evcnt sc_ev_rx_xon;      /* Rx PAUSE (0) frames */
        struct evcnt sc_ev_rx_macctl;   /* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

        /* This variable is used only on the 82547. */
        callout_t sc_txfifo_ch;         /* Tx FIFO stall work-around timer */

        uint32_t sc_ctrl;               /* prototype CTRL register */
#if 0
        uint32_t sc_ctrl_ext;           /* prototype CTRL_EXT register */
#endif
        uint32_t sc_icr;                /* prototype interrupt bits */
        uint32_t sc_itr;                /* prototype intr throttling reg */
        uint32_t sc_tctl;               /* prototype TCTL register */
        uint32_t sc_rctl;               /* prototype RCTL register */
        uint32_t sc_txcw;               /* prototype TXCW register */
        uint32_t sc_tipg;               /* prototype TIPG register */
        uint32_t sc_fcrtl;              /* prototype FCRTL register */
        uint32_t sc_pba;                /* prototype PBA register */

        int sc_tbi_linkup;              /* TBI link status */
        int sc_tbi_serdes_anegticks;    /* autonegotiation ticks */
        int sc_tbi_serdes_ticks;        /* tbi ticks */

        int sc_mchash_type;             /* multicast filter offset */

        krndsource_t rnd_source;        /* random source */

        kmutex_t *sc_core_lock;         /* lock for softc operations */

        struct if_percpuq *sc_ipq;      /* softint-based input queues */
};

#define WM_CORE_LOCK(_sc)   if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc) if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc) (!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS 0
#endif

#define WM_RXCHAIN_RESET(rxq) \
do { \
        (rxq)->rxq_tailp = &(rxq)->rxq_head; \
        *(rxq)->rxq_tailp = NULL; \
        (rxq)->rxq_len = 0; \
} while (/*CONSTCOND*/0)

#define WM_RXCHAIN_LINK(rxq, m) \
do { \
        *(rxq)->rxq_tailp = (rxq)->rxq_tail = (m); \
        (rxq)->rxq_tailp = &(m)->m_next; \
} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
#define WM_EVCNT_INCR(ev)     (ev)->ev_count++
#define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val)
#else
#define WM_EVCNT_INCR(ev)     /* nothing */
#define WM_EVCNT_ADD(ev, val) /* nothing */
#endif

#define CSR_READ(sc, reg) \
        bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val) \
        bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define CSR_WRITE_FLUSH(sc) \
        (void) CSR_READ((sc), WMREG_STATUS)
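
/*
 * Note (added): CSR_WRITE_FLUSH() reads the STATUS register purely for
 * its side effect; the read cannot complete until any posted PCI(e)
 * writes ahead of it have reached the device, so it acts as a write
 * barrier after register updates that must take effect immediately.
 */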

#define ICH8_FLASH_READ32(sc, reg) \
        bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, \
            (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data) \
        bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, \
            (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg) \
        bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, \
            (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data) \
        bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, \
            (reg) + sc->sc_flashreg_offset, (data))

#define WM_CDTXADDR(txq, x)     ((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define WM_CDRXADDR(rxq, x)     ((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define WM_CDTXADDR_LO(txq, x)  (WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(txq, x) \
        (sizeof(bus_addr_t) == 8 ? \
            (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(rxq, x)  (WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(rxq, x) \
        (sizeof(bus_addr_t) == 8 ? \
            (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
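
/*
 * Example (added for clarity): these macros split a descriptor ring's
 * bus address into the two 32-bit halves the chip's base-address
 * register pairs expect.  For a ring at bus address 0x123456780,
 * WM_CDTXADDR_LO() yields 0x23456780 and WM_CDTXADDR_HI() yields 0x1;
 * with a 32-bit bus_addr_t the high half is always 0.
 */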

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int wm_match(device_t, cfdata_t, void *);
static void wm_attach(device_t, device_t, void *);
static int wm_detach(device_t, int);
static bool wm_suspend(device_t, const pmf_qual_t *);
static bool wm_resume(device_t, const pmf_qual_t *);
static void wm_watchdog(struct ifnet *);
static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void wm_tick(void *);
static int wm_ifflags_cb(struct ethercom *);
static int wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
static void wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void wm_set_vlan(struct wm_softc *);
static void wm_set_pcie_completion_timeout(struct wm_softc *);
static void wm_get_auto_rd_done(struct wm_softc *);
static void wm_lan_init_done(struct wm_softc *);
static void wm_get_cfg_done(struct wm_softc *);
static void wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t wm_rxpbs_adjust_82580(uint32_t);
static void wm_reset(struct wm_softc *);
static int wm_add_rxbuf(struct wm_rxqueue *, int);
static void wm_rxdrain(struct wm_rxqueue *);
static void wm_rss_getkey(uint8_t *);
static void wm_init_rss(struct wm_softc *);
static void wm_adjust_qnum(struct wm_softc *, int);
static int wm_setup_legacy(struct wm_softc *);
static int wm_setup_msix(struct wm_softc *);
static int wm_init(struct ifnet *);
static int wm_init_locked(struct ifnet *);
static void wm_stop(struct ifnet *, int);
static void wm_stop_locked(struct ifnet *, int);
static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void wm_82547_txfifo_stall(void *);
static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int wm_alloc_txrx_queues(struct wm_softc *);
static void wm_free_txrx_queues(struct wm_softc *);
static int wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void wm_start(struct ifnet *);
static void wm_start_locked(struct ifnet *);
static int wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void wm_nq_start(struct ifnet *);
static void wm_nq_start_locked(struct ifnet *);
static int wm_nq_transmit(struct ifnet *, struct mbuf *);
static inline int wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
/* Interrupt */
static int wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void wm_rxeof(struct wm_rxqueue *);
static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void wm_linkintr(struct wm_softc *, uint32_t);
static int wm_intr_legacy(void *);
static int wm_txrxintr_msix(void *);
static int wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void wm_gmii_reset(struct wm_softc *);
static int wm_get_phy_id_82575(struct wm_softc *);
static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int wm_gmii_mediachange(struct ifnet *);
static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t wm_i82543_mii_recvbits(struct wm_softc *);
static int wm_gmii_i82543_readreg(device_t, int, int);
static void wm_gmii_i82543_writereg(device_t, int, int, int);
static int wm_gmii_i82544_readreg(device_t, int, int);
static void wm_gmii_i82544_writereg(device_t, int, int, int);
static int wm_gmii_i80003_readreg(device_t, int, int);
static void wm_gmii_i80003_writereg(device_t, int, int, int);
static int wm_gmii_bm_readreg(device_t, int, int);
static void wm_gmii_bm_writereg(device_t, int, int, int);
static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int wm_gmii_hv_readreg(device_t, int, int);
static void wm_gmii_hv_writereg(device_t, int, int, int);
static int wm_gmii_82580_readreg(device_t, int, int);
static void wm_gmii_82580_writereg(device_t, int, int, int);
static int wm_gmii_gs40g_readreg(device_t, int, int);
static void wm_gmii_gs40g_writereg(device_t, int, int, int);
static void wm_gmii_statchg(struct ifnet *);
static int wm_kmrn_readreg(struct wm_softc *, int);
static void wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool wm_sgmii_uses_mdio(struct wm_softc *);
static int wm_sgmii_readreg(device_t, int, int);
static void wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void wm_tbi_mediainit(struct wm_softc *);
static int wm_tbi_mediachange(struct ifnet *);
static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int wm_check_for_link(struct wm_softc *);
static void wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void wm_serdes_power_up_link_82575(struct wm_softc *);
static int wm_serdes_mediachange(struct ifnet *);
static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int wm_nvm_ready_spi(struct wm_softc *);
static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t wm_ich8_cycle_init(struct wm_softc *);
static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and reading */
static int wm_nvm_acquire(struct wm_softc *);
static void wm_nvm_release(struct wm_softc *);
static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int wm_nvm_validate_checksum(struct wm_softc *);
static void wm_nvm_version_invm(struct wm_softc *);
static void wm_nvm_version(struct wm_softc *);
static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int wm_get_swsm_semaphore(struct wm_softc *);
static void wm_put_swsm_semaphore(struct wm_softc *);
static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int wm_get_swfwhw_semaphore(struct wm_softc *);
static void wm_put_swfwhw_semaphore(struct wm_softc *);
static int wm_get_hw_semaphore_82573(struct wm_softc *);
static void wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#ifdef WM_WOL
static int wm_check_mng_mode(struct wm_softc *);
static int wm_check_mng_mode_ich8lan(struct wm_softc *);
static int wm_check_mng_mode_82574(struct wm_softc *);
static int wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int wm_enable_mng_pass_thru(struct wm_softc *);
static bool wm_phy_resetisblocked(struct wm_softc *);
static void wm_get_hw_control(struct wm_softc *);
static void wm_release_hw_control(struct wm_softc *);
static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void wm_smbustopci(struct wm_softc *);
static void wm_init_manageability(struct wm_softc *);
static void wm_release_manageability(struct wm_softc *);
static void wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void wm_enable_phy_wakeup(struct wm_softc *);
static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void wm_enable_wakeup(struct wm_softc *);
#endif
/* LPLU (Low Power Link Up) */
static void wm_lplu_d0_disable(struct wm_softc *);
static void wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void wm_configure_k1_ich8lan(struct wm_softc *, int);
static void wm_reset_init_script_82575(struct wm_softc *);
static void wm_reset_mdicnfg_82580(struct wm_softc *);
static void wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
        pci_vendor_id_t wmp_vendor;
        pci_product_id_t wmp_product;
        const char *wmp_name;
        wm_chip_type wmp_type;
        uint32_t wmp_flags;
#define WMP_F_UNKNOWN   WM_MEDIATYPE_UNKNOWN
#define WMP_F_FIBER     WM_MEDIATYPE_FIBER
#define WMP_F_COPPER    WM_MEDIATYPE_COPPER
#define WMP_F_SERDES    WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x) ((x) & 0x03)
} wm_products[] = {
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
          "Intel i82542 1000BASE-X Ethernet",
          WM_T_82542_2_1, WMP_F_FIBER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
          "Intel i82543GC 1000BASE-X Ethernet",
          WM_T_82543, WMP_F_FIBER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
          "Intel i82543GC 1000BASE-T Ethernet",
          WM_T_82543, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
          "Intel i82544EI 1000BASE-T Ethernet",
          WM_T_82544, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
          "Intel i82544EI 1000BASE-X Ethernet",
          WM_T_82544, WMP_F_FIBER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
          "Intel i82544GC 1000BASE-T Ethernet",
          WM_T_82544, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
          "Intel i82544GC (LOM) 1000BASE-T Ethernet",
          WM_T_82544, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
          "Intel i82540EM 1000BASE-T Ethernet",
          WM_T_82540, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
          "Intel i82540EM (LOM) 1000BASE-T Ethernet",
          WM_T_82540, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
          "Intel i82540EP 1000BASE-T Ethernet",
          WM_T_82540, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
          "Intel i82540EP 1000BASE-T Ethernet",
          WM_T_82540, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
          "Intel i82540EP 1000BASE-T Ethernet",
          WM_T_82540, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
          "Intel i82545EM 1000BASE-T Ethernet",
          WM_T_82545, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
          "Intel i82545GM 1000BASE-T Ethernet",
          WM_T_82545_3, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
          "Intel i82545GM 1000BASE-X Ethernet",
          WM_T_82545_3, WMP_F_FIBER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
          "Intel i82545GM Gigabit Ethernet (SERDES)",
          WM_T_82545_3, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
          "Intel i82546EB 1000BASE-T Ethernet",
          WM_T_82546, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
          "Intel i82546EB 1000BASE-T Ethernet",
          WM_T_82546, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
          "Intel i82545EM 1000BASE-X Ethernet",
          WM_T_82545, WMP_F_FIBER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
          "Intel i82546EB 1000BASE-X Ethernet",
          WM_T_82546, WMP_F_FIBER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
          "Intel i82546GB 1000BASE-T Ethernet",
          WM_T_82546_3, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
          "Intel i82546GB 1000BASE-X Ethernet",
          WM_T_82546_3, WMP_F_FIBER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
          "Intel i82546GB Gigabit Ethernet (SERDES)",
          WM_T_82546_3, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
          "i82546GB quad-port Gigabit Ethernet",
          WM_T_82546_3, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
          "i82546GB quad-port Gigabit Ethernet (KSP3)",
          WM_T_82546_3, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
          "Intel PRO/1000MT (82546GB)",
          WM_T_82546_3, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
          "Intel i82541EI 1000BASE-T Ethernet",
          WM_T_82541, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
          "Intel i82541ER (LOM) 1000BASE-T Ethernet",
          WM_T_82541, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
          "Intel i82541EI Mobile 1000BASE-T Ethernet",
          WM_T_82541, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
          "Intel i82541ER 1000BASE-T Ethernet",
          WM_T_82541_2, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
          "Intel i82541GI 1000BASE-T Ethernet",
          WM_T_82541_2, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
          "Intel i82541GI Mobile 1000BASE-T Ethernet",
          WM_T_82541_2, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
          "Intel i82541PI 1000BASE-T Ethernet",
          WM_T_82541_2, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
          "Intel i82547EI 1000BASE-T Ethernet",
          WM_T_82547, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
          "Intel i82547EI Mobile 1000BASE-T Ethernet",
          WM_T_82547, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
          "Intel i82547GI 1000BASE-T Ethernet",
          WM_T_82547_2, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
          "Intel PRO/1000 PT (82571EB)",
          WM_T_82571, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
          "Intel PRO/1000 PF (82571EB)",
          WM_T_82571, WMP_F_FIBER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
          "Intel PRO/1000 PB (82571EB)",
          WM_T_82571, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
          "Intel PRO/1000 QT (82571EB)",
          WM_T_82571, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
          "Intel PRO/1000 PT Quad Port Server Adapter",
          WM_T_82571, WMP_F_COPPER, },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
          "Intel Gigabit PT Quad Port Server ExpressModule",
          WM_T_82571, WMP_F_COPPER, },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
          "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
          WM_T_82571, WMP_F_SERDES, },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
          "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
          WM_T_82571, WMP_F_SERDES, },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
          "Intel 82571EB Quad 1000baseX Ethernet",
          WM_T_82571, WMP_F_FIBER, },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
          "Intel i82572EI 1000baseT Ethernet",
          WM_T_82572, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
          "Intel i82572EI 1000baseX Ethernet",
          WM_T_82572, WMP_F_FIBER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
          "Intel i82572EI Gigabit Ethernet (SERDES)",
          WM_T_82572, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
          "Intel i82572EI 1000baseT Ethernet",
          WM_T_82572, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
          "Intel i82573E",
          WM_T_82573, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
          "Intel i82573E IAMT",
          WM_T_82573, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
          "Intel i82573L Gigabit Ethernet",
          WM_T_82573, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
          "Intel i82574L",
          WM_T_82574, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA,
          "Intel i82574L",
          WM_T_82574, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
          "Intel i82583V",
          WM_T_82583, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
          "i80003 dual 1000baseT Ethernet",
          WM_T_80003, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
          "i80003 dual 1000baseX Ethernet",
          WM_T_80003, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
          "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
          WM_T_80003, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
          "Intel i80003 1000baseT Ethernet",
          WM_T_80003, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
          "Intel i80003 Gigabit Ethernet (SERDES)",
          WM_T_80003, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
          "Intel i82801H (M_AMT) LAN Controller",
          WM_T_ICH8, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
          "Intel i82801H (AMT) LAN Controller",
          WM_T_ICH8, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
          "Intel i82801H LAN Controller",
          WM_T_ICH8, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
          "Intel i82801H (IFE) LAN Controller",
          WM_T_ICH8, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
          "Intel i82801H (M) LAN Controller",
          WM_T_ICH8, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
          "Intel i82801H IFE (GT) LAN Controller",
          WM_T_ICH8, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
          "Intel i82801H IFE (G) LAN Controller",
          WM_T_ICH8, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
          "82801I (AMT) LAN Controller",
          WM_T_ICH9, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
          "82801I LAN Controller",
          WM_T_ICH9, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
          "82801I (G) LAN Controller",
          WM_T_ICH9, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
          "82801I (GT) LAN Controller",
          WM_T_ICH9, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
          "82801I (C) LAN Controller",
          WM_T_ICH9, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
          "82801I mobile LAN Controller",
          WM_T_ICH9, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
          "82801I mobile (V) LAN Controller",
          WM_T_ICH9, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
          "82801I mobile (AMT) LAN Controller",
          WM_T_ICH9, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
          "82567LM-4 LAN Controller",
          WM_T_ICH9, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
          "82567V-3 LAN Controller",
          WM_T_ICH9, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
          "82567LM-2 LAN Controller",
          WM_T_ICH10, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
          "82567LF-2 LAN Controller",
          WM_T_ICH10, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
          "82567LM-3 LAN Controller",
          WM_T_ICH10, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
          "82567LF-3 LAN Controller",
          WM_T_ICH10, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
          "82567V-2 LAN Controller",
          WM_T_ICH10, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
          "82567V-3? LAN Controller",
          WM_T_ICH10, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
          "HANKSVILLE LAN Controller",
          WM_T_ICH10, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
          "PCH LAN (82577LM) Controller",
          WM_T_PCH, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
          "PCH LAN (82577LC) Controller",
          WM_T_PCH, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
          "PCH LAN (82578DM) Controller",
          WM_T_PCH, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
          "PCH LAN (82578DC) Controller",
          WM_T_PCH, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
          "PCH2 LAN (82579LM) Controller",
          WM_T_PCH2, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
          "PCH2 LAN (82579V) Controller",
          WM_T_PCH2, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
          "82575EB dual-1000baseT Ethernet",
          WM_T_82575, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
          "82575EB dual-1000baseX Ethernet (SERDES)",
          WM_T_82575, WMP_F_SERDES },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
          "82575GB quad-1000baseT Ethernet",
          WM_T_82575, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
          "82575GB quad-1000baseT Ethernet (PM)",
          WM_T_82575, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
          "82576 1000BaseT Ethernet",
          WM_T_82576, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
          "82576 1000BaseX Ethernet",
          WM_T_82576, WMP_F_FIBER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
          "82576 gigabit Ethernet (SERDES)",
          WM_T_82576, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
          "82576 quad-1000BaseT Ethernet",
          WM_T_82576, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
          "82576 Gigabit ET2 Quad Port Server Adapter",
          WM_T_82576, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
          "82576 gigabit Ethernet",
          WM_T_82576, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
          "82576 gigabit Ethernet (SERDES)",
          WM_T_82576, WMP_F_SERDES },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
          "82576 quad-gigabit Ethernet (SERDES)",
          WM_T_82576, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
          "82580 1000BaseT Ethernet",
          WM_T_82580, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
          "82580 1000BaseX Ethernet",
          WM_T_82580, WMP_F_FIBER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
          "82580 1000BaseT Ethernet (SERDES)",
          WM_T_82580, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
          "82580 gigabit Ethernet (SGMII)",
          WM_T_82580, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
          "82580 dual-1000BaseT Ethernet",
          WM_T_82580, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
          "82580 quad-1000BaseX Ethernet",
          WM_T_82580, WMP_F_FIBER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
          "DH89XXCC Gigabit Ethernet (SGMII)",
          WM_T_82580, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
          "DH89XXCC Gigabit Ethernet (SERDES)",
          WM_T_82580, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
          "DH89XXCC 1000BASE-KX Ethernet",
          WM_T_82580, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
          "DH89XXCC Gigabit Ethernet (SFP)",
          WM_T_82580, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
          "I350 Gigabit Network Connection",
          WM_T_I350, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
          "I350 Gigabit Fiber Network Connection",
          WM_T_I350, WMP_F_FIBER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
          "I350 Gigabit Backplane Connection",
          WM_T_I350, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
          "I350 Quad Port Gigabit Ethernet",
          WM_T_I350, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
          "I350 Gigabit Connection",
          WM_T_I350, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
          "I354 Gigabit Ethernet (KX)",
          WM_T_I354, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
          "I354 Gigabit Ethernet (SGMII)",
          WM_T_I354, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
          "I354 Gigabit Ethernet (2.5G)",
          WM_T_I354, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
          "I210-T1 Ethernet Server Adapter",
          WM_T_I210, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
          "I210 Ethernet (Copper OEM)",
          WM_T_I210, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
          "I210 Ethernet (Copper IT)",
          WM_T_I210, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
          "I210 Ethernet (FLASH less)",
          WM_T_I210, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
          "I210 Gigabit Ethernet (Fiber)",
          WM_T_I210, WMP_F_FIBER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
          "I210 Gigabit Ethernet (SERDES)",
          WM_T_I210, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
          "I210 Gigabit Ethernet (FLASH less)",
          WM_T_I210, WMP_F_SERDES },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
          "I210 Gigabit Ethernet (SGMII)",
          WM_T_I210, WMP_F_COPPER },

        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
          "I211 Ethernet (COPPER)",
          WM_T_I211, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
          "I217 V Ethernet Connection",
          WM_T_PCH_LPT, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
          "I217 LM Ethernet Connection",
          WM_T_PCH_LPT, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
          "I218 V Ethernet Connection",
          WM_T_PCH_LPT, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
          "I218 V Ethernet Connection",
          WM_T_PCH_LPT, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
          "I218 V Ethernet Connection",
          WM_T_PCH_LPT, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
          "I218 LM Ethernet Connection",
          WM_T_PCH_LPT, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
          "I218 LM Ethernet Connection",
          WM_T_PCH_LPT, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
          "I218 LM Ethernet Connection",
          WM_T_PCH_LPT, WMP_F_COPPER },
#if 0
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V,
          "I219 V Ethernet Connection",
          WM_T_PCH_SPT, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2,
          "I219 V Ethernet Connection",
          WM_T_PCH_SPT, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM,
          "I219 LM Ethernet Connection",
          WM_T_PCH_SPT, WMP_F_COPPER },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2,
          "I219 LM Ethernet Connection",
          WM_T_PCH_SPT, WMP_F_COPPER },
#endif
        { 0, 0,
          NULL,
          0, 0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

        bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
        return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

        bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
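
/*
 * Note (added): wm_io_read()/wm_io_write() drive an index/data register
 * pair in the I/O BAR: the write to offset 0 selects which CSR to
 * access, and offset 4 carries the data, so the full register space is
 * reachable through a small I/O window.
 */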

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
        uint32_t regval;
        int i;

        regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

        CSR_WRITE(sc, reg, regval);

        for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
                delay(5);
                if (CSR_READ(sc, reg) & SCTL_CTL_READY)
                        break;
        }
        if (i == SCTL_CTL_POLL_TIMEOUT) {
                aprint_error("%s: WARNING:"
                    " i82575 reg 0x%08x setup did not indicate ready\n",
                    device_xname(sc->sc_dev), reg);
        }
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
        wa->wa_low = htole32(v & 0xffffffffU);
        if (sizeof(bus_addr_t) == 8)
                wa->wa_high = htole32((uint64_t)v >> 32);
        else
                wa->wa_high = 0;
}
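
/*
 * Worked example (added): wm_set_dma_addr() stores the two little-endian
 * halves of a DMA address.  For v = 0x123456780 it sets wa_low to
 * htole32(0x23456780) and wa_high to htole32(0x1); on a platform with a
 * 32-bit bus_addr_t, wa_high is simply 0.
 */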

/*
 * Descriptor sync/init functions.
 */
static inline void
wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
{
        struct wm_softc *sc = txq->txq_sc;

        /* If it will wrap around, sync to the end of the ring. */
        if ((start + num) > WM_NTXDESC(txq)) {
                bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
                    WM_CDTXOFF(txq, start), txq->txq_descsize *
                    (WM_NTXDESC(txq) - start), ops);
                num -= (WM_NTXDESC(txq) - start);
                start = 0;
        }

        /* Now sync whatever is left. */
        bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
            WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
}
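
/*
 * Worked example (added): on a 4096-entry ring, wm_cdtxsync(txq, 4090,
 * 10, ops) syncs descriptors 4090-4095 in the first bus_dmamap_sync()
 * call above, then wraps and syncs descriptors 0-3 in the second.
 */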

static inline void
wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
{
        struct wm_softc *sc = rxq->rxq_sc;

        bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
            WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
}

static inline void
wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
{
        struct wm_softc *sc = rxq->rxq_sc;
        struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
        wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
        struct mbuf *m = rxs->rxs_mbuf;

        /*
         * Note: We scoot the packet forward 2 bytes in the buffer
         * so that the payload after the Ethernet header is aligned
         * to a 4-byte boundary.
         *
         * XXX BRAINDAMAGE ALERT!
         * The stupid chip uses the same size for every buffer, which
         * is set in the Receive Control register.  We are using the 2K
         * size option, but what we REALLY want is (2K - 2)!  For this
         * reason, we can't "scoot" packets longer than the standard
         * Ethernet MTU.  On strict-alignment platforms, if the total
         * size exceeds (2K - 2) we set align_tweak to 0 and let
         * the upper layer copy the headers.
         */
        m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;

        wm_set_dma_addr(&rxd->wrx_addr,
            rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
        rxd->wrx_len = 0;
        rxd->wrx_cksum = 0;
        rxd->wrx_status = 0;
        rxd->wrx_errors = 0;
        rxd->wrx_special = 0;
        wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
}
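
/*
 * Worked example (added): with the 2K buffer option the usable room for
 * a scooted packet is 2048 - 2 = 2046 bytes, so a standard 1514-byte
 * frame plus the 2-byte tweak fits and sc_align_tweak is 2; for frames
 * that could exceed 2046 bytes on strict-alignment machines the tweak
 * must be 0, as the comment above explains.
 */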

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
        const struct wm_product *wmp;

        for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
                if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
                    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
                        return wmp;
        }
        return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
        struct pci_attach_args *pa = aux;

        if (wm_lookup(pa) != NULL)
                return 1;

        return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
        struct wm_softc *sc = device_private(self);
        struct pci_attach_args *pa = aux;
        prop_dictionary_t dict;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        pci_chipset_tag_t pc = pa->pa_pc;
        int counts[PCI_INTR_TYPE_SIZE];
        pci_intr_type_t max_type;
        const char *eetype, *xname;
        bus_space_tag_t memt;
        bus_space_handle_t memh;
        bus_size_t memsize;
        int memh_valid;
        int i, error;
        const struct wm_product *wmp;
        prop_data_t ea;
        prop_number_t pn;
        uint8_t enaddr[ETHER_ADDR_LEN];
        uint16_t cfg1, cfg2, swdpin, nvmword;
        pcireg_t preg, memtype;
        uint16_t eeprom_data, apme_mask;
        bool force_clear_smbi;
        uint32_t link_mode;
        uint32_t reg;

        sc->sc_dev = self;
        callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
        sc->sc_stopping = false;

        wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
        if (wmp == NULL) {
                printf("\n");
                panic("wm_attach: impossible");
        }
#endif
        sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

        sc->sc_pc = pa->pa_pc;
        sc->sc_pcitag = pa->pa_tag;

        if (pci_dma64_available(pa))
                sc->sc_dmat = pa->pa_dmat64;
        else
                sc->sc_dmat = pa->pa_dmat;

        sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
        sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
        pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

        sc->sc_type = wmp->wmp_type;
        if (sc->sc_type < WM_T_82543) {
                if (sc->sc_rev < 2) {
                        aprint_error_dev(sc->sc_dev,
                            "i82542 must be at least rev. 2\n");
                        return;
                }
                if (sc->sc_rev < 3)
                        sc->sc_type = WM_T_82542_2_0;
        }

        /*
         * Disable MSI for Errata:
         * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
         *
         *  82544: Errata 25
         *  82540: Errata  6 (easy to reproduce device timeout)
         *  82545: Errata  4 (easy to reproduce device timeout)
         *  82546: Errata 26 (easy to reproduce device timeout)
         *  82541: Errata  7 (easy to reproduce device timeout)
         *
         * "Byte Enables 2 and 3 are not set on MSI writes"
         *
         *  82571 & 82572: Errata 63
         */
        if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
            || (sc->sc_type == WM_T_82572))
                pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;

        if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
            || (sc->sc_type == WM_T_82580)
            || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
            || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
                sc->sc_flags |= WM_F_NEWQUEUE;

        /* Set device properties (mactype) */
        dict = device_properties(sc->sc_dev);
        prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

1587 /*
1588 	 * Map the device. All devices support memory-mapped access,
1589 * and it is really required for normal operation.
1590 */
1591 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1592 switch (memtype) {
1593 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1594 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1595 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1596 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1597 break;
1598 default:
1599 memh_valid = 0;
1600 break;
1601 }
1602
1603 if (memh_valid) {
1604 sc->sc_st = memt;
1605 sc->sc_sh = memh;
1606 sc->sc_ss = memsize;
1607 } else {
1608 aprint_error_dev(sc->sc_dev,
1609 "unable to map device registers\n");
1610 return;
1611 }
1612
1613 /*
1614 * In addition, i82544 and later support I/O mapped indirect
1615 * register access. It is not desirable (nor supported in
1616 * this driver) to use it for normal operation, though it is
1617 * required to work around bugs in some chip versions.
1618 */
1619 if (sc->sc_type >= WM_T_82544) {
1620 /* First we have to find the I/O BAR. */
1621 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1622 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1623 if (memtype == PCI_MAPREG_TYPE_IO)
1624 break;
1625 if (PCI_MAPREG_MEM_TYPE(memtype) ==
1626 PCI_MAPREG_MEM_TYPE_64BIT)
1627 i += 4; /* skip high bits, too */
1628 }
1629 if (i < PCI_MAPREG_END) {
1630 /*
1631 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1632 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1633 			 * That's no problem, because newer chips don't
1634 			 * have this bug.
1635 			 *
1636 			 * The i8254x apparently doesn't respond when the
1637 			 * I/O BAR is 0, which looks somewhat like it hasn't
1638 			 * been configured.
1639 */
1640 preg = pci_conf_read(pc, pa->pa_tag, i);
1641 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1642 aprint_error_dev(sc->sc_dev,
1643 "WARNING: I/O BAR at zero.\n");
1644 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1645 0, &sc->sc_iot, &sc->sc_ioh,
1646 NULL, &sc->sc_ios) == 0) {
1647 sc->sc_flags |= WM_F_IOH_VALID;
1648 } else {
1649 aprint_error_dev(sc->sc_dev,
1650 "WARNING: unable to map I/O space\n");
1651 }
1652 }
1654 	}
1655
1656 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
1657 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1658 preg |= PCI_COMMAND_MASTER_ENABLE;
1659 if (sc->sc_type < WM_T_82542_2_1)
1660 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1661 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1662
1663 /* power up chip */
1664 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1665 NULL)) && error != EOPNOTSUPP) {
1666 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1667 return;
1668 }
1669
1670 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1671
1672 /* Allocation settings */
1673 max_type = PCI_INTR_TYPE_MSIX;
1674 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
1675 counts[PCI_INTR_TYPE_MSI] = 1;
1676 counts[PCI_INTR_TYPE_INTX] = 1;
1677
1678 alloc_retry:
1679 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1680 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1681 return;
1682 }
1683
1684 if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1685 error = wm_setup_msix(sc);
1686 if (error) {
1687 pci_intr_release(pc, sc->sc_intrs,
1688 counts[PCI_INTR_TYPE_MSIX]);
1689
1690 /* Setup for MSI: Disable MSI-X */
1691 max_type = PCI_INTR_TYPE_MSI;
1692 counts[PCI_INTR_TYPE_MSI] = 1;
1693 counts[PCI_INTR_TYPE_INTX] = 1;
1694 goto alloc_retry;
1695 }
1696 } else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1697 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1698 error = wm_setup_legacy(sc);
1699 if (error) {
1700 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1701 counts[PCI_INTR_TYPE_MSI]);
1702
1703 /* The next try is for INTx: Disable MSI */
1704 max_type = PCI_INTR_TYPE_INTX;
1705 counts[PCI_INTR_TYPE_INTX] = 1;
1706 goto alloc_retry;
1707 }
1708 } else {
1709 wm_adjust_qnum(sc, 0); /* must not use multiqueue */
1710 error = wm_setup_legacy(sc);
1711 if (error) {
1712 pci_intr_release(sc->sc_pc, sc->sc_intrs,
1713 counts[PCI_INTR_TYPE_INTX]);
1714 return;
1715 }
1716 }
1717
1718 /*
1719 * Check the function ID (unit number of the chip).
1720 */
1721 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1722 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1723 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1724 || (sc->sc_type == WM_T_82580)
1725 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1726 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1727 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1728 else
1729 sc->sc_funcid = 0;
1730
1731 /*
1732 * Determine a few things about the bus we're connected to.
1733 */
1734 if (sc->sc_type < WM_T_82543) {
1735 /* We don't really know the bus characteristics here. */
1736 sc->sc_bus_speed = 33;
1737 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1738 /*
1739 		 * CSA (Communication Streaming Architecture) is about as fast
1740 		 * as a 32-bit, 66MHz PCI bus.
1741 */
1742 sc->sc_flags |= WM_F_CSA;
1743 sc->sc_bus_speed = 66;
1744 aprint_verbose_dev(sc->sc_dev,
1745 "Communication Streaming Architecture\n");
1746 if (sc->sc_type == WM_T_82547) {
1747 callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1748 callout_setfunc(&sc->sc_txfifo_ch,
1749 wm_82547_txfifo_stall, sc);
1750 aprint_verbose_dev(sc->sc_dev,
1751 "using 82547 Tx FIFO stall work-around\n");
1752 }
1753 } else if (sc->sc_type >= WM_T_82571) {
1754 sc->sc_flags |= WM_F_PCIE;
1755 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1756 && (sc->sc_type != WM_T_ICH10)
1757 && (sc->sc_type != WM_T_PCH)
1758 && (sc->sc_type != WM_T_PCH2)
1759 && (sc->sc_type != WM_T_PCH_LPT)
1760 && (sc->sc_type != WM_T_PCH_SPT)) {
1761 /* ICH* and PCH* have no PCIe capability registers */
1762 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1763 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1764 NULL) == 0)
1765 aprint_error_dev(sc->sc_dev,
1766 "unable to find PCIe capability\n");
1767 }
1768 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1769 } else {
1770 reg = CSR_READ(sc, WMREG_STATUS);
1771 if (reg & STATUS_BUS64)
1772 sc->sc_flags |= WM_F_BUS64;
1773 if ((reg & STATUS_PCIX_MODE) != 0) {
1774 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1775
1776 sc->sc_flags |= WM_F_PCIX;
1777 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1778 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1779 aprint_error_dev(sc->sc_dev,
1780 "unable to find PCIX capability\n");
1781 else if (sc->sc_type != WM_T_82545_3 &&
1782 sc->sc_type != WM_T_82546_3) {
1783 /*
1784 * Work around a problem caused by the BIOS
1785 * setting the max memory read byte count
1786 * incorrectly.
1787 */
1788 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1789 sc->sc_pcixe_capoff + PCIX_CMD);
1790 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1791 sc->sc_pcixe_capoff + PCIX_STATUS);
1792
1793 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1794 PCIX_CMD_BYTECNT_SHIFT;
1795 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1796 PCIX_STATUS_MAXB_SHIFT;
1797 if (bytecnt > maxb) {
1798 aprint_verbose_dev(sc->sc_dev,
1799 "resetting PCI-X MMRBC: %d -> %d\n",
1800 512 << bytecnt, 512 << maxb);
1801 pcix_cmd = (pcix_cmd &
1802 ~PCIX_CMD_BYTECNT_MASK) |
1803 (maxb << PCIX_CMD_BYTECNT_SHIFT);
1804 pci_conf_write(pa->pa_pc, pa->pa_tag,
1805 sc->sc_pcixe_capoff + PCIX_CMD,
1806 pcix_cmd);
1807 }
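				/*
				 * Illustrative decode (field values
				 * assumed): the MMRBC fields encode powers
				 * of two, so bytecnt == 3 and maxb == 2
				 * mean the command register requests
				 * 512 << 3 == 4096-byte reads while the
				 * device supports only 512 << 2 == 2048,
				 * and the code above rewrites the command
				 * field down to maxb.
				 */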
1808 }
1809 }
1810 /*
1811 * The quad port adapter is special; it has a PCIX-PCIX
1812 * bridge on the board, and can run the secondary bus at
1813 * a higher speed.
1814 */
1815 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1816 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1817 : 66;
1818 } else if (sc->sc_flags & WM_F_PCIX) {
1819 switch (reg & STATUS_PCIXSPD_MASK) {
1820 case STATUS_PCIXSPD_50_66:
1821 sc->sc_bus_speed = 66;
1822 break;
1823 case STATUS_PCIXSPD_66_100:
1824 sc->sc_bus_speed = 100;
1825 break;
1826 case STATUS_PCIXSPD_100_133:
1827 sc->sc_bus_speed = 133;
1828 break;
1829 default:
1830 aprint_error_dev(sc->sc_dev,
1831 "unknown PCIXSPD %d; assuming 66MHz\n",
1832 reg & STATUS_PCIXSPD_MASK);
1833 sc->sc_bus_speed = 66;
1834 break;
1835 }
1836 } else
1837 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1838 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1839 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1840 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1841 }
1842
1843 /* clear interesting stat counters */
1844 CSR_READ(sc, WMREG_COLC);
1845 CSR_READ(sc, WMREG_RXERRC);
1846
1847 /* get PHY control from SMBus to PCIe */
1848 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1849 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
1850 wm_smbustopci(sc);
1851
1852 /* Reset the chip to a known state. */
1853 wm_reset(sc);
1854
1855 /* Get some information about the EEPROM. */
1856 switch (sc->sc_type) {
1857 case WM_T_82542_2_0:
1858 case WM_T_82542_2_1:
1859 case WM_T_82543:
1860 case WM_T_82544:
1861 /* Microwire */
1862 sc->sc_nvm_wordsize = 64;
1863 sc->sc_nvm_addrbits = 6;
1864 break;
1865 case WM_T_82540:
1866 case WM_T_82545:
1867 case WM_T_82545_3:
1868 case WM_T_82546:
1869 case WM_T_82546_3:
1870 /* Microwire */
1871 reg = CSR_READ(sc, WMREG_EECD);
1872 if (reg & EECD_EE_SIZE) {
1873 sc->sc_nvm_wordsize = 256;
1874 sc->sc_nvm_addrbits = 8;
1875 } else {
1876 sc->sc_nvm_wordsize = 64;
1877 sc->sc_nvm_addrbits = 6;
1878 }
1879 sc->sc_flags |= WM_F_LOCK_EECD;
1880 break;
1881 case WM_T_82541:
1882 case WM_T_82541_2:
1883 case WM_T_82547:
1884 case WM_T_82547_2:
1885 sc->sc_flags |= WM_F_LOCK_EECD;
1886 reg = CSR_READ(sc, WMREG_EECD);
1887 if (reg & EECD_EE_TYPE) {
1888 /* SPI */
1889 sc->sc_flags |= WM_F_EEPROM_SPI;
1890 wm_nvm_set_addrbits_size_eecd(sc);
1891 } else {
1892 /* Microwire */
1893 if ((reg & EECD_EE_ABITS) != 0) {
1894 sc->sc_nvm_wordsize = 256;
1895 sc->sc_nvm_addrbits = 8;
1896 } else {
1897 sc->sc_nvm_wordsize = 64;
1898 sc->sc_nvm_addrbits = 6;
1899 }
1900 }
1901 break;
1902 case WM_T_82571:
1903 case WM_T_82572:
1904 /* SPI */
1905 sc->sc_flags |= WM_F_EEPROM_SPI;
1906 wm_nvm_set_addrbits_size_eecd(sc);
1907 sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1908 break;
1909 case WM_T_82573:
1910 sc->sc_flags |= WM_F_LOCK_SWSM;
1911 /* FALLTHROUGH */
1912 case WM_T_82574:
1913 case WM_T_82583:
1914 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1915 sc->sc_flags |= WM_F_EEPROM_FLASH;
1916 sc->sc_nvm_wordsize = 2048;
1917 } else {
1918 /* SPI */
1919 sc->sc_flags |= WM_F_EEPROM_SPI;
1920 wm_nvm_set_addrbits_size_eecd(sc);
1921 }
1922 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1923 break;
1924 case WM_T_82575:
1925 case WM_T_82576:
1926 case WM_T_82580:
1927 case WM_T_I350:
1928 case WM_T_I354:
1929 case WM_T_80003:
1930 /* SPI */
1931 sc->sc_flags |= WM_F_EEPROM_SPI;
1932 wm_nvm_set_addrbits_size_eecd(sc);
1933 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1934 | WM_F_LOCK_SWSM;
1935 break;
1936 case WM_T_ICH8:
1937 case WM_T_ICH9:
1938 case WM_T_ICH10:
1939 case WM_T_PCH:
1940 case WM_T_PCH2:
1941 case WM_T_PCH_LPT:
1942 /* FLASH */
1943 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1944 sc->sc_nvm_wordsize = 2048;
1945 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
1946 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1947 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
1948 aprint_error_dev(sc->sc_dev,
1949 "can't map FLASH registers\n");
1950 goto out;
1951 }
1952 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1953 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1954 ICH_FLASH_SECTOR_SIZE;
1955 sc->sc_ich8_flash_bank_size =
1956 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1957 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
1958 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1959 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1960 sc->sc_flashreg_offset = 0;
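		/*
		 * Illustrative GFPREG decode (register value assumed):
		 * with a base field of 0 and a limit field of 0x0f, the
		 * NVM region spans 16 sectors of ICH_FLASH_SECTOR_SIZE
		 * bytes starting at byte 0, and the computation above
		 * yields a per-bank size of
		 * (16 * ICH_FLASH_SECTOR_SIZE) / (2 banks * 2 bytes/word)
		 * 16-bit words.
		 */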
1961 break;
1962 case WM_T_PCH_SPT:
1963 /* SPT has no GFPREG; flash registers mapped through BAR0 */
1964 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1965 sc->sc_flasht = sc->sc_st;
1966 sc->sc_flashh = sc->sc_sh;
1967 sc->sc_ich8_flash_base = 0;
1968 sc->sc_nvm_wordsize =
1969 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
1970 * NVM_SIZE_MULTIPLIER;
1971 		/* The size is in bytes; we want words */
1972 sc->sc_nvm_wordsize /= 2;
1973 /* assume 2 banks */
1974 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
1975 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
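		/*
		 * Illustrative STRAP decode (strap value assumed): if bits
		 * 5:1 of WMREG_STRAP read as 0x03, the NVM size is
		 * (3 + 1) * NVM_SIZE_MULTIPLIER bytes; the first halving
		 * converts bytes to 16-bit words and the second gives the
		 * assumed per-bank size.
		 */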
1976 break;
1977 case WM_T_I210:
1978 case WM_T_I211:
1979 if (wm_nvm_get_flash_presence_i210(sc)) {
1980 wm_nvm_set_addrbits_size_eecd(sc);
1981 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1982 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1983 } else {
1984 sc->sc_nvm_wordsize = INVM_SIZE;
1985 sc->sc_flags |= WM_F_EEPROM_INVM;
1986 sc->sc_flags |= WM_F_LOCK_SWFW;
1987 }
1988 break;
1989 default:
1990 break;
1991 }
1992
1993 /* Ensure the SMBI bit is clear before first NVM or PHY access */
1994 switch (sc->sc_type) {
1995 case WM_T_82571:
1996 case WM_T_82572:
1997 reg = CSR_READ(sc, WMREG_SWSM2);
1998 if ((reg & SWSM2_LOCK) == 0) {
1999 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2000 force_clear_smbi = true;
2001 } else
2002 force_clear_smbi = false;
2003 break;
2004 case WM_T_82573:
2005 case WM_T_82574:
2006 case WM_T_82583:
2007 force_clear_smbi = true;
2008 break;
2009 default:
2010 force_clear_smbi = false;
2011 break;
2012 }
2013 if (force_clear_smbi) {
2014 reg = CSR_READ(sc, WMREG_SWSM);
2015 if ((reg & SWSM_SMBI) != 0)
2016 aprint_error_dev(sc->sc_dev,
2017 "Please update the Bootagent\n");
2018 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2019 }
2020
2021 /*
2022 	 * Defer printing the EEPROM type until after verifying the checksum.
2023 * This allows the EEPROM type to be printed correctly in the case
2024 * that no EEPROM is attached.
2025 */
2026 /*
2027 * Validate the EEPROM checksum. If the checksum fails, flag
2028 * this for later, so we can fail future reads from the EEPROM.
2029 */
2030 if (wm_nvm_validate_checksum(sc)) {
2031 /*
2032 		 * Some PCI-e parts fail the first check due to the link
2033 		 * being in a sleep state, so validate the checksum again.
2034 */
2035 if (wm_nvm_validate_checksum(sc))
2036 sc->sc_flags |= WM_F_EEPROM_INVALID;
2037 }
2038
2039 /* Set device properties (macflags) */
2040 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2041
2042 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2043 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2044 else {
2045 aprint_verbose_dev(sc->sc_dev, "%u words ",
2046 sc->sc_nvm_wordsize);
2047 if (sc->sc_flags & WM_F_EEPROM_INVM)
2048 aprint_verbose("iNVM");
2049 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2050 aprint_verbose("FLASH(HW)");
2051 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2052 aprint_verbose("FLASH");
2053 else {
2054 if (sc->sc_flags & WM_F_EEPROM_SPI)
2055 eetype = "SPI";
2056 else
2057 eetype = "MicroWire";
2058 aprint_verbose("(%d address bits) %s EEPROM",
2059 sc->sc_nvm_addrbits, eetype);
2060 }
2061 }
2062 wm_nvm_version(sc);
2063 aprint_verbose("\n");
2064
2065 /* Check for I21[01] PLL workaround */
2066 if (sc->sc_type == WM_T_I210)
2067 sc->sc_flags |= WM_F_PLL_WA_I210;
2068 if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2069 /* NVM image release 3.25 has a workaround */
2070 if ((sc->sc_nvm_ver_major < 3)
2071 || ((sc->sc_nvm_ver_major == 3)
2072 && (sc->sc_nvm_ver_minor < 25))) {
2073 aprint_verbose_dev(sc->sc_dev,
2074 "ROM image version %d.%d is older than 3.25\n",
2075 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2076 sc->sc_flags |= WM_F_PLL_WA_I210;
2077 }
2078 }
2079 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2080 wm_pll_workaround_i210(sc);
2081
2082 wm_get_wakeup(sc);
2083 switch (sc->sc_type) {
2084 case WM_T_82571:
2085 case WM_T_82572:
2086 case WM_T_82573:
2087 case WM_T_82574:
2088 case WM_T_82583:
2089 case WM_T_80003:
2090 case WM_T_ICH8:
2091 case WM_T_ICH9:
2092 case WM_T_ICH10:
2093 case WM_T_PCH:
2094 case WM_T_PCH2:
2095 case WM_T_PCH_LPT:
2096 case WM_T_PCH_SPT:
2097 /* Non-AMT based hardware can now take control from firmware */
2098 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2099 wm_get_hw_control(sc);
2100 break;
2101 default:
2102 break;
2103 }
2104
2105 /*
2106 * Read the Ethernet address from the EEPROM, if not first found
2107 * in device properties.
2108 */
2109 ea = prop_dictionary_get(dict, "mac-address");
2110 if (ea != NULL) {
2111 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2112 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2113 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2114 } else {
2115 if (wm_read_mac_addr(sc, enaddr) != 0) {
2116 aprint_error_dev(sc->sc_dev,
2117 "unable to read Ethernet address\n");
2118 goto out;
2119 }
2120 }
2121
2122 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2123 ether_sprintf(enaddr));
2124
2125 /*
2126 * Read the config info from the EEPROM, and set up various
2127 * bits in the control registers based on their contents.
2128 */
2129 pn = prop_dictionary_get(dict, "i82543-cfg1");
2130 if (pn != NULL) {
2131 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2132 cfg1 = (uint16_t) prop_number_integer_value(pn);
2133 } else {
2134 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2135 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2136 goto out;
2137 }
2138 }
2139
2140 pn = prop_dictionary_get(dict, "i82543-cfg2");
2141 if (pn != NULL) {
2142 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2143 cfg2 = (uint16_t) prop_number_integer_value(pn);
2144 } else {
2145 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2146 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2147 goto out;
2148 }
2149 }
2150
2151 /* check for WM_F_WOL */
2152 switch (sc->sc_type) {
2153 case WM_T_82542_2_0:
2154 case WM_T_82542_2_1:
2155 case WM_T_82543:
2156 /* dummy? */
2157 eeprom_data = 0;
2158 apme_mask = NVM_CFG3_APME;
2159 break;
2160 case WM_T_82544:
2161 apme_mask = NVM_CFG2_82544_APM_EN;
2162 eeprom_data = cfg2;
2163 break;
2164 case WM_T_82546:
2165 case WM_T_82546_3:
2166 case WM_T_82571:
2167 case WM_T_82572:
2168 case WM_T_82573:
2169 case WM_T_82574:
2170 case WM_T_82583:
2171 case WM_T_80003:
2172 default:
2173 apme_mask = NVM_CFG3_APME;
2174 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2175 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2176 break;
2177 case WM_T_82575:
2178 case WM_T_82576:
2179 case WM_T_82580:
2180 case WM_T_I350:
2181 case WM_T_I354: /* XXX ok? */
2182 case WM_T_ICH8:
2183 case WM_T_ICH9:
2184 case WM_T_ICH10:
2185 case WM_T_PCH:
2186 case WM_T_PCH2:
2187 case WM_T_PCH_LPT:
2188 case WM_T_PCH_SPT:
2189 /* XXX The funcid should be checked on some devices */
2190 apme_mask = WUC_APME;
2191 eeprom_data = CSR_READ(sc, WMREG_WUC);
2192 break;
2193 }
2194
2195 	/* Check for the WM_F_WOL flag after the EEPROM settings are read */
2196 if ((eeprom_data & apme_mask) != 0)
2197 sc->sc_flags |= WM_F_WOL;
2198 #ifdef WM_DEBUG
2199 if ((sc->sc_flags & WM_F_WOL) != 0)
2200 printf("WOL\n");
2201 #endif
2202
2203 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2204 /* Check NVM for autonegotiation */
2205 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2206 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2207 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2208 }
2209 }
2210
2211 /*
2212 * XXX need special handling for some multiple port cards
2213 	 * to disable a particular port.
2214 */
2215
2216 if (sc->sc_type >= WM_T_82544) {
2217 pn = prop_dictionary_get(dict, "i82543-swdpin");
2218 if (pn != NULL) {
2219 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2220 swdpin = (uint16_t) prop_number_integer_value(pn);
2221 } else {
2222 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2223 aprint_error_dev(sc->sc_dev,
2224 "unable to read SWDPIN\n");
2225 goto out;
2226 }
2227 }
2228 }
2229
2230 if (cfg1 & NVM_CFG1_ILOS)
2231 sc->sc_ctrl |= CTRL_ILOS;
2232
2233 /*
2234 * XXX
2235 	 * This code isn't correct because pins 2 and 3 are located
2236 	 * at different positions on newer chips. Check all datasheets.
2237 	 *
2238 	 * Until this problem is resolved, only do this for chips < 82580.
2239 */
2240 if (sc->sc_type <= WM_T_82580) {
2241 if (sc->sc_type >= WM_T_82544) {
2242 sc->sc_ctrl |=
2243 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2244 CTRL_SWDPIO_SHIFT;
2245 sc->sc_ctrl |=
2246 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2247 CTRL_SWDPINS_SHIFT;
2248 } else {
2249 sc->sc_ctrl |=
2250 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2251 CTRL_SWDPIO_SHIFT;
2252 }
2253 }
2254
2255 /* XXX For other than 82580? */
2256 if (sc->sc_type == WM_T_82580) {
2257 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2258 if (nvmword & __BIT(13))
2259 sc->sc_ctrl |= CTRL_ILOS;
2260 }
2261
2262 #if 0
2263 if (sc->sc_type >= WM_T_82544) {
2264 if (cfg1 & NVM_CFG1_IPS0)
2265 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2266 if (cfg1 & NVM_CFG1_IPS1)
2267 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2268 sc->sc_ctrl_ext |=
2269 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2270 CTRL_EXT_SWDPIO_SHIFT;
2271 sc->sc_ctrl_ext |=
2272 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2273 CTRL_EXT_SWDPINS_SHIFT;
2274 } else {
2275 sc->sc_ctrl_ext |=
2276 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2277 CTRL_EXT_SWDPIO_SHIFT;
2278 }
2279 #endif
2280
2281 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2282 #if 0
2283 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2284 #endif
2285
2286 if (sc->sc_type == WM_T_PCH) {
2287 uint16_t val;
2288
2289 /* Save the NVM K1 bit setting */
2290 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2291
2292 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2293 sc->sc_nvm_k1_enabled = 1;
2294 else
2295 sc->sc_nvm_k1_enabled = 0;
2296 }
2297
2298 /*
2299 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
2300 * media structures accordingly.
2301 */
2302 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2303 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2304 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2305 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
2306 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2307 /* STATUS_TBIMODE reserved/reused, can't rely on it */
2308 wm_gmii_mediainit(sc, wmp->wmp_product);
2309 } else if (sc->sc_type < WM_T_82543 ||
2310 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2311 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2312 aprint_error_dev(sc->sc_dev,
2313 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2314 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2315 }
2316 wm_tbi_mediainit(sc);
2317 } else {
2318 switch (sc->sc_type) {
2319 case WM_T_82575:
2320 case WM_T_82576:
2321 case WM_T_82580:
2322 case WM_T_I350:
2323 case WM_T_I354:
2324 case WM_T_I210:
2325 case WM_T_I211:
2326 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2327 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2328 switch (link_mode) {
2329 case CTRL_EXT_LINK_MODE_1000KX:
2330 aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2331 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2332 break;
2333 case CTRL_EXT_LINK_MODE_SGMII:
2334 if (wm_sgmii_uses_mdio(sc)) {
2335 aprint_verbose_dev(sc->sc_dev,
2336 "SGMII(MDIO)\n");
2337 sc->sc_flags |= WM_F_SGMII;
2338 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2339 break;
2340 }
2341 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2342 /*FALLTHROUGH*/
2343 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2344 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2345 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2346 if (link_mode
2347 == CTRL_EXT_LINK_MODE_SGMII) {
2348 sc->sc_mediatype
2349 = WM_MEDIATYPE_COPPER;
2350 sc->sc_flags |= WM_F_SGMII;
2351 } else {
2352 sc->sc_mediatype
2353 = WM_MEDIATYPE_SERDES;
2354 aprint_verbose_dev(sc->sc_dev,
2355 "SERDES\n");
2356 }
2357 break;
2358 }
2359 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2360 aprint_verbose_dev(sc->sc_dev,
2361 "SERDES\n");
2362
2363 /* Change current link mode setting */
2364 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2365 switch (sc->sc_mediatype) {
2366 case WM_MEDIATYPE_COPPER:
2367 reg |= CTRL_EXT_LINK_MODE_SGMII;
2368 break;
2369 case WM_MEDIATYPE_SERDES:
2370 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2371 break;
2372 default:
2373 break;
2374 }
2375 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2376 break;
2377 case CTRL_EXT_LINK_MODE_GMII:
2378 default:
2379 aprint_verbose_dev(sc->sc_dev, "Copper\n");
2380 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2381 break;
2382 }
2383
2385 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2386 				reg |= CTRL_EXT_I2C_ENA;
2387 			else
2388 				reg &= ~CTRL_EXT_I2C_ENA;
2389 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2390
2391 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2392 wm_gmii_mediainit(sc, wmp->wmp_product);
2393 else
2394 wm_tbi_mediainit(sc);
2395 break;
2396 default:
2397 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2398 aprint_error_dev(sc->sc_dev,
2399 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2400 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2401 wm_gmii_mediainit(sc, wmp->wmp_product);
2402 }
2403 }
2404
2405 ifp = &sc->sc_ethercom.ec_if;
2406 xname = device_xname(sc->sc_dev);
2407 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2408 ifp->if_softc = sc;
2409 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2410 ifp->if_ioctl = wm_ioctl;
2411 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2412 ifp->if_start = wm_nq_start;
2413 if (sc->sc_nqueues > 1)
2414 ifp->if_transmit = wm_nq_transmit;
2415 } else
2416 ifp->if_start = wm_start;
2417 ifp->if_watchdog = wm_watchdog;
2418 ifp->if_init = wm_init;
2419 ifp->if_stop = wm_stop;
2420 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2421 IFQ_SET_READY(&ifp->if_snd);
2422
2423 /* Check for jumbo frame */
2424 switch (sc->sc_type) {
2425 case WM_T_82573:
2426 /* XXX limited to 9234 if ASPM is disabled */
2427 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2428 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2429 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2430 break;
2431 case WM_T_82571:
2432 case WM_T_82572:
2433 case WM_T_82574:
2434 case WM_T_82575:
2435 case WM_T_82576:
2436 case WM_T_82580:
2437 case WM_T_I350:
2438 case WM_T_I354: /* XXXX ok? */
2439 case WM_T_I210:
2440 case WM_T_I211:
2441 case WM_T_80003:
2442 case WM_T_ICH9:
2443 case WM_T_ICH10:
2444 case WM_T_PCH2: /* PCH2 supports 9K frame size */
2445 case WM_T_PCH_LPT:
2446 case WM_T_PCH_SPT:
2447 /* XXX limited to 9234 */
2448 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2449 break;
2450 case WM_T_PCH:
2451 /* XXX limited to 4096 */
2452 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2453 break;
2454 case WM_T_82542_2_0:
2455 case WM_T_82542_2_1:
2456 case WM_T_82583:
2457 case WM_T_ICH8:
2458 /* No support for jumbo frame */
2459 break;
2460 default:
2461 /* ETHER_MAX_LEN_JUMBO */
2462 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2463 break;
2464 }
2465
2466 /* If we're a i82543 or greater, we can support VLANs. */
2467 if (sc->sc_type >= WM_T_82543)
2468 sc->sc_ethercom.ec_capabilities |=
2469 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2470
2471 /*
2472 	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2473 * on i82543 and later.
2474 */
2475 if (sc->sc_type >= WM_T_82543) {
2476 ifp->if_capabilities |=
2477 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2478 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2479 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2480 IFCAP_CSUM_TCPv6_Tx |
2481 IFCAP_CSUM_UDPv6_Tx;
2482 }
2483
2484 /*
2485 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2486 *
2487 * 82541GI (8086:1076) ... no
2488 * 82572EI (8086:10b9) ... yes
2489 */
2490 if (sc->sc_type >= WM_T_82571) {
2491 ifp->if_capabilities |=
2492 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2493 }
2494
2495 /*
2496 * If we're a i82544 or greater (except i82547), we can do
2497 * TCP segmentation offload.
2498 */
2499 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2500 ifp->if_capabilities |= IFCAP_TSOv4;
2501 }
2502
2503 if (sc->sc_type >= WM_T_82571) {
2504 ifp->if_capabilities |= IFCAP_TSOv6;
2505 }
2506
2507 #ifdef WM_MPSAFE
2508 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2509 #else
2510 sc->sc_core_lock = NULL;
2511 #endif
2512
2513 /* Attach the interface. */
2514 if_initialize(ifp);
2515 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2516 ether_ifattach(ifp, enaddr);
2517 if_register(ifp);
2518 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2519 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2520 RND_FLAG_DEFAULT);
2521
2522 #ifdef WM_EVENT_COUNTERS
2523 /* Attach event counters. */
2524 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2525 NULL, xname, "txsstall");
2526 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2527 NULL, xname, "txdstall");
2528 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2529 NULL, xname, "txfifo_stall");
2530 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2531 NULL, xname, "txdw");
2532 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2533 NULL, xname, "txqe");
2534 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2535 NULL, xname, "rxintr");
2536 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2537 NULL, xname, "linkintr");
2538
2539 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2540 NULL, xname, "rxipsum");
2541 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2542 NULL, xname, "rxtusum");
2543 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2544 NULL, xname, "txipsum");
2545 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2546 NULL, xname, "txtusum");
2547 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2548 NULL, xname, "txtusum6");
2549
2550 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2551 NULL, xname, "txtso");
2552 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2553 NULL, xname, "txtso6");
2554 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2555 NULL, xname, "txtsopain");
2556
2557 for (i = 0; i < WM_NTXSEGS; i++) {
2558 snprintf(wm_txseg_evcnt_names[i],
2559 sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2560 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2561 NULL, xname, wm_txseg_evcnt_names[i]);
2562 }
2563
2564 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2565 NULL, xname, "txdrop");
2566
2567 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2568 NULL, xname, "tu");
2569
2570 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2571 NULL, xname, "tx_xoff");
2572 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2573 NULL, xname, "tx_xon");
2574 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2575 NULL, xname, "rx_xoff");
2576 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2577 NULL, xname, "rx_xon");
2578 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2579 NULL, xname, "rx_macctl");
2580 #endif /* WM_EVENT_COUNTERS */
2581
2582 if (pmf_device_register(self, wm_suspend, wm_resume))
2583 pmf_class_network_register(self, ifp);
2584 else
2585 aprint_error_dev(self, "couldn't establish power handler\n");
2586
2587 sc->sc_flags |= WM_F_ATTACHED;
2588 out:
2589 return;
2590 }
2591
2592 /* The detach function (ca_detach) */
2593 static int
2594 wm_detach(device_t self, int flags __unused)
2595 {
2596 struct wm_softc *sc = device_private(self);
2597 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2598 int i;
2599
2600 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2601 return 0;
2602
2603 	/* Stop the interface. Callouts are stopped inside it. */
2604 wm_stop(ifp, 1);
2605
2606 pmf_device_deregister(self);
2607
2608 /* Tell the firmware about the release */
2609 WM_CORE_LOCK(sc);
2610 wm_release_manageability(sc);
2611 wm_release_hw_control(sc);
2612 WM_CORE_UNLOCK(sc);
2613
2614 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2615
2616 /* Delete all remaining media. */
2617 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2618
2619 ether_ifdetach(ifp);
2620 if_detach(ifp);
2621 if_percpuq_destroy(sc->sc_ipq);
2622
2623 /* Unload RX dmamaps and free mbufs */
2624 for (i = 0; i < sc->sc_nqueues; i++) {
2625 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2626 mutex_enter(rxq->rxq_lock);
2627 wm_rxdrain(rxq);
2628 mutex_exit(rxq->rxq_lock);
2629 }
2630 /* Must unlock here */
2631
2632 /* Disestablish the interrupt handler */
2633 for (i = 0; i < sc->sc_nintrs; i++) {
2634 if (sc->sc_ihs[i] != NULL) {
2635 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2636 sc->sc_ihs[i] = NULL;
2637 }
2638 }
2639 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2640
2641 wm_free_txrx_queues(sc);
2642
2643 /* Unmap the registers */
2644 if (sc->sc_ss) {
2645 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2646 sc->sc_ss = 0;
2647 }
2648 if (sc->sc_ios) {
2649 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2650 sc->sc_ios = 0;
2651 }
2652 if (sc->sc_flashs) {
2653 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2654 sc->sc_flashs = 0;
2655 }
2656
2657 if (sc->sc_core_lock)
2658 mutex_obj_free(sc->sc_core_lock);
2659
2660 return 0;
2661 }
2662
2663 static bool
2664 wm_suspend(device_t self, const pmf_qual_t *qual)
2665 {
2666 struct wm_softc *sc = device_private(self);
2667
2668 wm_release_manageability(sc);
2669 wm_release_hw_control(sc);
2670 #ifdef WM_WOL
2671 wm_enable_wakeup(sc);
2672 #endif
2673
2674 return true;
2675 }
2676
2677 static bool
2678 wm_resume(device_t self, const pmf_qual_t *qual)
2679 {
2680 struct wm_softc *sc = device_private(self);
2681
2682 wm_init_manageability(sc);
2683
2684 return true;
2685 }
2686
2687 /*
2688 * wm_watchdog: [ifnet interface function]
2689 *
2690 * Watchdog timer handler.
2691 */
2692 static void
2693 wm_watchdog(struct ifnet *ifp)
2694 {
2695 int qid;
2696 struct wm_softc *sc = ifp->if_softc;
2697
2698 for (qid = 0; qid < sc->sc_nqueues; qid++) {
2699 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2700
2701 wm_watchdog_txq(ifp, txq);
2702 }
2703
2704 /* Reset the interface. */
2705 (void) wm_init(ifp);
2706
2707 /*
2708 	 * There is still some upper-layer processing which calls
2709 	 * ifp->if_start(), e.g. ALTQ.
2710 */
2711 /* Try to get more packets going. */
2712 ifp->if_start(ifp);
2713 }
2714
2715 static void
2716 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
2717 {
2718 struct wm_softc *sc = ifp->if_softc;
2719
2720 /*
2721 * Since we're using delayed interrupts, sweep up
2722 * before we report an error.
2723 */
2724 mutex_enter(txq->txq_lock);
2725 wm_txeof(sc, txq);
2726 mutex_exit(txq->txq_lock);
2727
2728 if (txq->txq_free != WM_NTXDESC(txq)) {
2729 #ifdef WM_DEBUG
2730 int i, j;
2731 struct wm_txsoft *txs;
2732 #endif
2733 log(LOG_ERR,
2734 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2735 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2736 txq->txq_next);
2737 ifp->if_oerrors++;
2738 #ifdef WM_DEBUG
2739 for (i = txq->txq_sdirty; i != txq->txq_snext ;
2740 i = WM_NEXTTXS(txq, i)) {
2741 txs = &txq->txq_soft[i];
2742 printf("txs %d tx %d -> %d\n",
2743 i, txs->txs_firstdesc, txs->txs_lastdesc);
2744 for (j = txs->txs_firstdesc; ;
2745 j = WM_NEXTTX(txq, j)) {
2746 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2747 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2748 printf("\t %#08x%08x\n",
2749 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2750 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2751 if (j == txs->txs_lastdesc)
2752 break;
2753 }
2754 }
2755 #endif
2756 }
2757 }
2758
2759 /*
2760 * wm_tick:
2761 *
2762 * One second timer, used to check link status, sweep up
2763 * completed transmit jobs, etc.
2764 */
2765 static void
2766 wm_tick(void *arg)
2767 {
2768 struct wm_softc *sc = arg;
2769 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2770 #ifndef WM_MPSAFE
2771 int s = splnet();
2772 #endif
2773
2774 WM_CORE_LOCK(sc);
2775
2776 if (sc->sc_stopping)
2777 goto out;
2778
2779 if (sc->sc_type >= WM_T_82542_2_1) {
2780 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2781 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2782 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2783 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2784 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2785 }
2786
2787 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2788 	ifp->if_ierrors += 0ULL /* ensure quad_t */
2789 	    + CSR_READ(sc, WMREG_CRCERRS)
2790 + CSR_READ(sc, WMREG_ALGNERRC)
2791 + CSR_READ(sc, WMREG_SYMERRC)
2792 + CSR_READ(sc, WMREG_RXERRC)
2793 + CSR_READ(sc, WMREG_SEC)
2794 + CSR_READ(sc, WMREG_CEXTERR)
2795 + CSR_READ(sc, WMREG_RLEC);
2796 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2797
2798 if (sc->sc_flags & WM_F_HAS_MII)
2799 mii_tick(&sc->sc_mii);
2800 else if ((sc->sc_type >= WM_T_82575)
2801 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2802 wm_serdes_tick(sc);
2803 else
2804 wm_tbi_tick(sc);
2805
2806 out:
2807 WM_CORE_UNLOCK(sc);
2808 #ifndef WM_MPSAFE
2809 splx(s);
2810 #endif
2811
2812 if (!sc->sc_stopping)
2813 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2814 }
2815
2816 static int
2817 wm_ifflags_cb(struct ethercom *ec)
2818 {
2819 struct ifnet *ifp = &ec->ec_if;
2820 struct wm_softc *sc = ifp->if_softc;
2821 int change = ifp->if_flags ^ sc->sc_if_flags;
2822 int rc = 0;
2823
2824 WM_CORE_LOCK(sc);
2825
2826 if (change != 0)
2827 sc->sc_if_flags = ifp->if_flags;
2828
2829 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2830 rc = ENETRESET;
2831 goto out;
2832 }
2833
2834 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2835 wm_set_filter(sc);
2836
2837 wm_set_vlan(sc);
2838
2839 out:
2840 WM_CORE_UNLOCK(sc);
2841
2842 return rc;
2843 }
2844
2845 /*
2846 * wm_ioctl: [ifnet interface function]
2847 *
2848 * Handle control requests from the operator.
2849 */
2850 static int
2851 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2852 {
2853 struct wm_softc *sc = ifp->if_softc;
2854 struct ifreq *ifr = (struct ifreq *) data;
2855 struct ifaddr *ifa = (struct ifaddr *)data;
2856 struct sockaddr_dl *sdl;
2857 int s, error;
2858
2859 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
2860 device_xname(sc->sc_dev), __func__));
2861 #ifndef WM_MPSAFE
2862 s = splnet();
2863 #endif
2864 switch (cmd) {
2865 case SIOCSIFMEDIA:
2866 case SIOCGIFMEDIA:
2867 WM_CORE_LOCK(sc);
2868 /* Flow control requires full-duplex mode. */
2869 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2870 (ifr->ifr_media & IFM_FDX) == 0)
2871 ifr->ifr_media &= ~IFM_ETH_FMASK;
2872 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2873 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2874 /* We can do both TXPAUSE and RXPAUSE. */
2875 ifr->ifr_media |=
2876 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2877 }
2878 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2879 }
2880 WM_CORE_UNLOCK(sc);
2881 #ifdef WM_MPSAFE
2882 s = splnet();
2883 #endif
2884 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2885 #ifdef WM_MPSAFE
2886 splx(s);
2887 #endif
2888 break;
2889 case SIOCINITIFADDR:
2890 WM_CORE_LOCK(sc);
2891 if (ifa->ifa_addr->sa_family == AF_LINK) {
2892 sdl = satosdl(ifp->if_dl->ifa_addr);
2893 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2894 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2895 /* unicast address is first multicast entry */
2896 wm_set_filter(sc);
2897 error = 0;
2898 WM_CORE_UNLOCK(sc);
2899 break;
2900 }
2901 WM_CORE_UNLOCK(sc);
2902 /*FALLTHROUGH*/
2903 default:
2904 #ifdef WM_MPSAFE
2905 s = splnet();
2906 #endif
2907 /* It may call wm_start, so unlock here */
2908 error = ether_ioctl(ifp, cmd, data);
2909 #ifdef WM_MPSAFE
2910 splx(s);
2911 #endif
2912 if (error != ENETRESET)
2913 break;
2914
2915 error = 0;
2916
2917 if (cmd == SIOCSIFCAP) {
2918 error = (*ifp->if_init)(ifp);
2919 } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2920 ;
2921 else if (ifp->if_flags & IFF_RUNNING) {
2922 /*
2923 * Multicast list has changed; set the hardware filter
2924 * accordingly.
2925 */
2926 WM_CORE_LOCK(sc);
2927 wm_set_filter(sc);
2928 WM_CORE_UNLOCK(sc);
2929 }
2930 break;
2931 }
2932
2933 #ifndef WM_MPSAFE
2934 splx(s);
2935 #endif
2936 return error;
2937 }
2938
2939 /* MAC address related */
2940
2941 /*
2942  * Get the offset of the MAC address and return it.
2943  * If an error occurred, use offset 0.
2944 */
2945 static uint16_t
2946 wm_check_alt_mac_addr(struct wm_softc *sc)
2947 {
2948 uint16_t myea[ETHER_ADDR_LEN / 2];
2949 uint16_t offset = NVM_OFF_MACADDR;
2950
2951 /* Try to read alternative MAC address pointer */
2952 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2953 return 0;
2954
2955 	/* Check whether the pointer is valid. */
2956 if ((offset == 0x0000) || (offset == 0xffff))
2957 return 0;
2958
2959 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2960 /*
2961 	 * Check whether the alternative MAC address is valid.
2962 	 * Some cards have a non-0xffff pointer but don't actually
2963 	 * use an alternative MAC address.
2964 *
2965 * Check whether the broadcast bit is set or not.
2966 */
2967 if (wm_nvm_read(sc, offset, 1, myea) == 0)
2968 if (((myea[0] & 0xff) & 0x01) == 0)
2969 return offset; /* Found */
2970
2971 /* Not found */
2972 return 0;
2973 }
2974
2975 static int
2976 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2977 {
2978 uint16_t myea[ETHER_ADDR_LEN / 2];
2979 uint16_t offset = NVM_OFF_MACADDR;
2980 int do_invert = 0;
2981
2982 switch (sc->sc_type) {
2983 case WM_T_82580:
2984 case WM_T_I350:
2985 case WM_T_I354:
2986 /* EEPROM Top Level Partitioning */
2987 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2988 break;
2989 case WM_T_82571:
2990 case WM_T_82575:
2991 case WM_T_82576:
2992 case WM_T_80003:
2993 case WM_T_I210:
2994 case WM_T_I211:
2995 offset = wm_check_alt_mac_addr(sc);
2996 if (offset == 0)
2997 if ((sc->sc_funcid & 0x01) == 1)
2998 do_invert = 1;
2999 break;
3000 default:
3001 if ((sc->sc_funcid & 0x01) == 1)
3002 do_invert = 1;
3003 break;
3004 }
3005
3006 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
3007 myea) != 0)
3008 goto bad;
3009
3010 enaddr[0] = myea[0] & 0xff;
3011 enaddr[1] = myea[0] >> 8;
3012 enaddr[2] = myea[1] & 0xff;
3013 enaddr[3] = myea[1] >> 8;
3014 enaddr[4] = myea[2] & 0xff;
3015 enaddr[5] = myea[2] >> 8;
3016
3017 /*
3018 * Toggle the LSB of the MAC address on the second port
3019 * of some dual port cards.
3020 */
3021 if (do_invert != 0)
3022 enaddr[5] ^= 1;
3023
3024 return 0;
3025
3026 bad:
3027 return -1;
3028 }
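
/*
 * Byte-order example for wm_read_mac_addr() (word values assumed): the
 * NVM stores the address as three little-endian words, so
 * myea[] == { 0x2301, 0x6745, 0xab89 } unpacks to 01:23:45:67:89:ab
 * via the shifts and masks above.
 */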
3029
3030 /*
3031 * wm_set_ral:
3032 *
3033  * Set an entry in the receive address list.
3034 */
3035 static void
3036 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3037 {
3038 uint32_t ral_lo, ral_hi;
3039
3040 if (enaddr != NULL) {
3041 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3042 (enaddr[3] << 24);
3043 ral_hi = enaddr[4] | (enaddr[5] << 8);
3044 ral_hi |= RAL_AV;
3045 } else {
3046 ral_lo = 0;
3047 ral_hi = 0;
3048 }
3049
3050 if (sc->sc_type >= WM_T_82544) {
3051 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3052 ral_lo);
3053 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3054 ral_hi);
3055 } else {
3056 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3057 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3058 }
3059 }
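
/*
 * Packing example for wm_set_ral() (address assumed): for the station
 * address 01:23:45:67:89:ab it writes ral_lo == 0x67452301 and
 * ral_hi == RAL_AV | 0xab89, i.e. the first four bytes in the low
 * register and the last two, plus the Address Valid bit, in the high
 * register.
 */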
3060
3061 /*
3062 * wm_mchash:
3063 *
3064 * Compute the hash of the multicast address for the 4096-bit
3065 * multicast filter.
3066 */
3067 static uint32_t
3068 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3069 {
3070 static const int lo_shift[4] = { 4, 3, 2, 0 };
3071 static const int hi_shift[4] = { 4, 5, 6, 8 };
3072 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3073 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3074 uint32_t hash;
3075
3076 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3077 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3078 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3079 || (sc->sc_type == WM_T_PCH_SPT)) {
3080 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3081 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3082 return (hash & 0x3ff);
3083 }
3084 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3085 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3086
3087 return (hash & 0xfff);
3088 }
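
/*
 * Worked example for wm_mchash() (mchash_type 0 and a non-ICH/PCH chip
 * assumed): for a multicast address ending in ...:aa:bb the hash is
 * (0xaa >> 4) | (0xbb << 4) == 0xbba, masked to 12 bits.  In
 * wm_set_filter() that value selects MTA register (hash >> 5) and bit
 * (hash & 0x1f) within it.
 */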
3089
3090 /*
3091 * wm_set_filter:
3092 *
3093 * Set up the receive filter.
3094 */
3095 static void
3096 wm_set_filter(struct wm_softc *sc)
3097 {
3098 struct ethercom *ec = &sc->sc_ethercom;
3099 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3100 struct ether_multi *enm;
3101 struct ether_multistep step;
3102 bus_addr_t mta_reg;
3103 uint32_t hash, reg, bit;
3104 int i, size, ralmax;
3105
3106 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3107 device_xname(sc->sc_dev), __func__));
3108 if (sc->sc_type >= WM_T_82544)
3109 mta_reg = WMREG_CORDOVA_MTA;
3110 else
3111 mta_reg = WMREG_MTA;
3112
3113 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3114
3115 if (ifp->if_flags & IFF_BROADCAST)
3116 sc->sc_rctl |= RCTL_BAM;
3117 if (ifp->if_flags & IFF_PROMISC) {
3118 sc->sc_rctl |= RCTL_UPE;
3119 goto allmulti;
3120 }
3121
3122 /*
3123 * Set the station address in the first RAL slot, and
3124 * clear the remaining slots.
3125 */
3126 if (sc->sc_type == WM_T_ICH8)
3127 		size = WM_RAL_TABSIZE_ICH8 - 1;
3128 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3129 || (sc->sc_type == WM_T_PCH))
3130 size = WM_RAL_TABSIZE_ICH8;
3131 else if (sc->sc_type == WM_T_PCH2)
3132 size = WM_RAL_TABSIZE_PCH2;
3133 else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
3134 size = WM_RAL_TABSIZE_PCH_LPT;
3135 else if (sc->sc_type == WM_T_82575)
3136 size = WM_RAL_TABSIZE_82575;
3137 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3138 size = WM_RAL_TABSIZE_82576;
3139 else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3140 size = WM_RAL_TABSIZE_I350;
3141 else
3142 size = WM_RAL_TABSIZE;
3143 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3144
3145 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
3146 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3147 switch (i) {
3148 case 0:
3149 /* We can use all entries */
3150 ralmax = size;
3151 break;
3152 case 1:
3153 /* Only RAR[0] */
3154 ralmax = 1;
3155 break;
3156 default:
3157 /* available SHRA + RAR[0] */
3158 ralmax = i + 1;
3159 }
3160 } else
3161 ralmax = size;
3162 for (i = 1; i < size; i++) {
3163 if (i < ralmax)
3164 wm_set_ral(sc, NULL, i);
3165 }
3166
3167 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3168 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3169 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3170 || (sc->sc_type == WM_T_PCH_SPT))
3171 size = WM_ICH8_MC_TABSIZE;
3172 else
3173 size = WM_MC_TABSIZE;
3174 /* Clear out the multicast table. */
3175 for (i = 0; i < size; i++)
3176 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3177
3178 ETHER_FIRST_MULTI(step, ec, enm);
3179 while (enm != NULL) {
3180 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3181 /*
3182 * We must listen to a range of multicast addresses.
3183 * For now, just accept all multicasts, rather than
3184 * trying to set only those filter bits needed to match
3185 * the range. (At this time, the only use of address
3186 * ranges is for IP multicast routing, for which the
3187 * range is big enough to require all bits set.)
3188 */
3189 goto allmulti;
3190 }
3191
3192 hash = wm_mchash(sc, enm->enm_addrlo);
3193
3194 reg = (hash >> 5);
3195 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3196 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3197 || (sc->sc_type == WM_T_PCH2)
3198 || (sc->sc_type == WM_T_PCH_LPT)
3199 || (sc->sc_type == WM_T_PCH_SPT))
3200 reg &= 0x1f;
3201 else
3202 reg &= 0x7f;
3203 bit = hash & 0x1f;
3204
3205 hash = CSR_READ(sc, mta_reg + (reg << 2));
3206 hash |= 1U << bit;
3207
3208 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3209 /*
3210 			 * 82544 Errata 9: Certain registers cannot be written
3211 * with particular alignments in PCI-X bus operation
3212 * (FCAH, MTA and VFTA).
3213 */
3214 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3215 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3216 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3217 } else
3218 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3219
3220 ETHER_NEXT_MULTI(step, enm);
3221 }
3222
3223 ifp->if_flags &= ~IFF_ALLMULTI;
3224 goto setit;
3225
3226 allmulti:
3227 ifp->if_flags |= IFF_ALLMULTI;
3228 sc->sc_rctl |= RCTL_MPE;
3229
3230 setit:
3231 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3232 }
3233
3234 /* Reset and init related */
3235
3236 static void
3237 wm_set_vlan(struct wm_softc *sc)
3238 {
3239
3240 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3241 device_xname(sc->sc_dev), __func__));
3242 /* Deal with VLAN enables. */
3243 if (VLAN_ATTACHED(&sc->sc_ethercom))
3244 sc->sc_ctrl |= CTRL_VME;
3245 else
3246 sc->sc_ctrl &= ~CTRL_VME;
3247
3248 /* Write the control registers. */
3249 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3250 }
3251
3252 static void
3253 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3254 {
3255 uint32_t gcr;
3256 pcireg_t ctrl2;
3257
3258 gcr = CSR_READ(sc, WMREG_GCR);
3259
3260 /* Only take action if timeout value is defaulted to 0 */
3261 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3262 goto out;
3263
3264 if ((gcr & GCR_CAP_VER2) == 0) {
3265 gcr |= GCR_CMPL_TMOUT_10MS;
3266 goto out;
3267 }
3268
3269 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3270 sc->sc_pcixe_capoff + PCIE_DCSR2);
3271 ctrl2 |= WM_PCIE_DCSR2_16MS;
3272 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3273 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3274
3275 out:
3276 /* Disable completion timeout resend */
3277 gcr &= ~GCR_CMPL_TMOUT_RESEND;
3278
3279 CSR_WRITE(sc, WMREG_GCR, gcr);
3280 }
3281
3282 void
3283 wm_get_auto_rd_done(struct wm_softc *sc)
3284 {
3285 int i;
3286
3287 /* wait for eeprom to reload */
3288 switch (sc->sc_type) {
3289 case WM_T_82571:
3290 case WM_T_82572:
3291 case WM_T_82573:
3292 case WM_T_82574:
3293 case WM_T_82583:
3294 case WM_T_82575:
3295 case WM_T_82576:
3296 case WM_T_82580:
3297 case WM_T_I350:
3298 case WM_T_I354:
3299 case WM_T_I210:
3300 case WM_T_I211:
3301 case WM_T_80003:
3302 case WM_T_ICH8:
3303 case WM_T_ICH9:
3304 for (i = 0; i < 10; i++) {
3305 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3306 break;
3307 delay(1000);
3308 }
3309 if (i == 10) {
3310 log(LOG_ERR, "%s: auto read from eeprom failed to "
3311 "complete\n", device_xname(sc->sc_dev));
3312 }
3313 break;
3314 default:
3315 break;
3316 }
3317 }
3318
3319 void
3320 wm_lan_init_done(struct wm_softc *sc)
3321 {
3322 uint32_t reg = 0;
3323 int i;
3324
3325 /* wait for eeprom to reload */
3326 switch (sc->sc_type) {
3327 case WM_T_ICH10:
3328 case WM_T_PCH:
3329 case WM_T_PCH2:
3330 case WM_T_PCH_LPT:
3331 case WM_T_PCH_SPT:
3332 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3333 reg = CSR_READ(sc, WMREG_STATUS);
3334 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3335 break;
3336 delay(100);
3337 }
3338 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3339 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3340 "complete\n", device_xname(sc->sc_dev), __func__);
3341 }
3342 break;
3343 default:
3344 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3345 __func__);
3346 break;
3347 }
3348
3349 reg &= ~STATUS_LAN_INIT_DONE;
3350 CSR_WRITE(sc, WMREG_STATUS, reg);
3351 }
3352
3353 void
3354 wm_get_cfg_done(struct wm_softc *sc)
3355 {
3356 int mask;
3357 uint32_t reg;
3358 int i;
3359
3360 /* wait for eeprom to reload */
3361 switch (sc->sc_type) {
3362 case WM_T_82542_2_0:
3363 case WM_T_82542_2_1:
3364 /* null */
3365 break;
3366 case WM_T_82543:
3367 case WM_T_82544:
3368 case WM_T_82540:
3369 case WM_T_82545:
3370 case WM_T_82545_3:
3371 case WM_T_82546:
3372 case WM_T_82546_3:
3373 case WM_T_82541:
3374 case WM_T_82541_2:
3375 case WM_T_82547:
3376 case WM_T_82547_2:
3377 case WM_T_82573:
3378 case WM_T_82574:
3379 case WM_T_82583:
3380 /* generic */
3381 delay(10*1000);
3382 break;
3383 case WM_T_80003:
3384 case WM_T_82571:
3385 case WM_T_82572:
3386 case WM_T_82575:
3387 case WM_T_82576:
3388 case WM_T_82580:
3389 case WM_T_I350:
3390 case WM_T_I354:
3391 case WM_T_I210:
3392 case WM_T_I211:
3393 if (sc->sc_type == WM_T_82571) {
3394 /* Only 82571 shares port 0 */
3395 mask = EEMNGCTL_CFGDONE_0;
3396 } else
3397 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3398 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3399 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3400 break;
3401 delay(1000);
3402 }
3403 if (i >= WM_PHY_CFG_TIMEOUT) {
3404 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3405 device_xname(sc->sc_dev), __func__));
3406 }
3407 break;
3408 case WM_T_ICH8:
3409 case WM_T_ICH9:
3410 case WM_T_ICH10:
3411 case WM_T_PCH:
3412 case WM_T_PCH2:
3413 case WM_T_PCH_LPT:
3414 case WM_T_PCH_SPT:
3415 delay(10*1000);
3416 if (sc->sc_type >= WM_T_ICH10)
3417 wm_lan_init_done(sc);
3418 else
3419 wm_get_auto_rd_done(sc);
3420
3421 reg = CSR_READ(sc, WMREG_STATUS);
3422 if ((reg & STATUS_PHYRA) != 0)
3423 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3424 break;
3425 default:
3426 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3427 __func__);
3428 break;
3429 }
3430 }
3431
3432 /* Init hardware bits */
3433 void
3434 wm_initialize_hardware_bits(struct wm_softc *sc)
3435 {
3436 uint32_t tarc0, tarc1, reg;
3437
3438 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3439 device_xname(sc->sc_dev), __func__));
3440 /* For 82571 variant, 80003 and ICHs */
3441 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3442 || (sc->sc_type >= WM_T_80003)) {
3443
3444 /* Transmit Descriptor Control 0 */
3445 reg = CSR_READ(sc, WMREG_TXDCTL(0));
3446 reg |= TXDCTL_COUNT_DESC;
3447 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3448
3449 /* Transmit Descriptor Control 1 */
3450 reg = CSR_READ(sc, WMREG_TXDCTL(1));
3451 reg |= TXDCTL_COUNT_DESC;
3452 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3453
3454 /* TARC0 */
3455 tarc0 = CSR_READ(sc, WMREG_TARC0);
3456 switch (sc->sc_type) {
3457 case WM_T_82571:
3458 case WM_T_82572:
3459 case WM_T_82573:
3460 case WM_T_82574:
3461 case WM_T_82583:
3462 case WM_T_80003:
3463 /* Clear bits 30..27 */
3464 tarc0 &= ~__BITS(30, 27);
3465 break;
3466 default:
3467 break;
3468 }
3469
3470 switch (sc->sc_type) {
3471 case WM_T_82571:
3472 case WM_T_82572:
3473 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3474
3475 tarc1 = CSR_READ(sc, WMREG_TARC1);
3476 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3477 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3478 /* 8257[12] Errata No.7 */
3479 tarc1 |= __BIT(22); /* TARC1 bit 22 */
3480
3481 /* TARC1 bit 28 */
3482 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3483 tarc1 &= ~__BIT(28);
3484 else
3485 tarc1 |= __BIT(28);
3486 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3487
3488 /*
3489 * 8257[12] Errata No.13
3490 * Disable Dynamic Clock Gating.
3491 */
3492 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3493 reg &= ~CTRL_EXT_DMA_DYN_CLK;
3494 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3495 break;
3496 case WM_T_82573:
3497 case WM_T_82574:
3498 case WM_T_82583:
3499 if ((sc->sc_type == WM_T_82574)
3500 || (sc->sc_type == WM_T_82583))
3501 tarc0 |= __BIT(26); /* TARC0 bit 26 */
3502
3503 /* Extended Device Control */
3504 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3505 reg &= ~__BIT(23); /* Clear bit 23 */
3506 reg |= __BIT(22); /* Set bit 22 */
3507 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3508
3509 /* Device Control */
3510 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
3511 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3512
3513 /* PCIe Control Register */
3514 /*
3515 * 82573 Errata (unknown).
3516 *
3517 * 82574 Errata 25 and 82583 Errata 12
3518 * "Dropped Rx Packets":
3519 * NVM Image Version 2.1.4 and newer does not have this bug.
3520 */
3521 reg = CSR_READ(sc, WMREG_GCR);
3522 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3523 CSR_WRITE(sc, WMREG_GCR, reg);
3524
3525 if ((sc->sc_type == WM_T_82574)
3526 || (sc->sc_type == WM_T_82583)) {
3527 /*
3528 * Document says this bit must be set for
3529 * proper operation.
3530 */
3531 reg = CSR_READ(sc, WMREG_GCR);
3532 reg |= __BIT(22);
3533 CSR_WRITE(sc, WMREG_GCR, reg);
3534
3535 /*
3536 * Apply a workaround for a hardware erratum
3537 * documented in the errata sheets. It fixes an
3538 * issue where error-prone or unreliable PCIe
3539 * completions occur, particularly with ASPM
3540 * enabled. Without the fix, the issue can
3541 * cause Tx timeouts.
3542 */
3543 reg = CSR_READ(sc, WMREG_GCR2);
3544 reg |= __BIT(0);
3545 CSR_WRITE(sc, WMREG_GCR2, reg);
3546 }
3547 break;
3548 case WM_T_80003:
3549 /* TARC0 */
3550 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3551 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3552 tarc0 &= ~__BIT(20); /* Clear bit 20 */
3553
3554 /* TARC1 bit 28 */
3555 tarc1 = CSR_READ(sc, WMREG_TARC1);
3556 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3557 tarc1 &= ~__BIT(28);
3558 else
3559 tarc1 |= __BIT(28);
3560 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3561 break;
3562 case WM_T_ICH8:
3563 case WM_T_ICH9:
3564 case WM_T_ICH10:
3565 case WM_T_PCH:
3566 case WM_T_PCH2:
3567 case WM_T_PCH_LPT:
3568 case WM_T_PCH_SPT:
3569 /* TARC0 */
3570 if ((sc->sc_type == WM_T_ICH8)
3571 || (sc->sc_type == WM_T_PCH_SPT)) {
3572 /* Set TARC0 bits 29 and 28 */
3573 tarc0 |= __BITS(29, 28);
3574 }
3575 /* Set TARC0 bits 23,24,26,27 */
3576 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3577
3578 /* CTRL_EXT */
3579 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3580 reg |= __BIT(22); /* Set bit 22 */
3581 /*
3582 * Enable PHY low-power state when MAC is at D3
3583 * w/o WoL
3584 */
3585 if (sc->sc_type >= WM_T_PCH)
3586 reg |= CTRL_EXT_PHYPDEN;
3587 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3588
3589 /* TARC1 */
3590 tarc1 = CSR_READ(sc, WMREG_TARC1);
3591 /* bit 28 */
3592 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3593 tarc1 &= ~__BIT(28);
3594 else
3595 tarc1 |= __BIT(28);
3596 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3597 CSR_WRITE(sc, WMREG_TARC1, tarc1);
3598
3599 /* Device Status */
3600 if (sc->sc_type == WM_T_ICH8) {
3601 reg = CSR_READ(sc, WMREG_STATUS);
3602 reg &= ~__BIT(31);
3603 CSR_WRITE(sc, WMREG_STATUS, reg);
3605 }
3606
3607 /* IOSFPC */
3608 if (sc->sc_type == WM_T_PCH_SPT) {
3609 reg = CSR_READ(sc, WMREG_IOSFPC);
3610 reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
3611 CSR_WRITE(sc, WMREG_IOSFPC, reg);
3612 }
3613 /*
3614 * To work around a descriptor data corruption issue seen with
3615 * NFSv2 UDP traffic, just disable the NFS filtering
3616 * capability.
3617 */
3618 reg = CSR_READ(sc, WMREG_RFCTL);
3619 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3620 CSR_WRITE(sc, WMREG_RFCTL, reg);
3621 break;
3622 default:
3623 break;
3624 }
3625 CSR_WRITE(sc, WMREG_TARC0, tarc0);
3626
3627 /*
3628 * 8257[12] Errata No.52 and some others.
3629 * Avoid RSS Hash Value bug.
3630 */
3631 switch (sc->sc_type) {
3632 case WM_T_82571:
3633 case WM_T_82572:
3634 case WM_T_82573:
3635 case WM_T_80003:
3636 case WM_T_ICH8:
3637 reg = CSR_READ(sc, WMREG_RFCTL);
3638 reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3639 CSR_WRITE(sc, WMREG_RFCTL, reg);
3640 break;
3641 default:
3642 break;
3643 }
3644 }
3645 }
3646
3647 static uint32_t
3648 wm_rxpbs_adjust_82580(uint32_t val)
3649 {
3650 uint32_t rv = 0;
3651
3652 if (val < __arraycount(wm_82580_rxpbs_table))
3653 rv = wm_82580_rxpbs_table[val];
3654
3655 return rv;
3656 }
3657
3658 /*
3659 * wm_reset:
3660 *
3661 * Reset the i82542 chip.
3662 */
3663 static void
3664 wm_reset(struct wm_softc *sc)
3665 {
3666 int phy_reset = 0;
3667 int i, error = 0;
3668 uint32_t reg, mask;
3669
3670 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3671 device_xname(sc->sc_dev), __func__));
3672 /*
3673 * Allocate on-chip memory according to the MTU size.
3674 * The Packet Buffer Allocation register must be written
3675 * before the chip is reset.
3676 */
3677 switch (sc->sc_type) {
3678 case WM_T_82547:
3679 case WM_T_82547_2:
3680 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3681 PBA_22K : PBA_30K;
3682 for (i = 0; i < sc->sc_nqueues; i++) {
3683 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3684 txq->txq_fifo_head = 0;
3685 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3686 txq->txq_fifo_size =
3687 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3688 txq->txq_fifo_stall = 0;
3689 }
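/*
 * Example: with the default 30K Rx allocation the Tx FIFO gets
 * the remaining 10K of the 40K packet buffer (18K when jumbo
 * frames shrink the Rx share to 22K).
 */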
3690 break;
3691 case WM_T_82571:
3692 case WM_T_82572:
3693 case WM_T_82575: /* XXX need special handling for jumbo frames */
3694 case WM_T_80003:
3695 sc->sc_pba = PBA_32K;
3696 break;
3697 case WM_T_82573:
3698 sc->sc_pba = PBA_12K;
3699 break;
3700 case WM_T_82574:
3701 case WM_T_82583:
3702 sc->sc_pba = PBA_20K;
3703 break;
3704 case WM_T_82576:
3705 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3706 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3707 break;
3708 case WM_T_82580:
3709 case WM_T_I350:
3710 case WM_T_I354:
3711 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3712 break;
3713 case WM_T_I210:
3714 case WM_T_I211:
3715 sc->sc_pba = PBA_34K;
3716 break;
3717 case WM_T_ICH8:
3718 /* Workaround for a bit corruption issue in FIFO memory */
3719 sc->sc_pba = PBA_8K;
3720 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3721 break;
3722 case WM_T_ICH9:
3723 case WM_T_ICH10:
3724 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3725 PBA_14K : PBA_10K;
3726 break;
3727 case WM_T_PCH:
3728 case WM_T_PCH2:
3729 case WM_T_PCH_LPT:
3730 case WM_T_PCH_SPT:
3731 sc->sc_pba = PBA_26K;
3732 break;
3733 default:
3734 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3735 PBA_40K : PBA_48K;
3736 break;
3737 }
3738 /*
3739 * Only old or non-multiqueue devices have the PBA register.
3740 * XXX Need special handling for 82575.
3741 */
3742 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3743 || (sc->sc_type == WM_T_82575))
3744 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3745
3746 /* Prevent the PCI-E bus from sticking: disable master requests and wait for pending ones to drain */
3747 if (sc->sc_flags & WM_F_PCIE) {
3748 int timeout = 800;
3749
3750 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3751 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3752
3753 while (timeout--) {
3754 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3755 == 0)
3756 break;
3757 delay(100);
3758 }
3759 }
3760
3761 /* Set the completion timeout for interface */
3762 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3763 || (sc->sc_type == WM_T_82580)
3764 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3765 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3766 wm_set_pcie_completion_timeout(sc);
3767
3768 /* Clear interrupt */
3769 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3770 if (sc->sc_nintrs > 1) {
3771 if (sc->sc_type != WM_T_82574) {
3772 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3773 CSR_WRITE(sc, WMREG_EIAC, 0);
3774 } else {
3775 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3776 }
3777 }
3778
3779 /* Stop the transmit and receive processes. */
3780 CSR_WRITE(sc, WMREG_RCTL, 0);
3781 sc->sc_rctl &= ~RCTL_EN;
3782 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3783 CSR_WRITE_FLUSH(sc);
3784
3785 /* XXX set_tbi_sbp_82543() */
3786
3787 delay(10*1000);
3788
3789 /* Must acquire the MDIO ownership before MAC reset */
3790 switch (sc->sc_type) {
3791 case WM_T_82573:
3792 case WM_T_82574:
3793 case WM_T_82583:
3794 error = wm_get_hw_semaphore_82573(sc);
3795 break;
3796 default:
3797 break;
3798 }
3799
3800 /*
3801 * 82541 Errata 29? & 82547 Errata 28?
3802 * See also the description about PHY_RST bit in CTRL register
3803 * in 8254x_GBe_SDM.pdf.
3804 */
3805 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3806 CSR_WRITE(sc, WMREG_CTRL,
3807 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3808 CSR_WRITE_FLUSH(sc);
3809 delay(5000);
3810 }
3811
3812 switch (sc->sc_type) {
3813 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3814 case WM_T_82541:
3815 case WM_T_82541_2:
3816 case WM_T_82547:
3817 case WM_T_82547_2:
3818 /*
3819 * On some chipsets, a reset through a memory-mapped write
3820 * cycle can cause the chip to reset before completing the
3821 * write cycle. This causes major headache that can be
3822 * avoided by issuing the reset via indirect register writes
3823 * through I/O space.
3824 *
3825 * So, if we successfully mapped the I/O BAR at attach time,
3826 * use that. Otherwise, try our luck with a memory-mapped
3827 * reset.
3828 */
3829 if (sc->sc_flags & WM_F_IOH_VALID)
3830 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3831 else
3832 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3833 break;
3834 case WM_T_82545_3:
3835 case WM_T_82546_3:
3836 /* Use the shadow control register on these chips. */
3837 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3838 break;
3839 case WM_T_80003:
3840 mask = swfwphysem[sc->sc_funcid];
3841 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3842 wm_get_swfw_semaphore(sc, mask);
3843 CSR_WRITE(sc, WMREG_CTRL, reg);
3844 wm_put_swfw_semaphore(sc, mask);
3845 break;
3846 case WM_T_ICH8:
3847 case WM_T_ICH9:
3848 case WM_T_ICH10:
3849 case WM_T_PCH:
3850 case WM_T_PCH2:
3851 case WM_T_PCH_LPT:
3852 case WM_T_PCH_SPT:
3853 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3854 if (wm_phy_resetisblocked(sc) == false) {
3855 /*
3856 * Gate automatic PHY configuration by hardware on
3857 * non-managed 82579
3858 */
3859 if ((sc->sc_type == WM_T_PCH2)
3860 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3861 == 0))
3862 wm_gate_hw_phy_config_ich8lan(sc, true);
3863
3864 reg |= CTRL_PHY_RESET;
3865 phy_reset = 1;
3866 } else
3867 printf("XXX reset is blocked!!!\n");
3868 wm_get_swfwhw_semaphore(sc);
3869 CSR_WRITE(sc, WMREG_CTRL, reg);
3870 /* Don't insert a completion barrier during reset */
3871 delay(20*1000);
3872 wm_put_swfwhw_semaphore(sc);
3873 break;
3874 case WM_T_82580:
3875 case WM_T_I350:
3876 case WM_T_I354:
3877 case WM_T_I210:
3878 case WM_T_I211:
3879 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3880 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3881 CSR_WRITE_FLUSH(sc);
3882 delay(5000);
3883 break;
3884 case WM_T_82542_2_0:
3885 case WM_T_82542_2_1:
3886 case WM_T_82543:
3887 case WM_T_82540:
3888 case WM_T_82545:
3889 case WM_T_82546:
3890 case WM_T_82571:
3891 case WM_T_82572:
3892 case WM_T_82573:
3893 case WM_T_82574:
3894 case WM_T_82575:
3895 case WM_T_82576:
3896 case WM_T_82583:
3897 default:
3898 /* Everything else can safely use the documented method. */
3899 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3900 break;
3901 }
3902
3903 /* Must release the MDIO ownership after MAC reset */
3904 switch (sc->sc_type) {
3905 case WM_T_82573:
3906 case WM_T_82574:
3907 case WM_T_82583:
3908 if (error == 0)
3909 wm_put_hw_semaphore_82573(sc);
3910 break;
3911 default:
3912 break;
3913 }
3914
3915 if (phy_reset != 0)
3916 wm_get_cfg_done(sc);
3917
3918 /* reload EEPROM */
3919 switch (sc->sc_type) {
3920 case WM_T_82542_2_0:
3921 case WM_T_82542_2_1:
3922 case WM_T_82543:
3923 case WM_T_82544:
3924 delay(10);
3925 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3926 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3927 CSR_WRITE_FLUSH(sc);
3928 delay(2000);
3929 break;
3930 case WM_T_82540:
3931 case WM_T_82545:
3932 case WM_T_82545_3:
3933 case WM_T_82546:
3934 case WM_T_82546_3:
3935 delay(5*1000);
3936 /* XXX Disable HW ARPs on ASF enabled adapters */
3937 break;
3938 case WM_T_82541:
3939 case WM_T_82541_2:
3940 case WM_T_82547:
3941 case WM_T_82547_2:
3942 delay(20000);
3943 /* XXX Disable HW ARPs on ASF enabled adapters */
3944 break;
3945 case WM_T_82571:
3946 case WM_T_82572:
3947 case WM_T_82573:
3948 case WM_T_82574:
3949 case WM_T_82583:
3950 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3951 delay(10);
3952 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3953 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3954 CSR_WRITE_FLUSH(sc);
3955 }
3956 /* check EECD_EE_AUTORD */
3957 wm_get_auto_rd_done(sc);
3958 /*
3959 * PHY configuration from the NVM starts only after EECD_AUTO_RD
3960 * is set.
3961 */
3962 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3963 || (sc->sc_type == WM_T_82583))
3964 delay(25*1000);
3965 break;
3966 case WM_T_82575:
3967 case WM_T_82576:
3968 case WM_T_82580:
3969 case WM_T_I350:
3970 case WM_T_I354:
3971 case WM_T_I210:
3972 case WM_T_I211:
3973 case WM_T_80003:
3974 /* check EECD_EE_AUTORD */
3975 wm_get_auto_rd_done(sc);
3976 break;
3977 case WM_T_ICH8:
3978 case WM_T_ICH9:
3979 case WM_T_ICH10:
3980 case WM_T_PCH:
3981 case WM_T_PCH2:
3982 case WM_T_PCH_LPT:
3983 case WM_T_PCH_SPT:
3984 break;
3985 default:
3986 panic("%s: unknown type\n", __func__);
3987 }
3988
3989 /* Check whether EEPROM is present or not */
3990 switch (sc->sc_type) {
3991 case WM_T_82575:
3992 case WM_T_82576:
3993 case WM_T_82580:
3994 case WM_T_I350:
3995 case WM_T_I354:
3996 case WM_T_ICH8:
3997 case WM_T_ICH9:
3998 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3999 /* Not found */
4000 sc->sc_flags |= WM_F_EEPROM_INVALID;
4001 if (sc->sc_type == WM_T_82575)
4002 wm_reset_init_script_82575(sc);
4003 }
4004 break;
4005 default:
4006 break;
4007 }
4008
4009 if ((sc->sc_type == WM_T_82580)
4010 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4011 /* clear global device reset status bit */
4012 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4013 }
4014
4015 /* Clear any pending interrupt events. */
4016 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4017 reg = CSR_READ(sc, WMREG_ICR);
4018 if (sc->sc_nintrs > 1) {
4019 if (sc->sc_type != WM_T_82574) {
4020 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4021 CSR_WRITE(sc, WMREG_EIAC, 0);
4022 } else
4023 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4024 }
4025
4026 /* reload sc_ctrl */
4027 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4028
4029 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4030 wm_set_eee_i350(sc);
4031
4032 /* dummy read from WUC */
4033 if (sc->sc_type == WM_T_PCH)
4034 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4035 /*
4036 * For PCH, this write will make sure that any noise will be detected
4037 * as a CRC error and be dropped rather than show up as a bad packet
4038 * to the DMA engine.
4039 */
4040 if (sc->sc_type == WM_T_PCH)
4041 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4042
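/* Clear the Wakeup Control register to drop any stale wake state */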
4043 if (sc->sc_type >= WM_T_82544)
4044 CSR_WRITE(sc, WMREG_WUC, 0);
4045
4046 wm_reset_mdicnfg_82580(sc);
4047
4048 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4049 wm_pll_workaround_i210(sc);
4050 }
4051
4052 /*
4053 * wm_add_rxbuf:
4054 *
4055 * Add a receive buffer to the indicated descriptor.
4056 */
4057 static int
4058 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4059 {
4060 struct wm_softc *sc = rxq->rxq_sc;
4061 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4062 struct mbuf *m;
4063 int error;
4064
4065 KASSERT(mutex_owned(rxq->rxq_lock));
4066
4067 MGETHDR(m, M_DONTWAIT, MT_DATA);
4068 if (m == NULL)
4069 return ENOBUFS;
4070
4071 MCLGET(m, M_DONTWAIT);
4072 if ((m->m_flags & M_EXT) == 0) {
4073 m_freem(m);
4074 return ENOBUFS;
4075 }
4076
4077 if (rxs->rxs_mbuf != NULL)
4078 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4079
4080 rxs->rxs_mbuf = m;
4081
4082 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4083 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4084 BUS_DMA_READ | BUS_DMA_NOWAIT);
4085 if (error) {
4086 /* XXX XXX XXX */
4087 aprint_error_dev(sc->sc_dev,
4088 "unable to load rx DMA map %d, error = %d\n",
4089 idx, error);
4090 panic("wm_add_rxbuf");
4091 }
4092
4093 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4094 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4095
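/*
 * On NEWQUEUE (82575 and later) hardware, RDT must only be written
 * while the receiver is enabled, so only initialize the descriptor
 * when RCTL_EN is set; see the matching logic in wm_init_locked().
 */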
4096 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4097 if ((sc->sc_rctl & RCTL_EN) != 0)
4098 wm_init_rxdesc(rxq, idx);
4099 } else
4100 wm_init_rxdesc(rxq, idx);
4101
4102 return 0;
4103 }
4104
4105 /*
4106 * wm_rxdrain:
4107 *
4108 * Drain the receive queue.
4109 */
4110 static void
4111 wm_rxdrain(struct wm_rxqueue *rxq)
4112 {
4113 struct wm_softc *sc = rxq->rxq_sc;
4114 struct wm_rxsoft *rxs;
4115 int i;
4116
4117 KASSERT(mutex_owned(rxq->rxq_lock));
4118
4119 for (i = 0; i < WM_NRXDESC; i++) {
4120 rxs = &rxq->rxq_soft[i];
4121 if (rxs->rxs_mbuf != NULL) {
4122 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4123 m_freem(rxs->rxs_mbuf);
4124 rxs->rxs_mbuf = NULL;
4125 }
4126 }
4127 }
4128
4129
4130 /*
4131 * XXX copied from FreeBSD's sys/net/rss_config.c
4132 */
4133 /*
4134 * RSS secret key, intended to prevent attacks on load-balancing. Its
4135 * effectiveness may be limited by algorithm choice and available entropy
4136 * during the boot.
4137 *
4138 * XXXRW: And that we don't randomize it yet!
4139 *
4140 * This is the default Microsoft RSS specification key which is also
4141 * the Chelsio T5 firmware default key.
4142 */
4143 #define RSS_KEYSIZE 40
4144 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4145 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4146 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4147 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4148 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4149 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4150 };
4151
4152 /*
4153 * Caller must pass an array of size sizeof(wm_rss_key).
4154 *
4155 * XXX
4156 * As if_ixgbe may use this function, it should not be an
4157 * if_wm-specific function.
4158 */
4159 static void
4160 wm_rss_getkey(uint8_t *key)
4161 {
4162
4163 memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4164 }
4165
4166 /*
4167 * Setup registers for RSS.
4168 *
4169 * XXX no VMDq support yet
4170 */
4171 static void
4172 wm_init_rss(struct wm_softc *sc)
4173 {
4174 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4175 int i;
4176
4177 CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4178
4179 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4180 int qid, reta_ent;
4181
4182 qid = i % sc->sc_nqueues;
4183 switch (sc->sc_type) {
4184 case WM_T_82574:
4185 reta_ent = __SHIFTIN(qid,
4186 RETA_ENT_QINDEX_MASK_82574);
4187 break;
4188 case WM_T_82575:
4189 reta_ent = __SHIFTIN(qid,
4190 RETA_ENT_QINDEX1_MASK_82575);
4191 break;
4192 default:
4193 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4194 break;
4195 }
4196
4197 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4198 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4199 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4200 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4201 }
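/*
 * The loop above spreads the redirection table entries across the
 * queues round-robin: entry i maps to queue (i % sc_nqueues).
 */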
4202
4203 wm_rss_getkey((uint8_t *)rss_key);
4204 for (i = 0; i < RSSRK_NUM_REGS; i++)
4205 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
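/* The 40-byte key fills the ten 32-bit RSSRK registers. */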
4206
4207 if (sc->sc_type == WM_T_82574)
4208 mrqc = MRQC_ENABLE_RSS_MQ_82574;
4209 else
4210 mrqc = MRQC_ENABLE_RSS_MQ;
4211
4212 /* XXX
4213 * The same as FreeBSD's igb.
4214 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
4215 */
4216 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4217 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4218 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4219 mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4220
4221 CSR_WRITE(sc, WMREG_MRQC, mrqc);
4222 }
4223
4224 /*
4225 * Adjust the TX and RX queue numbers which the system actually uses.
4226 *
4227 * The numbers are affected by the following parameters:
4228 * - The number of hardware queues
4229 * - The number of MSI-X vectors (= "nvectors" argument)
4230 * - ncpu
4231 */
4232 static void
4233 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4234 {
4235 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
4236
4237 if (nvectors < 2) {
4238 sc->sc_nqueues = 1;
4239 return;
4240 }
4241
4242 switch (sc->sc_type) {
4243 case WM_T_82572:
4244 hw_ntxqueues = 2;
4245 hw_nrxqueues = 2;
4246 break;
4247 case WM_T_82574:
4248 hw_ntxqueues = 2;
4249 hw_nrxqueues = 2;
4250 break;
4251 case WM_T_82575:
4252 hw_ntxqueues = 4;
4253 hw_nrxqueues = 4;
4254 break;
4255 case WM_T_82576:
4256 hw_ntxqueues = 16;
4257 hw_nrxqueues = 16;
4258 break;
4259 case WM_T_82580:
4260 case WM_T_I350:
4261 case WM_T_I354:
4262 hw_ntxqueues = 8;
4263 hw_nrxqueues = 8;
4264 break;
4265 case WM_T_I210:
4266 hw_ntxqueues = 4;
4267 hw_nrxqueues = 4;
4268 break;
4269 case WM_T_I211:
4270 hw_ntxqueues = 2;
4271 hw_nrxqueues = 2;
4272 break;
4273 /*
4274 * As the ethernet controllers below do not support MSI-X,
4275 * this driver does not use multiqueue on them.
4276 * - WM_T_80003
4277 * - WM_T_ICH8
4278 * - WM_T_ICH9
4279 * - WM_T_ICH10
4280 * - WM_T_PCH
4281 * - WM_T_PCH2
4282 * - WM_T_PCH_LPT
4283 */
4284 default:
4285 hw_ntxqueues = 1;
4286 hw_nrxqueues = 1;
4287 break;
4288 }
4289
4290 hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
4291
4292 /*
4293 * As more queues than MSI-X vectors cannot improve scaling, we limit
4294 * the number of queues actually used.
4295 */
4296 if (nvectors < hw_nqueues + 1) {
4297 sc->sc_nqueues = nvectors - 1;
4298 } else {
4299 sc->sc_nqueues = hw_nqueues;
4300 }
4301
4302 /*
4303 * As more queues than CPUs cannot improve scaling, we limit
4304 * the number of queues actually used.
4305 */
4306 if (ncpu < sc->sc_nqueues)
4307 sc->sc_nqueues = ncpu;
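/*
 * Example: an 82576 (16 hardware queues) given 5 MSI-X vectors on
 * an 8-CPU system ends up with sc_nqueues = 4, as one vector is
 * reserved for the link interrupt.
 */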
4308 }
4309
4310 /*
4311 * Both single-interrupt MSI and INTx can use this function.
4312 */
4313 static int
4314 wm_setup_legacy(struct wm_softc *sc)
4315 {
4316 pci_chipset_tag_t pc = sc->sc_pc;
4317 const char *intrstr = NULL;
4318 char intrbuf[PCI_INTRSTR_LEN];
4319 int error;
4320
4321 error = wm_alloc_txrx_queues(sc);
4322 if (error) {
4323 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4324 error);
4325 return ENOMEM;
4326 }
4327 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4328 sizeof(intrbuf));
4329 #ifdef WM_MPSAFE
4330 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4331 #endif
4332 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4333 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4334 if (sc->sc_ihs[0] == NULL) {
4335 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
4336 (pci_intr_type(sc->sc_intrs[0])
4337 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4338 return ENOMEM;
4339 }
4340
4341 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4342 sc->sc_nintrs = 1;
4343 return 0;
4344 }
4345
4346 static int
4347 wm_setup_msix(struct wm_softc *sc)
4348 {
4349 void *vih;
4350 kcpuset_t *affinity;
4351 int qidx, error, intr_idx, txrx_established;
4352 pci_chipset_tag_t pc = sc->sc_pc;
4353 const char *intrstr = NULL;
4354 char intrbuf[PCI_INTRSTR_LEN];
4355 char intr_xname[INTRDEVNAMEBUF];
4356
4357 if (sc->sc_nqueues < ncpu) {
4358 /*
4359 * To avoid other devices' interrupts, the affinity of Tx/Rx
4360 * interrupts starts at CPU#1.
4361 */
4362 sc->sc_affinity_offset = 1;
4363 } else {
4364 /*
4365 * In this case, this device uses all CPUs, so for readability we
4366 * align the affinity cpu_index with the MSI-X vector number.
4367 */
4368 sc->sc_affinity_offset = 0;
4369 }
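/*
 * Example: with 4 queues on an 8-CPU system the offset is 1, so
 * TXRX0..TXRX3 are bound to CPU1..CPU4 and CPU0 is left alone.
 */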
4370
4371 error = wm_alloc_txrx_queues(sc);
4372 if (error) {
4373 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4374 error);
4375 return ENOMEM;
4376 }
4377
4378 kcpuset_create(&affinity, false);
4379 intr_idx = 0;
4380
4381 /*
4382 * TX and RX
4383 */
4384 txrx_established = 0;
4385 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4386 struct wm_queue *wmq = &sc->sc_queue[qidx];
4387 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
4388
4389 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4390 sizeof(intrbuf));
4391 #ifdef WM_MPSAFE
4392 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4393 PCI_INTR_MPSAFE, true);
4394 #endif
4395 memset(intr_xname, 0, sizeof(intr_xname));
4396 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
4397 device_xname(sc->sc_dev), qidx);
4398 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4399 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
4400 if (vih == NULL) {
4401 aprint_error_dev(sc->sc_dev,
4402 "unable to establish MSI-X(for TX and RX)%s%s\n",
4403 intrstr ? " at " : "",
4404 intrstr ? intrstr : "");
4405
4406 goto fail;
4407 }
4408 kcpuset_zero(affinity);
4409 /* Round-robin affinity */
4410 kcpuset_set(affinity, affinity_to);
4411 error = interrupt_distribute(vih, affinity, NULL);
4412 if (error == 0) {
4413 aprint_normal_dev(sc->sc_dev,
4414 "for TX and RX interrupting at %s affinity to %u\n",
4415 intrstr, affinity_to);
4416 } else {
4417 aprint_normal_dev(sc->sc_dev,
4418 "for TX and RX interrupting at %s\n", intrstr);
4419 }
4420 sc->sc_ihs[intr_idx] = vih;
4421 wmq->wmq_id = qidx;
4422 wmq->wmq_intr_idx = intr_idx;
4423
4424 txrx_established++;
4425 intr_idx++;
4426 }
4427
4428 /*
4429 * LINK
4430 */
4431 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4432 sizeof(intrbuf));
4433 #ifdef WM_MPSAFE
4434 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
4435 #endif
4436 memset(intr_xname, 0, sizeof(intr_xname));
4437 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4438 device_xname(sc->sc_dev));
4439 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4440 IPL_NET, wm_linkintr_msix, sc, intr_xname);
4441 if (vih == NULL) {
4442 aprint_error_dev(sc->sc_dev,
4443 "unable to establish MSI-X(for LINK)%s%s\n",
4444 intrstr ? " at " : "",
4445 intrstr ? intrstr : "");
4446
4447 goto fail;
4448 }
4449 /* Keep the default affinity for the LINK interrupt */
4450 aprint_normal_dev(sc->sc_dev,
4451 "for LINK interrupting at %s\n", intrstr);
4452 sc->sc_ihs[intr_idx] = vih;
4453 sc->sc_link_intr_idx = intr_idx;
4454
4455 sc->sc_nintrs = sc->sc_nqueues + 1;
4456 kcpuset_destroy(affinity);
4457 return 0;
4458
4459 fail:
4460 for (qidx = 0; qidx < txrx_established; qidx++) {
4461 struct wm_queue *wmq = &sc->sc_queue[qidx];
4462 pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
4463 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
4464 }
4465
4466 kcpuset_destroy(affinity);
4467 return ENOMEM;
4468 }
4469
4470 /*
4471 * wm_init: [ifnet interface function]
4472 *
4473 * Initialize the interface.
4474 */
4475 static int
4476 wm_init(struct ifnet *ifp)
4477 {
4478 struct wm_softc *sc = ifp->if_softc;
4479 int ret;
4480
4481 WM_CORE_LOCK(sc);
4482 ret = wm_init_locked(ifp);
4483 WM_CORE_UNLOCK(sc);
4484
4485 return ret;
4486 }
4487
4488 static int
4489 wm_init_locked(struct ifnet *ifp)
4490 {
4491 struct wm_softc *sc = ifp->if_softc;
4492 int i, j, trynum, error = 0;
4493 uint32_t reg;
4494
4495 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4496 device_xname(sc->sc_dev), __func__));
4497 KASSERT(WM_CORE_LOCKED(sc));
4498 /*
4499 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4500 * There is a small but measurable benefit to avoiding the adjustment
4501 * of the descriptor so that the headers are aligned, for normal mtu,
4502 * on such platforms. One possibility is that the DMA itself is
4503 * slightly more efficient if the front of the entire packet (instead
4504 * of the front of the headers) is aligned.
4505 *
4506 * Note we must always set align_tweak to 0 if we are using
4507 * jumbo frames.
4508 */
4509 #ifdef __NO_STRICT_ALIGNMENT
4510 sc->sc_align_tweak = 0;
4511 #else
4512 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4513 sc->sc_align_tweak = 0;
4514 else
4515 sc->sc_align_tweak = 2;
4516 #endif /* __NO_STRICT_ALIGNMENT */
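/*
 * A tweak of 2 offsets the 14-byte Ethernet header so that the IP
 * header following it lands on a 4-byte boundary.
 */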
4517
4518 /* Cancel any pending I/O. */
4519 wm_stop_locked(ifp, 0);
4520
4521 /* update statistics before reset */
4522 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4523 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4524
4525 /* Reset the chip to a known state. */
4526 wm_reset(sc);
4527
4528 switch (sc->sc_type) {
4529 case WM_T_82571:
4530 case WM_T_82572:
4531 case WM_T_82573:
4532 case WM_T_82574:
4533 case WM_T_82583:
4534 case WM_T_80003:
4535 case WM_T_ICH8:
4536 case WM_T_ICH9:
4537 case WM_T_ICH10:
4538 case WM_T_PCH:
4539 case WM_T_PCH2:
4540 case WM_T_PCH_LPT:
4541 case WM_T_PCH_SPT:
4542 /* AMT based hardware can now take control from firmware */
4543 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
4544 wm_get_hw_control(sc);
4545 break;
4546 default:
4547 break;
4548 }
4549
4550 /* Init hardware bits */
4551 wm_initialize_hardware_bits(sc);
4552
4553 /* Reset the PHY. */
4554 if (sc->sc_flags & WM_F_HAS_MII)
4555 wm_gmii_reset(sc);
4556
4557 /* Calculate (E)ITR value */
4558 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4559 sc->sc_itr = 450; /* For EITR */
4560 } else if (sc->sc_type >= WM_T_82543) {
4561 /*
4562 * Set up the interrupt throttling register (units of 256ns)
4563 * Note that a footnote in Intel's documentation says this
4564 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4565 * or 10Mbit mode. Empirically, it appears to be the case
4566 * that that is also true for the 1024ns units of the other
4567 * interrupt-related timer registers -- so, really, we ought
4568 * to divide this value by 4 when the link speed is low.
4569 *
4570 * XXX implement this division at link speed change!
4571 */
4572
4573 /*
4574 * For N interrupts/sec, set this value to:
4575 * 1000000000 / (N * 256). Note that we set the
4576 * absolute and packet timer values to this value
4577 * divided by 4 to get "simple timer" behavior.
4578 */
4579
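/* e.g. 1000000000 / (2604 * 256) ~= 1500, the value used below */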
4580 sc->sc_itr = 1500; /* 2604 ints/sec */
4581 }
4582
4583 error = wm_init_txrx_queues(sc);
4584 if (error)
4585 goto out;
4586
4587 /*
4588 * Clear out the VLAN table -- we don't use it (yet).
4589 */
4590 CSR_WRITE(sc, WMREG_VET, 0);
4591 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4592 trynum = 10; /* Due to hw errata */
4593 else
4594 trynum = 1;
4595 for (i = 0; i < WM_VLAN_TABSIZE; i++)
4596 for (j = 0; j < trynum; j++)
4597 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4598
4599 /*
4600 * Set up flow-control parameters.
4601 *
4602 * XXX Values could probably stand some tuning.
4603 */
4604 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4605 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4606 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
4607 && (sc->sc_type != WM_T_PCH_SPT)) {
4608 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4609 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4610 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4611 }
4612
4613 sc->sc_fcrtl = FCRTL_DFLT;
4614 if (sc->sc_type < WM_T_82543) {
4615 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4616 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4617 } else {
4618 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4619 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4620 }
4621
4622 if (sc->sc_type == WM_T_80003)
4623 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4624 else
4625 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4626
4627 /* Writes the control register. */
4628 wm_set_vlan(sc);
4629
4630 if (sc->sc_flags & WM_F_HAS_MII) {
4631 int val;
4632
4633 switch (sc->sc_type) {
4634 case WM_T_80003:
4635 case WM_T_ICH8:
4636 case WM_T_ICH9:
4637 case WM_T_ICH10:
4638 case WM_T_PCH:
4639 case WM_T_PCH2:
4640 case WM_T_PCH_LPT:
4641 case WM_T_PCH_SPT:
4642 /*
4643 * Set the MAC to wait the maximum time between
4644 * iterations and increase the max iterations when
4645 * polling the PHY; this fixes erroneous timeouts at
4646 * 10Mbps.
4647 */
4648 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4649 0xFFFF);
4650 val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
4651 val |= 0x3F;
4652 wm_kmrn_writereg(sc,
4653 KUMCTRLSTA_OFFSET_INB_PARAM, val);
4654 break;
4655 default:
4656 break;
4657 }
4658
4659 if (sc->sc_type == WM_T_80003) {
4660 val = CSR_READ(sc, WMREG_CTRL_EXT);
4661 val &= ~CTRL_EXT_LINK_MODE_MASK;
4662 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4663
4664 /* Bypass RX and TX FIFO's */
4665 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4666 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4667 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4668 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4669 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4670 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4671 }
4672 }
4673 #if 0
4674 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4675 #endif
4676
4677 /* Set up checksum offload parameters. */
4678 reg = CSR_READ(sc, WMREG_RXCSUM);
4679 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4680 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4681 reg |= RXCSUM_IPOFL;
4682 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4683 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4684 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4685 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4686 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4687
4688 /* Set up MSI-X */
4689 if (sc->sc_nintrs > 1) {
4690 uint32_t ivar;
4691 struct wm_queue *wmq;
4692 int qid, qintr_idx;
4693
4694 if (sc->sc_type == WM_T_82575) {
4695 /* Interrupt control */
4696 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4697 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4698 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4699
4700 /* TX and RX */
4701 for (i = 0; i < sc->sc_nqueues; i++) {
4702 wmq = &sc->sc_queue[i];
4703 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
4704 EITR_TX_QUEUE(wmq->wmq_id)
4705 | EITR_RX_QUEUE(wmq->wmq_id));
4706 }
4707 /* Link status */
4708 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4709 EITR_OTHER);
4710 } else if (sc->sc_type == WM_T_82574) {
4711 /* Interrupt control */
4712 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4713 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4714 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4715
4716 ivar = 0;
4717 /* TX and RX */
4718 for (i = 0; i < sc->sc_nqueues; i++) {
4719 wmq = &sc->sc_queue[i];
4720 qid = wmq->wmq_id;
4721 qintr_idx = wmq->wmq_intr_idx;
4722
4723 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4724 IVAR_TX_MASK_Q_82574(qid));
4725 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4726 IVAR_RX_MASK_Q_82574(qid));
4727 }
4728 /* Link status */
4729 ivar |= __SHIFTIN((IVAR_VALID_82574
4730 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
4731 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4732 } else {
4733 /* Interrupt control */
4734 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
4735 | GPIE_EIAME | GPIE_PBA);
4736
4737 switch (sc->sc_type) {
4738 case WM_T_82580:
4739 case WM_T_I350:
4740 case WM_T_I354:
4741 case WM_T_I210:
4742 case WM_T_I211:
4743 /* TX and RX */
4744 for (i = 0; i < sc->sc_nqueues; i++) {
4745 wmq = &sc->sc_queue[i];
4746 qid = wmq->wmq_id;
4747 qintr_idx = wmq->wmq_intr_idx;
4748
4749 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4750 ivar &= ~IVAR_TX_MASK_Q(qid);
4751 ivar |= __SHIFTIN((qintr_idx
4752 | IVAR_VALID),
4753 IVAR_TX_MASK_Q(qid));
4754 ivar &= ~IVAR_RX_MASK_Q(qid);
4755 ivar |= __SHIFTIN((qintr_idx
4756 | IVAR_VALID),
4757 IVAR_RX_MASK_Q(qid));
4758 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4759 }
4760 break;
4761 case WM_T_82576:
4762 /* TX and RX */
4763 for (i = 0; i < sc->sc_nqueues; i++) {
4764 wmq = &sc->sc_queue[i];
4765 qid = wmq->wmq_id;
4766 qintr_idx = wmq->wmq_intr_idx;
4767
4768 ivar = CSR_READ(sc,
4769 WMREG_IVAR_Q_82576(qid));
4770 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
4771 ivar |= __SHIFTIN((qintr_idx
4772 | IVAR_VALID),
4773 IVAR_TX_MASK_Q_82576(qid));
4774 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
4775 ivar |= __SHIFTIN((qintr_idx
4776 | IVAR_VALID),
4777 IVAR_RX_MASK_Q_82576(qid));
4778 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
4779 ivar);
4780 }
4781 break;
4782 default:
4783 break;
4784 }
4785
4786 /* Link status */
4787 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
4788 IVAR_MISC_OTHER);
4789 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4790 }
4791
4792 if (sc->sc_nqueues > 1) {
4793 wm_init_rss(sc);
4794
4795 /*
4796 * NOTE: Receive Full-Packet Checksum Offload
4797 * is mutually exclusive with Multiqueue. However,
4798 * this is not the same as TCP/IP checksums, which
4799 * still work.
4800 */
4801 reg = CSR_READ(sc, WMREG_RXCSUM);
4802 reg |= RXCSUM_PCSD;
4803 CSR_WRITE(sc, WMREG_RXCSUM, reg);
4804 }
4805 }
4806
4807 /* Set up the interrupt registers. */
4808 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4809 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4810 ICR_RXO | ICR_RXT0;
4811 if (sc->sc_nintrs > 1) {
4812 uint32_t mask;
4813 struct wm_queue *wmq;
4814
4815 switch (sc->sc_type) {
4816 case WM_T_82574:
4817 CSR_WRITE(sc, WMREG_EIAC_82574,
4818 WMREG_EIAC_82574_MSIX_MASK);
4819 sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4820 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4821 break;
4822 default:
4823 if (sc->sc_type == WM_T_82575) {
4824 mask = 0;
4825 for (i = 0; i < sc->sc_nqueues; i++) {
4826 wmq = &sc->sc_queue[i];
4827 mask |= EITR_TX_QUEUE(wmq->wmq_id);
4828 mask |= EITR_RX_QUEUE(wmq->wmq_id);
4829 }
4830 mask |= EITR_OTHER;
4831 } else {
4832 mask = 0;
4833 for (i = 0; i < sc->sc_nqueues; i++) {
4834 wmq = &sc->sc_queue[i];
4835 mask |= 1 << wmq->wmq_intr_idx;
4836 }
4837 mask |= 1 << sc->sc_link_intr_idx;
4838 }
4839 CSR_WRITE(sc, WMREG_EIAC, mask);
4840 CSR_WRITE(sc, WMREG_EIAM, mask);
4841 CSR_WRITE(sc, WMREG_EIMS, mask);
4842 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4843 break;
4844 }
4845 } else
4846 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4847
4848 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4849 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4850 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4851 || (sc->sc_type == WM_T_PCH_SPT)) {
4852 reg = CSR_READ(sc, WMREG_KABGTXD);
4853 reg |= KABGTXD_BGSQLBIAS;
4854 CSR_WRITE(sc, WMREG_KABGTXD, reg);
4855 }
4856
4857 /* Set up the inter-packet gap. */
4858 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4859
4860 if (sc->sc_type >= WM_T_82543) {
4861 /*
4862 * XXX 82574 has both ITR and EITR. Set EITR when we use
4863 * the multiqueue function with MSI-X.
4864 */
4865 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4866 int qidx;
4867 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4868 struct wm_queue *wmq = &sc->sc_queue[qidx];
4869 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
4870 sc->sc_itr);
4871 }
4872 /*
4873 * Link interrupts occur much less frequently
4874 * than TX and RX interrupts, so we don't tune
4875 * the EITR(WM_MSIX_LINKINTR_IDX) value the way
4876 * FreeBSD's if_igb does.
4877 */
4878 } else
4879 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4880 }
4881
4882 /* Set the VLAN ethernetype. */
4883 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4884
4885 /*
4886 * Set up the transmit control register; we start out with
4887 * a collision distance suitable for FDX, but update it when
4888 * we resolve the media type.
4889 */
4890 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4891 | TCTL_CT(TX_COLLISION_THRESHOLD)
4892 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4893 if (sc->sc_type >= WM_T_82571)
4894 sc->sc_tctl |= TCTL_MULR;
4895 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4896
4897 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4898 /* Write TDT after TCTL.EN is set. See the documentation. */
4899 CSR_WRITE(sc, WMREG_TDT(0), 0);
4900 }
4901
4902 if (sc->sc_type == WM_T_80003) {
4903 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4904 reg &= ~TCTL_EXT_GCEX_MASK;
4905 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4906 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4907 }
4908
4909 /* Set the media. */
4910 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4911 goto out;
4912
4913 /* Configure for OS presence */
4914 wm_init_manageability(sc);
4915
4916 /*
4917 * Set up the receive control register; we actually program
4918 * the register when we set the receive filter. Use multicast
4919 * address offset type 0.
4920 *
4921 * Only the i82544 has the ability to strip the incoming
4922 * CRC, so we don't enable that feature.
4923 */
4924 sc->sc_mchash_type = 0;
4925 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4926 | RCTL_MO(sc->sc_mchash_type);
4927
4928 /*
4929 * The I350 has a bug where it always strips the CRC whether
4930 * asked to or not. So ask for a stripped CRC here and cope with it in rxeof.
4931 */
4932 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4933 || (sc->sc_type == WM_T_I210))
4934 sc->sc_rctl |= RCTL_SECRC;
4935
4936 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4937 && (ifp->if_mtu > ETHERMTU)) {
4938 sc->sc_rctl |= RCTL_LPE;
4939 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4940 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4941 }
4942
4943 if (MCLBYTES == 2048) {
4944 sc->sc_rctl |= RCTL_2k;
4945 } else {
4946 if (sc->sc_type >= WM_T_82543) {
4947 switch (MCLBYTES) {
4948 case 4096:
4949 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4950 break;
4951 case 8192:
4952 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4953 break;
4954 case 16384:
4955 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4956 break;
4957 default:
4958 panic("wm_init: MCLBYTES %d unsupported",
4959 MCLBYTES);
4960 break;
4961 }
4962 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4963 }
4964
4965 /* Set the receive filter. */
4966 wm_set_filter(sc);
4967
4968 /* Enable ECC */
4969 switch (sc->sc_type) {
4970 case WM_T_82571:
4971 reg = CSR_READ(sc, WMREG_PBA_ECC);
4972 reg |= PBA_ECC_CORR_EN;
4973 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4974 break;
4975 case WM_T_PCH_LPT:
4976 case WM_T_PCH_SPT:
4977 reg = CSR_READ(sc, WMREG_PBECCSTS);
4978 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4979 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4980
4981 reg = CSR_READ(sc, WMREG_CTRL);
4982 reg |= CTRL_MEHE;
4983 CSR_WRITE(sc, WMREG_CTRL, reg);
4984 break;
4985 default:
4986 break;
4987 }
4988
4989 /* On 575 and later set RDT only if RX enabled */
4990 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4991 int qidx;
4992 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4993 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
4994 for (i = 0; i < WM_NRXDESC; i++) {
4995 mutex_enter(rxq->rxq_lock);
4996 wm_init_rxdesc(rxq, i);
4997 mutex_exit(rxq->rxq_lock);
4999 }
5000 }
5001 }
5002
5003 sc->sc_stopping = false;
5004
5005 /* Start the one second link check clock. */
5006 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5007
5008 /* ...all done! */
5009 ifp->if_flags |= IFF_RUNNING;
5010 ifp->if_flags &= ~IFF_OACTIVE;
5011
5012 out:
5013 sc->sc_if_flags = ifp->if_flags;
5014 if (error)
5015 log(LOG_ERR, "%s: interface not running\n",
5016 device_xname(sc->sc_dev));
5017 return error;
5018 }
5019
5020 /*
5021 * wm_stop: [ifnet interface function]
5022 *
5023 * Stop transmission on the interface.
5024 */
5025 static void
5026 wm_stop(struct ifnet *ifp, int disable)
5027 {
5028 struct wm_softc *sc = ifp->if_softc;
5029
5030 WM_CORE_LOCK(sc);
5031 wm_stop_locked(ifp, disable);
5032 WM_CORE_UNLOCK(sc);
5033 }
5034
5035 static void
5036 wm_stop_locked(struct ifnet *ifp, int disable)
5037 {
5038 struct wm_softc *sc = ifp->if_softc;
5039 struct wm_txsoft *txs;
5040 int i, qidx;
5041
5042 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5043 device_xname(sc->sc_dev), __func__));
5044 KASSERT(WM_CORE_LOCKED(sc));
5045
5046 sc->sc_stopping = true;
5047
5048 /* Stop the one second clock. */
5049 callout_stop(&sc->sc_tick_ch);
5050
5051 /* Stop the 82547 Tx FIFO stall check timer. */
5052 if (sc->sc_type == WM_T_82547)
5053 callout_stop(&sc->sc_txfifo_ch);
5054
5055 if (sc->sc_flags & WM_F_HAS_MII) {
5056 /* Down the MII. */
5057 mii_down(&sc->sc_mii);
5058 } else {
5059 #if 0
5060 /* Should we clear PHY's status properly? */
5061 wm_reset(sc);
5062 #endif
5063 }
5064
5065 /* Stop the transmit and receive processes. */
5066 CSR_WRITE(sc, WMREG_TCTL, 0);
5067 CSR_WRITE(sc, WMREG_RCTL, 0);
5068 sc->sc_rctl &= ~RCTL_EN;
5069
5070 /*
5071 * Clear the interrupt mask to ensure the device cannot assert its
5072 * interrupt line.
5073 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5074 * service any currently pending or shared interrupt.
5075 */
5076 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5077 sc->sc_icr = 0;
5078 if (sc->sc_nintrs > 1) {
5079 if (sc->sc_type != WM_T_82574) {
5080 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5081 CSR_WRITE(sc, WMREG_EIAC, 0);
5082 } else
5083 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5084 }
5085
5086 /* Release any queued transmit buffers. */
5087 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5088 struct wm_queue *wmq = &sc->sc_queue[qidx];
5089 struct wm_txqueue *txq = &wmq->wmq_txq;
5090 mutex_enter(txq->txq_lock);
5091 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5092 txs = &txq->txq_soft[i];
5093 if (txs->txs_mbuf != NULL) {
5094 bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
5095 m_freem(txs->txs_mbuf);
5096 txs->txs_mbuf = NULL;
5097 }
5098 }
5099 if (sc->sc_type == WM_T_PCH_SPT) {
5100 pcireg_t preg;
5101 uint32_t reg;
5102 int nexttx;
5103
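/*
 * If the hardware reports a pending descriptor ring flush
 * request, transmit one dummy 512-byte descriptor below so the
 * flush completes before the ring is torn down.
 */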
5104 /* First, disable MULR fix in FEXTNVM11 */
5105 reg = CSR_READ(sc, WMREG_FEXTNVM11);
5106 reg |= FEXTNVM11_DIS_MULRFIX;
5107 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5108
5109 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5110 WM_PCI_DESCRING_STATUS);
5111 reg = CSR_READ(sc, WMREG_TDLEN(0));
5112 printf("XXX RST: FLUSH = %08x, len = %u\n",
5113 (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
5114 if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
5115 && (reg != 0)) {
5116 /* TX */
5117 printf("XXX need TX flush (reg = %08x)\n",
5118 preg);
5119 wm_init_tx_descs(sc, txq);
5120 wm_init_tx_regs(sc, wmq, txq);
5121 nexttx = txq->txq_next;
5122 wm_set_dma_addr(
5123 &txq->txq_descs[nexttx].wtx_addr,
5124 WM_CDTXADDR(txq, nexttx));
5125 txq->txq_descs[nexttx].wtx_cmdlen
5126 = htole32(WTX_CMD_IFCS | 512);
5127 wm_cdtxsync(txq, nexttx, 1,
5128 BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
5129 CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
5130 CSR_WRITE(sc, WMREG_TDT(0), nexttx);
5131 CSR_WRITE_FLUSH(sc);
5132 delay(250);
5133 CSR_WRITE(sc, WMREG_TCTL, 0);
5134 }
5135 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5136 WM_PCI_DESCRING_STATUS);
5137 if (preg & DESCRING_STATUS_FLUSH_REQ) {
5138 /* RX */
5139 printf("XXX need RX flush\n");
5140 }
5141 }
5142 mutex_exit(txq->txq_lock);
5143 }
5144
5145 /* Mark the interface as down and cancel the watchdog timer. */
5146 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5147 ifp->if_timer = 0;
5148
5149 if (disable) {
5150 for (i = 0; i < sc->sc_nqueues; i++) {
5151 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5152 mutex_enter(rxq->rxq_lock);
5153 wm_rxdrain(rxq);
5154 mutex_exit(rxq->rxq_lock);
5155 }
5156 }
5157
5158 #if 0 /* notyet */
5159 if (sc->sc_type >= WM_T_82544)
5160 CSR_WRITE(sc, WMREG_WUC, 0);
5161 #endif
5162 }
5163
5164 static void
5165 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5166 {
5167 struct mbuf *m;
5168 int i;
5169
5170 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5171 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5172 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5173 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5174 m->m_data, m->m_len, m->m_flags);
5175 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5176 i, i == 1 ? "" : "s");
5177 }
5178
5179 /*
5180 * wm_82547_txfifo_stall:
5181 *
5182 * Callout used to wait for the 82547 Tx FIFO to drain,
5183 * reset the FIFO pointers, and restart packet transmission.
5184 */
5185 static void
5186 wm_82547_txfifo_stall(void *arg)
5187 {
5188 struct wm_softc *sc = arg;
5189 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5190
5191 mutex_enter(txq->txq_lock);
5192
5193 if (sc->sc_stopping)
5194 goto out;
5195
5196 if (txq->txq_fifo_stall) {
5197 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5198 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5199 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5200 /*
5201 * Packets have drained. Stop transmitter, reset
5202 * FIFO pointers, restart transmitter, and kick
5203 * the packet queue.
5204 */
5205 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5206 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5207 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5208 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5209 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5210 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5211 CSR_WRITE(sc, WMREG_TCTL, tctl);
5212 CSR_WRITE_FLUSH(sc);
5213
5214 txq->txq_fifo_head = 0;
5215 txq->txq_fifo_stall = 0;
5216 wm_start_locked(&sc->sc_ethercom.ec_if);
5217 } else {
5218 /*
5219 * Still waiting for packets to drain; try again in
5220 * another tick.
5221 */
5222 callout_schedule(&sc->sc_txfifo_ch, 1);
5223 }
5224 }
5225
5226 out:
5227 mutex_exit(txq->txq_lock);
5228 }
5229
5230 /*
5231 * wm_82547_txfifo_bugchk:
5232 *
5233 * Check for bug condition in the 82547 Tx FIFO. We need to
5234 * prevent enqueueing a packet that would wrap around the end
5235 * of the Tx FIFO ring buffer, otherwise the chip will croak.
5236 *
5237 * We do this by checking the amount of space before the end
5238 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
5239 * the Tx FIFO, wait for all remaining packets to drain, reset
5240 * the internal FIFO pointers to the beginning, and restart
5241 * transmission on the interface.
5242 */
5243 #define WM_FIFO_HDR 0x10
5244 #define WM_82547_PAD_LEN 0x3e0
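/*
 * FIFO accounting: each packet occupies its length plus WM_FIFO_HDR
 * (16 bytes), rounded up to a multiple of WM_FIFO_HDR;
 * WM_82547_PAD_LEN is 0x3e0 (992) bytes.
 */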
5245 static int
5246 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5247 {
5248 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5249 int space = txq->txq_fifo_size - txq->txq_fifo_head;
5250 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5251
5252 /* Just return if already stalled. */
5253 if (txq->txq_fifo_stall)
5254 return 1;
5255
5256 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5257 /* Stall only occurs in half-duplex mode. */
5258 goto send_packet;
5259 }
5260
5261 if (len >= WM_82547_PAD_LEN + space) {
5262 txq->txq_fifo_stall = 1;
5263 callout_schedule(&sc->sc_txfifo_ch, 1);
5264 return 1;
5265 }
5266
5267 send_packet:
5268 txq->txq_fifo_head += len;
5269 if (txq->txq_fifo_head >= txq->txq_fifo_size)
5270 txq->txq_fifo_head -= txq->txq_fifo_size;
5271
5272 return 0;
5273 }
5274
5275 static int
5276 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5277 {
5278 int error;
5279
5280 /*
5281 * Allocate the control data structures, and create and load the
5282 * DMA map for it.
5283 *
5284 * NOTE: All Tx descriptors must be in the same 4G segment of
5285 * memory. So must Rx descriptors. We simplify by allocating
5286 * both sets within the same 4G segment.
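* The 4G constraint is enforced by the 4GB "boundary" argument
* passed to bus_dmamem_alloc() below.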
5287 */
5288 if (sc->sc_type < WM_T_82544)
5289 WM_NTXDESC(txq) = WM_NTXDESC_82542;
5290 else
5291 WM_NTXDESC(txq) = WM_NTXDESC_82544;
5292 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5293 txq->txq_descsize = sizeof(nq_txdesc_t);
5294 else
5295 txq->txq_descsize = sizeof(wiseman_txdesc_t);
5296
5297 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
5298 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
5299 1, &txq->txq_desc_rseg, 0)) != 0) {
5300 aprint_error_dev(sc->sc_dev,
5301 "unable to allocate TX control data, error = %d\n",
5302 error);
5303 goto fail_0;
5304 }
5305
5306 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5307 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
5308 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5309 aprint_error_dev(sc->sc_dev,
5310 "unable to map TX control data, error = %d\n", error);
5311 goto fail_1;
5312 }
5313
5314 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
5315 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
5316 aprint_error_dev(sc->sc_dev,
5317 "unable to create TX control data DMA map, error = %d\n",
5318 error);
5319 goto fail_2;
5320 }
5321
5322 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5323 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
5324 aprint_error_dev(sc->sc_dev,
5325 "unable to load TX control data DMA map, error = %d\n",
5326 error);
5327 goto fail_3;
5328 }
5329
5330 return 0;
5331
5332 fail_3:
5333 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5334 fail_2:
5335 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5336 WM_TXDESCS_SIZE(txq));
5337 fail_1:
5338 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5339 fail_0:
5340 return error;
5341 }
5342
5343 static void
5344 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5345 {
5346
5347 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5348 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5349 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5350 WM_TXDESCS_SIZE(txq));
5351 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5352 }
5353
5354 static int
5355 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5356 {
5357 int error;
5358
5359 /*
5360 * Allocate the control data structures, and create and load the
5361 * DMA map for it.
5362 *
5363 * NOTE: All Tx descriptors must be in the same 4G segment of
5364 * memory. So must Rx descriptors. We simplify by allocating
5365 * both sets within the same 4G segment.
5366 */
5367 rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5368 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
5369 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
5370 1, &rxq->rxq_desc_rseg, 0)) != 0) {
5371 aprint_error_dev(sc->sc_dev,
5372 "unable to allocate RX control data, error = %d\n",
5373 error);
5374 goto fail_0;
5375 }
5376
5377 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5378 rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5379 (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5380 aprint_error_dev(sc->sc_dev,
5381 "unable to map RX control data, error = %d\n", error);
5382 goto fail_1;
5383 }
5384
5385 if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5386 rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5387 aprint_error_dev(sc->sc_dev,
5388 "unable to create RX control data DMA map, error = %d\n",
5389 error);
5390 goto fail_2;
5391 }
5392
5393 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5394 rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5395 aprint_error_dev(sc->sc_dev,
5396 "unable to load RX control data DMA map, error = %d\n",
5397 error);
5398 goto fail_3;
5399 }
5400
5401 return 0;
5402
5403 fail_3:
5404 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5405 fail_2:
5406 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5407 rxq->rxq_desc_size);
5408 fail_1:
5409 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5410 fail_0:
5411 return error;
5412 }
5413
5414 static void
5415 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5416 {
5417
5418 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5419 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5420 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5421 rxq->rxq_desc_size);
5422 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5423 }
5424
5425
5426 static int
5427 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5428 {
5429 int i, error;
5430
5431 /* Create the transmit buffer DMA maps. */
5432 WM_TXQUEUELEN(txq) =
5433 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5434 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5435 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5436 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5437 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5438 &txq->txq_soft[i].txs_dmamap)) != 0) {
5439 aprint_error_dev(sc->sc_dev,
5440 "unable to create Tx DMA map %d, error = %d\n",
5441 i, error);
5442 goto fail;
5443 }
5444 }
5445
5446 return 0;
5447
5448 fail:
5449 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5450 if (txq->txq_soft[i].txs_dmamap != NULL)
5451 bus_dmamap_destroy(sc->sc_dmat,
5452 txq->txq_soft[i].txs_dmamap);
5453 }
5454 return error;
5455 }
5456
5457 static void
5458 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5459 {
5460 int i;
5461
5462 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5463 if (txq->txq_soft[i].txs_dmamap != NULL)
5464 bus_dmamap_destroy(sc->sc_dmat,
5465 txq->txq_soft[i].txs_dmamap);
5466 }
5467 }
5468
5469 static int
5470 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5471 {
5472 int i, error;
5473
5474 /* Create the receive buffer DMA maps. */
5475 for (i = 0; i < WM_NRXDESC; i++) {
5476 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5477 MCLBYTES, 0, 0,
5478 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5479 aprint_error_dev(sc->sc_dev,
5480 "unable to create Rx DMA map %d error = %d\n",
5481 i, error);
5482 goto fail;
5483 }
5484 rxq->rxq_soft[i].rxs_mbuf = NULL;
5485 }
5486
5487 return 0;
5488
5489 fail:
5490 for (i = 0; i < WM_NRXDESC; i++) {
5491 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5492 bus_dmamap_destroy(sc->sc_dmat,
5493 rxq->rxq_soft[i].rxs_dmamap);
5494 }
5495 return error;
5496 }
5497
5498 static void
5499 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5500 {
5501 int i;
5502
5503 for (i = 0; i < WM_NRXDESC; i++) {
5504 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5505 bus_dmamap_destroy(sc->sc_dmat,
5506 rxq->rxq_soft[i].rxs_dmamap);
5507 }
5508 }
5509
5510 /*
5511 * wm_alloc_txrx_queues:
5512 *	Allocate {tx,rx} descriptors and {tx,rx} buffers
5513 */
5514 static int
5515 wm_alloc_txrx_queues(struct wm_softc *sc)
5516 {
5517 int i, error, tx_done, rx_done;
5518
5519 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
5520 KM_SLEEP);
5521 if (sc->sc_queue == NULL) {
5522 aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
5523 error = ENOMEM;
5524 goto fail_0;
5525 }
5526
5527 /*
5528 * For transmission
5529 */
5530 error = 0;
5531 tx_done = 0;
5532 for (i = 0; i < sc->sc_nqueues; i++) {
5533 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5534 txq->txq_sc = sc;
5535 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5536
5537 error = wm_alloc_tx_descs(sc, txq);
5538 if (error)
5539 break;
5540 error = wm_alloc_tx_buffer(sc, txq);
5541 if (error) {
5542 wm_free_tx_descs(sc, txq);
5543 break;
5544 }
5545 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
5546 if (txq->txq_interq == NULL) {
5547 wm_free_tx_descs(sc, txq);
5548 wm_free_tx_buffer(sc, txq);
5549 error = ENOMEM;
5550 break;
5551 }
5552 tx_done++;
5553 }
5554 if (error)
5555 goto fail_1;
5556
5557 /*
5558 * For receive
5559 */
5560 error = 0;
5561 rx_done = 0;
5562 for (i = 0; i < sc->sc_nqueues; i++) {
5563 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5564 rxq->rxq_sc = sc;
5565 #ifdef WM_MPSAFE
5566 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5567 #else
5568 rxq->rxq_lock = NULL;
5569 #endif
5570 error = wm_alloc_rx_descs(sc, rxq);
5571 if (error)
5572 break;
5573
5574 error = wm_alloc_rx_buffer(sc, rxq);
5575 if (error) {
5576 wm_free_rx_descs(sc, rxq);
5577 break;
5578 }
5579
5580 rx_done++;
5581 }
5582 if (error)
5583 goto fail_2;
5584
5585 return 0;
5586
5587 fail_2:
5588 for (i = 0; i < rx_done; i++) {
5589 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5590 wm_free_rx_buffer(sc, rxq);
5591 wm_free_rx_descs(sc, rxq);
5592 if (rxq->rxq_lock)
5593 mutex_obj_free(rxq->rxq_lock);
5594 }
5595 fail_1:
5596 for (i = 0; i < tx_done; i++) {
5597 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5598 pcq_destroy(txq->txq_interq);
5599 wm_free_tx_buffer(sc, txq);
5600 wm_free_tx_descs(sc, txq);
5601 if (txq->txq_lock)
5602 mutex_obj_free(txq->txq_lock);
5603 }
5604
5605 kmem_free(sc->sc_queue,
5606 sizeof(struct wm_queue) * sc->sc_nqueues);
5607 fail_0:
5608 return error;
5609 }
5610
5611 /*
5612 * wm_free_txrx_queues:
5613 *	Free {tx,rx} descriptors and {tx,rx} buffers
5614 */
5615 static void
5616 wm_free_txrx_queues(struct wm_softc *sc)
5617 {
5618 int i;
5619
5620 for (i = 0; i < sc->sc_nqueues; i++) {
5621 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5622 wm_free_rx_buffer(sc, rxq);
5623 wm_free_rx_descs(sc, rxq);
5624 if (rxq->rxq_lock)
5625 mutex_obj_free(rxq->rxq_lock);
5626 }
5627
5628 for (i = 0; i < sc->sc_nqueues; i++) {
5629 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5630 wm_free_tx_buffer(sc, txq);
5631 wm_free_tx_descs(sc, txq);
5632 if (txq->txq_lock)
5633 mutex_obj_free(txq->txq_lock);
5634 }
5635
5636 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
5637 }
5638
5639 static void
5640 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5641 {
5642
5643 KASSERT(mutex_owned(txq->txq_lock));
5644
5645 /* Initialize the transmit descriptor ring. */
5646 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
5647 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5648 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5649 txq->txq_free = WM_NTXDESC(txq);
5650 txq->txq_next = 0;
5651 }
5652
5653 static void
5654 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5655 struct wm_txqueue *txq)
5656 {
5657
5658 KASSERT(mutex_owned(txq->txq_lock));
5659
5660 if (sc->sc_type < WM_T_82543) {
5661 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5662 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5663 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
5664 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5665 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5666 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5667 } else {
5668 int qid = wmq->wmq_id;
5669
5670 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5671 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5672 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
5673 CSR_WRITE(sc, WMREG_TDH(qid), 0);
5674
5675 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5676 /*
5677 * Don't write TDT before TCTL.EN is set.
5678 * See the documentation.
5679 */
5680 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5681 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5682 | TXDCTL_WTHRESH(0));
5683 else {
5684 /* ITR / 4 */
5685 CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5686 if (sc->sc_type >= WM_T_82540) {
5687 /* Should be the same value as TIDV. */
5688 CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5689 }
5690
5691 CSR_WRITE(sc, WMREG_TDT(qid), 0);
5692 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5693 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5694 }
5695 }
5696 }
5697
5698 static void
5699 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5700 {
5701 int i;
5702
5703 KASSERT(mutex_owned(txq->txq_lock));
5704
5705 /* Initialize the transmit job descriptors. */
5706 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5707 txq->txq_soft[i].txs_mbuf = NULL;
5708 txq->txq_sfree = WM_TXQUEUELEN(txq);
5709 txq->txq_snext = 0;
5710 txq->txq_sdirty = 0;
5711 }
5712
5713 static void
5714 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
5715 struct wm_txqueue *txq)
5716 {
5717
5718 KASSERT(mutex_owned(txq->txq_lock));
5719
5720 /*
5721 * Set up some register offsets that are different between
5722 * the i82542 and the i82543 and later chips.
5723 */
5724 if (sc->sc_type < WM_T_82543)
5725 txq->txq_tdt_reg = WMREG_OLD_TDT;
5726 else
5727 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
5728
5729 wm_init_tx_descs(sc, txq);
5730 wm_init_tx_regs(sc, wmq, txq);
5731 wm_init_tx_buffer(sc, txq);
5732 }
5733
5734 static void
5735 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5736 struct wm_rxqueue *rxq)
5737 {
5738
5739 KASSERT(mutex_owned(rxq->rxq_lock));
5740
5741 /*
5742 * Initialize the receive descriptor and receive job
5743 * descriptor rings.
5744 */
5745 if (sc->sc_type < WM_T_82543) {
5746 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5747 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5748 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5749 sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5750 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5751 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5752 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5753
5754 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5755 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5756 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5757 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5758 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5759 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5760 } else {
5761 int qid = wmq->wmq_id;
5762
5763 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
5764 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
5765 CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
5766
5767 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5768 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
5769 panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
5770 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
5771 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
5772 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
5773 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
5774 | RXDCTL_WTHRESH(1));
5775 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5776 CSR_WRITE(sc, WMREG_RDT(qid), 0);
5777 } else {
5778 CSR_WRITE(sc, WMREG_RDH(qid), 0);
5779 CSR_WRITE(sc, WMREG_RDT(qid), 0);
5780 /* ITR / 4 */
5781 CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
5782 /* MUST be the same value as RDTR. */
5783 CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
5784 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
5785 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5786 }
5787 }
5788 }
5789
5790 static int
5791 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5792 {
5793 struct wm_rxsoft *rxs;
5794 int error, i;
5795
5796 KASSERT(mutex_owned(rxq->rxq_lock));
5797
5798 for (i = 0; i < WM_NRXDESC; i++) {
5799 rxs = &rxq->rxq_soft[i];
5800 if (rxs->rxs_mbuf == NULL) {
5801 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
5802 log(LOG_ERR, "%s: unable to allocate or map "
5803 "rx buffer %d, error = %d\n",
5804 device_xname(sc->sc_dev), i, error);
5805 /*
5806 * XXX Should attempt to run with fewer receive
5807 * XXX buffers instead of just failing.
5808 */
5809 wm_rxdrain(rxq);
5810 return ENOMEM;
5811 }
5812 } else {
5813 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5814 wm_init_rxdesc(rxq, i);
5815 /*
5816 * For 82575 and newer devices, the RX descriptors
5817 * must be initialized after the setting of RCTL.EN
5818 * in wm_set_filter().
5819 */
5820 }
5821 }
5822 rxq->rxq_ptr = 0;
5823 rxq->rxq_discard = 0;
5824 WM_RXCHAIN_RESET(rxq);
5825
5826 return 0;
5827 }
5828
5829 static int
5830 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
5831 struct wm_rxqueue *rxq)
5832 {
5833
5834 KASSERT(mutex_owned(rxq->rxq_lock));
5835
5836 /*
5837 * Set up some register offsets that are different between
5838 * the i82542 and the i82543 and later chips.
5839 */
5840 if (sc->sc_type < WM_T_82543)
5841 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
5842 else
5843 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
5844
5845 wm_init_rx_regs(sc, wmq, rxq);
5846 return wm_init_rx_buffer(sc, rxq);
5847 }
5848
5849 /*
5850 * wm_init_txrx_queues:
5851 *	Initialize {tx,rx} descriptors and {tx,rx} buffers
5852 */
5853 static int
5854 wm_init_txrx_queues(struct wm_softc *sc)
5855 {
5856 int i, error = 0;
5857
5858 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5859 device_xname(sc->sc_dev), __func__));
5860 for (i = 0; i < sc->sc_nqueues; i++) {
5861 struct wm_queue *wmq = &sc->sc_queue[i];
5862 struct wm_txqueue *txq = &wmq->wmq_txq;
5863 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5864
5865 mutex_enter(txq->txq_lock);
5866 wm_init_tx_queue(sc, wmq, txq);
5867 mutex_exit(txq->txq_lock);
5868
5869 mutex_enter(rxq->rxq_lock);
5870 error = wm_init_rx_queue(sc, wmq, rxq);
5871 mutex_exit(rxq->rxq_lock);
5872 if (error)
5873 break;
5874 }
5875
5876 return error;
5877 }
5878
5879 /*
5880 * wm_tx_offload:
5881 *
5882 * Set up TCP/IP checksumming parameters for the
5883 * specified packet.
5884 */
5885 static int
5886 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
5887 uint8_t *fieldsp)
5888 {
5889 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5890 struct mbuf *m0 = txs->txs_mbuf;
5891 struct livengood_tcpip_ctxdesc *t;
5892 uint32_t ipcs, tucs, cmd, cmdlen, seg;
5893 uint32_t ipcse;
5894 struct ether_header *eh;
5895 int offset, iphl;
5896 uint8_t fields;
5897
5898 /*
5899 * XXX It would be nice if the mbuf pkthdr had offset
5900 * fields for the protocol headers.
5901 */
5902
5903 eh = mtod(m0, struct ether_header *);
5904 switch (htons(eh->ether_type)) {
5905 case ETHERTYPE_IP:
5906 case ETHERTYPE_IPV6:
5907 offset = ETHER_HDR_LEN;
5908 break;
5909
5910 case ETHERTYPE_VLAN:
5911 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5912 break;
5913
5914 default:
5915 /*
5916 * Don't support this protocol or encapsulation.
5917 */
5918 *fieldsp = 0;
5919 *cmdp = 0;
5920 return 0;
5921 }
5922
5923 if ((m0->m_pkthdr.csum_flags &
5924 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
5925 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5926 } else {
5927 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5928 }
5929 ipcse = offset + iphl - 1;
5930
5931 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
5932 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
5933 seg = 0;
5934 fields = 0;
5935
5936 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5937 int hlen = offset + iphl;
5938 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5939
5940 if (__predict_false(m0->m_len <
5941 (hlen + sizeof(struct tcphdr)))) {
5942 /*
5943 * TCP/IP headers are not in the first mbuf; we need
5944 * to do this the slow and painful way. Let's just
5945 * hope this doesn't happen very often.
5946 */
5947 struct tcphdr th;
5948
5949 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5950
5951 m_copydata(m0, hlen, sizeof(th), &th);
5952 if (v4) {
5953 struct ip ip;
5954
5955 m_copydata(m0, offset, sizeof(ip), &ip);
5956 ip.ip_len = 0;
5957 m_copyback(m0,
5958 offset + offsetof(struct ip, ip_len),
5959 sizeof(ip.ip_len), &ip.ip_len);
5960 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5961 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5962 } else {
5963 struct ip6_hdr ip6;
5964
5965 m_copydata(m0, offset, sizeof(ip6), &ip6);
5966 ip6.ip6_plen = 0;
5967 m_copyback(m0,
5968 offset + offsetof(struct ip6_hdr, ip6_plen),
5969 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5970 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5971 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5972 }
5973 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5974 sizeof(th.th_sum), &th.th_sum);
5975
5976 hlen += th.th_off << 2;
5977 } else {
5978 /*
5979 * TCP/IP headers are in the first mbuf; we can do
5980 * this the easy way.
5981 */
5982 struct tcphdr *th;
5983
5984 if (v4) {
5985 struct ip *ip =
5986 (void *)(mtod(m0, char *) + offset);
5987 th = (void *)(mtod(m0, char *) + hlen);
5988
5989 ip->ip_len = 0;
5990 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5991 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5992 } else {
5993 struct ip6_hdr *ip6 =
5994 (void *)(mtod(m0, char *) + offset);
5995 th = (void *)(mtod(m0, char *) + hlen);
5996
5997 ip6->ip6_plen = 0;
5998 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5999 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6000 }
6001 hlen += th->th_off << 2;
6002 }
6003
6004 if (v4) {
6005 WM_EVCNT_INCR(&sc->sc_ev_txtso);
6006 cmdlen |= WTX_TCPIP_CMD_IP;
6007 } else {
6008 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
6009 ipcse = 0;
6010 }
6011 cmd |= WTX_TCPIP_CMD_TSE;
6012 cmdlen |= WTX_TCPIP_CMD_TSE |
6013 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
6014 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
6015 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
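		/*
		 * E.g. for a plain TCP/IPv4 TSO packet with 20-byte IP
		 * and TCP headers, hlen = 14 + 20 + 20 = 54 here.
		 */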
6016 }
6017
6018 /*
6019 * NOTE: Even if we're not using the IP or TCP/UDP checksum
6020 * offload feature, if we load the context descriptor, we
6021 * MUST provide valid values for IPCSS and TUCSS fields.
6022 */
6023
6024 ipcs = WTX_TCPIP_IPCSS(offset) |
6025 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
6026 WTX_TCPIP_IPCSE(ipcse);
6027 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
6028 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
6029 fields |= WTX_IXSM;
6030 }
6031
6032 offset += iphl;
6033
6034 if (m0->m_pkthdr.csum_flags &
6035 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
6036 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
6037 fields |= WTX_TXSM;
6038 tucs = WTX_TCPIP_TUCSS(offset) |
6039 WTX_TCPIP_TUCSO(offset +
6040 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
6041 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6042 } else if ((m0->m_pkthdr.csum_flags &
6043 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
6044 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
6045 fields |= WTX_TXSM;
6046 tucs = WTX_TCPIP_TUCSS(offset) |
6047 WTX_TCPIP_TUCSO(offset +
6048 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
6049 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6050 } else {
6051 /* Just initialize it to a valid TCP context. */
6052 tucs = WTX_TCPIP_TUCSS(offset) |
6053 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
6054 WTX_TCPIP_TUCSE(0) /* rest of packet */;
6055 }
6056
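/*
 * Worked example, assuming an untagged TCP/IPv4 frame with a
 * 20-byte IP header: IPCSS = 14, IPCSO = 14 + 10 = 24, IPCSE = 33,
 * TUCSS = 34 and TUCSO = 34 + 16 = 50, the byte offset of th_sum
 * within the frame.
 */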
6057 /* Fill in the context descriptor. */
6058 t = (struct livengood_tcpip_ctxdesc *)
6059 &txq->txq_descs[txq->txq_next];
6060 t->tcpip_ipcs = htole32(ipcs);
6061 t->tcpip_tucs = htole32(tucs);
6062 t->tcpip_cmdlen = htole32(cmdlen);
6063 t->tcpip_seg = htole32(seg);
6064 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6065
6066 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6067 txs->txs_ndesc++;
6068
6069 *cmdp = cmd;
6070 *fieldsp = fields;
6071
6072 return 0;
6073 }
6074
6075 /*
6076 * wm_start: [ifnet interface function]
6077 *
6078 * Start packet transmission on the interface.
6079 */
6080 static void
6081 wm_start(struct ifnet *ifp)
6082 {
6083 struct wm_softc *sc = ifp->if_softc;
6084 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6085
6086 mutex_enter(txq->txq_lock);
6087 if (!sc->sc_stopping)
6088 wm_start_locked(ifp);
6089 mutex_exit(txq->txq_lock);
6090 }
6091
6092 static void
6093 wm_start_locked(struct ifnet *ifp)
6094 {
6095 struct wm_softc *sc = ifp->if_softc;
6096 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6097 struct mbuf *m0;
6098 struct m_tag *mtag;
6099 struct wm_txsoft *txs;
6100 bus_dmamap_t dmamap;
6101 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6102 bus_addr_t curaddr;
6103 bus_size_t seglen, curlen;
6104 uint32_t cksumcmd;
6105 uint8_t cksumfields;
6106
6107 KASSERT(mutex_owned(txq->txq_lock));
6108
6109 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6110 return;
6111
6112 /* Remember the previous number of free descriptors. */
6113 ofree = txq->txq_free;
6114
6115 /*
6116 * Loop through the send queue, setting up transmit descriptors
6117 * until we drain the queue, or use up all available transmit
6118 * descriptors.
6119 */
6120 for (;;) {
6121 m0 = NULL;
6122
6123 /* Get a work queue entry. */
6124 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6125 wm_txeof(sc, txq);
6126 if (txq->txq_sfree == 0) {
6127 DPRINTF(WM_DEBUG_TX,
6128 ("%s: TX: no free job descriptors\n",
6129 device_xname(sc->sc_dev)));
6130 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6131 break;
6132 }
6133 }
6134
6135 /* Grab a packet off the queue. */
6136 IFQ_DEQUEUE(&ifp->if_snd, m0);
6137 if (m0 == NULL)
6138 break;
6139
6140 DPRINTF(WM_DEBUG_TX,
6141 ("%s: TX: have packet to transmit: %p\n",
6142 device_xname(sc->sc_dev), m0));
6143
6144 txs = &txq->txq_soft[txq->txq_snext];
6145 dmamap = txs->txs_dmamap;
6146
6147 use_tso = (m0->m_pkthdr.csum_flags &
6148 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6149
6150 /*
6151 * So says the Linux driver:
6152 * The controller does a simple calculation to make sure
6153 * there is enough room in the FIFO before initiating the
6154 * DMA for each buffer. The calc is:
6155 * 4 = ceil(buffer len / MSS)
6156 * To make sure we don't overrun the FIFO, adjust the max
6157 * buffer len if the MSS drops.
6158 */
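		/*
		 * Hence each DMA segment is clamped to 4 * MSS
		 * (segsz << 2) whenever that is below WTX_MAX_LEN.
		 */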
6159 dmamap->dm_maxsegsz =
6160 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6161 ? m0->m_pkthdr.segsz << 2
6162 : WTX_MAX_LEN;
6163
6164 /*
6165 * Load the DMA map. If this fails, the packet either
6166 * didn't fit in the allotted number of segments, or we
6167 * were short on resources. For the too-many-segments
6168 * case, we simply report an error and drop the packet,
6169 * since we can't sanely copy a jumbo packet to a single
6170 * buffer.
6171 */
6172 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6173 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6174 if (error) {
6175 if (error == EFBIG) {
6176 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6177 log(LOG_ERR, "%s: Tx packet consumes too many "
6178 "DMA segments, dropping...\n",
6179 device_xname(sc->sc_dev));
6180 wm_dump_mbuf_chain(sc, m0);
6181 m_freem(m0);
6182 continue;
6183 }
6184 /* Short on resources, just stop for now. */
6185 DPRINTF(WM_DEBUG_TX,
6186 ("%s: TX: dmamap load failed: %d\n",
6187 device_xname(sc->sc_dev), error));
6188 break;
6189 }
6190
6191 segs_needed = dmamap->dm_nsegs;
6192 if (use_tso) {
6193 /* For sentinel descriptor; see below. */
6194 segs_needed++;
6195 }
6196
6197 /*
6198 * Ensure we have enough descriptors free to describe
6199 * the packet. Note, we always reserve one descriptor
6200 * at the end of the ring due to the semantics of the
6201 * TDT register, plus one more in the event we need
6202 * to load offload context.
6203 */
6204 if (segs_needed > txq->txq_free - 2) {
6205 /*
6206 * Not enough free descriptors to transmit this
6207 * packet. We haven't committed anything yet,
6208 * so just unload the DMA map, put the packet
6209 * back on the queue, and punt. Notify the upper
6210 * layer that there are no more slots left.
6211 */
6212 DPRINTF(WM_DEBUG_TX,
6213 ("%s: TX: need %d (%d) descriptors, have %d\n",
6214 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6215 segs_needed, txq->txq_free - 1));
6216 ifp->if_flags |= IFF_OACTIVE;
6217 bus_dmamap_unload(sc->sc_dmat, dmamap);
6218 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6219 break;
6220 }
6221
6222 /*
6223 * Check for 82547 Tx FIFO bug. We need to do this
6224 * once we know we can transmit the packet, since we
6225 * do some internal FIFO space accounting here.
6226 */
6227 if (sc->sc_type == WM_T_82547 &&
6228 wm_82547_txfifo_bugchk(sc, m0)) {
6229 DPRINTF(WM_DEBUG_TX,
6230 ("%s: TX: 82547 Tx FIFO bug detected\n",
6231 device_xname(sc->sc_dev)));
6232 ifp->if_flags |= IFF_OACTIVE;
6233 bus_dmamap_unload(sc->sc_dmat, dmamap);
6234 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
6235 break;
6236 }
6237
6238 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6239
6240 DPRINTF(WM_DEBUG_TX,
6241 ("%s: TX: packet has %d (%d) DMA segments\n",
6242 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6243
6244 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6245
6246 /*
6247 * Store a pointer to the packet so that we can free it
6248 * later.
6249 *
6250 * Initially, we consider the number of descriptors the
6251 * packet uses to be the number of DMA segments. This may be
6252 * incremented by 1 if we do checksum offload (a descriptor
6253 * is used to set the checksum context).
6254 */
6255 txs->txs_mbuf = m0;
6256 txs->txs_firstdesc = txq->txq_next;
6257 txs->txs_ndesc = segs_needed;
6258
6259 /* Set up offload parameters for this packet. */
6260 if (m0->m_pkthdr.csum_flags &
6261 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6262 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6263 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6264 if (wm_tx_offload(sc, txs, &cksumcmd,
6265 &cksumfields) != 0) {
6266 /* Error message already displayed. */
6267 bus_dmamap_unload(sc->sc_dmat, dmamap);
6268 continue;
6269 }
6270 } else {
6271 cksumcmd = 0;
6272 cksumfields = 0;
6273 }
6274
6275 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6276
6277 /* Sync the DMA map. */
6278 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6279 BUS_DMASYNC_PREWRITE);
6280
6281 /* Initialize the transmit descriptor. */
6282 for (nexttx = txq->txq_next, seg = 0;
6283 seg < dmamap->dm_nsegs; seg++) {
6284 for (seglen = dmamap->dm_segs[seg].ds_len,
6285 curaddr = dmamap->dm_segs[seg].ds_addr;
6286 seglen != 0;
6287 curaddr += curlen, seglen -= curlen,
6288 nexttx = WM_NEXTTX(txq, nexttx)) {
6289 curlen = seglen;
6290
6291 /*
6292 * So says the Linux driver:
6293 * Work around for premature descriptor
6294 * write-backs in TSO mode. Append a
6295 * 4-byte sentinel descriptor.
6296 */
6297 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
6298 curlen > 8)
6299 curlen -= 4;
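				/*
				 * The 4 bytes trimmed here remain in
				 * seglen, so the next loop iteration
				 * emits them as the 4-byte sentinel
				 * descriptor.
				 */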
6300
6301 wm_set_dma_addr(
6302 &txq->txq_descs[nexttx].wtx_addr, curaddr);
6303 txq->txq_descs[nexttx].wtx_cmdlen
6304 = htole32(cksumcmd | curlen);
6305 txq->txq_descs[nexttx].wtx_fields.wtxu_status
6306 = 0;
6307 txq->txq_descs[nexttx].wtx_fields.wtxu_options
6308 = cksumfields;
6309 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6310 lasttx = nexttx;
6311
6312 DPRINTF(WM_DEBUG_TX,
6313 ("%s: TX: desc %d: low %#" PRIx64 ", "
6314 "len %#04zx\n",
6315 device_xname(sc->sc_dev), nexttx,
6316 (uint64_t)curaddr, curlen));
6317 }
6318 }
6319
6320 KASSERT(lasttx != -1);
6321
6322 /*
6323 * Set up the command byte on the last descriptor of
6324 * the packet. If we're in the interrupt delay window,
6325 * delay the interrupt.
6326 */
6327 txq->txq_descs[lasttx].wtx_cmdlen |=
6328 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6329
6330 /*
6331 * If VLANs are enabled and the packet has a VLAN tag, set
6332 * up the descriptor to encapsulate the packet for us.
6333 *
6334 * This is only valid on the last descriptor of the packet.
6335 */
6336 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6337 txq->txq_descs[lasttx].wtx_cmdlen |=
6338 htole32(WTX_CMD_VLE);
6339 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6340 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6341 }
6342
6343 txs->txs_lastdesc = lasttx;
6344
6345 DPRINTF(WM_DEBUG_TX,
6346 ("%s: TX: desc %d: cmdlen 0x%08x\n",
6347 device_xname(sc->sc_dev),
6348 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6349
6350 /* Sync the descriptors we're using. */
6351 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6352 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6353
6354 /* Give the packet to the chip. */
6355 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6356
6357 DPRINTF(WM_DEBUG_TX,
6358 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6359
6360 DPRINTF(WM_DEBUG_TX,
6361 ("%s: TX: finished transmitting packet, job %d\n",
6362 device_xname(sc->sc_dev), txq->txq_snext));
6363
6364 /* Advance the tx pointer. */
6365 txq->txq_free -= txs->txs_ndesc;
6366 txq->txq_next = nexttx;
6367
6368 txq->txq_sfree--;
6369 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6370
6371 /* Pass the packet to any BPF listeners. */
6372 bpf_mtap(ifp, m0);
6373 }
6374
6375 if (m0 != NULL) {
6376 ifp->if_flags |= IFF_OACTIVE;
6377 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6378 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6379 __func__));
6380 m_freem(m0);
6381 }
6382
6383 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6384 /* No more slots; notify upper layer. */
6385 ifp->if_flags |= IFF_OACTIVE;
6386 }
6387
6388 if (txq->txq_free != ofree) {
6389 /* Set a watchdog timer in case the chip flakes out. */
6390 ifp->if_timer = 5;
6391 }
6392 }
6393
6394 /*
6395 * wm_nq_tx_offload:
6396 *
6397 * Set up TCP/IP checksumming parameters for the
6398 * specified packet, for NEWQUEUE devices
6399 */
6400 static int
6401 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6402 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6403 {
6404 struct mbuf *m0 = txs->txs_mbuf;
6405 struct m_tag *mtag;
6406 uint32_t vl_len, mssidx, cmdc;
6407 struct ether_header *eh;
6408 int offset, iphl;
6409
6410 /*
6411 * XXX It would be nice if the mbuf pkthdr had offset
6412 * fields for the protocol headers.
6413 */
6414 *cmdlenp = 0;
6415 *fieldsp = 0;
6416
6417 eh = mtod(m0, struct ether_header *);
6418 switch (htons(eh->ether_type)) {
6419 case ETHERTYPE_IP:
6420 case ETHERTYPE_IPV6:
6421 offset = ETHER_HDR_LEN;
6422 break;
6423
6424 case ETHERTYPE_VLAN:
6425 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6426 break;
6427
6428 default:
6429 /* Don't support this protocol or encapsulation. */
6430 *do_csum = false;
6431 return 0;
6432 }
6433 *do_csum = true;
6434 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6435 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6436
6437 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6438 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6439
6440 if ((m0->m_pkthdr.csum_flags &
6441 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6442 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6443 } else {
6444 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6445 }
6446 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6447 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
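	/*
	 * E.g. an untagged TCP/IPv4 frame packs MACLEN = 14 and
	 * IPLEN = 20 into vl_len at this point.
	 */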
6448
6449 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6450 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6451 << NQTXC_VLLEN_VLAN_SHIFT);
6452 *cmdlenp |= NQTX_CMD_VLE;
6453 }
6454
6455 mssidx = 0;
6456
6457 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6458 int hlen = offset + iphl;
6459 int tcp_hlen;
6460 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6461
6462 if (__predict_false(m0->m_len <
6463 (hlen + sizeof(struct tcphdr)))) {
6464 /*
6465 * TCP/IP headers are not in the first mbuf; we need
6466 * to do this the slow and painful way. Let's just
6467 * hope this doesn't happen very often.
6468 */
6469 struct tcphdr th;
6470
6471 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
6472
6473 m_copydata(m0, hlen, sizeof(th), &th);
6474 if (v4) {
6475 struct ip ip;
6476
6477 m_copydata(m0, offset, sizeof(ip), &ip);
6478 ip.ip_len = 0;
6479 m_copyback(m0,
6480 offset + offsetof(struct ip, ip_len),
6481 sizeof(ip.ip_len), &ip.ip_len);
6482 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6483 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6484 } else {
6485 struct ip6_hdr ip6;
6486
6487 m_copydata(m0, offset, sizeof(ip6), &ip6);
6488 ip6.ip6_plen = 0;
6489 m_copyback(m0,
6490 offset + offsetof(struct ip6_hdr, ip6_plen),
6491 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6492 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6493 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6494 }
6495 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6496 sizeof(th.th_sum), &th.th_sum);
6497
6498 tcp_hlen = th.th_off << 2;
6499 } else {
6500 /*
6501 * TCP/IP headers are in the first mbuf; we can do
6502 * this the easy way.
6503 */
6504 struct tcphdr *th;
6505
6506 if (v4) {
6507 struct ip *ip =
6508 (void *)(mtod(m0, char *) + offset);
6509 th = (void *)(mtod(m0, char *) + hlen);
6510
6511 ip->ip_len = 0;
6512 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6513 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6514 } else {
6515 struct ip6_hdr *ip6 =
6516 (void *)(mtod(m0, char *) + offset);
6517 th = (void *)(mtod(m0, char *) + hlen);
6518
6519 ip6->ip6_plen = 0;
6520 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6521 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6522 }
6523 tcp_hlen = th->th_off << 2;
6524 }
6525 hlen += tcp_hlen;
6526 *cmdlenp |= NQTX_CMD_TSE;
6527
6528 if (v4) {
6529 WM_EVCNT_INCR(&sc->sc_ev_txtso);
6530 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6531 } else {
6532 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
6533 *fieldsp |= NQTXD_FIELDS_TUXSM;
6534 }
6535 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6536 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6537 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6538 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6539 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6540 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6541 } else {
6542 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6543 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6544 }
6545
6546 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6547 *fieldsp |= NQTXD_FIELDS_IXSM;
6548 cmdc |= NQTXC_CMD_IP4;
6549 }
6550
6551 if (m0->m_pkthdr.csum_flags &
6552 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6553 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
6554 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6555 cmdc |= NQTXC_CMD_TCP;
6556 } else {
6557 cmdc |= NQTXC_CMD_UDP;
6558 }
6559 cmdc |= NQTXC_CMD_IP4;
6560 *fieldsp |= NQTXD_FIELDS_TUXSM;
6561 }
6562 if (m0->m_pkthdr.csum_flags &
6563 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6564 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
6565 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6566 cmdc |= NQTXC_CMD_TCP;
6567 } else {
6568 cmdc |= NQTXC_CMD_UDP;
6569 }
6570 cmdc |= NQTXC_CMD_IP6;
6571 *fieldsp |= NQTXD_FIELDS_TUXSM;
6572 }
6573
6574 /* Fill in the context descriptor. */
6575 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6576 htole32(vl_len);
6577 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6578 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6579 htole32(cmdc);
6580 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6581 htole32(mssidx);
6582 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6583 DPRINTF(WM_DEBUG_TX,
6584 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6585 txq->txq_next, 0, vl_len));
6586 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6587 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6588 txs->txs_ndesc++;
6589 return 0;
6590 }
6591
6592 /*
6593 * wm_nq_start: [ifnet interface function]
6594 *
6595 * Start packet transmission on the interface for NEWQUEUE devices
6596 */
6597 static void
6598 wm_nq_start(struct ifnet *ifp)
6599 {
6600 struct wm_softc *sc = ifp->if_softc;
6601 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6602
6603 mutex_enter(txq->txq_lock);
6604 if (!sc->sc_stopping)
6605 wm_nq_start_locked(ifp);
6606 mutex_exit(txq->txq_lock);
6607 }
6608
6609 static void
6610 wm_nq_start_locked(struct ifnet *ifp)
6611 {
6612 struct wm_softc *sc = ifp->if_softc;
6613 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6614
6615 wm_nq_send_common_locked(ifp, txq, false);
6616 }
6617
6618 static inline int
6619 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
6620 {
6621 struct wm_softc *sc = ifp->if_softc;
6622 u_int cpuid = cpu_index(curcpu());
6623
6624 /*
6625 * Currently, a simple distribution strategy.
6626 * TODO:
6627 * distribute by flowid (RSS hash value).
6628 */
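	/*
	 * E.g. with sc_nqueues = 4 and a zero affinity offset,
	 * CPU index 5 always maps to Tx queue 1.
	 */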
6629 return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
6630 }
6631
6632 static int
6633 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
6634 {
6635 int qid;
6636 struct wm_softc *sc = ifp->if_softc;
6637 struct wm_txqueue *txq;
6638
6639 qid = wm_nq_select_txqueue(ifp, m);
6640 txq = &sc->sc_queue[qid].wmq_txq;
6641
6642 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6643 m_freem(m);
6644 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6645 return ENOBUFS;
6646 }
6647
6648 if (mutex_tryenter(txq->txq_lock)) {
6649 /* XXXX should be per TX queue */
6650 ifp->if_obytes += m->m_pkthdr.len;
6651 if (m->m_flags & M_MCAST)
6652 ifp->if_omcasts++;
6653
6654 if (!sc->sc_stopping)
6655 wm_nq_transmit_locked(ifp, txq);
6656 mutex_exit(txq->txq_lock);
6657 }
6658
6659 return 0;
6660 }
6661
6662 static void
6663 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6664 {
6665
6666 wm_nq_send_common_locked(ifp, txq, true);
6667 }
6668
6669 static void
6670 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6671 bool is_transmit)
6672 {
6673 struct wm_softc *sc = ifp->if_softc;
6674 struct mbuf *m0;
6675 struct m_tag *mtag;
6676 struct wm_txsoft *txs;
6677 bus_dmamap_t dmamap;
6678 int error, nexttx, lasttx = -1, seg, segs_needed;
6679 bool do_csum, sent;
6680
6681 KASSERT(mutex_owned(txq->txq_lock));
6682
6683 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6684 return;
6685 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
6686 return;
6687
6688 sent = false;
6689
6690 /*
6691 * Loop through the send queue, setting up transmit descriptors
6692 * until we drain the queue, or use up all available transmit
6693 * descriptors.
6694 */
6695 for (;;) {
6696 m0 = NULL;
6697
6698 /* Get a work queue entry. */
6699 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6700 wm_txeof(sc, txq);
6701 if (txq->txq_sfree == 0) {
6702 DPRINTF(WM_DEBUG_TX,
6703 ("%s: TX: no free job descriptors\n",
6704 device_xname(sc->sc_dev)));
6705 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6706 break;
6707 }
6708 }
6709
6710 /* Grab a packet off the queue. */
6711 if (is_transmit)
6712 m0 = pcq_get(txq->txq_interq);
6713 else
6714 IFQ_DEQUEUE(&ifp->if_snd, m0);
6715 if (m0 == NULL)
6716 break;
6717
6718 DPRINTF(WM_DEBUG_TX,
6719 ("%s: TX: have packet to transmit: %p\n",
6720 device_xname(sc->sc_dev), m0));
6721
6722 txs = &txq->txq_soft[txq->txq_snext];
6723 dmamap = txs->txs_dmamap;
6724
6725 /*
6726 * Load the DMA map. If this fails, the packet either
6727 * didn't fit in the allotted number of segments, or we
6728 * were short on resources. For the too-many-segments
6729 * case, we simply report an error and drop the packet,
6730 * since we can't sanely copy a jumbo packet to a single
6731 * buffer.
6732 */
6733 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6734 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6735 if (error) {
6736 if (error == EFBIG) {
6737 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6738 log(LOG_ERR, "%s: Tx packet consumes too many "
6739 "DMA segments, dropping...\n",
6740 device_xname(sc->sc_dev));
6741 wm_dump_mbuf_chain(sc, m0);
6742 m_freem(m0);
6743 continue;
6744 }
6745 /* Short on resources, just stop for now. */
6746 DPRINTF(WM_DEBUG_TX,
6747 ("%s: TX: dmamap load failed: %d\n",
6748 device_xname(sc->sc_dev), error));
6749 break;
6750 }
6751
6752 segs_needed = dmamap->dm_nsegs;
6753
6754 /*
6755 * Ensure we have enough descriptors free to describe
6756 * the packet. Note, we always reserve one descriptor
6757 * at the end of the ring due to the semantics of the
6758 * TDT register, plus one more in the event we need
6759 * to load offload context.
6760 */
6761 if (segs_needed > txq->txq_free - 2) {
6762 /*
6763 * Not enough free descriptors to transmit this
6764 * packet. We haven't committed anything yet,
6765 * so just unload the DMA map, put the packet
6766 * back on the queue, and punt. Notify the upper
6767 * layer that there are no more slots left.
6768 */
6769 DPRINTF(WM_DEBUG_TX,
6770 ("%s: TX: need %d (%d) descriptors, have %d\n",
6771 device_xname(sc->sc_dev), dmamap->dm_nsegs,
6772 segs_needed, txq->txq_free - 1));
6773 txq->txq_flags |= WM_TXQ_NO_SPACE;
6774 bus_dmamap_unload(sc->sc_dmat, dmamap);
6775 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6776 break;
6777 }
6778
6779 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6780
6781 DPRINTF(WM_DEBUG_TX,
6782 ("%s: TX: packet has %d (%d) DMA segments\n",
6783 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6784
6785 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6786
6787 /*
6788 * Store a pointer to the packet so that we can free it
6789 * later.
6790 *
6791 * Initially, we consider the number of descriptors the
6792 * packet uses to be the number of DMA segments. This may be
6793 * incremented by 1 if we do checksum offload (a descriptor
6794 * is used to set the checksum context).
6795 */
6796 txs->txs_mbuf = m0;
6797 txs->txs_firstdesc = txq->txq_next;
6798 txs->txs_ndesc = segs_needed;
6799
6800 /* Set up offload parameters for this packet. */
6801 uint32_t cmdlen, fields, dcmdlen;
6802 if (m0->m_pkthdr.csum_flags &
6803 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6804 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6805 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6806 if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
6807 &do_csum) != 0) {
6808 /* Error message already displayed. */
6809 bus_dmamap_unload(sc->sc_dmat, dmamap);
6810 continue;
6811 }
6812 } else {
6813 do_csum = false;
6814 cmdlen = 0;
6815 fields = 0;
6816 }
6817
6818 /* Sync the DMA map. */
6819 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6820 BUS_DMASYNC_PREWRITE);
6821
6822 /* Initialize the first transmit descriptor. */
6823 nexttx = txq->txq_next;
6824 if (!do_csum) {
6825 /* setup a legacy descriptor */
6826 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
6827 dmamap->dm_segs[0].ds_addr);
6828 txq->txq_descs[nexttx].wtx_cmdlen =
6829 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
6830 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
6831 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
6832 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
6833 NULL) {
6834 txq->txq_descs[nexttx].wtx_cmdlen |=
6835 htole32(WTX_CMD_VLE);
6836 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
6837 htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6838 } else {
6839 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6840 }
6841 dcmdlen = 0;
6842 } else {
6843 /* setup an advanced data descriptor */
6844 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6845 htole64(dmamap->dm_segs[0].ds_addr);
6846 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
6847 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6848 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
6849 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
6850 htole32(fields);
6851 DPRINTF(WM_DEBUG_TX,
6852 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
6853 device_xname(sc->sc_dev), nexttx,
6854 (uint64_t)dmamap->dm_segs[0].ds_addr));
6855 DPRINTF(WM_DEBUG_TX,
6856 ("\t 0x%08x%08x\n", fields,
6857 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
6858 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
6859 }
6860
6861 lasttx = nexttx;
6862 nexttx = WM_NEXTTX(txq, nexttx);
6863 /*
6864 * Fill in the next descriptors. The legacy and advanced
6865 * formats are the same here.
6866 */
6867 for (seg = 1; seg < dmamap->dm_nsegs;
6868 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
6869 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6870 htole64(dmamap->dm_segs[seg].ds_addr);
6871 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6872 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
6873 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
6874 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
6875 lasttx = nexttx;
6876
6877 DPRINTF(WM_DEBUG_TX,
6878 ("%s: TX: desc %d: %#" PRIx64 ", "
6879 "len %#04zx\n",
6880 device_xname(sc->sc_dev), nexttx,
6881 (uint64_t)dmamap->dm_segs[seg].ds_addr,
6882 dmamap->dm_segs[seg].ds_len));
6883 }
6884
6885 KASSERT(lasttx != -1);
6886
6887 /*
6888 * Set up the command byte on the last descriptor of
6889 * the packet. If we're in the interrupt delay window,
6890 * delay the interrupt.
6891 */
6892 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
6893 (NQTX_CMD_EOP | NQTX_CMD_RS));
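		/*
		 * The KASSERT above shows that the legacy and advanced
		 * layouts encode EOP and RS identically, so updating the
		 * legacy wtx_cmdlen field is safe for either format.
		 */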
6894 txq->txq_descs[lasttx].wtx_cmdlen |=
6895 htole32(WTX_CMD_EOP | WTX_CMD_RS);
6896
6897 txs->txs_lastdesc = lasttx;
6898
6899 DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
6900 device_xname(sc->sc_dev),
6901 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6902
6903 /* Sync the descriptors we're using. */
6904 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6905 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6906
6907 /* Give the packet to the chip. */
6908 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6909 sent = true;
6910
6911 DPRINTF(WM_DEBUG_TX,
6912 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6913
6914 DPRINTF(WM_DEBUG_TX,
6915 ("%s: TX: finished transmitting packet, job %d\n",
6916 device_xname(sc->sc_dev), txq->txq_snext));
6917
6918 /* Advance the tx pointer. */
6919 txq->txq_free -= txs->txs_ndesc;
6920 txq->txq_next = nexttx;
6921
6922 txq->txq_sfree--;
6923 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6924
6925 /* Pass the packet to any BPF listeners. */
6926 bpf_mtap(ifp, m0);
6927 }
6928
6929 if (m0 != NULL) {
6930 txq->txq_flags |= WM_TXQ_NO_SPACE;
6931 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6932 DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6933 __func__));
6934 m_freem(m0);
6935 }
6936
6937 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6938 /* No more slots; notify upper layer. */
6939 txq->txq_flags |= WM_TXQ_NO_SPACE;
6940 }
6941
6942 if (sent) {
6943 /* Set a watchdog timer in case the chip flakes out. */
6944 ifp->if_timer = 5;
6945 }
6946 }
6947
6948 /* Interrupt */
6949
6950 /*
6951 * wm_txeof:
6952 *
6953 * Helper; handle transmit interrupts.
6954 */
6955 static int
6956 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
6957 {
6958 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6959 struct wm_txsoft *txs;
6960 bool processed = false;
6961 int count = 0;
6962 int i;
6963 uint8_t status;
6964
6965 KASSERT(mutex_owned(txq->txq_lock));
6966
6967 if (sc->sc_stopping)
6968 return 0;
6969
6970 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6971 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
6972 else
6973 ifp->if_flags &= ~IFF_OACTIVE;
6974
6975 /*
6976 * Go through the Tx list and free mbufs for those
6977 * frames which have been transmitted.
6978 */
6979 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
6980 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
6981 txs = &txq->txq_soft[i];
6982
6983 DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
6984 device_xname(sc->sc_dev), i));
6985
6986 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
6987 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
6988
6989 status =
6990 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
6991 if ((status & WTX_ST_DD) == 0) {
6992 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
6993 BUS_DMASYNC_PREREAD);
6994 break;
6995 }
6996
6997 processed = true;
6998 count++;
6999 DPRINTF(WM_DEBUG_TX,
7000 ("%s: TX: job %d done: descs %d..%d\n",
7001 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
7002 txs->txs_lastdesc));
7003
7004 /*
7005 * XXX We should probably be using the statistics
7006 * XXX registers, but I don't know if they exist
7007 * XXX on chips before the i82544.
7008 */
7009
7010 #ifdef WM_EVENT_COUNTERS
7011 if (status & WTX_ST_TU)
7012 WM_EVCNT_INCR(&sc->sc_ev_tu);
7013 #endif /* WM_EVENT_COUNTERS */
7014
7015 if (status & (WTX_ST_EC | WTX_ST_LC)) {
7016 ifp->if_oerrors++;
7017 if (status & WTX_ST_LC)
7018 log(LOG_WARNING, "%s: late collision\n",
7019 device_xname(sc->sc_dev));
7020 else if (status & WTX_ST_EC) {
7021 ifp->if_collisions += 16;
7022 log(LOG_WARNING, "%s: excessive collisions\n",
7023 device_xname(sc->sc_dev));
7024 }
7025 } else
7026 ifp->if_opackets++;
7027
7028 txq->txq_free += txs->txs_ndesc;
7029 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
7030 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
7031 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7032 m_freem(txs->txs_mbuf);
7033 txs->txs_mbuf = NULL;
7034 }
7035
7036 /* Update the dirty transmit buffer pointer. */
7037 txq->txq_sdirty = i;
7038 DPRINTF(WM_DEBUG_TX,
7039 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
7040
7041 if (count != 0)
7042 rnd_add_uint32(&sc->rnd_source, count);
7043
7044 /*
7045 * If there are no more pending transmissions, cancel the watchdog
7046 * timer.
7047 */
7048 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
7049 ifp->if_timer = 0;
7050
7051 return processed;
7052 }
7053
7054 /*
7055 * wm_rxeof:
7056 *
7057 * Helper; handle receive interrupts.
7058 */
7059 static void
7060 wm_rxeof(struct wm_rxqueue *rxq)
7061 {
7062 struct wm_softc *sc = rxq->rxq_sc;
7063 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7064 struct wm_rxsoft *rxs;
7065 struct mbuf *m;
7066 int i, len;
7067 int count = 0;
7068 uint8_t status, errors;
7069 uint16_t vlantag;
7070
7071 KASSERT(mutex_owned(rxq->rxq_lock));
7072
7073 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
7074 rxs = &rxq->rxq_soft[i];
7075
7076 DPRINTF(WM_DEBUG_RX,
7077 ("%s: RX: checking descriptor %d\n",
7078 device_xname(sc->sc_dev), i));
7079
7080 wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7081
7082 status = rxq->rxq_descs[i].wrx_status;
7083 errors = rxq->rxq_descs[i].wrx_errors;
7084 len = le16toh(rxq->rxq_descs[i].wrx_len);
7085 vlantag = rxq->rxq_descs[i].wrx_special;
7086
7087 if ((status & WRX_ST_DD) == 0) {
7088 /* We have processed all of the receive descriptors. */
7089 wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
7090 break;
7091 }
7092
7093 count++;
7094 if (__predict_false(rxq->rxq_discard)) {
7095 DPRINTF(WM_DEBUG_RX,
7096 ("%s: RX: discarding contents of descriptor %d\n",
7097 device_xname(sc->sc_dev), i));
7098 wm_init_rxdesc(rxq, i);
7099 if (status & WRX_ST_EOP) {
7100 /* Reset our state. */
7101 DPRINTF(WM_DEBUG_RX,
7102 ("%s: RX: resetting rxdiscard -> 0\n",
7103 device_xname(sc->sc_dev)));
7104 rxq->rxq_discard = 0;
7105 }
7106 continue;
7107 }
7108
7109 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7110 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
7111
7112 m = rxs->rxs_mbuf;
7113
7114 /*
7115 * Add a new receive buffer to the ring, unless of
7116 * course the length is zero. Treat the latter as a
7117 * failed mapping.
7118 */
7119 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
7120 /*
7121 * Failed, throw away what we've done so
7122 * far, and discard the rest of the packet.
7123 */
7124 ifp->if_ierrors++;
7125 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7126 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
7127 wm_init_rxdesc(rxq, i);
7128 if ((status & WRX_ST_EOP) == 0)
7129 rxq->rxq_discard = 1;
7130 if (rxq->rxq_head != NULL)
7131 m_freem(rxq->rxq_head);
7132 WM_RXCHAIN_RESET(rxq);
7133 DPRINTF(WM_DEBUG_RX,
7134 ("%s: RX: Rx buffer allocation failed, "
7135 "dropping packet%s\n", device_xname(sc->sc_dev),
7136 rxq->rxq_discard ? " (discard)" : ""));
7137 continue;
7138 }
7139
7140 m->m_len = len;
7141 rxq->rxq_len += len;
7142 DPRINTF(WM_DEBUG_RX,
7143 ("%s: RX: buffer at %p len %d\n",
7144 device_xname(sc->sc_dev), m->m_data, len));
7145
7146 /* If this is not the end of the packet, keep looking. */
7147 if ((status & WRX_ST_EOP) == 0) {
7148 WM_RXCHAIN_LINK(rxq, m);
7149 DPRINTF(WM_DEBUG_RX,
7150 ("%s: RX: not yet EOP, rxlen -> %d\n",
7151 device_xname(sc->sc_dev), rxq->rxq_len));
7152 continue;
7153 }
7154
7155 /*
7156 * Okay, we have the entire packet now. The chip
7157 * includes the FCS except on I350, I354 and I21[01]
7158 * (not all chips can be configured to strip it),
7159 * so we normally need to trim it. We may also need
7160 * to shorten the previous mbuf in the chain if the
7161 * current mbuf is too short to hold the whole FCS.
7162 * Due to an erratum, the RCTL_SECRC bit in the RCTL
7163 * register is always set on I350, so we must not trim it there.
7164 */
7165 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7166 && (sc->sc_type != WM_T_I210)
7167 && (sc->sc_type != WM_T_I211)) {
7168 if (m->m_len < ETHER_CRC_LEN) {
7169 rxq->rxq_tail->m_len
7170 -= (ETHER_CRC_LEN - m->m_len);
7171 m->m_len = 0;
7172 } else
7173 m->m_len -= ETHER_CRC_LEN;
7174 len = rxq->rxq_len - ETHER_CRC_LEN;
7175 } else
7176 len = rxq->rxq_len;
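		/*
		 * E.g. if only 2 of the 4 FCS bytes landed in this
		 * mbuf, the tail mbuf above lost the other 2 and this
		 * mbuf's length went to zero.
		 */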
7177
7178 WM_RXCHAIN_LINK(rxq, m);
7179
7180 *rxq->rxq_tailp = NULL;
7181 m = rxq->rxq_head;
7182
7183 WM_RXCHAIN_RESET(rxq);
7184
7185 DPRINTF(WM_DEBUG_RX,
7186 ("%s: RX: have entire packet, len -> %d\n",
7187 device_xname(sc->sc_dev), len));
7188
7189 /* If an error occurred, update stats and drop the packet. */
7190 if (errors &
7191 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7192 if (errors & WRX_ER_SE)
7193 log(LOG_WARNING, "%s: symbol error\n",
7194 device_xname(sc->sc_dev));
7195 else if (errors & WRX_ER_SEQ)
7196 log(LOG_WARNING, "%s: receive sequence error\n",
7197 device_xname(sc->sc_dev));
7198 else if (errors & WRX_ER_CE)
7199 log(LOG_WARNING, "%s: CRC error\n",
7200 device_xname(sc->sc_dev));
7201 m_freem(m);
7202 continue;
7203 }
7204
7205 /* No errors. Receive the packet. */
7206 m_set_rcvif(m, ifp);
7207 m->m_pkthdr.len = len;
7208
7209 /*
7210 * If VLANs are enabled, VLAN packets have been unwrapped
7211 * for us. Associate the tag with the packet.
7212 */
7213 /* XXXX should check for i350 and i354 */
7214 if ((status & WRX_ST_VP) != 0) {
7215 VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
7216 }
7217
7218 /* Set up checksum info for this packet. */
7219 if ((status & WRX_ST_IXSM) == 0) {
7220 if (status & WRX_ST_IPCS) {
7221 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
7222 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7223 if (errors & WRX_ER_IPE)
7224 m->m_pkthdr.csum_flags |=
7225 M_CSUM_IPv4_BAD;
7226 }
7227 if (status & WRX_ST_TCPCS) {
7228 /*
7229 * Note: we don't know if this was TCP or UDP,
7230 * so we just set both bits, and expect the
7231 * upper layers to deal.
7232 */
7233 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
7234 m->m_pkthdr.csum_flags |=
7235 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7236 M_CSUM_TCPv6 | M_CSUM_UDPv6;
7237 if (errors & WRX_ER_TCPE)
7238 m->m_pkthdr.csum_flags |=
7239 M_CSUM_TCP_UDP_BAD;
7240 }
7241 }
7242
7243 ifp->if_ipackets++;
7244
7245 mutex_exit(rxq->rxq_lock);
7246
7247 /* Pass this up to any BPF listeners. */
7248 bpf_mtap(ifp, m);
7249
7250 /* Pass it on. */
7251 if_percpuq_enqueue(sc->sc_ipq, m);
7252
7253 mutex_enter(rxq->rxq_lock);
7254
7255 if (sc->sc_stopping)
7256 break;
7257 }
7258
7259 /* Update the receive pointer. */
7260 rxq->rxq_ptr = i;
7261 if (count != 0)
7262 rnd_add_uint32(&sc->rnd_source, count);
7263
7264 DPRINTF(WM_DEBUG_RX,
7265 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7266 }
7267
7268 /*
7269 * wm_linkintr_gmii:
7270 *
7271 * Helper; handle link interrupts for GMII.
7272 */
7273 static void
7274 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7275 {
7276
7277 KASSERT(WM_CORE_LOCKED(sc));
7278
7279 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7280 __func__));
7281
7282 if (icr & ICR_LSC) {
7283 uint32_t status = CSR_READ(sc, WMREG_STATUS);
7284
7285 if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7286 wm_gig_downshift_workaround_ich8lan(sc);
7287
7288 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7289 device_xname(sc->sc_dev)));
7290 mii_pollstat(&sc->sc_mii);
7291 if (sc->sc_type == WM_T_82543) {
7292 int miistatus, active;
7293
7294 /*
7295 * With 82543, we need to force speed and
7296 * duplex on the MAC equal to what the PHY
7297 * speed and duplex configuration is.
7298 */
7299 miistatus = sc->sc_mii.mii_media_status;
7300
7301 if (miistatus & IFM_ACTIVE) {
7302 active = sc->sc_mii.mii_media_active;
7303 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7304 switch (IFM_SUBTYPE(active)) {
7305 case IFM_10_T:
7306 sc->sc_ctrl |= CTRL_SPEED_10;
7307 break;
7308 case IFM_100_TX:
7309 sc->sc_ctrl |= CTRL_SPEED_100;
7310 break;
7311 case IFM_1000_T:
7312 sc->sc_ctrl |= CTRL_SPEED_1000;
7313 break;
7314 default:
7315 /*
7316 * Fiber?
7317 * Should not enter here.
7318 */
7319 printf("unknown media (%x)\n", active);
7320 break;
7321 }
7322 if (active & IFM_FDX)
7323 sc->sc_ctrl |= CTRL_FD;
7324 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7325 }
7326 } else if ((sc->sc_type == WM_T_ICH8)
7327 && (sc->sc_phytype == WMPHY_IGP_3)) {
7328 wm_kmrn_lock_loss_workaround_ich8lan(sc);
7329 } else if (sc->sc_type == WM_T_PCH) {
7330 wm_k1_gig_workaround_hv(sc,
7331 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7332 }
7333
7334 if ((sc->sc_phytype == WMPHY_82578)
7335 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7336 == IFM_1000_T)) {
7337
7338 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7339 delay(200*1000); /* XXX too big */
7340
7341 /* Link stall fix for link up */
7342 wm_gmii_hv_writereg(sc->sc_dev, 1,
7343 HV_MUX_DATA_CTRL,
7344 HV_MUX_DATA_CTRL_GEN_TO_MAC
7345 | HV_MUX_DATA_CTRL_FORCE_SPEED);
7346 wm_gmii_hv_writereg(sc->sc_dev, 1,
7347 HV_MUX_DATA_CTRL,
7348 HV_MUX_DATA_CTRL_GEN_TO_MAC);
7349 }
7350 }
7351 } else if (icr & ICR_RXSEQ) {
7352 DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
7353 device_xname(sc->sc_dev)));
7354 }
7355 }
7356
7357 /*
7358 * wm_linkintr_tbi:
7359 *
7360 * Helper; handle link interrupts for TBI mode.
7361 */
7362 static void
7363 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7364 {
7365 uint32_t status;
7366
7367 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7368 __func__));
7369
7370 status = CSR_READ(sc, WMREG_STATUS);
7371 if (icr & ICR_LSC) {
7372 if (status & STATUS_LU) {
7373 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7374 device_xname(sc->sc_dev),
7375 (status & STATUS_FD) ? "FDX" : "HDX"));
7376 /*
7377 * NOTE: CTRL will update TFCE and RFCE automatically,
7378 * so we should update sc->sc_ctrl
7379 */
7380
7381 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7382 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7383 sc->sc_fcrtl &= ~FCRTL_XONE;
7384 if (status & STATUS_FD)
7385 sc->sc_tctl |=
7386 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7387 else
7388 sc->sc_tctl |=
7389 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7390 if (sc->sc_ctrl & CTRL_TFCE)
7391 sc->sc_fcrtl |= FCRTL_XONE;
7392 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7393 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7394 WMREG_OLD_FCRTL : WMREG_FCRTL,
7395 sc->sc_fcrtl);
7396 sc->sc_tbi_linkup = 1;
7397 } else {
7398 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7399 device_xname(sc->sc_dev)));
7400 sc->sc_tbi_linkup = 0;
7401 }
7402 /* Update LED */
7403 wm_tbi_serdes_set_linkled(sc);
7404 } else if (icr & ICR_RXSEQ) {
7405 DPRINTF(WM_DEBUG_LINK,
7406 ("%s: LINK: Receive sequence error\n",
7407 device_xname(sc->sc_dev)));
7408 }
7409 }
7410
7411 /*
7412 * wm_linkintr_serdes:
7413 *
7414 *	Helper; handle link interrupts for SERDES mode.
7415 */
7416 static void
7417 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7418 {
7419 struct mii_data *mii = &sc->sc_mii;
7420 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7421 uint32_t pcs_adv, pcs_lpab, reg;
7422
7423 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7424 __func__));
7425
7426 if (icr & ICR_LSC) {
7427 /* Check PCS */
7428 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7429 if ((reg & PCS_LSTS_LINKOK) != 0) {
7430 mii->mii_media_status |= IFM_ACTIVE;
7431 sc->sc_tbi_linkup = 1;
7432 } else {
7433 			mii->mii_media_active |= IFM_NONE;
7434 sc->sc_tbi_linkup = 0;
7435 wm_tbi_serdes_set_linkled(sc);
7436 return;
7437 }
7438 mii->mii_media_active |= IFM_1000_SX;
7439 if ((reg & PCS_LSTS_FDX) != 0)
7440 mii->mii_media_active |= IFM_FDX;
7441 else
7442 mii->mii_media_active |= IFM_HDX;
7443 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7444 /* Check flow */
7445 reg = CSR_READ(sc, WMREG_PCS_LSTS);
7446 if ((reg & PCS_LSTS_AN_COMP) == 0) {
7447 DPRINTF(WM_DEBUG_LINK,
7448 ("XXX LINKOK but not ACOMP\n"));
7449 return;
7450 }
7451 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7452 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7453 DPRINTF(WM_DEBUG_LINK,
7454 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
7455 if ((pcs_adv & TXCW_SYM_PAUSE)
7456 && (pcs_lpab & TXCW_SYM_PAUSE)) {
7457 mii->mii_media_active |= IFM_FLOW
7458 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7459 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7460 && (pcs_adv & TXCW_ASYM_PAUSE)
7461 && (pcs_lpab & TXCW_SYM_PAUSE)
7462 && (pcs_lpab & TXCW_ASYM_PAUSE))
7463 mii->mii_media_active |= IFM_FLOW
7464 | IFM_ETH_TXPAUSE;
7465 else if ((pcs_adv & TXCW_SYM_PAUSE)
7466 && (pcs_adv & TXCW_ASYM_PAUSE)
7467 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7468 && (pcs_lpab & TXCW_ASYM_PAUSE))
7469 mii->mii_media_active |= IFM_FLOW
7470 | IFM_ETH_RXPAUSE;
7471 }
7472 /* Update LED */
7473 wm_tbi_serdes_set_linkled(sc);
7474 } else {
7475 DPRINTF(WM_DEBUG_LINK,
7476 ("%s: LINK: Receive sequence error\n",
7477 device_xname(sc->sc_dev)));
7478 }
7479 }
7480
7481 /*
7482 * wm_linkintr:
7483 *
7484 * Helper; handle link interrupts.
7485 */
7486 static void
7487 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7488 {
7489
7490 KASSERT(WM_CORE_LOCKED(sc));
7491
7492 if (sc->sc_flags & WM_F_HAS_MII)
7493 wm_linkintr_gmii(sc, icr);
7494 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7495 && (sc->sc_type >= WM_T_82575))
7496 wm_linkintr_serdes(sc, icr);
7497 else
7498 wm_linkintr_tbi(sc, icr);
7499 }
7500
7501 /*
7502 * wm_intr_legacy:
7503 *
7504 * Interrupt service routine for INTx and MSI.
7505 */
7506 static int
7507 wm_intr_legacy(void *arg)
7508 {
7509 struct wm_softc *sc = arg;
7510 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7511 struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
7512 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7513 uint32_t icr, rndval = 0;
7514 int handled = 0;
7515
7516 DPRINTF(WM_DEBUG_TX,
7517 ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
7518 while (1 /* CONSTCOND */) {
7519 icr = CSR_READ(sc, WMREG_ICR);
7520 if ((icr & sc->sc_icr) == 0)
7521 break;
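		/* Save the first ICR value read; it seeds the entropy pool below */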
7522 if (rndval == 0)
7523 rndval = icr;
7524
7525 mutex_enter(rxq->rxq_lock);
7526
7527 if (sc->sc_stopping) {
7528 mutex_exit(rxq->rxq_lock);
7529 break;
7530 }
7531
7532 handled = 1;
7533
7534 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7535 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
7536 DPRINTF(WM_DEBUG_RX,
7537 ("%s: RX: got Rx intr 0x%08x\n",
7538 device_xname(sc->sc_dev),
7539 icr & (ICR_RXDMT0 | ICR_RXT0)));
7540 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7541 }
7542 #endif
7543 wm_rxeof(rxq);
7544
7545 mutex_exit(rxq->rxq_lock);
7546 mutex_enter(txq->txq_lock);
7547
7548 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7549 if (icr & ICR_TXDW) {
7550 DPRINTF(WM_DEBUG_TX,
7551 ("%s: TX: got TXDW interrupt\n",
7552 device_xname(sc->sc_dev)));
7553 WM_EVCNT_INCR(&sc->sc_ev_txdw);
7554 }
7555 #endif
7556 wm_txeof(sc, txq);
7557
7558 mutex_exit(txq->txq_lock);
7559 WM_CORE_LOCK(sc);
7560
7561 if (icr & (ICR_LSC | ICR_RXSEQ)) {
7562 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7563 wm_linkintr(sc, icr);
7564 }
7565
7566 WM_CORE_UNLOCK(sc);
7567
7568 if (icr & ICR_RXO) {
7569 #if defined(WM_DEBUG)
7570 log(LOG_WARNING, "%s: Receive overrun\n",
7571 device_xname(sc->sc_dev));
7572 #endif /* defined(WM_DEBUG) */
7573 }
7574 }
7575
7576 rnd_add_uint32(&sc->rnd_source, rndval);
7577
7578 if (handled) {
7579 /* Try to get more packets going. */
7580 ifp->if_start(ifp);
7581 }
7582
7583 return handled;
7584 }
7585
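/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx interrupts of one queue
 *	pair when using MSI-X.
 */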
7586 static int
7587 wm_txrxintr_msix(void *arg)
7588 {
7589 struct wm_queue *wmq = arg;
7590 struct wm_txqueue *txq = &wmq->wmq_txq;
7591 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7592 struct wm_softc *sc = txq->txq_sc;
7593 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7594
7595 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
7596
7597 DPRINTF(WM_DEBUG_TX,
7598 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
7599
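	/* Mask this queue's Tx/Rx interrupts while we service them */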
7600 if (sc->sc_type == WM_T_82574)
7601 CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
7602 else if (sc->sc_type == WM_T_82575)
7603 CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
7604 else
7605 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
7606
7607 if (!sc->sc_stopping) {
7608 mutex_enter(txq->txq_lock);
7609
7610 WM_EVCNT_INCR(&sc->sc_ev_txdw);
7611 wm_txeof(sc, txq);
7612
7613 /* Try to get more packets going. */
7614 if (pcq_peek(txq->txq_interq) != NULL)
7615 wm_nq_transmit_locked(ifp, txq);
7616 /*
7617 		 * There is still some upper-layer processing that calls
7618 		 * ifp->if_start(), e.g. ALTQ.
7619 */
7620 if (wmq->wmq_id == 0) {
7621 if (!IFQ_IS_EMPTY(&ifp->if_snd))
7622 wm_nq_start_locked(ifp);
7623 }
7624 mutex_exit(txq->txq_lock);
7625 }
7626
7627 DPRINTF(WM_DEBUG_RX,
7628 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
7629
7630 if (!sc->sc_stopping) {
7631 mutex_enter(rxq->rxq_lock);
7632 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7633 wm_rxeof(rxq);
7634 mutex_exit(rxq->rxq_lock);
7635 }
7636
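	/* Unmask this queue's Tx/Rx interrupts */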
7637 if (sc->sc_type == WM_T_82574)
7638 CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
7639 else if (sc->sc_type == WM_T_82575)
7640 CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
7641 else
7642 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
7643
7644 return 1;
7645 }
7646
7647 /*
7648 * wm_linkintr_msix:
7649 *
7650 * Interrupt service routine for link status change for MSI-X.
7651 */
7652 static int
7653 wm_linkintr_msix(void *arg)
7654 {
7655 struct wm_softc *sc = arg;
7656 uint32_t reg;
7657
7658 DPRINTF(WM_DEBUG_LINK,
7659 ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
7660
7661 reg = CSR_READ(sc, WMREG_ICR);
7662 WM_CORE_LOCK(sc);
7663 if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
7664 goto out;
7665
7666 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7667 wm_linkintr(sc, ICR_LSC);
7668
7669 out:
7670 WM_CORE_UNLOCK(sc);
7671
7672 if (sc->sc_type == WM_T_82574)
7673 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
7674 else if (sc->sc_type == WM_T_82575)
7675 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
7676 else
7677 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
7678
7679 return 1;
7680 }
7681
7682 /*
7683 * Media related.
7684 * GMII, SGMII, TBI (and SERDES)
7685 */
7686
7687 /* Common */
7688
7689 /*
7690 * wm_tbi_serdes_set_linkled:
7691 *
7692 * Update the link LED on TBI and SERDES devices.
7693 */
7694 static void
7695 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
7696 {
7697
7698 if (sc->sc_tbi_linkup)
7699 sc->sc_ctrl |= CTRL_SWDPIN(0);
7700 else
7701 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7702
7703 	/* On 82540 and newer devices the link LED is active low */
7704 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7705
7706 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7707 }
7708
7709 /* GMII related */
7710
7711 /*
7712 * wm_gmii_reset:
7713 *
7714 * Reset the PHY.
7715 */
7716 static void
7717 wm_gmii_reset(struct wm_softc *sc)
7718 {
7719 uint32_t reg;
7720 int rv;
7721
7722 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7723 device_xname(sc->sc_dev), __func__));
7724 /* get phy semaphore */
7725 switch (sc->sc_type) {
7726 case WM_T_82571:
7727 case WM_T_82572:
7728 case WM_T_82573:
7729 case WM_T_82574:
7730 case WM_T_82583:
7731 /* XXX should get sw semaphore, too */
7732 rv = wm_get_swsm_semaphore(sc);
7733 break;
7734 case WM_T_82575:
7735 case WM_T_82576:
7736 case WM_T_82580:
7737 case WM_T_I350:
7738 case WM_T_I354:
7739 case WM_T_I210:
7740 case WM_T_I211:
7741 case WM_T_80003:
7742 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7743 break;
7744 case WM_T_ICH8:
7745 case WM_T_ICH9:
7746 case WM_T_ICH10:
7747 case WM_T_PCH:
7748 case WM_T_PCH2:
7749 case WM_T_PCH_LPT:
7750 case WM_T_PCH_SPT:
7751 rv = wm_get_swfwhw_semaphore(sc);
7752 break;
7753 default:
7754 		/* nothing to do */
7755 rv = 0;
7756 break;
7757 }
7758 if (rv != 0) {
7759 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7760 __func__);
7761 return;
7762 }
7763
7764 switch (sc->sc_type) {
7765 case WM_T_82542_2_0:
7766 case WM_T_82542_2_1:
7767 /* null */
7768 break;
7769 case WM_T_82543:
7770 /*
7771 * With 82543, we need to force speed and duplex on the MAC
7772 * equal to what the PHY speed and duplex configuration is.
7773 * In addition, we need to perform a hardware reset on the PHY
7774 * to take it out of reset.
7775 */
7776 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7777 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7778
7779 /* The PHY reset pin is active-low. */
7780 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7781 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
7782 CTRL_EXT_SWDPIN(4));
7783 reg |= CTRL_EXT_SWDPIO(4);
7784
7785 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7786 CSR_WRITE_FLUSH(sc);
7787 delay(10*1000);
7788
7789 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
7790 CSR_WRITE_FLUSH(sc);
7791 delay(150);
7792 #if 0
7793 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
7794 #endif
7795 delay(20*1000); /* XXX extra delay to get PHY ID? */
7796 break;
7797 case WM_T_82544: /* reset 10000us */
7798 case WM_T_82540:
7799 case WM_T_82545:
7800 case WM_T_82545_3:
7801 case WM_T_82546:
7802 case WM_T_82546_3:
7803 case WM_T_82541:
7804 case WM_T_82541_2:
7805 case WM_T_82547:
7806 case WM_T_82547_2:
7807 case WM_T_82571: /* reset 100us */
7808 case WM_T_82572:
7809 case WM_T_82573:
7810 case WM_T_82574:
7811 case WM_T_82575:
7812 case WM_T_82576:
7813 case WM_T_82580:
7814 case WM_T_I350:
7815 case WM_T_I354:
7816 case WM_T_I210:
7817 case WM_T_I211:
7818 case WM_T_82583:
7819 case WM_T_80003:
7820 /* generic reset */
7821 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7822 CSR_WRITE_FLUSH(sc);
7823 delay(20000);
7824 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7825 CSR_WRITE_FLUSH(sc);
7826 delay(20000);
7827
7828 if ((sc->sc_type == WM_T_82541)
7829 || (sc->sc_type == WM_T_82541_2)
7830 || (sc->sc_type == WM_T_82547)
7831 || (sc->sc_type == WM_T_82547_2)) {
7832 			/* workarounds for igp are done in igp_reset() */
7833 /* XXX add code to set LED after phy reset */
7834 }
7835 break;
7836 case WM_T_ICH8:
7837 case WM_T_ICH9:
7838 case WM_T_ICH10:
7839 case WM_T_PCH:
7840 case WM_T_PCH2:
7841 case WM_T_PCH_LPT:
7842 case WM_T_PCH_SPT:
7843 /* generic reset */
7844 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7845 CSR_WRITE_FLUSH(sc);
7846 delay(100);
7847 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7848 CSR_WRITE_FLUSH(sc);
7849 delay(150);
7850 break;
7851 default:
7852 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
7853 __func__);
7854 break;
7855 }
7856
7857 /* release PHY semaphore */
7858 switch (sc->sc_type) {
7859 case WM_T_82571:
7860 case WM_T_82572:
7861 case WM_T_82573:
7862 case WM_T_82574:
7863 case WM_T_82583:
7864 /* XXX should put sw semaphore, too */
7865 wm_put_swsm_semaphore(sc);
7866 break;
7867 case WM_T_82575:
7868 case WM_T_82576:
7869 case WM_T_82580:
7870 case WM_T_I350:
7871 case WM_T_I354:
7872 case WM_T_I210:
7873 case WM_T_I211:
7874 case WM_T_80003:
7875 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7876 break;
7877 case WM_T_ICH8:
7878 case WM_T_ICH9:
7879 case WM_T_ICH10:
7880 case WM_T_PCH:
7881 case WM_T_PCH2:
7882 case WM_T_PCH_LPT:
7883 case WM_T_PCH_SPT:
7884 wm_put_swfwhw_semaphore(sc);
7885 break;
7886 default:
7887 /* nothing to do */
7888 rv = 0;
7889 break;
7890 }
7891
7892 /* get_cfg_done */
7893 wm_get_cfg_done(sc);
7894
7895 /* extra setup */
7896 switch (sc->sc_type) {
7897 case WM_T_82542_2_0:
7898 case WM_T_82542_2_1:
7899 case WM_T_82543:
7900 case WM_T_82544:
7901 case WM_T_82540:
7902 case WM_T_82545:
7903 case WM_T_82545_3:
7904 case WM_T_82546:
7905 case WM_T_82546_3:
7906 case WM_T_82541_2:
7907 case WM_T_82547_2:
7908 case WM_T_82571:
7909 case WM_T_82572:
7910 case WM_T_82573:
7911 case WM_T_82575:
7912 case WM_T_82576:
7913 case WM_T_82580:
7914 case WM_T_I350:
7915 case WM_T_I354:
7916 case WM_T_I210:
7917 case WM_T_I211:
7918 case WM_T_80003:
7919 /* null */
7920 break;
7921 case WM_T_82574:
7922 case WM_T_82583:
7923 wm_lplu_d0_disable(sc);
7924 break;
7925 case WM_T_82541:
7926 case WM_T_82547:
7927 		/* XXX Configure LED after PHY reset */
7928 break;
7929 case WM_T_ICH8:
7930 case WM_T_ICH9:
7931 case WM_T_ICH10:
7932 case WM_T_PCH:
7933 case WM_T_PCH2:
7934 case WM_T_PCH_LPT:
7935 case WM_T_PCH_SPT:
7936 		/* Allow time for h/w to get to a quiescent state after reset */
7937 delay(10*1000);
7938
7939 if (sc->sc_type == WM_T_PCH)
7940 wm_hv_phy_workaround_ich8lan(sc);
7941
7942 if (sc->sc_type == WM_T_PCH2)
7943 wm_lv_phy_workaround_ich8lan(sc);
7944
7945 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
7946 /*
7947 * dummy read to clear the phy wakeup bit after lcd
7948 * reset
7949 */
7950 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
7951 }
7952
7953 /*
7954 		 * XXX Configure the LCD with the extended configuration region
7955 * in NVM
7956 */
7957
7958 /* Disable D0 LPLU. */
7959 if (sc->sc_type >= WM_T_PCH) /* PCH* */
7960 wm_lplu_d0_disable_pch(sc);
7961 else
7962 wm_lplu_d0_disable(sc); /* ICH* */
7963 break;
7964 default:
7965 panic("%s: unknown type\n", __func__);
7966 break;
7967 }
7968 }
7969
7970 /*
7971 * wm_get_phy_id_82575:
7972 *
7973 * Return PHY ID. Return -1 if it failed.
7974 */
7975 static int
7976 wm_get_phy_id_82575(struct wm_softc *sc)
7977 {
7978 uint32_t reg;
7979 int phyid = -1;
7980
7981 /* XXX */
7982 if ((sc->sc_flags & WM_F_SGMII) == 0)
7983 return -1;
7984
7985 if (wm_sgmii_uses_mdio(sc)) {
7986 switch (sc->sc_type) {
7987 case WM_T_82575:
7988 case WM_T_82576:
7989 reg = CSR_READ(sc, WMREG_MDIC);
7990 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
7991 break;
7992 case WM_T_82580:
7993 case WM_T_I350:
7994 case WM_T_I354:
7995 case WM_T_I210:
7996 case WM_T_I211:
7997 reg = CSR_READ(sc, WMREG_MDICNFG);
7998 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
7999 break;
8000 default:
8001 return -1;
8002 }
8003 }
8004
8005 return phyid;
8006 }
8007
8008
8009 /*
8010 * wm_gmii_mediainit:
8011 *
8012 * Initialize media for use on 1000BASE-T devices.
8013 */
8014 static void
8015 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
8016 {
8017 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8018 struct mii_data *mii = &sc->sc_mii;
8019 uint32_t reg;
8020
8021 /* We have GMII. */
8022 sc->sc_flags |= WM_F_HAS_MII;
8023
8024 if (sc->sc_type == WM_T_80003)
8025 sc->sc_tipg = TIPG_1000T_80003_DFLT;
8026 else
8027 sc->sc_tipg = TIPG_1000T_DFLT;
8028
8029 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
8030 if ((sc->sc_type == WM_T_82580)
8031 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
8032 || (sc->sc_type == WM_T_I211)) {
8033 reg = CSR_READ(sc, WMREG_PHPM);
8034 reg &= ~PHPM_GO_LINK_D;
8035 CSR_WRITE(sc, WMREG_PHPM, reg);
8036 }
8037
8038 /*
8039 * Let the chip set speed/duplex on its own based on
8040 * signals from the PHY.
8041 * XXXbouyer - I'm not sure this is right for the 80003,
8042 * the em driver only sets CTRL_SLU here - but it seems to work.
8043 */
8044 sc->sc_ctrl |= CTRL_SLU;
8045 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8046
8047 /* Initialize our media structures and probe the GMII. */
8048 mii->mii_ifp = ifp;
8049
8050 /*
8051 * Determine the PHY access method.
8052 *
8053 * For SGMII, use SGMII specific method.
8054 *
8055 * For some devices, we can determine the PHY access method
8056 * from sc_type.
8057 *
8058 * For ICH and PCH variants, it's difficult to determine the PHY
8059 * access method by sc_type, so use the PCI product ID for some
8060 * devices.
8061 	 * For other ICH8 variants, try igp's method first. If the PHY
8062 	 * can't be detected, then use bm's method.
8063 */
8064 switch (prodid) {
8065 case PCI_PRODUCT_INTEL_PCH_M_LM:
8066 case PCI_PRODUCT_INTEL_PCH_M_LC:
8067 /* 82577 */
8068 sc->sc_phytype = WMPHY_82577;
8069 break;
8070 case PCI_PRODUCT_INTEL_PCH_D_DM:
8071 case PCI_PRODUCT_INTEL_PCH_D_DC:
8072 /* 82578 */
8073 sc->sc_phytype = WMPHY_82578;
8074 break;
8075 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
8076 case PCI_PRODUCT_INTEL_PCH2_LV_V:
8077 /* 82579 */
8078 sc->sc_phytype = WMPHY_82579;
8079 break;
8080 case PCI_PRODUCT_INTEL_82801I_BM:
8081 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
8082 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
8083 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
8084 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
8085 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
8086 /* 82567 */
8087 sc->sc_phytype = WMPHY_BM;
8088 mii->mii_readreg = wm_gmii_bm_readreg;
8089 mii->mii_writereg = wm_gmii_bm_writereg;
8090 break;
8091 default:
8092 if (((sc->sc_flags & WM_F_SGMII) != 0)
8093 		    && !wm_sgmii_uses_mdio(sc)) {
8094 /* SGMII */
8095 mii->mii_readreg = wm_sgmii_readreg;
8096 mii->mii_writereg = wm_sgmii_writereg;
8097 } else if (sc->sc_type >= WM_T_80003) {
8098 /* 80003 */
8099 mii->mii_readreg = wm_gmii_i80003_readreg;
8100 mii->mii_writereg = wm_gmii_i80003_writereg;
8101 } else if (sc->sc_type >= WM_T_I210) {
8102 /* I210 and I211 */
8103 mii->mii_readreg = wm_gmii_gs40g_readreg;
8104 mii->mii_writereg = wm_gmii_gs40g_writereg;
8105 } else if (sc->sc_type >= WM_T_82580) {
8106 /* 82580, I350 and I354 */
8107 sc->sc_phytype = WMPHY_82580;
8108 mii->mii_readreg = wm_gmii_82580_readreg;
8109 mii->mii_writereg = wm_gmii_82580_writereg;
8110 } else if (sc->sc_type >= WM_T_82544) {
8111 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
8112 mii->mii_readreg = wm_gmii_i82544_readreg;
8113 mii->mii_writereg = wm_gmii_i82544_writereg;
8114 } else {
8115 mii->mii_readreg = wm_gmii_i82543_readreg;
8116 mii->mii_writereg = wm_gmii_i82543_writereg;
8117 }
8118 break;
8119 }
8120 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
8121 /* All PCH* use _hv_ */
8122 mii->mii_readreg = wm_gmii_hv_readreg;
8123 mii->mii_writereg = wm_gmii_hv_writereg;
8124 }
8125 mii->mii_statchg = wm_gmii_statchg;
8126
8127 wm_gmii_reset(sc);
8128
8129 sc->sc_ethercom.ec_mii = &sc->sc_mii;
8130 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8131 wm_gmii_mediastatus);
8132
8133 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8134 || (sc->sc_type == WM_T_82580)
8135 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8136 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8137 if ((sc->sc_flags & WM_F_SGMII) == 0) {
8138 /* Attach only one port */
8139 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8140 MII_OFFSET_ANY, MIIF_DOPAUSE);
8141 } else {
8142 int i, id;
8143 uint32_t ctrl_ext;
8144
8145 id = wm_get_phy_id_82575(sc);
8146 if (id != -1) {
8147 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8148 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8149 }
8150 if ((id == -1)
8151 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8152 /* Power on sgmii phy if it is disabled */
8153 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8154 CSR_WRITE(sc, WMREG_CTRL_EXT,
8155 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
8156 CSR_WRITE_FLUSH(sc);
8157 delay(300*1000); /* XXX too long */
8158
8159 /* from 1 to 8 */
8160 for (i = 1; i < 8; i++)
8161 mii_attach(sc->sc_dev, &sc->sc_mii,
8162 0xffffffff, i, MII_OFFSET_ANY,
8163 MIIF_DOPAUSE);
8164
8165 /* restore previous sfp cage power state */
8166 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8167 }
8168 }
8169 } else {
8170 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8171 MII_OFFSET_ANY, MIIF_DOPAUSE);
8172 }
8173
8174 /*
8175 	 * If the MAC is PCH2 or PCH_LPT and failed to detect an MII PHY, call
8176 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
8177 */
8178 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8179 (LIST_FIRST(&mii->mii_phys) == NULL)) {
8180 wm_set_mdio_slow_mode_hv(sc);
8181 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8182 MII_OFFSET_ANY, MIIF_DOPAUSE);
8183 }
8184
8185 /*
8186 * (For ICH8 variants)
8187 * If PHY detection failed, use BM's r/w function and retry.
8188 */
8189 if (LIST_FIRST(&mii->mii_phys) == NULL) {
8190 /* if failed, retry with *_bm_* */
8191 mii->mii_readreg = wm_gmii_bm_readreg;
8192 mii->mii_writereg = wm_gmii_bm_writereg;
8193
8194 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8195 MII_OFFSET_ANY, MIIF_DOPAUSE);
8196 }
8197
8198 if (LIST_FIRST(&mii->mii_phys) == NULL) {
8199 		/* No PHY was found */
8200 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
8201 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
8202 sc->sc_phytype = WMPHY_NONE;
8203 } else {
8204 /*
8205 * PHY Found!
8206 * Check PHY type.
8207 */
8208 uint32_t model;
8209 struct mii_softc *child;
8210
8211 child = LIST_FIRST(&mii->mii_phys);
8212 model = child->mii_mpd_model;
8213 if (model == MII_MODEL_yyINTEL_I82566)
8214 sc->sc_phytype = WMPHY_IGP_3;
8215
8216 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8217 }
8218 }
8219
8220 /*
8221 * wm_gmii_mediachange: [ifmedia interface function]
8222 *
8223 * Set hardware to newly-selected media on a 1000BASE-T device.
8224 */
8225 static int
8226 wm_gmii_mediachange(struct ifnet *ifp)
8227 {
8228 struct wm_softc *sc = ifp->if_softc;
8229 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8230 int rc;
8231
8232 if ((ifp->if_flags & IFF_UP) == 0)
8233 return 0;
8234
8235 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8236 sc->sc_ctrl |= CTRL_SLU;
8237 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8238 || (sc->sc_type > WM_T_82543)) {
8239 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8240 } else {
8241 sc->sc_ctrl &= ~CTRL_ASDE;
8242 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8243 if (ife->ifm_media & IFM_FDX)
8244 sc->sc_ctrl |= CTRL_FD;
8245 switch (IFM_SUBTYPE(ife->ifm_media)) {
8246 case IFM_10_T:
8247 sc->sc_ctrl |= CTRL_SPEED_10;
8248 break;
8249 case IFM_100_TX:
8250 sc->sc_ctrl |= CTRL_SPEED_100;
8251 break;
8252 case IFM_1000_T:
8253 sc->sc_ctrl |= CTRL_SPEED_1000;
8254 break;
8255 default:
8256 panic("wm_gmii_mediachange: bad media 0x%x",
8257 ife->ifm_media);
8258 }
8259 }
8260 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8261 if (sc->sc_type <= WM_T_82543)
8262 wm_gmii_reset(sc);
8263
8264 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8265 return 0;
8266 return rc;
8267 }
8268
8269 /*
8270 * wm_gmii_mediastatus: [ifmedia interface function]
8271 *
8272 * Get the current interface media status on a 1000BASE-T device.
8273 */
8274 static void
8275 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8276 {
8277 struct wm_softc *sc = ifp->if_softc;
8278
8279 ether_mediastatus(ifp, ifmr);
8280 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8281 | sc->sc_flowflags;
8282 }
8283
8284 #define MDI_IO CTRL_SWDPIN(2)
8285 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
8286 #define MDI_CLK CTRL_SWDPIN(3)
8287
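/*
 * wm_i82543_mii_sendbits:
 *
 *	Clock out the low-order `nbits' bits of `data', MSB first, on the
 *	bit-banged MDIO interface built from the 82543 software-definable
 *	pins.
 */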
8288 static void
8289 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8290 {
8291 uint32_t i, v;
8292
8293 v = CSR_READ(sc, WMREG_CTRL);
8294 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8295 v |= MDI_DIR | CTRL_SWDPIO(3);
8296
8297 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8298 if (data & i)
8299 v |= MDI_IO;
8300 else
8301 v &= ~MDI_IO;
8302 CSR_WRITE(sc, WMREG_CTRL, v);
8303 CSR_WRITE_FLUSH(sc);
8304 delay(10);
8305 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8306 CSR_WRITE_FLUSH(sc);
8307 delay(10);
8308 CSR_WRITE(sc, WMREG_CTRL, v);
8309 CSR_WRITE_FLUSH(sc);
8310 delay(10);
8311 }
8312 }
8313
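/*
 * wm_i82543_mii_recvbits:
 *
 *	Turn the MDIO pin around to input and clock in a 16-bit value,
 *	MSB first, from the bit-banged MDIO interface.
 */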
8314 static uint32_t
8315 wm_i82543_mii_recvbits(struct wm_softc *sc)
8316 {
8317 uint32_t v, i, data = 0;
8318
8319 v = CSR_READ(sc, WMREG_CTRL);
8320 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8321 v |= CTRL_SWDPIO(3);
8322
8323 CSR_WRITE(sc, WMREG_CTRL, v);
8324 CSR_WRITE_FLUSH(sc);
8325 delay(10);
8326 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8327 CSR_WRITE_FLUSH(sc);
8328 delay(10);
8329 CSR_WRITE(sc, WMREG_CTRL, v);
8330 CSR_WRITE_FLUSH(sc);
8331 delay(10);
8332
8333 for (i = 0; i < 16; i++) {
8334 data <<= 1;
8335 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8336 CSR_WRITE_FLUSH(sc);
8337 delay(10);
8338 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8339 data |= 1;
8340 CSR_WRITE(sc, WMREG_CTRL, v);
8341 CSR_WRITE_FLUSH(sc);
8342 delay(10);
8343 }
8344
8345 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8346 CSR_WRITE_FLUSH(sc);
8347 delay(10);
8348 CSR_WRITE(sc, WMREG_CTRL, v);
8349 CSR_WRITE_FLUSH(sc);
8350 delay(10);
8351
8352 return data;
8353 }
8354
8355 #undef MDI_IO
8356 #undef MDI_DIR
8357 #undef MDI_CLK
8358
8359 /*
8360 * wm_gmii_i82543_readreg: [mii interface function]
8361 *
8362 * Read a PHY register on the GMII (i82543 version).
8363 */
8364 static int
8365 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8366 {
8367 struct wm_softc *sc = device_private(self);
8368 int rv;
8369
8370 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8371 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8372 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8373 rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8374
8375 DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8376 device_xname(sc->sc_dev), phy, reg, rv));
8377
8378 return rv;
8379 }
8380
8381 /*
8382 * wm_gmii_i82543_writereg: [mii interface function]
8383 *
8384 * Write a PHY register on the GMII (i82543 version).
8385 */
8386 static void
8387 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8388 {
8389 struct wm_softc *sc = device_private(self);
8390
8391 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8392 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8393 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8394 (MII_COMMAND_START << 30), 32);
8395 }
8396
8397 /*
8398 * wm_gmii_i82544_readreg: [mii interface function]
8399 *
8400 * Read a PHY register on the GMII.
8401 */
8402 static int
8403 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8404 {
8405 struct wm_softc *sc = device_private(self);
8406 uint32_t mdic = 0;
8407 int i, rv;
8408
8409 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8410 MDIC_REGADD(reg));
8411
8412 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8413 mdic = CSR_READ(sc, WMREG_MDIC);
8414 if (mdic & MDIC_READY)
8415 break;
8416 delay(50);
8417 }
8418
8419 if ((mdic & MDIC_READY) == 0) {
8420 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8421 device_xname(sc->sc_dev), phy, reg);
8422 rv = 0;
8423 } else if (mdic & MDIC_E) {
8424 #if 0 /* This is normal if no PHY is present. */
8425 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8426 device_xname(sc->sc_dev), phy, reg);
8427 #endif
8428 rv = 0;
8429 } else {
8430 rv = MDIC_DATA(mdic);
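		/* All-ones data usually means no PHY responded */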
8431 if (rv == 0xffff)
8432 rv = 0;
8433 }
8434
8435 return rv;
8436 }
8437
8438 /*
8439 * wm_gmii_i82544_writereg: [mii interface function]
8440 *
8441 * Write a PHY register on the GMII.
8442 */
8443 static void
8444 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8445 {
8446 struct wm_softc *sc = device_private(self);
8447 uint32_t mdic = 0;
8448 int i;
8449
8450 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8451 MDIC_REGADD(reg) | MDIC_DATA(val));
8452
8453 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8454 mdic = CSR_READ(sc, WMREG_MDIC);
8455 if (mdic & MDIC_READY)
8456 break;
8457 delay(50);
8458 }
8459
8460 if ((mdic & MDIC_READY) == 0)
8461 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8462 device_xname(sc->sc_dev), phy, reg);
8463 else if (mdic & MDIC_E)
8464 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8465 device_xname(sc->sc_dev), phy, reg);
8466 }
8467
8468 /*
8469 * wm_gmii_i80003_readreg: [mii interface function]
8470 *
8471 * Read a PHY register on the kumeran
8472 * This could be handled by the PHY layer if we didn't have to lock the
8473  * resource ...
8474 */
8475 static int
8476 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8477 {
8478 struct wm_softc *sc = device_private(self);
8479 int sem;
8480 int rv;
8481
8482 if (phy != 1) /* only one PHY on kumeran bus */
8483 return 0;
8484
8485 sem = swfwphysem[sc->sc_funcid];
8486 if (wm_get_swfw_semaphore(sc, sem)) {
8487 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8488 __func__);
8489 return 0;
8490 }
8491
8492 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8493 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8494 reg >> GG82563_PAGE_SHIFT);
8495 } else {
8496 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8497 reg >> GG82563_PAGE_SHIFT);
8498 }
8499 	/* Wait an extra 200us to work around the MDIC ready-bit bug */
8500 delay(200);
8501 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8502 delay(200);
8503
8504 wm_put_swfw_semaphore(sc, sem);
8505 return rv;
8506 }
8507
8508 /*
8509 * wm_gmii_i80003_writereg: [mii interface function]
8510 *
8511 * Write a PHY register on the kumeran.
8512 * This could be handled by the PHY layer if we didn't have to lock the
8513  * resource ...
8514 */
8515 static void
8516 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8517 {
8518 struct wm_softc *sc = device_private(self);
8519 int sem;
8520
8521 if (phy != 1) /* only one PHY on kumeran bus */
8522 return;
8523
8524 sem = swfwphysem[sc->sc_funcid];
8525 if (wm_get_swfw_semaphore(sc, sem)) {
8526 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8527 __func__);
8528 return;
8529 }
8530
8531 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8532 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8533 reg >> GG82563_PAGE_SHIFT);
8534 } else {
8535 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8536 reg >> GG82563_PAGE_SHIFT);
8537 }
8538 	/* Wait an extra 200us to work around the MDIC ready-bit bug */
8539 delay(200);
8540 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8541 delay(200);
8542
8543 wm_put_swfw_semaphore(sc, sem);
8544 }
8545
8546 /*
8547 * wm_gmii_bm_readreg: [mii interface function]
8548 *
8549  *	Read a PHY register on the BM PHY (82567).
8550  * This could be handled by the PHY layer if we didn't have to lock the
8551  * resource ...
8552 */
8553 static int
8554 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8555 {
8556 struct wm_softc *sc = device_private(self);
8557 int sem;
8558 int rv;
8559
8560 sem = swfwphysem[sc->sc_funcid];
8561 if (wm_get_swfw_semaphore(sc, sem)) {
8562 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8563 __func__);
8564 return 0;
8565 }
8566
8567 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8568 if (phy == 1)
8569 wm_gmii_i82544_writereg(self, phy,
8570 MII_IGPHY_PAGE_SELECT, reg);
8571 else
8572 wm_gmii_i82544_writereg(self, phy,
8573 GG82563_PHY_PAGE_SELECT,
8574 reg >> GG82563_PAGE_SHIFT);
8575 }
8576
8577 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8578 wm_put_swfw_semaphore(sc, sem);
8579 return rv;
8580 }
8581
8582 /*
8583 * wm_gmii_bm_writereg: [mii interface function]
8584 *
8585  *	Write a PHY register on the BM PHY (82567).
8586  * This could be handled by the PHY layer if we didn't have to lock the
8587  * resource ...
8588 */
8589 static void
8590 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
8591 {
8592 struct wm_softc *sc = device_private(self);
8593 int sem;
8594
8595 sem = swfwphysem[sc->sc_funcid];
8596 if (wm_get_swfw_semaphore(sc, sem)) {
8597 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8598 __func__);
8599 return;
8600 }
8601
8602 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8603 if (phy == 1)
8604 wm_gmii_i82544_writereg(self, phy,
8605 MII_IGPHY_PAGE_SELECT, reg);
8606 else
8607 wm_gmii_i82544_writereg(self, phy,
8608 GG82563_PHY_PAGE_SELECT,
8609 reg >> GG82563_PAGE_SHIFT);
8610 }
8611
8612 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8613 wm_put_swfw_semaphore(sc, sem);
8614 }
8615
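/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write a BM PHY wakeup register.  Registers on
 *	page 800 are reached indirectly: enable wakeup register access
 *	from page 769, issue the address and data opcodes on page 800,
 *	then restore the original page 769 enable bits.
 */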
8616 static void
8617 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
8618 {
8619 struct wm_softc *sc = device_private(self);
8620 uint16_t regnum = BM_PHY_REG_NUM(offset);
8621 uint16_t wuce;
8622
8623 /* XXX Gig must be disabled for MDIO accesses to page 800 */
8624 if (sc->sc_type == WM_T_PCH) {
8625 		/* XXX the e1000 driver does nothing here... why? */
8626 }
8627
8628 /* Set page 769 */
8629 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8630 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8631
8632 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
8633
8634 wuce &= ~BM_WUC_HOST_WU_BIT;
8635 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
8636 wuce | BM_WUC_ENABLE_BIT);
8637
8638 /* Select page 800 */
8639 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8640 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
8641
8642 /* Write page 800 */
8643 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
8644
8645 if (rd)
8646 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
8647 else
8648 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
8649
8650 /* Set page 769 */
8651 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8652 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8653
8654 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
8655 }
8656
8657 /*
8658 * wm_gmii_hv_readreg: [mii interface function]
8659 *
8660  *	Read a PHY register on the HV PHY (PCH and newer).
8661  * This could be handled by the PHY layer if we didn't have to lock the
8662  * resource ...
8663 */
8664 static int
8665 wm_gmii_hv_readreg(device_t self, int phy, int reg)
8666 {
8667 struct wm_softc *sc = device_private(self);
8668 uint16_t page = BM_PHY_REG_PAGE(reg);
8669 uint16_t regnum = BM_PHY_REG_NUM(reg);
8670 uint16_t val;
8671 int rv;
8672
8673 if (wm_get_swfwhw_semaphore(sc)) {
8674 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8675 __func__);
8676 return 0;
8677 }
8678
8679 /* XXX Workaround failure in MDIO access while cable is disconnected */
8680 if (sc->sc_phytype == WMPHY_82577) {
8681 /* XXX must write */
8682 }
8683
8684 /* Page 800 works differently than the rest so it has its own func */
8685 if (page == BM_WUC_PAGE) {
8686 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
8687 return val;
8688 }
8689
8690 /*
8691 	 * Pages lower than 768 work differently than the rest, so they would
8692 	 * need their own function (not implemented here)
8693 */
8694 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8695 printf("gmii_hv_readreg!!!\n");
8696 return 0;
8697 }
8698
8699 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8700 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8701 page << BME1000_PAGE_SHIFT);
8702 }
8703
8704 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
8705 wm_put_swfwhw_semaphore(sc);
8706 return rv;
8707 }
8708
8709 /*
8710 * wm_gmii_hv_writereg: [mii interface function]
8711 *
8712  *	Write a PHY register on the HV PHY (PCH and newer).
8713  * This could be handled by the PHY layer if we didn't have to lock the
8714  * resource ...
8715 */
8716 static void
8717 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
8718 {
8719 struct wm_softc *sc = device_private(self);
8720 uint16_t page = BM_PHY_REG_PAGE(reg);
8721 uint16_t regnum = BM_PHY_REG_NUM(reg);
8722
8723 if (wm_get_swfwhw_semaphore(sc)) {
8724 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8725 __func__);
8726 return;
8727 }
8728
8729 /* XXX Workaround failure in MDIO access while cable is disconnected */
8730
8731 /* Page 800 works differently than the rest so it has its own func */
8732 if (page == BM_WUC_PAGE) {
8733 uint16_t tmp;
8734
8735 tmp = val;
8736 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
8737 return;
8738 }
8739
8740 /*
8741 	 * Pages lower than 768 work differently than the rest, so they would
8742 	 * need their own function (not implemented here)
8743 */
8744 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8745 printf("gmii_hv_writereg!!!\n");
8746 return;
8747 }
8748
8749 /*
8750 * XXX Workaround MDIO accesses being disabled after entering IEEE
8751 * Power Down (whenever bit 11 of the PHY control register is set)
8752 */
8753
8754 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8755 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8756 page << BME1000_PAGE_SHIFT);
8757 }
8758
8759 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
8760 wm_put_swfwhw_semaphore(sc);
8761 }
8762
8763 /*
8764 * wm_gmii_82580_readreg: [mii interface function]
8765 *
8766 * Read a PHY register on the 82580 and I350.
8767 * This could be handled by the PHY layer if we didn't have to lock the
8768  * resource ...
8769 */
8770 static int
8771 wm_gmii_82580_readreg(device_t self, int phy, int reg)
8772 {
8773 struct wm_softc *sc = device_private(self);
8774 int sem;
8775 int rv;
8776
8777 sem = swfwphysem[sc->sc_funcid];
8778 if (wm_get_swfw_semaphore(sc, sem)) {
8779 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8780 __func__);
8781 return 0;
8782 }
8783
8784 rv = wm_gmii_i82544_readreg(self, phy, reg);
8785
8786 wm_put_swfw_semaphore(sc, sem);
8787 return rv;
8788 }
8789
8790 /*
8791 * wm_gmii_82580_writereg: [mii interface function]
8792 *
8793 * Write a PHY register on the 82580 and I350.
8794 * This could be handled by the PHY layer if we didn't have to lock the
8795  * resource ...
8796 */
8797 static void
8798 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
8799 {
8800 struct wm_softc *sc = device_private(self);
8801 int sem;
8802
8803 sem = swfwphysem[sc->sc_funcid];
8804 if (wm_get_swfw_semaphore(sc, sem)) {
8805 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8806 __func__);
8807 return;
8808 }
8809
8810 wm_gmii_i82544_writereg(self, phy, reg, val);
8811
8812 wm_put_swfw_semaphore(sc, sem);
8813 }
8814
8815 /*
8816 * wm_gmii_gs40g_readreg: [mii interface function]
8817 *
8818  *	Read a PHY register on the I210 and I211.
8819  * This could be handled by the PHY layer if we didn't have to lock the
8820  * resource ...
8821 */
8822 static int
8823 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
8824 {
8825 struct wm_softc *sc = device_private(self);
8826 int sem;
8827 int page, offset;
8828 int rv;
8829
8830 /* Acquire semaphore */
8831 sem = swfwphysem[sc->sc_funcid];
8832 if (wm_get_swfw_semaphore(sc, sem)) {
8833 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8834 __func__);
8835 return 0;
8836 }
8837
8838 /* Page select */
8839 page = reg >> GS40G_PAGE_SHIFT;
8840 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8841
8842 /* Read reg */
8843 offset = reg & GS40G_OFFSET_MASK;
8844 rv = wm_gmii_i82544_readreg(self, phy, offset);
8845
8846 wm_put_swfw_semaphore(sc, sem);
8847 return rv;
8848 }
8849
8850 /*
8851 * wm_gmii_gs40g_writereg: [mii interface function]
8852 *
8853 * Write a PHY register on the I210 and I211.
8854 * This could be handled by the PHY layer if we didn't have to lock the
8855  * resource ...
8856 */
8857 static void
8858 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
8859 {
8860 struct wm_softc *sc = device_private(self);
8861 int sem;
8862 int page, offset;
8863
8864 /* Acquire semaphore */
8865 sem = swfwphysem[sc->sc_funcid];
8866 if (wm_get_swfw_semaphore(sc, sem)) {
8867 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8868 __func__);
8869 return;
8870 }
8871
8872 /* Page select */
8873 page = reg >> GS40G_PAGE_SHIFT;
8874 wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8875
8876 /* Write reg */
8877 offset = reg & GS40G_OFFSET_MASK;
8878 wm_gmii_i82544_writereg(self, phy, offset, val);
8879
8880 /* Release semaphore */
8881 wm_put_swfw_semaphore(sc, sem);
8882 }
8883
8884 /*
8885 * wm_gmii_statchg: [mii interface function]
8886 *
8887 * Callback from MII layer when media changes.
8888 */
8889 static void
8890 wm_gmii_statchg(struct ifnet *ifp)
8891 {
8892 struct wm_softc *sc = ifp->if_softc;
8893 struct mii_data *mii = &sc->sc_mii;
8894
8895 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
8896 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8897 sc->sc_fcrtl &= ~FCRTL_XONE;
8898
8899 /*
8900 * Get flow control negotiation result.
8901 */
8902 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
8903 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
8904 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
8905 mii->mii_media_active &= ~IFM_ETH_FMASK;
8906 }
8907
8908 if (sc->sc_flowflags & IFM_FLOW) {
8909 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
8910 sc->sc_ctrl |= CTRL_TFCE;
8911 sc->sc_fcrtl |= FCRTL_XONE;
8912 }
8913 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
8914 sc->sc_ctrl |= CTRL_RFCE;
8915 }
8916
8917 if (sc->sc_mii.mii_media_active & IFM_FDX) {
8918 DPRINTF(WM_DEBUG_LINK,
8919 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
8920 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8921 } else {
8922 DPRINTF(WM_DEBUG_LINK,
8923 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
8924 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8925 }
8926
8927 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8928 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8929 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
8930 : WMREG_FCRTL, sc->sc_fcrtl);
8931 if (sc->sc_type == WM_T_80003) {
8932 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
8933 case IFM_1000_T:
8934 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8935 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
8936 sc->sc_tipg = TIPG_1000T_80003_DFLT;
8937 break;
8938 default:
8939 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8940 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
8941 sc->sc_tipg = TIPG_10_100_80003_DFLT;
8942 break;
8943 }
8944 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
8945 }
8946 }
8947
8948 /*
8949 * wm_kmrn_readreg:
8950 *
8951 * Read a kumeran register
8952 */
8953 static int
8954 wm_kmrn_readreg(struct wm_softc *sc, int reg)
8955 {
8956 int rv;
8957
8958 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8959 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8960 aprint_error_dev(sc->sc_dev,
8961 "%s: failed to get semaphore\n", __func__);
8962 return 0;
8963 }
8964 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8965 if (wm_get_swfwhw_semaphore(sc)) {
8966 aprint_error_dev(sc->sc_dev,
8967 "%s: failed to get semaphore\n", __func__);
8968 return 0;
8969 }
8970 }
8971
8972 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8973 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8974 KUMCTRLSTA_REN);
8975 CSR_WRITE_FLUSH(sc);
8976 delay(2);
8977
8978 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
8979
8980 if (sc->sc_flags & WM_F_LOCK_SWFW)
8981 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8982 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8983 wm_put_swfwhw_semaphore(sc);
8984
8985 return rv;
8986 }
8987
8988 /*
8989 * wm_kmrn_writereg:
8990 *
8991 * Write a kumeran register
8992 */
8993 static void
8994 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
8995 {
8996
8997 if (sc->sc_flags & WM_F_LOCK_SWFW) {
8998 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8999 aprint_error_dev(sc->sc_dev,
9000 "%s: failed to get semaphore\n", __func__);
9001 return;
9002 }
9003 } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
9004 if (wm_get_swfwhw_semaphore(sc)) {
9005 aprint_error_dev(sc->sc_dev,
9006 "%s: failed to get semaphore\n", __func__);
9007 return;
9008 }
9009 }
9010
9011 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9012 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9013 (val & KUMCTRLSTA_MASK));
9014
9015 if (sc->sc_flags & WM_F_LOCK_SWFW)
9016 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9017 else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9018 wm_put_swfwhw_semaphore(sc);
9019 }
9020
9021 /* SGMII related */
9022
9023 /*
9024  * wm_sgmii_uses_mdio:
9025 *
9026 * Check whether the transaction is to the internal PHY or the external
9027 * MDIO interface. Return true if it's MDIO.
9028 */
9029 static bool
9030 wm_sgmii_uses_mdio(struct wm_softc *sc)
9031 {
9032 uint32_t reg;
9033 bool ismdio = false;
9034
9035 switch (sc->sc_type) {
9036 case WM_T_82575:
9037 case WM_T_82576:
9038 reg = CSR_READ(sc, WMREG_MDIC);
9039 ismdio = ((reg & MDIC_DEST) != 0);
9040 break;
9041 case WM_T_82580:
9042 case WM_T_I350:
9043 case WM_T_I354:
9044 case WM_T_I210:
9045 case WM_T_I211:
9046 reg = CSR_READ(sc, WMREG_MDICNFG);
9047 ismdio = ((reg & MDICNFG_DEST) != 0);
9048 break;
9049 default:
9050 break;
9051 }
9052
9053 return ismdio;
9054 }
9055
9056 /*
9057 * wm_sgmii_readreg: [mii interface function]
9058 *
9059 * Read a PHY register on the SGMII
9060 * This could be handled by the PHY layer if we didn't have to lock the
9061  * resource ...
9062 */
9063 static int
9064 wm_sgmii_readreg(device_t self, int phy, int reg)
9065 {
9066 struct wm_softc *sc = device_private(self);
9067 uint32_t i2ccmd;
9068 int i, rv;
9069
9070 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
9071 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9072 __func__);
9073 return 0;
9074 }
9075
9076 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9077 | (phy << I2CCMD_PHY_ADDR_SHIFT)
9078 | I2CCMD_OPCODE_READ;
9079 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9080
9081 /* Poll the ready bit */
9082 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9083 delay(50);
9084 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9085 if (i2ccmd & I2CCMD_READY)
9086 break;
9087 }
9088 if ((i2ccmd & I2CCMD_READY) == 0)
9089 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
9090 if ((i2ccmd & I2CCMD_ERROR) != 0)
9091 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9092
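	/* Swap the data bytes returned by the I2C interface */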
9093 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
9094
9095 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
9096 return rv;
9097 }
9098
9099 /*
9100 * wm_sgmii_writereg: [mii interface function]
9101 *
9102 * Write a PHY register on the SGMII.
9103 * This could be handled by the PHY layer if we didn't have to lock the
9104  * resource ...
9105 */
9106 static void
9107 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
9108 {
9109 struct wm_softc *sc = device_private(self);
9110 uint32_t i2ccmd;
9111 int i;
9112 int val_swapped;
9113
9114 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
9115 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9116 __func__);
9117 return;
9118 }
9119 /* Swap the data bytes for the I2C interface */
9120 val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
9121 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9122 | (phy << I2CCMD_PHY_ADDR_SHIFT)
9123 | I2CCMD_OPCODE_WRITE | val_swapped;
9124 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9125
9126 /* Poll the ready bit */
9127 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9128 delay(50);
9129 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9130 if (i2ccmd & I2CCMD_READY)
9131 break;
9132 }
9133 if ((i2ccmd & I2CCMD_READY) == 0)
9134 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
9135 if ((i2ccmd & I2CCMD_ERROR) != 0)
9136 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9137
9138 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
9139 }
9140
9141 /* TBI related */
9142
9143 /*
9144 * wm_tbi_mediainit:
9145 *
9146 * Initialize media for use on 1000BASE-X devices.
9147 */
9148 static void
9149 wm_tbi_mediainit(struct wm_softc *sc)
9150 {
9151 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9152 const char *sep = "";
9153
9154 if (sc->sc_type < WM_T_82543)
9155 sc->sc_tipg = TIPG_WM_DFLT;
9156 else
9157 sc->sc_tipg = TIPG_LG_DFLT;
9158
9159 sc->sc_tbi_serdes_anegticks = 5;
9160
9161 /* Initialize our media structures */
9162 sc->sc_mii.mii_ifp = ifp;
9163 sc->sc_ethercom.ec_mii = &sc->sc_mii;
9164
9165 if ((sc->sc_type >= WM_T_82575)
9166 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9167 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9168 wm_serdes_mediachange, wm_serdes_mediastatus);
9169 else
9170 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9171 wm_tbi_mediachange, wm_tbi_mediastatus);
9172
9173 /*
9174 * SWD Pins:
9175 *
9176 * 0 = Link LED (output)
9177 * 1 = Loss Of Signal (input)
9178 */
9179 sc->sc_ctrl |= CTRL_SWDPIO(0);
9180
9181 /* XXX Perhaps this is only for TBI */
9182 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9183 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9184
9185 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9186 sc->sc_ctrl &= ~CTRL_LRST;
9187
9188 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9189
9190 #define ADD(ss, mm, dd) \
9191 do { \
9192 aprint_normal("%s%s", sep, ss); \
9193 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
9194 sep = ", "; \
9195 } while (/*CONSTCOND*/0)
9196
9197 aprint_normal_dev(sc->sc_dev, "");
9198
9199 /* Only 82545 is LX */
9200 if (sc->sc_type == WM_T_82545) {
9201 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9202 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
9203 } else {
9204 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9205 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
9206 }
9207 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
9208 aprint_normal("\n");
9209
9210 #undef ADD
9211
9212 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9213 }
9214
9215 /*
9216 * wm_tbi_mediachange: [ifmedia interface function]
9217 *
9218 * Set hardware to newly-selected media on a 1000BASE-X device.
9219 */
9220 static int
9221 wm_tbi_mediachange(struct ifnet *ifp)
9222 {
9223 struct wm_softc *sc = ifp->if_softc;
9224 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9225 uint32_t status;
9226 int i;
9227
9228 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9229 /* XXX need some work for >= 82571 and < 82575 */
9230 if (sc->sc_type < WM_T_82575)
9231 return 0;
9232 }
9233
9234 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9235 || (sc->sc_type >= WM_T_82575))
9236 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9237
9238 sc->sc_ctrl &= ~CTRL_LRST;
9239 sc->sc_txcw = TXCW_ANE;
9240 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9241 sc->sc_txcw |= TXCW_FD | TXCW_HD;
9242 else if (ife->ifm_media & IFM_FDX)
9243 sc->sc_txcw |= TXCW_FD;
9244 else
9245 sc->sc_txcw |= TXCW_HD;
9246
9247 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9248 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9249
9250 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
9251 device_xname(sc->sc_dev), sc->sc_txcw));
9252 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9253 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9254 CSR_WRITE_FLUSH(sc);
9255 delay(1000);
9256
9257 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9258 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
9259
9260 /*
9261 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
9262 	 * if the optics detect a signal, 0 if they don't.
9263 */
9264 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9265 /* Have signal; wait for the link to come up. */
9266 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9267 delay(10000);
9268 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9269 break;
9270 }
9271
9272 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
9273 device_xname(sc->sc_dev),i));
9274
9275 status = CSR_READ(sc, WMREG_STATUS);
9276 DPRINTF(WM_DEBUG_LINK,
9277 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9278 device_xname(sc->sc_dev),status, STATUS_LU));
9279 if (status & STATUS_LU) {
9280 /* Link is up. */
9281 DPRINTF(WM_DEBUG_LINK,
9282 ("%s: LINK: set media -> link up %s\n",
9283 device_xname(sc->sc_dev),
9284 (status & STATUS_FD) ? "FDX" : "HDX"));
9285
9286 /*
9287 * NOTE: CTRL will update TFCE and RFCE automatically,
9288 * so we should update sc->sc_ctrl
9289 */
9290 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9291 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9292 sc->sc_fcrtl &= ~FCRTL_XONE;
9293 if (status & STATUS_FD)
9294 sc->sc_tctl |=
9295 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9296 else
9297 sc->sc_tctl |=
9298 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9299 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9300 sc->sc_fcrtl |= FCRTL_XONE;
9301 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9302 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9303 WMREG_OLD_FCRTL : WMREG_FCRTL,
9304 sc->sc_fcrtl);
9305 sc->sc_tbi_linkup = 1;
9306 } else {
9307 if (i == WM_LINKUP_TIMEOUT)
9308 wm_check_for_link(sc);
9309 /* Link is down. */
9310 DPRINTF(WM_DEBUG_LINK,
9311 ("%s: LINK: set media -> link down\n",
9312 device_xname(sc->sc_dev)));
9313 sc->sc_tbi_linkup = 0;
9314 }
9315 } else {
9316 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9317 device_xname(sc->sc_dev)));
9318 sc->sc_tbi_linkup = 0;
9319 }
9320
9321 wm_tbi_serdes_set_linkled(sc);
9322
9323 return 0;
9324 }
9325
9326 /*
9327 * wm_tbi_mediastatus: [ifmedia interface function]
9328 *
9329 * Get the current interface media status on a 1000BASE-X device.
9330 */
9331 static void
9332 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9333 {
9334 struct wm_softc *sc = ifp->if_softc;
9335 uint32_t ctrl, status;
9336
9337 ifmr->ifm_status = IFM_AVALID;
9338 ifmr->ifm_active = IFM_ETHER;
9339
9340 status = CSR_READ(sc, WMREG_STATUS);
9341 if ((status & STATUS_LU) == 0) {
9342 ifmr->ifm_active |= IFM_NONE;
9343 return;
9344 }
9345
9346 ifmr->ifm_status |= IFM_ACTIVE;
9347 /* Only 82545 is LX */
9348 if (sc->sc_type == WM_T_82545)
9349 ifmr->ifm_active |= IFM_1000_LX;
9350 else
9351 ifmr->ifm_active |= IFM_1000_SX;
9352 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9353 ifmr->ifm_active |= IFM_FDX;
9354 else
9355 ifmr->ifm_active |= IFM_HDX;
9356 ctrl = CSR_READ(sc, WMREG_CTRL);
9357 if (ctrl & CTRL_RFCE)
9358 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9359 if (ctrl & CTRL_TFCE)
9360 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9361 }
9362
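/*
 * wm_check_for_link:
 *
 *	Check for link on TBI devices, forcing link up or falling back to
 *	autonegotiation as described by the SWDPIN/LU/RXCW table below.
 */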
9363 /* XXX TBI only */
9364 static int
9365 wm_check_for_link(struct wm_softc *sc)
9366 {
9367 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9368 uint32_t rxcw;
9369 uint32_t ctrl;
9370 uint32_t status;
9371 uint32_t sig;
9372
9373 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9374 /* XXX need some work for >= 82571 */
9375 if (sc->sc_type >= WM_T_82571) {
9376 sc->sc_tbi_linkup = 1;
9377 return 0;
9378 }
9379 }
9380
9381 rxcw = CSR_READ(sc, WMREG_RXCW);
9382 ctrl = CSR_READ(sc, WMREG_CTRL);
9383 status = CSR_READ(sc, WMREG_STATUS);
9384
9385 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9386
9387 DPRINTF(WM_DEBUG_LINK,
9388 ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9389 device_xname(sc->sc_dev), __func__,
9390 ((ctrl & CTRL_SWDPIN(1)) == sig),
9391 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
9392
9393 /*
9394 * SWDPIN LU RXCW
9395 * 0 0 0
9396 * 0 0 1 (should not happen)
9397 * 0 1 0 (should not happen)
9398 * 0 1 1 (should not happen)
9399 * 1 0 0 Disable autonego and force linkup
9400 * 1 0 1 got /C/ but not linkup yet
9401 * 1 1 0 (linkup)
9402 * 1 1 1 If IFM_AUTO, back to autonego
9403 *
9404 */
9405 if (((ctrl & CTRL_SWDPIN(1)) == sig)
9406 && ((status & STATUS_LU) == 0)
9407 && ((rxcw & RXCW_C) == 0)) {
9408 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9409 __func__));
9410 sc->sc_tbi_linkup = 0;
9411 /* Disable auto-negotiation in the TXCW register */
9412 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9413
9414 /*
9415 * Force link-up and also force full-duplex.
9416 *
9417 		 * NOTE: CTRL will update TFCE and RFCE automatically,
9418 * so we should update sc->sc_ctrl
9419 */
9420 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9421 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9422 } else if (((status & STATUS_LU) != 0)
9423 && ((rxcw & RXCW_C) != 0)
9424 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9425 sc->sc_tbi_linkup = 1;
9426 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9427 __func__));
9428 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9429 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9430 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9431 && ((rxcw & RXCW_C) != 0)) {
9432 DPRINTF(WM_DEBUG_LINK, ("/C/"));
9433 } else {
9434 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9435 status));
9436 }
9437
9438 return 0;
9439 }
9440
9441 /*
9442 * wm_tbi_tick:
9443 *
9444 * Check the link on TBI devices.
9445 * This function acts as mii_tick().
9446 */
9447 static void
9448 wm_tbi_tick(struct wm_softc *sc)
9449 {
9450 struct mii_data *mii = &sc->sc_mii;
9451 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9452 uint32_t status;
9453
9454 KASSERT(WM_CORE_LOCKED(sc));
9455
9456 status = CSR_READ(sc, WMREG_STATUS);
9457
9458 /* XXX is this needed? */
9459 (void)CSR_READ(sc, WMREG_RXCW);
9460 (void)CSR_READ(sc, WMREG_CTRL);
9461
9462 /* set link status */
9463 if ((status & STATUS_LU) == 0) {
9464 DPRINTF(WM_DEBUG_LINK,
9465 ("%s: LINK: checklink -> down\n",
9466 device_xname(sc->sc_dev)));
9467 sc->sc_tbi_linkup = 0;
9468 } else if (sc->sc_tbi_linkup == 0) {
9469 DPRINTF(WM_DEBUG_LINK,
9470 ("%s: LINK: checklink -> up %s\n",
9471 device_xname(sc->sc_dev),
9472 (status & STATUS_FD) ? "FDX" : "HDX"));
9473 sc->sc_tbi_linkup = 1;
9474 sc->sc_tbi_serdes_ticks = 0;
9475 }
9476
9477 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9478 goto setled;
9479
9480 if ((status & STATUS_LU) == 0) {
9481 sc->sc_tbi_linkup = 0;
9482 /* If the timer expired, retry autonegotiation */
9483 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9484 && (++sc->sc_tbi_serdes_ticks
9485 >= sc->sc_tbi_serdes_anegticks)) {
9486 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9487 sc->sc_tbi_serdes_ticks = 0;
9488 /*
9489 * Reset the link, and let autonegotiation do
9490 * its thing
9491 */
9492 sc->sc_ctrl |= CTRL_LRST;
9493 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9494 CSR_WRITE_FLUSH(sc);
9495 delay(1000);
9496 sc->sc_ctrl &= ~CTRL_LRST;
9497 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9498 CSR_WRITE_FLUSH(sc);
9499 delay(1000);
9500 CSR_WRITE(sc, WMREG_TXCW,
9501 sc->sc_txcw & ~TXCW_ANE);
9502 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9503 }
9504 }
9505
9506 setled:
9507 wm_tbi_serdes_set_linkled(sc);
9508 }
9509
9510 /* SERDES related */
9511 static void
9512 wm_serdes_power_up_link_82575(struct wm_softc *sc)
9513 {
9514 uint32_t reg;
9515
9516 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9517 && ((sc->sc_flags & WM_F_SGMII) == 0))
9518 return;
9519
9520 reg = CSR_READ(sc, WMREG_PCS_CFG);
9521 reg |= PCS_CFG_PCS_EN;
9522 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
9523
9524 reg = CSR_READ(sc, WMREG_CTRL_EXT);
9525 reg &= ~CTRL_EXT_SWDPIN(3);
9526 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9527 CSR_WRITE_FLUSH(sc);
9528 }
9529
9530 static int
9531 wm_serdes_mediachange(struct ifnet *ifp)
9532 {
9533 struct wm_softc *sc = ifp->if_softc;
9534 bool pcs_autoneg = true; /* XXX */
9535 uint32_t ctrl_ext, pcs_lctl, reg;
9536
9537 /* XXX Currently, this function is not called on 8257[12] */
9538 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9539 || (sc->sc_type >= WM_T_82575))
9540 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9541
9542 wm_serdes_power_up_link_82575(sc);
9543
9544 sc->sc_ctrl |= CTRL_SLU;
9545
9546 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
9547 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
9548
9549 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9550 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
9551 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
9552 case CTRL_EXT_LINK_MODE_SGMII:
9553 pcs_autoneg = true;
9554 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
9555 break;
9556 case CTRL_EXT_LINK_MODE_1000KX:
9557 pcs_autoneg = false;
9558 /* FALLTHROUGH */
9559 default:
9560 if ((sc->sc_type == WM_T_82575)
9561 || (sc->sc_type == WM_T_82576)) {
9562 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
9563 pcs_autoneg = false;
9564 }
9565 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
9566 | CTRL_FRCFDX;
9567 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
9568 }
9569 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9570
9571 if (pcs_autoneg) {
9572 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
9573 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
9574
9575 reg = CSR_READ(sc, WMREG_PCS_ANADV);
9576 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
9577 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
9578 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
9579 } else
9580 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
9581
9582 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
9583
9585 return 0;
9586 }
9587
9588 static void
9589 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9590 {
9591 struct wm_softc *sc = ifp->if_softc;
9592 struct mii_data *mii = &sc->sc_mii;
9593 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9594 uint32_t pcs_adv, pcs_lpab, reg;
9595
9596 ifmr->ifm_status = IFM_AVALID;
9597 ifmr->ifm_active = IFM_ETHER;
9598
9599 /* Check PCS */
9600 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9601 if ((reg & PCS_LSTS_LINKOK) == 0) {
9602 ifmr->ifm_active |= IFM_NONE;
9603 sc->sc_tbi_linkup = 0;
9604 goto setled;
9605 }
9606
9607 sc->sc_tbi_linkup = 1;
9608 ifmr->ifm_status |= IFM_ACTIVE;
9609 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
9610 if ((reg & PCS_LSTS_FDX) != 0)
9611 ifmr->ifm_active |= IFM_FDX;
9612 else
9613 ifmr->ifm_active |= IFM_HDX;
9614 mii->mii_media_active &= ~IFM_ETH_FMASK;
9615 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9616 /* Check flow */
9617 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9618 if ((reg & PCS_LSTS_AN_COMP) == 0) {
9619 DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
9620 goto setled;
9621 }
9622 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9623 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9624 DPRINTF(WM_DEBUG_LINK,
9625 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
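		/*
		 * Resolve flow control per IEEE 802.3 Annex 28B.3:
		 * both sides symmetric -> TX and RX pause; we only
		 * asymmetric while the partner is symmetric and
		 * asymmetric -> TX pause only; we symmetric and
		 * asymmetric while the partner is asymmetric only ->
		 * RX pause only; anything else -> no pause.
		 */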
9626 if ((pcs_adv & TXCW_SYM_PAUSE)
9627 && (pcs_lpab & TXCW_SYM_PAUSE)) {
9628 mii->mii_media_active |= IFM_FLOW
9629 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9630 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9631 && (pcs_adv & TXCW_ASYM_PAUSE)
9632 && (pcs_lpab & TXCW_SYM_PAUSE)
9633 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9634 mii->mii_media_active |= IFM_FLOW
9635 | IFM_ETH_TXPAUSE;
9636 } else if ((pcs_adv & TXCW_SYM_PAUSE)
9637 && (pcs_adv & TXCW_ASYM_PAUSE)
9638 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9639 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9640 mii->mii_media_active |= IFM_FLOW
9641 | IFM_ETH_RXPAUSE;
		}
9644 }
9645 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9646 | (mii->mii_media_active & IFM_ETH_FMASK);
9647 setled:
9648 wm_tbi_serdes_set_linkled(sc);
9649 }
9650
9651 /*
9652 * wm_serdes_tick:
9653 *
9654 * Check the link on serdes devices.
9655 */
9656 static void
9657 wm_serdes_tick(struct wm_softc *sc)
9658 {
9659 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9660 struct mii_data *mii = &sc->sc_mii;
9661 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9662 uint32_t reg;
9663
9664 KASSERT(WM_CORE_LOCKED(sc));
9665
9666 mii->mii_media_status = IFM_AVALID;
9667 mii->mii_media_active = IFM_ETHER;
9668
9669 /* Check PCS */
9670 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9671 if ((reg & PCS_LSTS_LINKOK) != 0) {
9672 mii->mii_media_status |= IFM_ACTIVE;
9673 sc->sc_tbi_linkup = 1;
9674 sc->sc_tbi_serdes_ticks = 0;
9675 mii->mii_media_active |= IFM_1000_SX; /* XXX */
9676 if ((reg & PCS_LSTS_FDX) != 0)
9677 mii->mii_media_active |= IFM_FDX;
9678 else
9679 mii->mii_media_active |= IFM_HDX;
9680 } else {
9681 mii->mii_media_status |= IFM_NONE;
9682 sc->sc_tbi_linkup = 0;
9683 /* If the timer expired, retry autonegotiation */
9684 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9685 && (++sc->sc_tbi_serdes_ticks
9686 >= sc->sc_tbi_serdes_anegticks)) {
9687 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9688 sc->sc_tbi_serdes_ticks = 0;
9689 /* XXX */
9690 wm_serdes_mediachange(ifp);
9691 }
9692 }
9693
9694 wm_tbi_serdes_set_linkled(sc);
9695 }
9696
9697 /* SFP related */
9698
9699 static int
9700 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
9701 {
9702 uint32_t i2ccmd;
9703 int i;
9704
9705 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
9706 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9707
9708 /* Poll the ready bit */
9709 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9710 delay(50);
9711 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9712 if (i2ccmd & I2CCMD_READY)
9713 break;
9714 }
9715 if ((i2ccmd & I2CCMD_READY) == 0)
9716 return -1;
9717 if ((i2ccmd & I2CCMD_ERROR) != 0)
9718 return -1;
9719
9720 *data = i2ccmd & 0x00ff;
9721
9722 return 0;
9723 }
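
/*
 * Illustrative usage sketch (not called by the driver): poll the SFP
 * identifier byte with a few retries, the same pattern
 * wm_sfp_get_media_type() uses below.  The I2C interface must already
 * have been enabled via CTRL_EXT_I2C_ENA; the helper name is ours.
 */
static __unused int
wm_sfp_read_id_sketch(struct wm_softc *sc, uint8_t *idp)
{
	int retries = 3;

	while (retries-- > 0) {
		if (wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, idp) == 0)
			return 0;	/* *idp holds the SFF identifier */
		delay(100*1000);	/* the module may still be powering up */
	}
	return -1;
}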
9724
9725 static uint32_t
9726 wm_sfp_get_media_type(struct wm_softc *sc)
9727 {
9728 uint32_t ctrl_ext;
9729 uint8_t val = 0;
9730 int timeout = 3;
9731 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
9732 int rv = -1;
9733
9734 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9735 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
9736 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
9737 CSR_WRITE_FLUSH(sc);
9738
9739 /* Read SFP module data */
9740 while (timeout) {
9741 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
9742 if (rv == 0)
9743 break;
9744 delay(100*1000); /* XXX too big */
9745 timeout--;
9746 }
9747 if (rv != 0)
9748 goto out;
9749 switch (val) {
9750 case SFF_SFP_ID_SFF:
9751 aprint_normal_dev(sc->sc_dev,
9752 "Module/Connector soldered to board\n");
9753 break;
9754 case SFF_SFP_ID_SFP:
9755 aprint_normal_dev(sc->sc_dev, "SFP\n");
9756 break;
9757 case SFF_SFP_ID_UNKNOWN:
9758 goto out;
9759 default:
9760 break;
9761 }
9762
9763 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
9764 if (rv != 0) {
9765 goto out;
9766 }
9767
9768 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
9769 mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
		sc->sc_flags |= WM_F_SGMII;
		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
		sc->sc_flags |= WM_F_SGMII;
		mediatype = WM_MEDIATYPE_SERDES;
	}
9777
9778 out:
9779 /* Restore I2C interface setting */
9780 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9781
9782 return mediatype;
9783 }

/*
 * NVM related.
 * Microwire, SPI (with or without EERD), and Flash.
 */
9788
/* Both SPI and Microwire */
9790
9791 /*
9792 * wm_eeprom_sendbits:
9793 *
9794 * Send a series of bits to the EEPROM.
9795 */
9796 static void
9797 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
9798 {
9799 uint32_t reg;
9800 int x;
9801
9802 reg = CSR_READ(sc, WMREG_EECD);
9803
9804 for (x = nbits; x > 0; x--) {
9805 if (bits & (1U << (x - 1)))
9806 reg |= EECD_DI;
9807 else
9808 reg &= ~EECD_DI;
9809 CSR_WRITE(sc, WMREG_EECD, reg);
9810 CSR_WRITE_FLUSH(sc);
9811 delay(2);
9812 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9813 CSR_WRITE_FLUSH(sc);
9814 delay(2);
9815 CSR_WRITE(sc, WMREG_EECD, reg);
9816 CSR_WRITE_FLUSH(sc);
9817 delay(2);
9818 }
9819 }
9820
9821 /*
9822 * wm_eeprom_recvbits:
9823 *
9824 * Receive a series of bits from the EEPROM.
9825 */
9826 static void
9827 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
9828 {
9829 uint32_t reg, val;
9830 int x;
9831
9832 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
9833
9834 val = 0;
9835 for (x = nbits; x > 0; x--) {
9836 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9837 CSR_WRITE_FLUSH(sc);
9838 delay(2);
9839 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
9840 val |= (1U << (x - 1));
9841 CSR_WRITE(sc, WMREG_EECD, reg);
9842 CSR_WRITE_FLUSH(sc);
9843 delay(2);
9844 }
9845 *valp = val;
9846 }
9847
9848 /* Microwire */
9849
9850 /*
9851 * wm_nvm_read_uwire:
9852 *
9853 * Read a word from the EEPROM using the MicroWire protocol.
9854 */
9855 static int
9856 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9857 {
9858 uint32_t reg, val;
9859 int i;
9860
9861 for (i = 0; i < wordcnt; i++) {
9862 /* Clear SK and DI. */
9863 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
9864 CSR_WRITE(sc, WMREG_EECD, reg);
9865
		/*
		 * XXX: workaround for a bug in qemu-0.12.x and prior
		 * and Xen.
		 *
		 * We use this workaround only for 82540 because qemu's
		 * e1000 acts as an 82540.
		 */
9873 if (sc->sc_type == WM_T_82540) {
9874 reg |= EECD_SK;
9875 CSR_WRITE(sc, WMREG_EECD, reg);
9876 reg &= ~EECD_SK;
9877 CSR_WRITE(sc, WMREG_EECD, reg);
9878 CSR_WRITE_FLUSH(sc);
9879 delay(2);
9880 }
9881 /* XXX: end of workaround */
9882
9883 /* Set CHIP SELECT. */
9884 reg |= EECD_CS;
9885 CSR_WRITE(sc, WMREG_EECD, reg);
9886 CSR_WRITE_FLUSH(sc);
9887 delay(2);
9888
9889 /* Shift in the READ command. */
9890 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
9891
9892 /* Shift in address. */
9893 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
9894
9895 /* Shift out the data. */
9896 wm_eeprom_recvbits(sc, &val, 16);
9897 data[i] = val & 0xffff;
9898
9899 /* Clear CHIP SELECT. */
9900 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
9901 CSR_WRITE(sc, WMREG_EECD, reg);
9902 CSR_WRITE_FLUSH(sc);
9903 delay(2);
9904 }
9905
9906 return 0;
9907 }
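
/*
 * Illustrative sketch (not called by the driver): fetch the three
 * MAC address words through the Microwire backend.  Real callers go
 * through wm_nvm_read(), which selects the backend and handles the
 * NVM lock, so calling this directly assumes the caller already did
 * wm_nvm_acquire().
 */
static __unused int
wm_uwire_read_macaddr_sketch(struct wm_softc *sc, uint16_t ea[3])
{

	return wm_nvm_read_uwire(sc, NVM_OFF_MACADDR, 3, ea);
}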
9908
9909 /* SPI */
9910
9911 /*
9912 * Set SPI and FLASH related information from the EECD register.
9913 * For 82541 and 82547, the word size is taken from EEPROM.
9914 */
9915 static int
9916 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
9917 {
9918 int size;
9919 uint32_t reg;
9920 uint16_t data;
9921
9922 reg = CSR_READ(sc, WMREG_EECD);
9923 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
9924
9925 /* Read the size of NVM from EECD by default */
9926 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9927 switch (sc->sc_type) {
9928 case WM_T_82541:
9929 case WM_T_82541_2:
9930 case WM_T_82547:
9931 case WM_T_82547_2:
9932 /* Set dummy value to access EEPROM */
9933 sc->sc_nvm_wordsize = 64;
9934 wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
9935 reg = data;
9936 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9937 if (size == 0)
9938 size = 6; /* 64 word size */
9939 else
9940 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
9941 break;
9942 case WM_T_80003:
9943 case WM_T_82571:
9944 case WM_T_82572:
9945 case WM_T_82573: /* SPI case */
9946 case WM_T_82574: /* SPI case */
9947 case WM_T_82583: /* SPI case */
9948 size += NVM_WORD_SIZE_BASE_SHIFT;
9949 if (size > 14)
9950 size = 14;
9951 break;
9952 case WM_T_82575:
9953 case WM_T_82576:
9954 case WM_T_82580:
9955 case WM_T_I350:
9956 case WM_T_I354:
9957 case WM_T_I210:
9958 case WM_T_I211:
9959 size += NVM_WORD_SIZE_BASE_SHIFT;
9960 if (size > 15)
9961 size = 15;
9962 break;
	default:
		aprint_error_dev(sc->sc_dev,
		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
9968 }
9969
9970 sc->sc_nvm_wordsize = 1 << size;
9971
9972 return 0;
9973 }
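
/*
 * Worked example of the computation above as a standalone sketch
 * (illustrative only): the EECD size field is an exponent biased by
 * NVM_WORD_SIZE_BASE_SHIFT, so with a bias of 6 (as in other
 * e1000-derived drivers) a field value of 4 would give
 * 1 << (4 + 6) = 1024 words.
 */
static __unused inline int
wm_nvm_wordsize_from_field_sketch(int field)
{

	return 1 << (field + NVM_WORD_SIZE_BASE_SHIFT);
}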
9974
9975 /*
9976 * wm_nvm_ready_spi:
9977 *
9978 * Wait for a SPI EEPROM to be ready for commands.
9979 */
9980 static int
9981 wm_nvm_ready_spi(struct wm_softc *sc)
9982 {
9983 uint32_t val;
9984 int usec;
9985
9986 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
9987 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
9988 wm_eeprom_recvbits(sc, &val, 8);
9989 if ((val & SPI_SR_RDY) == 0)
9990 break;
9991 }
9992 if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
9994 return 1;
9995 }
9996 return 0;
9997 }
9998
9999 /*
10000 * wm_nvm_read_spi:
10001 *
 *	Read a word from the EEPROM using the SPI protocol.
10003 */
10004 static int
10005 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10006 {
10007 uint32_t reg, val;
10008 int i;
10009 uint8_t opc;
10010
10011 /* Clear SK and CS. */
10012 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
10013 CSR_WRITE(sc, WMREG_EECD, reg);
10014 CSR_WRITE_FLUSH(sc);
10015 delay(2);
10016
10017 if (wm_nvm_ready_spi(sc))
10018 return 1;
10019
10020 /* Toggle CS to flush commands. */
10021 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
10022 CSR_WRITE_FLUSH(sc);
10023 delay(2);
10024 CSR_WRITE(sc, WMREG_EECD, reg);
10025 CSR_WRITE_FLUSH(sc);
10026 delay(2);
10027
10028 opc = SPI_OPC_READ;
10029 if (sc->sc_nvm_addrbits == 8 && word >= 128)
10030 opc |= SPI_OPC_A8;
10031
10032 wm_eeprom_sendbits(sc, opc, 8);
10033 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
10034
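	/*
	 * NVM words are stored low byte first in the SPI byte stream;
	 * swap each received 16-bit value into host word order.
	 */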
10035 for (i = 0; i < wordcnt; i++) {
10036 wm_eeprom_recvbits(sc, &val, 16);
10037 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
10038 }
10039
10040 /* Raise CS and clear SK. */
10041 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
10042 CSR_WRITE(sc, WMREG_EECD, reg);
10043 CSR_WRITE_FLUSH(sc);
10044 delay(2);
10045
10046 return 0;
10047 }
10048
/* Reading with EERD */
10050
10051 static int
10052 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
10053 {
10054 uint32_t attempts = 100000;
10055 uint32_t i, reg = 0;
10056 int32_t done = -1;
10057
10058 for (i = 0; i < attempts; i++) {
10059 reg = CSR_READ(sc, rw);
10060
10061 if (reg & EERD_DONE) {
10062 done = 0;
10063 break;
10064 }
10065 delay(5);
10066 }
10067
10068 return done;
10069 }
10070
10071 static int
10072 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
10073 uint16_t *data)
10074 {
10075 int i, eerd = 0;
10076 int error = 0;
10077
10078 for (i = 0; i < wordcnt; i++) {
10079 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
10080
10081 CSR_WRITE(sc, WMREG_EERD, eerd);
10082 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
10083 if (error != 0)
10084 break;
10085
10086 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
10087 }
10088
10089 return error;
10090 }
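
/*
 * Illustrative sketch (not called by the driver): a single-word EERD
 * read, the unit of work the loop above repeats.  EERD is
 * self-contained: write the word address with the START bit, poll
 * DONE, then read the data back out of the same register.
 */
static __unused int
wm_nvm_read_one_eerd_sketch(struct wm_softc *sc, int word, uint16_t *datap)
{

	CSR_WRITE(sc, WMREG_EERD, (word << EERD_ADDR_SHIFT) | EERD_START);
	if (wm_poll_eerd_eewr_done(sc, WMREG_EERD) != 0)
		return -1;
	*datap = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
	return 0;
}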
10091
10092 /* Flash */
10093
10094 static int
10095 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
10096 {
10097 uint32_t eecd;
10098 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
10099 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
10100 uint8_t sig_byte = 0;
10101
10102 switch (sc->sc_type) {
10103 case WM_T_PCH_SPT:
10104 /*
10105 * In SPT, read from the CTRL_EXT reg instead of accessing the
10106 * sector valid bits from the NVM.
10107 */
10108 *bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
10109 if ((*bank == 0) || (*bank == 1)) {
10110 aprint_error_dev(sc->sc_dev,
10111 "%s: no valid NVM bank present\n",
10112 __func__);
10113 return -1;
10114 } else {
10115 *bank = *bank - 2;
10116 return 0;
10117 }
10118 case WM_T_ICH8:
10119 case WM_T_ICH9:
10120 eecd = CSR_READ(sc, WMREG_EECD);
10121 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
10122 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
10123 return 0;
10124 }
10125 /* FALLTHROUGH */
10126 default:
10127 /* Default to 0 */
10128 *bank = 0;
10129
10130 /* Check bank 0 */
10131 wm_read_ich8_byte(sc, act_offset, &sig_byte);
10132 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10133 *bank = 0;
10134 return 0;
10135 }
10136
10137 /* Check bank 1 */
10138 wm_read_ich8_byte(sc, act_offset + bank1_offset,
10139 &sig_byte);
10140 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10141 *bank = 1;
10142 return 0;
10143 }
10144 }
10145
10146 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
10147 device_xname(sc->sc_dev)));
10148 return -1;
10149 }
10150
10151 /******************************************************************************
10152 * This function does initial flash setup so that a new read/write/erase cycle
10153 * can be started.
10154 *
10155 * sc - The pointer to the hw structure
10156 ****************************************************************************/
10157 static int32_t
10158 wm_ich8_cycle_init(struct wm_softc *sc)
10159 {
10160 uint16_t hsfsts;
10161 int32_t error = 1;
10162 int32_t i = 0;
10163
10164 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10165
	/* Maybe we should check the Flash Descriptor Valid bit in HW status */
10167 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10168 return error;
10169 }
10170
	/* Clear FCERR and DAEL in HW status by writing 1s */
10173 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10174
10175 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10176
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be set by hardware after reset so that it can be used
	 * as an indication of whether a cycle is in progress or has
	 * completed.  We should also have a software semaphore
	 * mechanism guarding FDONE or the cycle-in-progress bit so that
	 * accesses by two threads are serialized, or some way to keep
	 * two threads from starting a cycle at the same time.
	 */
10187
10188 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10189 /*
10190 * There is no cycle running at present, so we can start a
10191 * cycle
10192 */
10193
10194 /* Begin by setting Flash Cycle Done. */
10195 hsfsts |= HSFSTS_DONE;
10196 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10197 error = 0;
10198 } else {
		/*
		 * Otherwise poll for some time so the current cycle has
		 * a chance to end before giving up.
		 */
10203 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10204 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10205 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10206 error = 0;
10207 break;
10208 }
10209 delay(1);
10210 }
10211 if (error == 0) {
			/*
			 * The previous cycle completed within the
			 * timeout, so now set Flash Cycle Done.
			 */
10216 hsfsts |= HSFSTS_DONE;
10217 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10218 }
10219 }
10220 return error;
10221 }
10222
10223 /******************************************************************************
10224 * This function starts a flash cycle and waits for its completion
10225 *
10226 * sc - The pointer to the hw structure
10227 ****************************************************************************/
10228 static int32_t
10229 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10230 {
10231 uint16_t hsflctl;
10232 uint16_t hsfsts;
10233 int32_t error = 1;
10234 uint32_t i = 0;
10235
10236 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10237 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10238 hsflctl |= HSFCTL_GO;
10239 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10240
10241 /* Wait till FDONE bit is set to 1 */
10242 do {
10243 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10244 if (hsfsts & HSFSTS_DONE)
10245 break;
10246 delay(1);
10247 i++;
10248 } while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
10250 error = 0;
10251
10252 return error;
10253 }
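
/*
 * The two helpers above are used in a fixed sequence by the read
 * routine below: wm_ich8_cycle_init() clears any stale error/done
 * state and makes sure no cycle is in flight, the caller programs
 * HSFCTL (cycle type and byte count) and FADDR (linear address), and
 * wm_ich8_flash_cycle() then sets the GO bit and polls FDONE.
 */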
10254
10255 /******************************************************************************
10256 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
10257 *
10258 * sc - The pointer to the hw structure
10259 * index - The index of the byte or word to read.
10260 * size - Size of data to read, 1=byte 2=word, 4=dword
10261 * data - Pointer to the word to store the value read.
10262 *****************************************************************************/
10263 static int32_t
10264 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10265 uint32_t size, uint32_t *data)
10266 {
10267 uint16_t hsfsts;
10268 uint16_t hsflctl;
10269 uint32_t flash_linear_address;
10270 uint32_t flash_data = 0;
10271 int32_t error = 1;
10272 int32_t count = 0;
10273
	if (size < 1 || size > 4 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
10276 return error;
10277
10278 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10279 sc->sc_ich8_flash_base;
10280
10281 do {
10282 delay(1);
10283 /* Steps */
10284 error = wm_ich8_cycle_init(sc);
10285 if (error)
10286 break;
10287
10288 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte-count field holds size - 1 (0 = 1 byte, 1 = 2 bytes, ...) */
10290 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10291 & HSFCTL_BCOUNT_MASK;
10292 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10293 if (sc->sc_type == WM_T_PCH_SPT) {
			/*
			 * In SPT, this register is in LAN memory space,
			 * not flash.  Therefore, only 32 bit access is
			 * supported.
			 */
10298 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
10299 (uint32_t)hsflctl);
10300 } else
10301 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10302
10303 /*
10304 * Write the last 24 bits of index into Flash Linear address
10305 * field in Flash Address
10306 */
10307 /* TODO: TBD maybe check the index against the size of flash */
10308
10309 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10310
10311 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10312
		/*
		 * If FCERR is set, clear it and retry the whole
		 * sequence a few more times; otherwise read the value
		 * out of Flash Data0, least significant byte first.
		 */
10319 if (error == 0) {
10320 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10321 if (size == 1)
10322 *data = (uint8_t)(flash_data & 0x000000FF);
10323 else if (size == 2)
10324 *data = (uint16_t)(flash_data & 0x0000FFFF);
10325 else if (size == 4)
10326 *data = (uint32_t)flash_data;
10327 break;
10328 } else {
10329 /*
10330 * If we've gotten here, then things are probably
10331 * completely hosed, but if the error condition is
10332 * detected, it won't hurt to give it another try...
10333 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10334 */
10335 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10336 if (hsfsts & HSFSTS_ERR) {
10337 /* Repeat for some time before giving up. */
10338 continue;
10339 } else if ((hsfsts & HSFSTS_DONE) == 0)
10340 break;
10341 }
10342 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10343
10344 return error;
10345 }
10346
10347 /******************************************************************************
10348 * Reads a single byte from the NVM using the ICH8 flash access registers.
10349 *
10350 * sc - pointer to wm_hw structure
10351 * index - The index of the byte to read.
10352 * data - Pointer to a byte to store the value read.
10353 *****************************************************************************/
10354 static int32_t
wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t *data)
10356 {
10357 int32_t status;
10358 uint32_t word = 0;
10359
10360 status = wm_read_ich8_data(sc, index, 1, &word);
10361 if (status == 0)
10362 *data = (uint8_t)word;
10363 else
10364 *data = 0;
10365
10366 return status;
10367 }
10368
10369 /******************************************************************************
10370 * Reads a word from the NVM using the ICH8 flash access registers.
10371 *
10372 * sc - pointer to wm_hw structure
10373 * index - The starting byte index of the word to read.
10374 * data - Pointer to a word to store the value read.
10375 *****************************************************************************/
10376 static int32_t
10377 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10378 {
10379 int32_t status;
10380 uint32_t word = 0;
10381
10382 status = wm_read_ich8_data(sc, index, 2, &word);
10383 if (status == 0)
10384 *data = (uint16_t)word;
10385 else
10386 *data = 0;
10387
10388 return status;
10389 }
10390
10391 /******************************************************************************
10392 * Reads a dword from the NVM using the ICH8 flash access registers.
10393 *
10394 * sc - pointer to wm_hw structure
10395 * index - The starting byte index of the word to read.
10396 * data - Pointer to a word to store the value read.
10397 *****************************************************************************/
10398 static int32_t
10399 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
10400 {
10401 int32_t status;
10402
10403 status = wm_read_ich8_data(sc, index, 4, data);
10404 return status;
10405 }
10406
10407 /******************************************************************************
10408 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10409 * register.
10410 *
10411 * sc - Struct containing variables accessed by shared code
10412 * offset - offset of word in the EEPROM to read
10413 * data - word read from the EEPROM
10414 * words - number of words to read
10415 *****************************************************************************/
10416 static int
10417 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10418 {
10419 int32_t error = 0;
10420 uint32_t flash_bank = 0;
10421 uint32_t act_offset = 0;
10422 uint32_t bank_offset = 0;
10423 uint16_t word = 0;
10424 uint16_t i = 0;
10425
10426 /*
10427 * We need to know which is the valid flash bank. In the event
10428 * that we didn't allocate eeprom_shadow_ram, we may not be
10429 * managing flash_bank. So it cannot be trusted and needs
10430 * to be updated with each read.
10431 */
10432 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10433 if (error) {
10434 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10435 device_xname(sc->sc_dev)));
10436 flash_bank = 0;
10437 }
10438
10439 /*
10440 * Adjust offset appropriately if we're on bank 1 - adjust for word
10441 * size
10442 */
10443 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10444
10445 error = wm_get_swfwhw_semaphore(sc);
10446 if (error) {
10447 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10448 __func__);
10449 return error;
10450 }
10451
10452 for (i = 0; i < words; i++) {
10453 /* The NVM part needs a byte offset, hence * 2 */
10454 act_offset = bank_offset + ((offset + i) * 2);
10455 error = wm_read_ich8_word(sc, act_offset, &word);
10456 if (error) {
10457 aprint_error_dev(sc->sc_dev,
10458 "%s: failed to read NVM\n", __func__);
10459 break;
10460 }
10461 data[i] = word;
10462 }
10463
10464 wm_put_swfwhw_semaphore(sc);
10465 return error;
10466 }
10467
10468 /******************************************************************************
10469 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
10470 * register.
10471 *
10472 * sc - Struct containing variables accessed by shared code
10473 * offset - offset of word in the EEPROM to read
10474 * data - word read from the EEPROM
10475 * words - number of words to read
10476 *****************************************************************************/
10477 static int
10478 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
10479 {
10480 int32_t error = 0;
10481 uint32_t flash_bank = 0;
10482 uint32_t act_offset = 0;
10483 uint32_t bank_offset = 0;
10484 uint32_t dword = 0;
10485 uint16_t i = 0;
10486
10487 /*
10488 * We need to know which is the valid flash bank. In the event
10489 * that we didn't allocate eeprom_shadow_ram, we may not be
10490 * managing flash_bank. So it cannot be trusted and needs
10491 * to be updated with each read.
10492 */
10493 error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10494 if (error) {
10495 DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10496 device_xname(sc->sc_dev)));
10497 flash_bank = 0;
10498 }
10499
10500 /*
10501 * Adjust offset appropriately if we're on bank 1 - adjust for word
10502 * size
10503 */
10504 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10505
10506 error = wm_get_swfwhw_semaphore(sc);
10507 if (error) {
10508 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10509 __func__);
10510 return error;
10511 }
10512
10513 for (i = 0; i < words; i++) {
10514 /* The NVM part needs a byte offset, hence * 2 */
10515 act_offset = bank_offset + ((offset + i) * 2);
10516 /* but we must read dword aligned, so mask ... */
10517 error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
10518 if (error) {
10519 aprint_error_dev(sc->sc_dev,
10520 "%s: failed to read NVM\n", __func__);
10521 break;
10522 }
10523 /* ... and pick out low or high word */
10524 if ((act_offset & 0x2) == 0)
10525 data[i] = (uint16_t)(dword & 0xFFFF);
10526 else
10527 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
10528 }
10529
10530 wm_put_swfwhw_semaphore(sc);
10531 return error;
10532 }
10533
10534 /* iNVM */
10535
10536 static int
10537 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
10538 {
	int32_t rv = -1;	/* not found unless a matching record turns up */
10540 uint32_t invm_dword;
10541 uint16_t i;
10542 uint8_t record_type, word_address;
10543
10544 for (i = 0; i < INVM_SIZE; i++) {
10545 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
10546 /* Get record type */
10547 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
10548 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
10549 break;
10550 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
10551 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
10552 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
10553 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
10554 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
10555 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
10556 if (word_address == address) {
10557 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
10558 rv = 0;
10559 break;
10560 }
10561 }
10562 }
10563
10564 return rv;
10565 }
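
/*
 * Illustrative note: each iNVM dword is a self-describing record.
 * The walk above skips CSR-autoload and RSA-key records by advancing
 * the index past their payloads, stops at the first uninitialized
 * record, and only a WORD_AUTOLOAD record whose address matches can
 * satisfy the read.
 */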
10566
10567 static int
10568 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
10569 {
10570 int rv = 0;
10571 int i;
10572
10573 for (i = 0; i < words; i++) {
10574 switch (offset + i) {
10575 case NVM_OFF_MACADDR:
10576 case NVM_OFF_MACADDR1:
10577 case NVM_OFF_MACADDR2:
10578 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
10579 if (rv != 0) {
10580 data[i] = 0xffff;
10581 rv = -1;
10582 }
10583 break;
10584 case NVM_OFF_CFG2:
10585 rv = wm_nvm_read_word_invm(sc, offset, data);
10586 if (rv != 0) {
10587 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
10588 rv = 0;
10589 }
10590 break;
10591 case NVM_OFF_CFG4:
10592 rv = wm_nvm_read_word_invm(sc, offset, data);
10593 if (rv != 0) {
10594 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
10595 rv = 0;
10596 }
10597 break;
10598 case NVM_OFF_LED_1_CFG:
10599 rv = wm_nvm_read_word_invm(sc, offset, data);
10600 if (rv != 0) {
10601 *data = NVM_LED_1_CFG_DEFAULT_I211;
10602 rv = 0;
10603 }
10604 break;
10605 case NVM_OFF_LED_0_2_CFG:
10606 rv = wm_nvm_read_word_invm(sc, offset, data);
10607 if (rv != 0) {
10608 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
10609 rv = 0;
10610 }
10611 break;
10612 case NVM_OFF_ID_LED_SETTINGS:
10613 rv = wm_nvm_read_word_invm(sc, offset, data);
10614 if (rv != 0) {
10615 *data = ID_LED_RESERVED_FFFF;
10616 rv = 0;
10617 }
10618 break;
10619 default:
10620 DPRINTF(WM_DEBUG_NVM,
10621 ("NVM word 0x%02x is not mapped.\n", offset));
10622 *data = NVM_RESERVED_WORD;
10623 break;
10624 }
10625 }
10626
10627 return rv;
10628 }
10629
/* Locking, NVM type detection, checksum validation, version and read */
10631
10632 /*
10633 * wm_nvm_acquire:
10634 *
10635 * Perform the EEPROM handshake required on some chips.
10636 */
10637 static int
10638 wm_nvm_acquire(struct wm_softc *sc)
10639 {
10640 uint32_t reg;
10641 int x;
10642 int ret = 0;
10643
	/* Always succeeds: flash-type NVM needs no handshake */
10645 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10646 return 0;
10647
10648 if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
10649 ret = wm_get_swfwhw_semaphore(sc);
10650 } else if (sc->sc_flags & WM_F_LOCK_SWFW) {
10651 /* This will also do wm_get_swsm_semaphore() if needed */
10652 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
10653 } else if (sc->sc_flags & WM_F_LOCK_SWSM) {
10654 ret = wm_get_swsm_semaphore(sc);
10655 }
10656
10657 if (ret) {
10658 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10659 __func__);
10660 return 1;
10661 }
10662
10663 if (sc->sc_flags & WM_F_LOCK_EECD) {
10664 reg = CSR_READ(sc, WMREG_EECD);
10665
10666 /* Request EEPROM access. */
10667 reg |= EECD_EE_REQ;
10668 CSR_WRITE(sc, WMREG_EECD, reg);
10669
10670 /* ..and wait for it to be granted. */
10671 for (x = 0; x < 1000; x++) {
10672 reg = CSR_READ(sc, WMREG_EECD);
10673 if (reg & EECD_EE_GNT)
10674 break;
10675 delay(5);
10676 }
10677 if ((reg & EECD_EE_GNT) == 0) {
10678 aprint_error_dev(sc->sc_dev,
10679 "could not acquire EEPROM GNT\n");
10680 reg &= ~EECD_EE_REQ;
10681 CSR_WRITE(sc, WMREG_EECD, reg);
10682 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10683 wm_put_swfwhw_semaphore(sc);
10684 if (sc->sc_flags & WM_F_LOCK_SWFW)
10685 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10686 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10687 wm_put_swsm_semaphore(sc);
10688 return 1;
10689 }
10690 }
10691
10692 return 0;
10693 }
10694
10695 /*
10696 * wm_nvm_release:
10697 *
10698 * Release the EEPROM mutex.
10699 */
10700 static void
10701 wm_nvm_release(struct wm_softc *sc)
10702 {
10703 uint32_t reg;
10704
	/* Always succeeds: flash-type NVM needs no handshake */
10706 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10707 return;
10708
10709 if (sc->sc_flags & WM_F_LOCK_EECD) {
10710 reg = CSR_READ(sc, WMREG_EECD);
10711 reg &= ~EECD_EE_REQ;
10712 CSR_WRITE(sc, WMREG_EECD, reg);
10713 }
10714
10715 if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10716 wm_put_swfwhw_semaphore(sc);
10717 if (sc->sc_flags & WM_F_LOCK_SWFW)
10718 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10719 else if (sc->sc_flags & WM_F_LOCK_SWSM)
10720 wm_put_swsm_semaphore(sc);
10721 }
10722
10723 static int
10724 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
10725 {
10726 uint32_t eecd = 0;
10727
10728 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
10729 || sc->sc_type == WM_T_82583) {
10730 eecd = CSR_READ(sc, WMREG_EECD);
10731
10732 /* Isolate bits 15 & 16 */
10733 eecd = ((eecd >> 15) & 0x03);
10734
10735 /* If both bits are set, device is Flash type */
10736 if (eecd == 0x03)
10737 return 0;
10738 }
10739 return 1;
10740 }
10741
10742 static int
10743 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
10744 {
10745 uint32_t eec;
10746
10747 eec = CSR_READ(sc, WMREG_EEC);
10748 if ((eec & EEC_FLASH_DETECTED) != 0)
10749 return 1;
10750
10751 return 0;
10752 }
10753
10754 /*
10755 * wm_nvm_validate_checksum
10756 *
10757 * The checksum is defined as the sum of the first 64 (16 bit) words.
10758 */
10759 static int
10760 wm_nvm_validate_checksum(struct wm_softc *sc)
10761 {
10762 uint16_t checksum;
10763 uint16_t eeprom_data;
10764 #ifdef WM_DEBUG
10765 uint16_t csum_wordaddr, valid_checksum;
10766 #endif
10767 int i;
10768
10769 checksum = 0;
10770
10771 /* Don't check for I211 */
10772 if (sc->sc_type == WM_T_I211)
10773 return 0;
10774
10775 #ifdef WM_DEBUG
10776 if (sc->sc_type == WM_T_PCH_LPT) {
10777 csum_wordaddr = NVM_OFF_COMPAT;
10778 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
10779 } else {
10780 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
10781 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
10782 }
10783
10784 /* Dump EEPROM image for debug */
10785 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10786 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10787 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
10788 /* XXX PCH_SPT? */
10789 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
10790 if ((eeprom_data & valid_checksum) == 0) {
10791 DPRINTF(WM_DEBUG_NVM,
10792 ("%s: NVM need to be updated (%04x != %04x)\n",
10793 device_xname(sc->sc_dev), eeprom_data,
10794 valid_checksum));
10795 }
10796 }
10797
10798 if ((wm_debug & WM_DEBUG_NVM) != 0) {
10799 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
10800 for (i = 0; i < NVM_SIZE; i++) {
10801 if (wm_nvm_read(sc, i, 1, &eeprom_data))
10802 printf("XXXX ");
10803 else
10804 printf("%04hx ", eeprom_data);
10805 if (i % 8 == 7)
10806 printf("\n");
10807 }
10808 }
10809
10810 #endif /* WM_DEBUG */
10811
10812 for (i = 0; i < NVM_SIZE; i++) {
10813 if (wm_nvm_read(sc, i, 1, &eeprom_data))
10814 return 1;
10815 checksum += eeprom_data;
10816 }
10817
10818 if (checksum != (uint16_t) NVM_CHECKSUM) {
10819 #ifdef WM_DEBUG
10820 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
10821 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
10822 #endif
10823 }
10824
10825 return 0;
10826 }
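
/*
 * Illustrative sketch (not called by the driver): the invariant
 * checked above, restated as a pure function over an in-memory
 * image.  The 16-bit sum of the first NVM_SIZE words, including the
 * stored checksum word itself, must equal NVM_CHECKSUM.
 */
static __unused int
wm_nvm_image_checksum_ok_sketch(const uint16_t *image)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < NVM_SIZE; i++)
		sum += image[i];

	return sum == (uint16_t)NVM_CHECKSUM;
}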
10827
10828 static void
10829 wm_nvm_version_invm(struct wm_softc *sc)
10830 {
10831 uint32_t dword;
10832
	/*
	 * Linux's code to decode the version is very strange, so we
	 * don't follow that algorithm and just use word 61 as the
	 * documentation describes.  Perhaps it's not perfect, though...
	 *
10838 * Example:
10839 *
10840 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
10841 */
10842 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
10843 dword = __SHIFTOUT(dword, INVM_VER_1);
10844 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
10845 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
10846 }
10847
10848 static void
10849 wm_nvm_version(struct wm_softc *sc)
10850 {
10851 uint16_t major, minor, build, patch;
10852 uint16_t uid0, uid1;
10853 uint16_t nvm_data;
10854 uint16_t off;
10855 bool check_version = false;
10856 bool check_optionrom = false;
10857 bool have_build = false;
10858
10859 /*
10860 * Version format:
10861 *
10862 * XYYZ
10863 * X0YZ
10864 * X0YY
10865 *
10866 * Example:
10867 *
10868 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
10869 * 82571 0x50a6 5.10.6?
10870 * 82572 0x506a 5.6.10?
10871 * 82572EI 0x5069 5.6.9?
10872 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
10873 * 0x2013 2.1.3?
	 *	82583	0x10a0	1.10.0? (the document says it's the default value)
10875 */
10876 wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
10877 switch (sc->sc_type) {
10878 case WM_T_82571:
10879 case WM_T_82572:
10880 case WM_T_82574:
10881 case WM_T_82583:
10882 check_version = true;
10883 check_optionrom = true;
10884 have_build = true;
10885 break;
10886 case WM_T_82575:
10887 case WM_T_82576:
10888 case WM_T_82580:
10889 if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
10890 check_version = true;
10891 break;
10892 case WM_T_I211:
10893 wm_nvm_version_invm(sc);
10894 goto printver;
10895 case WM_T_I210:
10896 if (!wm_nvm_get_flash_presence_i210(sc)) {
10897 wm_nvm_version_invm(sc);
10898 goto printver;
10899 }
10900 /* FALLTHROUGH */
10901 case WM_T_I350:
10902 case WM_T_I354:
10903 check_version = true;
10904 check_optionrom = true;
10905 break;
10906 default:
10907 return;
10908 }
10909 if (check_version) {
10910 wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
10911 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
10912 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
10913 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
10914 build = nvm_data & NVM_BUILD_MASK;
10915 have_build = true;
10916 } else
10917 minor = nvm_data & 0x00ff;
10918
		/* Convert the BCD-style minor number to decimal */
10920 minor = (minor / 16) * 10 + (minor % 16);
10921 sc->sc_nvm_ver_major = major;
10922 sc->sc_nvm_ver_minor = minor;
10923
10924 printver:
10925 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
10926 sc->sc_nvm_ver_minor);
10927 if (have_build) {
10928 sc->sc_nvm_ver_build = build;
10929 aprint_verbose(".%d", build);
10930 }
10931 }
10932 if (check_optionrom) {
10933 wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
10934 /* Option ROM Version */
10935 if ((off != 0x0000) && (off != 0xffff)) {
10936 off += NVM_COMBO_VER_OFF;
10937 wm_nvm_read(sc, off + 1, 1, &uid1);
10938 wm_nvm_read(sc, off, 1, &uid0);
10939 if ((uid0 != 0) && (uid0 != 0xffff)
10940 && (uid1 != 0) && (uid1 != 0xffff)) {
10941 /* 16bits */
10942 major = uid0 >> 8;
10943 build = (uid0 << 8) | (uid1 >> 8);
10944 patch = uid1 & 0x00ff;
10945 aprint_verbose(", option ROM Version %d.%d.%d",
10946 major, build, patch);
10947 }
10948 }
10949 }
10950
10951 wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
10952 aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
10953 }
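
/*
 * Compact restatement of the XYYZ decode above as a standalone
 * sketch (illustrative only): 0x50a2 splits into major 0x5,
 * minor 0x0a and build 0x2, and the BCD-style conversion turns
 * minor 0x0a into decimal 10, i.e. "5.10.2".
 */
static __unused void
wm_nvm_decode_version_sketch(uint16_t nvm_data, uint16_t *majorp,
    uint16_t *minorp, uint16_t *buildp)
{
	uint16_t minor;

	*majorp = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
	minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
	*buildp = nvm_data & NVM_BUILD_MASK;

	/* The minor number is stored BCD-style: 0x0a means "10". */
	*minorp = (minor / 16) * 10 + (minor % 16);
}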
10954
10955 /*
10956 * wm_nvm_read:
10957 *
10958 * Read data from the serial EEPROM.
10959 */
10960 static int
10961 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10962 {
10963 int rv;
10964
10965 if (sc->sc_flags & WM_F_EEPROM_INVALID)
10966 return 1;
10967
10968 if (wm_nvm_acquire(sc))
10969 return 1;
10970
10971 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10972 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10973 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
10974 rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
10975 else if (sc->sc_type == WM_T_PCH_SPT)
10976 rv = wm_nvm_read_spt(sc, word, wordcnt, data);
10977 else if (sc->sc_flags & WM_F_EEPROM_INVM)
10978 rv = wm_nvm_read_invm(sc, word, wordcnt, data);
10979 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
10980 rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
10981 else if (sc->sc_flags & WM_F_EEPROM_SPI)
10982 rv = wm_nvm_read_spi(sc, word, wordcnt, data);
10983 else
10984 rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
10985
10986 wm_nvm_release(sc);
10987 return rv;
10988 }
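
/*
 * Illustrative usage sketch (not called here): read the two Image
 * Unique ID words.  wm_nvm_read() takes care of locking and backend
 * selection, so callers deal only with word offsets and counts.
 */
static __unused int
wm_nvm_read_uid_sketch(struct wm_softc *sc, uint32_t *uidp)
{
	uint16_t uid0, uid1;

	if (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) != 0
	    || wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0)
		return -1;

	*uidp = ((uint32_t)uid1 << 16) | uid0;
	return 0;
}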
10989
10990 /*
10991 * Hardware semaphores.
 * Very complex...
10993 */
10994
10995 static int
10996 wm_get_swsm_semaphore(struct wm_softc *sc)
10997 {
10998 int32_t timeout;
10999 uint32_t swsm;
11000
11001 if (sc->sc_flags & WM_F_LOCK_SWSM) {
11002 /* Get the SW semaphore. */
11003 timeout = sc->sc_nvm_wordsize + 1;
11004 while (timeout) {
11005 swsm = CSR_READ(sc, WMREG_SWSM);
11006
11007 if ((swsm & SWSM_SMBI) == 0)
11008 break;
11009
11010 delay(50);
11011 timeout--;
11012 }
11013
11014 if (timeout == 0) {
11015 aprint_error_dev(sc->sc_dev,
11016 "could not acquire SWSM SMBI\n");
11017 return 1;
11018 }
11019 }
11020
11021 /* Get the FW semaphore. */
11022 timeout = sc->sc_nvm_wordsize + 1;
11023 while (timeout) {
11024 swsm = CSR_READ(sc, WMREG_SWSM);
11025 swsm |= SWSM_SWESMBI;
11026 CSR_WRITE(sc, WMREG_SWSM, swsm);
11027 /* If we managed to set the bit we got the semaphore. */
11028 swsm = CSR_READ(sc, WMREG_SWSM);
11029 if (swsm & SWSM_SWESMBI)
11030 break;
11031
11032 delay(50);
11033 timeout--;
11034 }
11035
11036 if (timeout == 0) {
11037 aprint_error_dev(sc->sc_dev,
11038 "could not acquire SWSM SWESMBI\n");
11039 /* Release semaphores */
11040 wm_put_swsm_semaphore(sc);
11041 return 1;
11042 }
11043 return 0;
11044 }
11045
11046 static void
11047 wm_put_swsm_semaphore(struct wm_softc *sc)
11048 {
11049 uint32_t swsm;
11050
11051 swsm = CSR_READ(sc, WMREG_SWSM);
11052 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
11053 CSR_WRITE(sc, WMREG_SWSM, swsm);
11054 }
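
/*
 * Illustrative note on the handshake above: SMBI is the software
 * semaphore (taken when a read returns the bit clear) and SWESMBI
 * the software/firmware semaphore (taken by writing the bit and
 * reading it back).  wm_put_swsm_semaphore() drops both, so a
 * typical caller brackets its access with the pair:
 *
 *	if (wm_get_swsm_semaphore(sc) == 0) {
 *		... touch the shared resource ...
 *		wm_put_swsm_semaphore(sc);
 *	}
 */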
11055
11056 static int
11057 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11058 {
11059 uint32_t swfw_sync;
11060 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
11061 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
11063
11064 for (timeout = 0; timeout < 200; timeout++) {
11065 if (sc->sc_flags & WM_F_LOCK_SWSM) {
11066 if (wm_get_swsm_semaphore(sc)) {
11067 aprint_error_dev(sc->sc_dev,
11068 "%s: failed to get semaphore\n",
11069 __func__);
11070 return 1;
11071 }
11072 }
11073 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11074 if ((swfw_sync & (swmask | fwmask)) == 0) {
11075 swfw_sync |= swmask;
11076 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11077 if (sc->sc_flags & WM_F_LOCK_SWSM)
11078 wm_put_swsm_semaphore(sc);
11079 return 0;
11080 }
11081 if (sc->sc_flags & WM_F_LOCK_SWSM)
11082 wm_put_swsm_semaphore(sc);
11083 delay(5000);
11084 }
11085 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
11086 device_xname(sc->sc_dev), mask, swfw_sync);
11087 return 1;
11088 }
11089
11090 static void
11091 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11092 {
11093 uint32_t swfw_sync;
11094
11095 if (sc->sc_flags & WM_F_LOCK_SWSM) {
11096 while (wm_get_swsm_semaphore(sc) != 0)
11097 continue;
11098 }
11099 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11100 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
11101 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11102 if (sc->sc_flags & WM_F_LOCK_SWSM)
11103 wm_put_swsm_semaphore(sc);
11104 }
11105
11106 static int
11107 wm_get_swfwhw_semaphore(struct wm_softc *sc)
11108 {
11109 uint32_t ext_ctrl;
	int timeout;
11111
11112 for (timeout = 0; timeout < 200; timeout++) {
11113 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11114 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
11115 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11116
11117 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11118 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
11119 return 0;
11120 delay(5000);
11121 }
11122 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
11123 device_xname(sc->sc_dev), ext_ctrl);
11124 return 1;
11125 }
11126
11127 static void
11128 wm_put_swfwhw_semaphore(struct wm_softc *sc)
11129 {
11130 uint32_t ext_ctrl;
11131
11132 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11133 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11134 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11135 }
11136
11137 static int
11138 wm_get_hw_semaphore_82573(struct wm_softc *sc)
11139 {
11140 int i = 0;
11141 uint32_t reg;
11142
11143 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11144 do {
11145 CSR_WRITE(sc, WMREG_EXTCNFCTR,
11146 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
11147 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11148 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
11149 break;
11150 delay(2*1000);
11151 i++;
11152 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
11153
11154 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
11155 wm_put_hw_semaphore_82573(sc);
11156 log(LOG_ERR, "%s: Driver can't access the PHY\n",
11157 device_xname(sc->sc_dev));
11158 return -1;
11159 }
11160
11161 return 0;
11162 }
11163
11164 static void
11165 wm_put_hw_semaphore_82573(struct wm_softc *sc)
11166 {
11167 uint32_t reg;
11168
11169 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11170 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11171 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11172 }
11173
11174 /*
11175 * Management mode and power management related subroutines.
11176 * BMC, AMT, suspend/resume and EEE.
11177 */
11178
11179 #ifdef WM_WOL
11180 static int
11181 wm_check_mng_mode(struct wm_softc *sc)
11182 {
11183 int rv;
11184
11185 switch (sc->sc_type) {
11186 case WM_T_ICH8:
11187 case WM_T_ICH9:
11188 case WM_T_ICH10:
11189 case WM_T_PCH:
11190 case WM_T_PCH2:
11191 case WM_T_PCH_LPT:
11192 case WM_T_PCH_SPT:
11193 rv = wm_check_mng_mode_ich8lan(sc);
11194 break;
11195 case WM_T_82574:
11196 case WM_T_82583:
11197 rv = wm_check_mng_mode_82574(sc);
11198 break;
11199 case WM_T_82571:
11200 case WM_T_82572:
11201 case WM_T_82573:
11202 case WM_T_80003:
11203 rv = wm_check_mng_mode_generic(sc);
11204 break;
11205 default:
		/* Nothing to do */
11207 rv = 0;
11208 break;
11209 }
11210
11211 return rv;
11212 }
11213
11214 static int
11215 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
11216 {
11217 uint32_t fwsm;
11218
11219 fwsm = CSR_READ(sc, WMREG_FWSM);
11220
11221 if (((fwsm & FWSM_FW_VALID) != 0)
11222 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11223 return 1;
11224
11225 return 0;
11226 }
11227
11228 static int
11229 wm_check_mng_mode_82574(struct wm_softc *sc)
11230 {
11231 uint16_t data;
11232
11233 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11234
11235 if ((data & NVM_CFG2_MNGM_MASK) != 0)
11236 return 1;
11237
11238 return 0;
11239 }
11240
11241 static int
11242 wm_check_mng_mode_generic(struct wm_softc *sc)
11243 {
11244 uint32_t fwsm;
11245
11246 fwsm = CSR_READ(sc, WMREG_FWSM);
11247
11248 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
11249 return 1;
11250
11251 return 0;
11252 }
11253 #endif /* WM_WOL */
11254
11255 static int
11256 wm_enable_mng_pass_thru(struct wm_softc *sc)
11257 {
11258 uint32_t manc, fwsm, factps;
11259
11260 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
11261 return 0;
11262
11263 manc = CSR_READ(sc, WMREG_MANC);
11264
11265 DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
11266 device_xname(sc->sc_dev), manc));
11267 if ((manc & MANC_RECV_TCO_EN) == 0)
11268 return 0;
11269
11270 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11271 fwsm = CSR_READ(sc, WMREG_FWSM);
11272 factps = CSR_READ(sc, WMREG_FACTPS);
11273 if (((factps & FACTPS_MNGCG) == 0)
11274 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11275 return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
11277 uint16_t data;
11278
11279 factps = CSR_READ(sc, WMREG_FACTPS);
11280 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11281 DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11282 device_xname(sc->sc_dev), factps, data));
11283 if (((factps & FACTPS_MNGCG) == 0)
11284 && ((data & NVM_CFG2_MNGM_MASK)
11285 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11286 return 1;
11287 } else if (((manc & MANC_SMBUS_EN) != 0)
11288 && ((manc & MANC_ASF_EN) == 0))
11289 return 1;
11290
11291 return 0;
11292 }
11293
11294 static bool
11295 wm_phy_resetisblocked(struct wm_softc *sc)
11296 {
11297 bool blocked = false;
11298 uint32_t reg;
11299 int i = 0;
11300
11301 switch (sc->sc_type) {
11302 case WM_T_ICH8:
11303 case WM_T_ICH9:
11304 case WM_T_ICH10:
11305 case WM_T_PCH:
11306 case WM_T_PCH2:
11307 case WM_T_PCH_LPT:
11308 case WM_T_PCH_SPT:
11309 do {
11310 reg = CSR_READ(sc, WMREG_FWSM);
11311 if ((reg & FWSM_RSPCIPHY) == 0) {
11312 blocked = true;
11313 delay(10*1000);
11314 continue;
11315 }
11316 blocked = false;
11317 } while (blocked && (i++ < 10));
		return blocked;
11320 case WM_T_82571:
11321 case WM_T_82572:
11322 case WM_T_82573:
11323 case WM_T_82574:
11324 case WM_T_82583:
11325 case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
11332 default:
11333 /* no problem */
11334 break;
11335 }
11336
11337 return false;
11338 }
11339
11340 static void
11341 wm_get_hw_control(struct wm_softc *sc)
11342 {
11343 uint32_t reg;
11344
11345 switch (sc->sc_type) {
11346 case WM_T_82573:
11347 reg = CSR_READ(sc, WMREG_SWSM);
11348 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
11349 break;
11350 case WM_T_82571:
11351 case WM_T_82572:
11352 case WM_T_82574:
11353 case WM_T_82583:
11354 case WM_T_80003:
11355 case WM_T_ICH8:
11356 case WM_T_ICH9:
11357 case WM_T_ICH10:
11358 case WM_T_PCH:
11359 case WM_T_PCH2:
11360 case WM_T_PCH_LPT:
11361 case WM_T_PCH_SPT:
11362 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11363 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
11364 break;
11365 default:
11366 break;
11367 }
11368 }
11369
11370 static void
11371 wm_release_hw_control(struct wm_softc *sc)
11372 {
11373 uint32_t reg;
11374
11375 if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
11376 return;
11377
11378 if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
11382 } else {
11383 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11384 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
11385 }
11386 }
11387
11388 static void
11389 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
11390 {
11391 uint32_t reg;
11392
11393 if (sc->sc_type < WM_T_PCH2)
11394 return;
11395
11396 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11397
11398 if (gate)
11399 reg |= EXTCNFCTR_GATE_PHY_CFG;
11400 else
11401 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
11402
11403 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11404 }
11405
11406 static void
11407 wm_smbustopci(struct wm_softc *sc)
11408 {
11409 uint32_t fwsm, reg;
11410
11411 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
11412 wm_gate_hw_phy_config_ich8lan(sc, true);
11413
11414 /* Acquire semaphore */
11415 wm_get_swfwhw_semaphore(sc);
11416
11417 fwsm = CSR_READ(sc, WMREG_FWSM);
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && (wm_phy_resetisblocked(sc) == false)) {
11420 if (sc->sc_type >= WM_T_PCH_LPT) {
11421 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11422 reg |= CTRL_EXT_FORCE_SMBUS;
11423 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11424 CSR_WRITE_FLUSH(sc);
11425 delay(50*1000);
11426 }
11427
11428 /* Toggle LANPHYPC */
11429 sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
11430 sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
11431 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11432 CSR_WRITE_FLUSH(sc);
11433 delay(10);
11434 sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
11435 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11436 CSR_WRITE_FLUSH(sc);
11437 delay(50*1000);
11438
11439 if (sc->sc_type >= WM_T_PCH_LPT) {
11440 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11441 reg &= ~CTRL_EXT_FORCE_SMBUS;
11442 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11443 }
11444 }
11445
11446 /* Release semaphore */
11447 wm_put_swfwhw_semaphore(sc);
11448
11449 /*
11450 * Ungate automatic PHY configuration by hardware on non-managed 82579
11451 */
11452 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
11453 wm_gate_hw_phy_config_ich8lan(sc, false);
11454 }
11455
11456 static void
11457 wm_init_manageability(struct wm_softc *sc)
11458 {
11459
11460 DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11461 device_xname(sc->sc_dev), __func__));
11462 if (sc->sc_flags & WM_F_HAS_MANAGE) {
11463 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
11464 uint32_t manc = CSR_READ(sc, WMREG_MANC);
11465
11466 /* Disable hardware interception of ARP */
11467 manc &= ~MANC_ARP_EN;
11468
11469 /* Enable receiving management packets to the host */
11470 if (sc->sc_type >= WM_T_82571) {
11471 manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
11473 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
11474 }
11475
11476 CSR_WRITE(sc, WMREG_MANC, manc);
11477 }
11478 }
11479
11480 static void
11481 wm_release_manageability(struct wm_softc *sc)
11482 {
11483
11484 if (sc->sc_flags & WM_F_HAS_MANAGE) {
11485 uint32_t manc = CSR_READ(sc, WMREG_MANC);
11486
11487 manc |= MANC_ARP_EN;
11488 if (sc->sc_type >= WM_T_82571)
11489 manc &= ~MANC_EN_MNG2HOST;
11490
11491 CSR_WRITE(sc, WMREG_MANC, manc);
11492 }
11493 }
11494
11495 static void
11496 wm_get_wakeup(struct wm_softc *sc)
11497 {
11498
11499 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
11500 switch (sc->sc_type) {
11501 case WM_T_82573:
11502 case WM_T_82583:
11503 sc->sc_flags |= WM_F_HAS_AMT;
11504 /* FALLTHROUGH */
11505 case WM_T_80003:
11506 case WM_T_82541:
11507 case WM_T_82547:
11508 case WM_T_82571:
11509 case WM_T_82572:
11510 case WM_T_82574:
11511 case WM_T_82575:
11512 case WM_T_82576:
11513 case WM_T_82580:
11514 case WM_T_I350:
11515 case WM_T_I354:
11516 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
11517 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
11518 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11519 break;
11520 case WM_T_ICH8:
11521 case WM_T_ICH9:
11522 case WM_T_ICH10:
11523 case WM_T_PCH:
11524 case WM_T_PCH2:
11525 case WM_T_PCH_LPT:
11526 case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
11527 sc->sc_flags |= WM_F_HAS_AMT;
11528 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11529 break;
11530 default:
11531 break;
11532 }
11533
11534 /* Step 1: detect HAS_MANAGE */
11535 if (wm_enable_mng_pass_thru(sc) != 0)
11536 sc->sc_flags |= WM_F_HAS_MANAGE;
11537
11538 #ifdef WM_DEBUG
11539 printf("\n");
11540 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
11541 printf("HAS_AMT,");
11542 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
11543 printf("ARC_SUBSYS_VALID,");
11544 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
11545 printf("ASF_FIRMWARE_PRES,");
11546 if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
11547 printf("HAS_MANAGE,");
11548 printf("\n");
11549 #endif
11550 /*
11551  * Note that the WOL flag is set after the EEPROM related reset
11552  * code has run.
11553  */
11554 }
11555
11556 #ifdef WM_WOL
11557 /* WOL in the newer chipset interfaces (pchlan) */
11558 static void
11559 wm_enable_phy_wakeup(struct wm_softc *sc)
11560 {
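/* XXX: not implemented yet; the disabled block below sketches the steps. */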
11561 #if 0
11562 uint16_t preg;
11563
11564 /* Copy MAC RARs to PHY RARs */
11565
11566 /* Copy MAC MTA to PHY MTA */
11567
11568 /* Configure PHY Rx Control register */
11569
11570 /* Enable PHY wakeup in MAC register */
11571
11572 /* Configure and enable PHY wakeup in PHY registers */
11573
11574 /* Activate PHY wakeup */
11575
11576 /* XXX */
11577 #endif
11578 }
11579
11580 /* Power-down workaround for the IGP3 PHY when entering D3 */
11581 static void
11582 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
11583 {
11584 uint32_t reg;
11585 int i;
11586
11587 for (i = 0; i < 2; i++) {
11588 /* Disable link */
11589 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11590 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11591 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11592
11593 /*
11594 * Call gig speed drop workaround on Gig disable before
11595 * accessing any PHY registers
11596 */
11597 if (sc->sc_type == WM_T_ICH8)
11598 wm_gig_downshift_workaround_ich8lan(sc);
11599
11600 /* Write VR power-down enable */
11601 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11602 reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11603 reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
11604 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
11605
11606 /* Read it back and test */
11607 reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11608 reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11609 if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
11610 break;
11611
11612 /* Issue PHY reset and repeat at most one more time */
11613 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11614 }
11615 }
11616
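/*
 * wm_enable_wakeup:
 *
 *	Program the wakeup filters and enable PME assertion so that the
 *	chip can wake the system (WOL).
 */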
11617 static void
11618 wm_enable_wakeup(struct wm_softc *sc)
11619 {
11620 uint32_t reg, pmreg;
11621 pcireg_t pmode;
11622
11623 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
11624 &pmreg, NULL) == 0)
11625 return;
11626
11627 /* Advertise the wakeup capability */
11628 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
11629 | CTRL_SWDPIN(3));
11630 CSR_WRITE(sc, WMREG_WUC, WUC_APME);
11631
11632 /* ICH workaround */
11633 switch (sc->sc_type) {
11634 case WM_T_ICH8:
11635 case WM_T_ICH9:
11636 case WM_T_ICH10:
11637 case WM_T_PCH:
11638 case WM_T_PCH2:
11639 case WM_T_PCH_LPT:
11640 case WM_T_PCH_SPT:
11641 /* Disable gig during WOL */
11642 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11643 reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
11644 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11645 if (sc->sc_type == WM_T_PCH)
11646 wm_gmii_reset(sc);
11647
11648 /* Power down workaround */
11649 if (sc->sc_phytype == WMPHY_82577) {
11650 struct mii_softc *child;
11651
11652 /* Assume that the PHY is copper */
11653 child = LIST_FIRST(&sc->sc_mii.mii_phys);
11654 if (child->mii_mpd_rev <= 2)
11655 sc->sc_mii.mii_writereg(sc->sc_dev, 1,
11656 (768 << 5) | 25, 0x0444); /* page 768, reg 25: magic number */
11657 }
11658 break;
11659 default:
11660 break;
11661 }
11662
11663 /* Keep the laser running on fiber adapters */
11664 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
11665 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
11666 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11667 reg |= CTRL_EXT_SWDPIN(3);
11668 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11669 }
11670
11671 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
11672 #if 0 /* for multicast packets */
11673 reg |= WUFC_MC;
11674 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
11675 #endif
11676
11677 if (sc->sc_type == WM_T_PCH) {
11678 wm_enable_phy_wakeup(sc);
11679 } else {
11680 CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
11681 CSR_WRITE(sc, WMREG_WUFC, reg);
11682 }
11683
11684 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11685 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11686 || (sc->sc_type == WM_T_PCH2))
11687 && (sc->sc_phytype == WMPHY_IGP_3))
11688 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
11689
11690 /* Request PME */
11691 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
11692 #if 0
11693 /* Disable WOL */
11694 pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
11695 #else
11696 /* For WOL */
11697 pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
11698 #endif
11699 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
11700 }
11701 #endif /* WM_WOL */
11702
11703 /* LPLU */
11704
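/*
 * wm_lplu_d0_disable:
 *
 *	Disable D0 Low Power Link Up (LPLU) through the PHY_CTRL register
 *	so the link is not forced down to a lower speed while in D0.
 */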
11705 static void
11706 wm_lplu_d0_disable(struct wm_softc *sc)
11707 {
11708 uint32_t reg;
11709
11710 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11711 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
11712 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11713 }
11714
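/*
 * wm_lplu_d0_disable_pch:
 *
 *	The PCH variant of the above: LPLU is controlled through the
 *	HV_OEM_BITS PHY register, and auto-negotiation is restarted so
 *	the change takes effect.
 */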
11715 static void
11716 wm_lplu_d0_disable_pch(struct wm_softc *sc)
11717 {
11718 uint32_t reg;
11719
11720 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
11721 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
11722 reg |= HV_OEM_BITS_ANEGNOW;
11723 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
11724 }
11725
11726 /* EEE */
11727
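/*
 * wm_set_eee_i350:
 *
 *	Enable or disable Energy Efficient Ethernet on I350 class chips
 *	depending on the WM_F_EEE flag.
 */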
11728 static void
11729 wm_set_eee_i350(struct wm_softc *sc)
11730 {
11731 uint32_t ipcnfg, eeer;
11732
11733 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
11734 eeer = CSR_READ(sc, WMREG_EEER);
11735
11736 if ((sc->sc_flags & WM_F_EEE) != 0) {
11737 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11738 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
11739 | EEER_LPI_FC);
11740 } else {
11741 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11742 ipcnfg &= ~IPCNFG_10BASE_TE;
11743 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
11744 | EEER_LPI_FC);
11745 }
11746
11747 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
11748 CSR_WRITE(sc, WMREG_EEER, eeer);
11749 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
11750 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
11751 }
11752
11753 /*
11754 * Workarounds (mainly PHY related).
11755  * As a rule, workarounds for PHYs belong in the PHY drivers.
11756 */
11757
11758 /* Work-around for 82566 Kumeran PCS lock loss */
11759 static void
11760 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
11761 {
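/* XXX: currently compiled out. */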
11762 #if 0
11763 int miistatus, active, i;
11764 int reg;
11765
11766 miistatus = sc->sc_mii.mii_media_status;
11767
11768 /* If the link is not up, do nothing */
11769 if ((miistatus & IFM_ACTIVE) == 0)
11770 return;
11771
11772 active = sc->sc_mii.mii_media_active;
11773
11774 /* Nothing to do if the link is other than 1Gbps */
11775 if (IFM_SUBTYPE(active) != IFM_1000_T)
11776 return;
11777
11778 for (i = 0; i < 10; i++) {
11779 /* read twice */
11780 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11781 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11782 if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
11783 goto out; /* GOOD! */
11784
11785 /* Reset the PHY */
11786 wm_gmii_reset(sc);
11787 delay(5*1000);
11788 }
11789
11790 /* Disable GigE link negotiation */
11791 reg = CSR_READ(sc, WMREG_PHY_CTRL);
11792 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11793 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11794
11795 /*
11796 * Call gig speed drop workaround on Gig disable before accessing
11797 * any PHY registers.
11798 */
11799 wm_gig_downshift_workaround_ich8lan(sc);
11800
11801 out:
11802 return;
11803 #endif
11804 }
11805
11806 /* Workaround for the "WOL from S5 stops working" issue */
11807 static void
11808 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
11809 {
11810 uint16_t kmrn_reg;
11811
11812 /* Only for igp3 */
11813 if (sc->sc_phytype == WMPHY_IGP_3) {
11814 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
11815 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
11816 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11817 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
11818 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11819 }
11820 }
11821
11822 /*
11823  * Workaround for the PCH's PHYs.
11824  * XXX should this be moved to a new PHY driver?
11825 */
11826 static void
11827 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
11828 {
11829 if (sc->sc_phytype == WMPHY_82577)
11830 wm_set_mdio_slow_mode_hv(sc);
11831
11832 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
11833
11834 /* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
11835
11836 /* 82578 */
11837 if (sc->sc_phytype == WMPHY_82578) {
11838 /* PCH rev. < 3 */
11839 if (sc->sc_rev < 3) {
11840 /* XXX 6-bit shift? Why? Is it page 2? */
11841 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
11842 0x66c0);
11843 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
11844 0xffff);
11845 }
11846
11847 /* XXX phy rev. < 2 */
11848 }
11849
11850 /* Select page 0 */
11851
11852 /* XXX acquire semaphore */
11853 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
11854 /* XXX release semaphore */
11855
11856 /*
11857  * Configure the K1 Si workaround during PHY reset, assuming there
11858  * is link, so that K1 gets disabled if the link is at 1Gbps.
11859 */
11860 wm_k1_gig_workaround_hv(sc, 1);
11861 }
11862
11863 static void
11864 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
11865 {
11866
11867 wm_set_mdio_slow_mode_hv(sc);
11868 }
11869
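/*
 * wm_k1_gig_workaround_hv:
 *
 *	K1, a power saving state of the MAC-PHY interconnect, must not
 *	be active while the link is up at 1Gbps; if there is link,
 *	disable K1 and apply the link stall fix.
 */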
11870 static void
11871 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
11872 {
11873 int k1_enable = sc->sc_nvm_k1_enabled;
11874
11875 /* XXX acquire semaphore */
11876
11877 if (link) {
11878 k1_enable = 0;
11879
11880 /* Link stall fix for link up */
11881 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
11882 } else {
11883 /* Link stall fix for link down */
11884 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
11885 }
11886
11887 wm_configure_k1_ich8lan(sc, k1_enable);
11888
11889 /* XXX release semaphore */
11890 }
11891
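/*
 * wm_set_mdio_slow_mode_hv:
 *
 *	Switch the Kumeran interface to slow MDIO mode; some PHY
 *	accesses on PCH parts apparently require this.
 */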
11892 static void
11893 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
11894 {
11895 uint32_t reg;
11896
11897 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
11898 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
11899 reg | HV_KMRN_MDIO_SLOW);
11900 }
11901
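/*
 * wm_configure_k1_ich8lan:
 *
 *	Write the K1 enable bit through the Kumeran interface, then
 *	briefly force the MAC speed configuration so the new setting
 *	takes effect.
 */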
11902 static void
11903 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
11904 {
11905 uint32_t ctrl, ctrl_ext, tmp;
11906 uint16_t kmrn_reg;
11907
11908 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
11909
11910 if (k1_enable)
11911 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
11912 else
11913 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
11914
11915 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
11916
11917 delay(20);
11918
11919 ctrl = CSR_READ(sc, WMREG_CTRL);
11920 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11921
11922 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
11923 tmp |= CTRL_FRCSPD;
11924
11925 CSR_WRITE(sc, WMREG_CTRL, tmp);
11926 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
11927 CSR_WRITE_FLUSH(sc);
11928 delay(20);
11929
11930 CSR_WRITE(sc, WMREG_CTRL, ctrl);
11931 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11932 CSR_WRITE_FLUSH(sc);
11933 delay(20);
11934 }
11935
11936 /* Special case: the 82575 needs manual init after reset ... */
11937 static void
11938 wm_reset_init_script_82575(struct wm_softc *sc)
11939 {
11940 /*
11941  * Remark: this is untested code - we have no board without EEPROM.
11942  * The setup is the same as the one in the FreeBSD driver for the i82575.
11943  */
11944
11945 /* SerDes configuration via SERDESCTRL */
11946 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
11947 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
11948 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
11949 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
11950
11951 /* CCM configuration via CCMCTL register */
11952 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
11953 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
11954
11955 /* PCIe lanes configuration */
11956 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
11957 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
11958 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
11959 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
11960
11961 /* PCIe PLL Configuration */
11962 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
11963 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
11964 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
11965 }
11966
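/*
 * wm_reset_mdicnfg_82580:
 *
 *	On 82580 SGMII ports, restore the MDICNFG destination and shared
 *	MDIO bits from the NVM after a reset has cleared them.
 */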
11967 static void
11968 wm_reset_mdicnfg_82580(struct wm_softc *sc)
11969 {
11970 uint32_t reg;
11971 uint16_t nvmword;
11972 int rv;
11973
11974 if ((sc->sc_flags & WM_F_SGMII) == 0)
11975 return;
11976
11977 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
11978 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
11979 if (rv != 0) {
11980 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
11981 __func__);
11982 return;
11983 }
11984
11985 reg = CSR_READ(sc, WMREG_MDICNFG);
11986 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
11987 reg |= MDICNFG_DEST;
11988 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
11989 reg |= MDICNFG_COM_MDIO;
11990 CSR_WRITE(sc, WMREG_MDICNFG, reg);
11991 }
11992
11993 /*
11994 * I210 Errata 25 and I211 Errata 10
11995 * Slow System Clock.
11996 */
11997 static void
11998 wm_pll_workaround_i210(struct wm_softc *sc)
11999 {
12000 uint32_t mdicnfg, wuc;
12001 uint32_t reg;
12002 pcireg_t pcireg;
12003 uint32_t pmreg;
12004 uint16_t nvmword, tmp_nvmword;
12005 int phyval;
12006 bool wa_done = false;
12007 int i;
12008
12009 /* Save WUC and MDICNFG registers */
12010 wuc = CSR_READ(sc, WMREG_WUC);
12011 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
12012
12013 reg = mdicnfg & ~MDICNFG_DEST;
12014 CSR_WRITE(sc, WMREG_MDICNFG, reg);
12015
12016 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
12017 nvmword = INVM_DEFAULT_AL;
12018 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
12019
12020 /* Get Power Management cap offset */
12021 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
12022 &pmreg, NULL) == 0)
12023 return;
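/*
 * Check whether the PHY PLL came up; if it did not, cycle the chip
 * through D3/D0 with a modified iNVM autoload word and try again
 * (at most WM_MAX_PLL_TRIES times).
 */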
12024 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
12025 phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
12026 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
12027
12028 if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
12029 break; /* OK */
12030 }
12031
12032 wa_done = true;
12033 /* Directly reset the internal PHY */
12034 reg = CSR_READ(sc, WMREG_CTRL);
12035 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
12036
12037 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12038 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
12039 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12040
12041 CSR_WRITE(sc, WMREG_WUC, 0);
12042 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
12043 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
12044
12045 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
12046 pmreg + PCI_PMCSR);
12047 pcireg |= PCI_PMCSR_STATE_D3;
12048 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
12049 pmreg + PCI_PMCSR, pcireg);
12050 delay(1000);
12051 pcireg &= ~PCI_PMCSR_STATE_D3;
12052 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
12053 pmreg + PCI_PMCSR, pcireg);
12054
12055 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
12056 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
12057
12058 /* Restore WUC register */
12059 CSR_WRITE(sc, WMREG_WUC, wuc);
12060 }
12061
12062 /* Restore MDICNFG setting */
12063 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
12064 if (wa_done)
12065 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
12066 }
12067