/* $NetBSD: if_wm.c,v 1.769 2023/05/11 06:56:49 msaitoh Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.769 2023/05/11 06:56:49 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>		/* XXX for struct ip */
#include <netinet/in_systm.h>	/* XXX for struct ip */
#include <netinet/ip.h>		/* XXX for struct ip */
#include <netinet/ip6.h>	/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>	/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define WM_DEBUG_LINK	__BIT(0)
#define WM_DEBUG_TX	__BIT(1)
#define WM_DEBUG_RX	__BIT(2)
#define WM_DEBUG_GMII	__BIT(3)
#define WM_DEBUG_MANAGE	__BIT(4)
#define WM_DEBUG_NVM	__BIT(5)
#define WM_DEBUG_INIT	__BIT(6)
#define WM_DEBUG_LOCK	__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
    WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | \
    WM_DEBUG_LOCK
#endif

#define DPRINTF(sc, x, y)		  \
	do {				  \
		if ((sc)->sc_debug & (x)) \
			printf y;	  \
	} while (0)
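/*
 * Illustrative note: since the macro expands to a bare "printf y", the
 * third argument must carry its own parentheses, e.g.
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link up\n", device_xname(sc->sc_dev)));
 */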
#else
#define DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts that this device driver can use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define WM_NTXSEGS		64
#define WM_IFQUEUELEN		256
#define WM_TXQUEUELEN_MAX	64
#define WM_TXQUEUELEN_MAX_82547	16
#define WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define WM_NTXDESC_82542	256
#define WM_NTXDESC_82544	4096
#define WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
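/*
 * Illustrative note: because the descriptor count and the job count are
 * both powers of two, the "& mask" above implements cheap modular wrap,
 * e.g. with WM_NTXDESC(txq) == 4096, WM_NEXTTX(txq, 4095) == 0.
 */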

#define WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

#define WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC		256U
#define WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
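/*
 * Illustrative note: WM_NEXTRX()/WM_PREVRX() likewise wrap modulo the
 * power-of-two ring size, so WM_PREVRX(0) == 255 and WM_NEXTRX(255) == 0.
 */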

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
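/*
 * Illustrative note: these compute the byte offset of descriptor "x"
 * within the ring's control-data DMA area; WM_CDTXADDR()/WM_CDRXADDR()
 * below add them to the ring's base DMA address.
 */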

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
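/*
 * Illustrative note: the table above maps a MAC function number (see
 * sc_funcid below) to the SWFW semaphore bit protecting that function's
 * PHY.
 */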

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)
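/*
 * Illustrative expansion: WM_Q_EVCNT_ATTACH(txq, tso, q, 0, xname,
 * EVCNT_TYPE_MISC) formats the counter name "txq00tso" into
 * q->txq_tso_evcnt_name and attaches q->txq_ev_tso under that name.
 */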

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define txq_descs	txq_descs_u->sctxu_txdescs
#define txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define WM_TXQ_NO_SPACE		0x1
#define WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t txq_last_hw_cmd;
	uint8_t txq_last_hw_fields;
	uint16_t txq_last_hw_ipcs;
	uint16_t txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define rxq_descs	rxq_descs_u->sctxu_rxdescs
#define rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	bool wmq_wq_enqueued;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define WM_MEDIATYPE_UNKNOWN	0x00
#define WM_MEDIATYPE_FIBER	0x01
#define WM_MEDIATYPE_COPPER	0x02
#define WM_MEDIATYPE_SERDES	0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to
					 * sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx unsupported MAC ctrl frames */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_sec;		/* Sequence Error */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx Count */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	struct evcnt sc_ev_icrxdmtc;	/* Intr. Cause Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */

#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
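/*
 * Illustrative note: rxq_tailp always points at the m_next slot of the
 * last mbuf in the chain (or at rxq_head when the chain is empty), so
 * WM_RXCHAIN_LINK() appends in O(1) without walking the chain.
 */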

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
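/*
 * Illustrative note: the relaxed load/store pair above only guarantees
 * that the 64-bit counter is read and written untorn; it is not an
 * atomic read-modify-write, presumably acceptable because updaters of a
 * given counter are already serialized by the driver's locks.
 */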
#else
#define WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define WM_EVCNT_INCR(ev)	/* nothing */
#define WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
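/*
 * Illustrative note: CSR_WRITE_FLUSH() forces any posted PCI writes to
 * reach the device by issuing a harmless read of the STATUS register;
 * reads cannot be posted, so they drain the write buffers ahead of them.
 */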

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	    (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	    (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
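/*
 * Illustrative note: the hardware takes descriptor base addresses as
 * separate low/high 32-bit halves; on builds with a 32-bit bus_addr_t
 * the high half constant-folds to 0.
 */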

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static bool	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
static void	wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and reading */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds live in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
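/*
 * Illustrative note: CFATTACH_DECL3_NEW() registers the match/attach/
 * detach entry points with autoconf(9); DVF_DETACH_SHUTDOWN marks the
 * driver as safe to detach at system shutdown.
 */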

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	wm_chip_type wmp_type;
	uint32_t wmp_flags;
#define WMP_F_UNKNOWN	WM_MEDIATYPE_UNKNOWN
#define WMP_F_FIBER	WM_MEDIATYPE_FIBER
#define WMP_F_COPPER	WM_MEDIATYPE_COPPER
#define WMP_F_SERDES	WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
1477 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
1478 "PCH2 LAN (82579V) Controller",
1479 WM_T_PCH2, WMP_F_COPPER },
1480 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
1481 "82575EB dual-1000baseT Ethernet",
1482 WM_T_82575, WMP_F_COPPER },
1483 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1484 "82575EB dual-1000baseX Ethernet (SERDES)",
1485 WM_T_82575, WMP_F_SERDES },
1486 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1487 "82575GB quad-1000baseT Ethernet",
1488 WM_T_82575, WMP_F_COPPER },
1489 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1490 "82575GB quad-1000baseT Ethernet (PM)",
1491 WM_T_82575, WMP_F_COPPER },
1492 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
1493 "82576 1000BaseT Ethernet",
1494 WM_T_82576, WMP_F_COPPER },
1495 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
1496 "82576 1000BaseX Ethernet",
1497 WM_T_82576, WMP_F_FIBER },
1498
1499 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
1500 "82576 gigabit Ethernet (SERDES)",
1501 WM_T_82576, WMP_F_SERDES },
1502
1503 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1504 "82576 quad-1000BaseT Ethernet",
1505 WM_T_82576, WMP_F_COPPER },
1506
1507 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1508 "82576 Gigabit ET2 Quad Port Server Adapter",
1509 WM_T_82576, WMP_F_COPPER },
1510
1511 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
1512 "82576 gigabit Ethernet",
1513 WM_T_82576, WMP_F_COPPER },
1514
1515 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
1516 "82576 gigabit Ethernet (SERDES)",
1517 WM_T_82576, WMP_F_SERDES },
1518 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1519 "82576 quad-gigabit Ethernet (SERDES)",
1520 WM_T_82576, WMP_F_SERDES },
1521
1522 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
1523 "82580 1000BaseT Ethernet",
1524 WM_T_82580, WMP_F_COPPER },
1525 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
1526 "82580 1000BaseX Ethernet",
1527 WM_T_82580, WMP_F_FIBER },
1528
1529 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
1530 "82580 1000BaseT Ethernet (SERDES)",
1531 WM_T_82580, WMP_F_SERDES },
1532
1533 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
1534 "82580 gigabit Ethernet (SGMII)",
1535 WM_T_82580, WMP_F_COPPER },
1536 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1537 "82580 dual-1000BaseT Ethernet",
1538 WM_T_82580, WMP_F_COPPER },
1539
1540 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1541 "82580 quad-1000BaseX Ethernet",
1542 WM_T_82580, WMP_F_FIBER },
1543
1544 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1545 "DH89XXCC Gigabit Ethernet (SGMII)",
1546 WM_T_82580, WMP_F_COPPER },
1547
1548 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1549 "DH89XXCC Gigabit Ethernet (SERDES)",
1550 WM_T_82580, WMP_F_SERDES },
1551
1552 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1553 "DH89XXCC 1000BASE-KX Ethernet",
1554 WM_T_82580, WMP_F_SERDES },
1555
1556 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1557 "DH89XXCC Gigabit Ethernet (SFP)",
1558 WM_T_82580, WMP_F_SERDES },
1559
1560 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1561 "I350 Gigabit Network Connection",
1562 WM_T_I350, WMP_F_COPPER },
1563
1564 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1565 "I350 Gigabit Fiber Network Connection",
1566 WM_T_I350, WMP_F_FIBER },
1567
1568 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1569 "I350 Gigabit Backplane Connection",
1570 WM_T_I350, WMP_F_SERDES },
1571
1572 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
1573 "I350 Quad Port Gigabit Ethernet",
1574 WM_T_I350, WMP_F_SERDES },
1575
1576 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1577 "I350 Gigabit Connection",
1578 WM_T_I350, WMP_F_COPPER },
1579
1580 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
1581 "I354 Gigabit Ethernet (KX)",
1582 WM_T_I354, WMP_F_SERDES },
1583
1584 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1585 "I354 Gigabit Ethernet (SGMII)",
1586 WM_T_I354, WMP_F_COPPER },
1587
1588 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
1589 "I354 Gigabit Ethernet (2.5G)",
1590 WM_T_I354, WMP_F_COPPER },
1591
1592 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1593 "I210-T1 Ethernet Server Adapter",
1594 WM_T_I210, WMP_F_COPPER },
1595
1596 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1597 "I210 Ethernet (Copper OEM)",
1598 WM_T_I210, WMP_F_COPPER },
1599
1600 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1601 "I210 Ethernet (Copper IT)",
1602 WM_T_I210, WMP_F_COPPER },
1603
1604 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1605 "I210 Ethernet (Copper, FLASH less)",
1606 WM_T_I210, WMP_F_COPPER },
1607
1608 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1609 "I210 Gigabit Ethernet (Fiber)",
1610 WM_T_I210, WMP_F_FIBER },
1611
1612 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1613 "I210 Gigabit Ethernet (SERDES)",
1614 WM_T_I210, WMP_F_SERDES },
1615
1616 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1617 "I210 Gigabit Ethernet (SERDES, FLASH less)",
1618 WM_T_I210, WMP_F_SERDES },
1619
1620 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1621 "I210 Gigabit Ethernet (SGMII)",
1622 WM_T_I210, WMP_F_COPPER },
1623
1624 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1625 "I210 Gigabit Ethernet (SGMII, FLASH less)",
1626 WM_T_I210, WMP_F_COPPER },
1627
1628 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1629 "I211 Ethernet (COPPER)",
1630 WM_T_I211, WMP_F_COPPER },
1631 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1632 "I217 V Ethernet Connection",
1633 WM_T_PCH_LPT, WMP_F_COPPER },
1634 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1635 "I217 LM Ethernet Connection",
1636 WM_T_PCH_LPT, WMP_F_COPPER },
1637 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1638 "I218 V Ethernet Connection",
1639 WM_T_PCH_LPT, WMP_F_COPPER },
1640 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
1641 "I218 V Ethernet Connection",
1642 WM_T_PCH_LPT, WMP_F_COPPER },
1643 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
1644 "I218 V Ethernet Connection",
1645 WM_T_PCH_LPT, WMP_F_COPPER },
1646 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1647 "I218 LM Ethernet Connection",
1648 WM_T_PCH_LPT, WMP_F_COPPER },
1649 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
1650 "I218 LM Ethernet Connection",
1651 WM_T_PCH_LPT, WMP_F_COPPER },
1652 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
1653 "I218 LM Ethernet Connection",
1654 WM_T_PCH_LPT, WMP_F_COPPER },
1655 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM,
1656 "I219 LM Ethernet Connection",
1657 WM_T_PCH_SPT, WMP_F_COPPER },
1658 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2,
1659 "I219 LM (2) Ethernet Connection",
1660 WM_T_PCH_SPT, WMP_F_COPPER },
1661 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3,
1662 "I219 LM (3) Ethernet Connection",
1663 WM_T_PCH_SPT, WMP_F_COPPER },
1664 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4,
1665 "I219 LM (4) Ethernet Connection",
1666 WM_T_PCH_SPT, WMP_F_COPPER },
1667 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5,
1668 "I219 LM (5) Ethernet Connection",
1669 WM_T_PCH_SPT, WMP_F_COPPER },
1670 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM6,
1671 "I219 LM (6) Ethernet Connection",
1672 WM_T_PCH_CNP, WMP_F_COPPER },
1673 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM7,
1674 "I219 LM (7) Ethernet Connection",
1675 WM_T_PCH_CNP, WMP_F_COPPER },
1676 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM8,
1677 "I219 LM (8) Ethernet Connection",
1678 WM_T_PCH_CNP, WMP_F_COPPER },
1679 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM9,
1680 "I219 LM (9) Ethernet Connection",
1681 WM_T_PCH_CNP, WMP_F_COPPER },
1682 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM10,
1683 "I219 LM (10) Ethernet Connection",
1684 WM_T_PCH_CNP, WMP_F_COPPER },
1685 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM11,
1686 "I219 LM (11) Ethernet Connection",
1687 WM_T_PCH_CNP, WMP_F_COPPER },
1688 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM12,
1689 "I219 LM (12) Ethernet Connection",
1690 WM_T_PCH_SPT, WMP_F_COPPER },
1691 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM13,
1692 "I219 LM (13) Ethernet Connection",
1693 WM_T_PCH_CNP, WMP_F_COPPER },
1694 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM14,
1695 "I219 LM (14) Ethernet Connection",
1696 WM_T_PCH_CNP, WMP_F_COPPER },
1697 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM15,
1698 "I219 LM (15) Ethernet Connection",
1699 WM_T_PCH_CNP, WMP_F_COPPER },
1700 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM16,
1701 "I219 LM (16) Ethernet Connection",
1702 WM_T_PCH_CNP, WMP_F_COPPER },
1703 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM17,
1704 "I219 LM (17) Ethernet Connection",
1705 WM_T_PCH_CNP, WMP_F_COPPER },
1706 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM18,
1707 "I219 LM (18) Ethernet Connection",
1708 WM_T_PCH_CNP, WMP_F_COPPER },
1709 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM19,
1710 "I219 LM (19) Ethernet Connection",
1711 WM_T_PCH_CNP, WMP_F_COPPER },
1712 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V,
1713 "I219 V Ethernet Connection",
1714 WM_T_PCH_SPT, WMP_F_COPPER },
1715 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2,
1716 "I219 V (2) Ethernet Connection",
1717 WM_T_PCH_SPT, WMP_F_COPPER },
1718 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4,
1719 "I219 V (4) Ethernet Connection",
1720 WM_T_PCH_SPT, WMP_F_COPPER },
1721 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5,
1722 "I219 V (5) Ethernet Connection",
1723 WM_T_PCH_SPT, WMP_F_COPPER },
1724 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6,
1725 "I219 V (6) Ethernet Connection",
1726 WM_T_PCH_CNP, WMP_F_COPPER },
1727 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7,
1728 "I219 V (7) Ethernet Connection",
1729 WM_T_PCH_CNP, WMP_F_COPPER },
1730 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V8,
1731 "I219 V (8) Ethernet Connection",
1732 WM_T_PCH_CNP, WMP_F_COPPER },
1733 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V9,
1734 "I219 V (9) Ethernet Connection",
1735 WM_T_PCH_CNP, WMP_F_COPPER },
1736 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V10,
1737 "I219 V (10) Ethernet Connection",
1738 WM_T_PCH_CNP, WMP_F_COPPER },
1739 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V11,
1740 "I219 V (11) Ethernet Connection",
1741 WM_T_PCH_CNP, WMP_F_COPPER },
1742 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V12,
1743 "I219 V (12) Ethernet Connection",
1744 WM_T_PCH_SPT, WMP_F_COPPER },
1745 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V13,
1746 "I219 V (13) Ethernet Connection",
1747 WM_T_PCH_CNP, WMP_F_COPPER },
1748 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V14,
1749 "I219 V (14) Ethernet Connection",
1750 WM_T_PCH_CNP, WMP_F_COPPER },
1751 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V15,
1752 "I219 V (15) Ethernet Connection",
1753 WM_T_PCH_CNP, WMP_F_COPPER },
1754 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V16,
1755 "I219 V (16) Ethernet Connection",
1756 WM_T_PCH_CNP, WMP_F_COPPER },
1757 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V17,
1758 "I219 V (17) Ethernet Connection",
1759 WM_T_PCH_CNP, WMP_F_COPPER },
1760 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V18,
1761 "I219 V (18) Ethernet Connection",
1762 WM_T_PCH_CNP, WMP_F_COPPER },
1763 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V19,
1764 "I219 V (19) Ethernet Connection",
1765 WM_T_PCH_CNP, WMP_F_COPPER },
1766 { 0, 0,
1767 NULL,
1768 0, 0 },
1769 };
1770
1771 /*
1772 * Register read/write functions.
1773 * These are the accessors other than CSR_{READ|WRITE}().
1774 */
1775
1776 #if 0 /* Not currently used */
1777 static inline uint32_t
1778 wm_io_read(struct wm_softc *sc, int reg)
1779 {
1780
1781 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1782 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1783 }
1784 #endif
1785
1786 static inline void
1787 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1788 {
1789
1790 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1791 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1792 }
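/*
 * Usage sketch (illustrative, assuming the CTRL_RST definition from
 * if_wmreg.h): wm_io_write(sc, WMREG_CTRL, CTRL_RST) would reset the
 * chip through I/O space. Offset 0 is the indirect address window and
 * offset 4 the data window, as the two bus_space_write_4() calls show.
 */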
1793
1794 static inline void
1795 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1796 uint32_t data)
1797 {
1798 uint32_t regval;
1799 int i;
1800
1801 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1802
1803 CSR_WRITE(sc, reg, regval);
1804
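	/* Poll READY; this bounds the wait to SCTL_CTL_POLL_TIMEOUT * 5us. */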
1805 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1806 delay(5);
1807 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1808 break;
1809 }
1810 if (i == SCTL_CTL_POLL_TIMEOUT) {
1811 aprint_error("%s: WARNING:"
1812 " i82575 reg 0x%08x setup did not indicate ready\n",
1813 device_xname(sc->sc_dev), reg);
1814 }
1815 }
1816
1817 static inline void
1818 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1819 {
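	/*
	 * Worked example (illustrative value): v == 0x123456780 yields
	 * wa_low = htole32(0x23456780) and wa_high = htole32(0x00000001).
	 */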
1820 wa->wa_low = htole32(BUS_ADDR_LO32(v));
1821 wa->wa_high = htole32(BUS_ADDR_HI32(v));
1822 }
1823
1824 /*
1825 * Descriptor sync/init functions.
1826 */
1827 static inline void
1828 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1829 {
1830 struct wm_softc *sc = txq->txq_sc;
1831
1832 /* If it will wrap around, sync to the end of the ring. */
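	/*
	 * Worked example (illustrative numbers): with 256 descriptors,
	 * start == 250 and num == 10, this sync covers 250..255 and
	 * start/num become 0/4 for the tail sync below.
	 */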
1833 if ((start + num) > WM_NTXDESC(txq)) {
1834 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1835 WM_CDTXOFF(txq, start), txq->txq_descsize *
1836 (WM_NTXDESC(txq) - start), ops);
1837 num -= (WM_NTXDESC(txq) - start);
1838 start = 0;
1839 }
1840
1841 /* Now sync whatever is left. */
1842 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1843 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1844 }
1845
1846 static inline void
1847 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1848 {
1849 struct wm_softc *sc = rxq->rxq_sc;
1850
1851 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1852 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1853 }
1854
1855 static inline void
1856 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1857 {
1858 struct wm_softc *sc = rxq->rxq_sc;
1859 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1860 struct mbuf *m = rxs->rxs_mbuf;
1861
1862 /*
1863 * Note: We scoot the packet forward 2 bytes in the buffer
1864 * so that the payload after the Ethernet header is aligned
1865 * to a 4-byte boundary.
1866 *
1867 * XXX BRAINDAMAGE ALERT!
1868 * The stupid chip uses the same size for every buffer, which
1869 * is set in the Receive Control register. We are using the 2K
1870 * size option, but what we REALLY want is (2K - 2)! For this
1871 * reason, we can't "scoot" packets longer than the standard
1872 * Ethernet MTU. On strict-alignment platforms, if the total
1873 * size exceeds (2K - 2) we set align_tweak to 0 and let
1874 * the upper layer copy the headers.
1875 */
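	/*
	 * Concretely: with an align_tweak of 2, the 14-byte Ethernet header
	 * occupies buffer offsets 2..15, so the IP header starts at offset
	 * 16, a 4-byte boundary.
	 */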
1876 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1877
1878 if (sc->sc_type == WM_T_82574) {
1879 ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1880 rxd->erx_data.erxd_addr =
1881 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1882 rxd->erx_data.erxd_dd = 0;
1883 } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1884 nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1885
1886 rxd->nqrx_data.nrxd_paddr =
1887 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1888 /* Currently, split header is not supported. */
1889 rxd->nqrx_data.nrxd_haddr = 0;
1890 } else {
1891 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1892
1893 wm_set_dma_addr(&rxd->wrx_addr,
1894 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1895 rxd->wrx_len = 0;
1896 rxd->wrx_cksum = 0;
1897 rxd->wrx_status = 0;
1898 rxd->wrx_errors = 0;
1899 rxd->wrx_special = 0;
1900 }
1901 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1902
1903 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1904 }
1905
1906 /*
1907 * Device driver interface functions and commonly used functions.
1908 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1909 */
1910
1911 /* Lookup supported device table */
1912 static const struct wm_product *
1913 wm_lookup(const struct pci_attach_args *pa)
1914 {
1915 const struct wm_product *wmp;
1916
1917 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1918 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1919 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1920 return wmp;
1921 }
1922 return NULL;
1923 }
1924
1925 /* The match function (ca_match) */
1926 static int
1927 wm_match(device_t parent, cfdata_t cf, void *aux)
1928 {
1929 struct pci_attach_args *pa = aux;
1930
1931 if (wm_lookup(pa) != NULL)
1932 return 1;
1933
1934 return 0;
1935 }
1936
1937 /* The attach function (ca_attach) */
1938 static void
1939 wm_attach(device_t parent, device_t self, void *aux)
1940 {
1941 struct wm_softc *sc = device_private(self);
1942 struct pci_attach_args *pa = aux;
1943 prop_dictionary_t dict;
1944 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1945 pci_chipset_tag_t pc = pa->pa_pc;
1946 int counts[PCI_INTR_TYPE_SIZE];
1947 pci_intr_type_t max_type;
1948 const char *eetype, *xname;
1949 bus_space_tag_t memt;
1950 bus_space_handle_t memh;
1951 bus_size_t memsize;
1952 int memh_valid;
1953 int i, error;
1954 const struct wm_product *wmp;
1955 prop_data_t ea;
1956 prop_number_t pn;
1957 uint8_t enaddr[ETHER_ADDR_LEN];
1958 char buf[256];
1959 char wqname[MAXCOMLEN];
1960 uint16_t cfg1, cfg2, swdpin, nvmword;
1961 pcireg_t preg, memtype;
1962 uint16_t eeprom_data, apme_mask;
1963 bool force_clear_smbi;
1964 uint32_t link_mode;
1965 uint32_t reg;
1966
1967 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
1968 sc->sc_debug = WM_DEBUG_DEFAULT;
1969 #endif
1970 sc->sc_dev = self;
1971 callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
1972 callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
1973 sc->sc_core_stopping = false;
1974
1975 wmp = wm_lookup(pa);
1976 #ifdef DIAGNOSTIC
1977 if (wmp == NULL) {
1978 printf("\n");
1979 panic("wm_attach: impossible");
1980 }
1981 #endif
1982 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1983
1984 sc->sc_pc = pa->pa_pc;
1985 sc->sc_pcitag = pa->pa_tag;
1986
1987 if (pci_dma64_available(pa)) {
1988 aprint_verbose(", 64-bit DMA");
1989 sc->sc_dmat = pa->pa_dmat64;
1990 } else {
1991 aprint_verbose(", 32-bit DMA");
1992 sc->sc_dmat = pa->pa_dmat;
1993 }
1994
1995 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1996 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1997 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1998
1999 sc->sc_type = wmp->wmp_type;
2000
2001 /* Set default function pointers */
2002 sc->phy.acquire = sc->nvm.acquire = wm_get_null;
2003 sc->phy.release = sc->nvm.release = wm_put_null;
2004 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
2005
2006 if (sc->sc_type < WM_T_82543) {
2007 if (sc->sc_rev < 2) {
2008 aprint_error_dev(sc->sc_dev,
2009 "i82542 must be at least rev. 2\n");
2010 return;
2011 }
2012 if (sc->sc_rev < 3)
2013 sc->sc_type = WM_T_82542_2_0;
2014 }
2015
2016 /*
2017 * Disable MSI for Errata:
2018 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
2019 *
2020 * 82544: Errata 25
2021 * 82540: Errata 6 (easy to reproduce device timeout)
2022 * 82545: Errata 4 (easy to reproduce device timeout)
2023 * 82546: Errata 26 (easy to reproduce device timeout)
2024 * 82541: Errata 7 (easy to reproduce device timeout)
2025 *
2026 * "Byte Enables 2 and 3 are not set on MSI writes"
2027 *
2028 * 82571 & 82572: Errata 63
2029 */
2030 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
2031 || (sc->sc_type == WM_T_82572))
2032 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
2033
2034 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2035 || (sc->sc_type == WM_T_82580)
2036 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2037 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2038 sc->sc_flags |= WM_F_NEWQUEUE;
2039
2040 /* Set device properties (mactype) */
2041 dict = device_properties(sc->sc_dev);
2042 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
2043
2044 /*
2045 * Map the device. All devices support memory-mapped access,
2046 * and it is really required for normal operation.
2047 */
2048 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
2049 switch (memtype) {
2050 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2051 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2052 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
2053 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
2054 break;
2055 default:
2056 memh_valid = 0;
2057 break;
2058 }
2059
2060 if (memh_valid) {
2061 sc->sc_st = memt;
2062 sc->sc_sh = memh;
2063 sc->sc_ss = memsize;
2064 } else {
2065 aprint_error_dev(sc->sc_dev,
2066 "unable to map device registers\n");
2067 return;
2068 }
2069
2070 /*
2071 * In addition, i82544 and later support I/O mapped indirect
2072 * register access. It is not desirable (nor supported in
2073 * this driver) to use it for normal operation, though it is
2074 * required to work around bugs in some chip versions.
2075 */
2076 switch (sc->sc_type) {
2077 case WM_T_82544:
2078 case WM_T_82541:
2079 case WM_T_82541_2:
2080 case WM_T_82547:
2081 case WM_T_82547_2:
2082 /* First we have to find the I/O BAR. */
2083 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
2084 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
2085 if (memtype == PCI_MAPREG_TYPE_IO)
2086 break;
2087 if (PCI_MAPREG_MEM_TYPE(memtype) ==
2088 PCI_MAPREG_MEM_TYPE_64BIT)
2089 i += 4; /* skip high bits, too */
2090 }
2091 if (i < PCI_MAPREG_END) {
2092 /*
2093 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
2094 * (and newer?) chips have no PCI_MAPREG_TYPE_IO. That's
2095 * not a problem, because newer chips don't have this
2096 * bug.
2097 *
2098 * The i8254x apparently doesn't respond when the
2099 * I/O BAR is 0, which looks somewhat like it hasn't
2100 * been configured.
2101 */
2102 preg = pci_conf_read(pc, pa->pa_tag, i);
2103 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
2104 aprint_error_dev(sc->sc_dev,
2105 "WARNING: I/O BAR at zero.\n");
2106 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
2107 0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
2108 == 0) {
2109 sc->sc_flags |= WM_F_IOH_VALID;
2110 } else
2111 aprint_error_dev(sc->sc_dev,
2112 "WARNING: unable to map I/O space\n");
2113 }
2114 break;
2115 default:
2116 break;
2117 }
2118
2119 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
2120 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2121 preg |= PCI_COMMAND_MASTER_ENABLE;
2122 if (sc->sc_type < WM_T_82542_2_1)
2123 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
2124 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
2125
2126 /* Power up chip */
2127 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
2128 && error != EOPNOTSUPP) {
2129 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
2130 return;
2131 }
2132
2133 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2134 /*
2135 * Don't use MSI-X if we can use only one queue, to save
2136 * interrupt resources.
2137 */
2138 if (sc->sc_nqueues > 1) {
2139 max_type = PCI_INTR_TYPE_MSIX;
2140 /*
2141 * 82583 has a MSI-X capability in the PCI configuration space
2142 * but it doesn't support it. At least the document doesn't
2143 * say anything about MSI-X.
2144 */
2145 counts[PCI_INTR_TYPE_MSIX]
2146 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2147 } else {
2148 max_type = PCI_INTR_TYPE_MSI;
2149 counts[PCI_INTR_TYPE_MSIX] = 0;
2150 }
2151
2152 /* Allocation settings */
2153 counts[PCI_INTR_TYPE_MSI] = 1;
2154 counts[PCI_INTR_TYPE_INTX] = 1;
2155 /* overridden by disable flags */
2156 if (wm_disable_msi != 0) {
2157 counts[PCI_INTR_TYPE_MSI] = 0;
2158 if (wm_disable_msix != 0) {
2159 max_type = PCI_INTR_TYPE_INTX;
2160 counts[PCI_INTR_TYPE_MSIX] = 0;
2161 }
2162 } else if (wm_disable_msix != 0) {
2163 max_type = PCI_INTR_TYPE_MSI;
2164 counts[PCI_INTR_TYPE_MSIX] = 0;
2165 }
2166
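	/*
	 * The allocation below falls back MSI-X -> MSI -> INTx: each failed
	 * setup releases its vectors, lowers max_type and retries from here.
	 */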
2167 alloc_retry:
2168 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2169 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2170 return;
2171 }
2172
2173 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2174 error = wm_setup_msix(sc);
2175 if (error) {
2176 pci_intr_release(pc, sc->sc_intrs,
2177 counts[PCI_INTR_TYPE_MSIX]);
2178
2179 /* Setup for MSI: Disable MSI-X */
2180 max_type = PCI_INTR_TYPE_MSI;
2181 counts[PCI_INTR_TYPE_MSI] = 1;
2182 counts[PCI_INTR_TYPE_INTX] = 1;
2183 goto alloc_retry;
2184 }
2185 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2186 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2187 error = wm_setup_legacy(sc);
2188 if (error) {
2189 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2190 counts[PCI_INTR_TYPE_MSI]);
2191
2192 /* The next try is for INTx: Disable MSI */
2193 max_type = PCI_INTR_TYPE_INTX;
2194 counts[PCI_INTR_TYPE_INTX] = 1;
2195 goto alloc_retry;
2196 }
2197 } else {
2198 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2199 error = wm_setup_legacy(sc);
2200 if (error) {
2201 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2202 counts[PCI_INTR_TYPE_INTX]);
2203 return;
2204 }
2205 }
2206
2207 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2208 error = workqueue_create(&sc->sc_queue_wq, wqname,
2209 wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2210 WQ_PERCPU | WQ_MPSAFE);
2211 if (error) {
2212 aprint_error_dev(sc->sc_dev,
2213 "unable to create TxRx workqueue\n");
2214 goto out;
2215 }
2216
2217 snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
2218 error = workqueue_create(&sc->sc_reset_wq, wqname,
2219 wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
2220 WQ_MPSAFE);
2221 if (error) {
2222 workqueue_destroy(sc->sc_queue_wq);
2223 aprint_error_dev(sc->sc_dev,
2224 "unable to create reset workqueue\n");
2225 goto out;
2226 }
2227
2228 /*
2229 * Check the function ID (unit number of the chip).
2230 */
2231 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2232 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2233 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2234 || (sc->sc_type == WM_T_82580)
2235 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2236 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2237 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2238 else
2239 sc->sc_funcid = 0;
2240
2241 /*
2242 * Determine a few things about the bus we're connected to.
2243 */
2244 if (sc->sc_type < WM_T_82543) {
2245 /* We don't really know the bus characteristics here. */
2246 sc->sc_bus_speed = 33;
2247 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2248 /*
2249 * CSA (Communication Streaming Architecture) is about as fast
2250 * as a 32-bit 66MHz PCI bus.
2251 */
2252 sc->sc_flags |= WM_F_CSA;
2253 sc->sc_bus_speed = 66;
2254 aprint_verbose_dev(sc->sc_dev,
2255 "Communication Streaming Architecture\n");
2256 if (sc->sc_type == WM_T_82547) {
2257 callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
2258 callout_setfunc(&sc->sc_txfifo_ch,
2259 wm_82547_txfifo_stall, sc);
2260 aprint_verbose_dev(sc->sc_dev,
2261 "using 82547 Tx FIFO stall work-around\n");
2262 }
2263 } else if (sc->sc_type >= WM_T_82571) {
2264 sc->sc_flags |= WM_F_PCIE;
2265 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2266 && (sc->sc_type != WM_T_ICH10)
2267 && (sc->sc_type != WM_T_PCH)
2268 && (sc->sc_type != WM_T_PCH2)
2269 && (sc->sc_type != WM_T_PCH_LPT)
2270 && (sc->sc_type != WM_T_PCH_SPT)
2271 && (sc->sc_type != WM_T_PCH_CNP)) {
2272 /* ICH* and PCH* have no PCIe capability registers */
2273 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2274 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2275 NULL) == 0)
2276 aprint_error_dev(sc->sc_dev,
2277 "unable to find PCIe capability\n");
2278 }
2279 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2280 } else {
2281 reg = CSR_READ(sc, WMREG_STATUS);
2282 if (reg & STATUS_BUS64)
2283 sc->sc_flags |= WM_F_BUS64;
2284 if ((reg & STATUS_PCIX_MODE) != 0) {
2285 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2286
2287 sc->sc_flags |= WM_F_PCIX;
2288 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2289 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2290 aprint_error_dev(sc->sc_dev,
2291 "unable to find PCIX capability\n");
2292 else if (sc->sc_type != WM_T_82545_3 &&
2293 sc->sc_type != WM_T_82546_3) {
2294 /*
2295 * Work around a problem caused by the BIOS
2296 * setting the max memory read byte count
2297 * incorrectly.
2298 */
2299 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2300 sc->sc_pcixe_capoff + PCIX_CMD);
2301 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2302 sc->sc_pcixe_capoff + PCIX_STATUS);
2303
2304 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2305 PCIX_CMD_BYTECNT_SHIFT;
2306 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2307 PCIX_STATUS_MAXB_SHIFT;
2308 if (bytecnt > maxb) {
2309 aprint_verbose_dev(sc->sc_dev,
2310 "resetting PCI-X MMRBC: %d -> %d\n",
2311 512 << bytecnt, 512 << maxb);
2312 pcix_cmd = (pcix_cmd &
2313 ~PCIX_CMD_BYTECNT_MASK) |
2314 (maxb << PCIX_CMD_BYTECNT_SHIFT);
2315 pci_conf_write(pa->pa_pc, pa->pa_tag,
2316 sc->sc_pcixe_capoff + PCIX_CMD,
2317 pcix_cmd);
2318 }
2319 }
2320 }
2321 /*
2322 * The quad port adapter is special; it has a PCIX-PCIX
2323 * bridge on the board, and can run the secondary bus at
2324 * a higher speed.
2325 */
2326 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2327 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2328 : 66;
2329 } else if (sc->sc_flags & WM_F_PCIX) {
2330 switch (reg & STATUS_PCIXSPD_MASK) {
2331 case STATUS_PCIXSPD_50_66:
2332 sc->sc_bus_speed = 66;
2333 break;
2334 case STATUS_PCIXSPD_66_100:
2335 sc->sc_bus_speed = 100;
2336 break;
2337 case STATUS_PCIXSPD_100_133:
2338 sc->sc_bus_speed = 133;
2339 break;
2340 default:
2341 aprint_error_dev(sc->sc_dev,
2342 "unknown PCIXSPD %d; assuming 66MHz\n",
2343 reg & STATUS_PCIXSPD_MASK);
2344 sc->sc_bus_speed = 66;
2345 break;
2346 }
2347 } else
2348 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2349 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2350 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2351 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2352 }
2353
2354 /* clear interesting stat counters */
2355 CSR_READ(sc, WMREG_COLC);
2356 CSR_READ(sc, WMREG_RXERRC);
2357
2358 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2359 || (sc->sc_type >= WM_T_ICH8))
2360 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2361 if (sc->sc_type >= WM_T_ICH8)
2362 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2363
2364 /* Set PHY, NVM mutex related stuff */
2365 switch (sc->sc_type) {
2366 case WM_T_82542_2_0:
2367 case WM_T_82542_2_1:
2368 case WM_T_82543:
2369 case WM_T_82544:
2370 /* Microwire */
2371 sc->nvm.read = wm_nvm_read_uwire;
2372 sc->sc_nvm_wordsize = 64;
2373 sc->sc_nvm_addrbits = 6;
2374 break;
2375 case WM_T_82540:
2376 case WM_T_82545:
2377 case WM_T_82545_3:
2378 case WM_T_82546:
2379 case WM_T_82546_3:
2380 /* Microwire */
2381 sc->nvm.read = wm_nvm_read_uwire;
2382 reg = CSR_READ(sc, WMREG_EECD);
2383 if (reg & EECD_EE_SIZE) {
2384 sc->sc_nvm_wordsize = 256;
2385 sc->sc_nvm_addrbits = 8;
2386 } else {
2387 sc->sc_nvm_wordsize = 64;
2388 sc->sc_nvm_addrbits = 6;
2389 }
2390 sc->sc_flags |= WM_F_LOCK_EECD;
2391 sc->nvm.acquire = wm_get_eecd;
2392 sc->nvm.release = wm_put_eecd;
2393 break;
2394 case WM_T_82541:
2395 case WM_T_82541_2:
2396 case WM_T_82547:
2397 case WM_T_82547_2:
2398 reg = CSR_READ(sc, WMREG_EECD);
2399 /*
2400 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
2401 * 8254[17], so set the flags and functions before calling it.
2402 */
2403 sc->sc_flags |= WM_F_LOCK_EECD;
2404 sc->nvm.acquire = wm_get_eecd;
2405 sc->nvm.release = wm_put_eecd;
2406 if (reg & EECD_EE_TYPE) {
2407 /* SPI */
2408 sc->nvm.read = wm_nvm_read_spi;
2409 sc->sc_flags |= WM_F_EEPROM_SPI;
2410 wm_nvm_set_addrbits_size_eecd(sc);
2411 } else {
2412 /* Microwire */
2413 sc->nvm.read = wm_nvm_read_uwire;
2414 if ((reg & EECD_EE_ABITS) != 0) {
2415 sc->sc_nvm_wordsize = 256;
2416 sc->sc_nvm_addrbits = 8;
2417 } else {
2418 sc->sc_nvm_wordsize = 64;
2419 sc->sc_nvm_addrbits = 6;
2420 }
2421 }
2422 break;
2423 case WM_T_82571:
2424 case WM_T_82572:
2425 /* SPI */
2426 sc->nvm.read = wm_nvm_read_eerd;
2427 /* Don't use WM_F_LOCK_EECD because we use EERD */
2428 sc->sc_flags |= WM_F_EEPROM_SPI;
2429 wm_nvm_set_addrbits_size_eecd(sc);
2430 sc->phy.acquire = wm_get_swsm_semaphore;
2431 sc->phy.release = wm_put_swsm_semaphore;
2432 sc->nvm.acquire = wm_get_nvm_82571;
2433 sc->nvm.release = wm_put_nvm_82571;
2434 break;
2435 case WM_T_82573:
2436 case WM_T_82574:
2437 case WM_T_82583:
2438 sc->nvm.read = wm_nvm_read_eerd;
2439 /* Don't use WM_F_LOCK_EECD because we use EERD */
2440 if (sc->sc_type == WM_T_82573) {
2441 sc->phy.acquire = wm_get_swsm_semaphore;
2442 sc->phy.release = wm_put_swsm_semaphore;
2443 sc->nvm.acquire = wm_get_nvm_82571;
2444 sc->nvm.release = wm_put_nvm_82571;
2445 } else {
2446 /* Both PHY and NVM use the same semaphore. */
2447 sc->phy.acquire = sc->nvm.acquire
2448 = wm_get_swfwhw_semaphore;
2449 sc->phy.release = sc->nvm.release
2450 = wm_put_swfwhw_semaphore;
2451 }
2452 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2453 sc->sc_flags |= WM_F_EEPROM_FLASH;
2454 sc->sc_nvm_wordsize = 2048;
2455 } else {
2456 /* SPI */
2457 sc->sc_flags |= WM_F_EEPROM_SPI;
2458 wm_nvm_set_addrbits_size_eecd(sc);
2459 }
2460 break;
2461 case WM_T_82575:
2462 case WM_T_82576:
2463 case WM_T_82580:
2464 case WM_T_I350:
2465 case WM_T_I354:
2466 case WM_T_80003:
2467 /* SPI */
2468 sc->sc_flags |= WM_F_EEPROM_SPI;
2469 wm_nvm_set_addrbits_size_eecd(sc);
2470 if ((sc->sc_type == WM_T_80003)
2471 || (sc->sc_nvm_wordsize < (1 << 15))) {
2472 sc->nvm.read = wm_nvm_read_eerd;
2473 /* Don't use WM_F_LOCK_EECD because we use EERD */
2474 } else {
2475 sc->nvm.read = wm_nvm_read_spi;
2476 sc->sc_flags |= WM_F_LOCK_EECD;
2477 }
2478 sc->phy.acquire = wm_get_phy_82575;
2479 sc->phy.release = wm_put_phy_82575;
2480 sc->nvm.acquire = wm_get_nvm_80003;
2481 sc->nvm.release = wm_put_nvm_80003;
2482 break;
2483 case WM_T_ICH8:
2484 case WM_T_ICH9:
2485 case WM_T_ICH10:
2486 case WM_T_PCH:
2487 case WM_T_PCH2:
2488 case WM_T_PCH_LPT:
2489 sc->nvm.read = wm_nvm_read_ich8;
2490 /* FLASH */
2491 sc->sc_flags |= WM_F_EEPROM_FLASH;
2492 sc->sc_nvm_wordsize = 2048;
2493 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2494 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2495 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2496 aprint_error_dev(sc->sc_dev,
2497 "can't map FLASH registers\n");
2498 goto out;
2499 }
2500 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2501 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2502 ICH_FLASH_SECTOR_SIZE;
2503 sc->sc_ich8_flash_bank_size =
2504 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2505 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2506 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2507 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
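		/*
		 * Worked example (illustrative GFPREG value 0x00110008): the
		 * base field is sector 0x008 and the limit field is 0x011,
		 * so the region spans (0x011 + 1) - 0x008 = 10 sectors.
		 * Multiplying by ICH_FLASH_SECTOR_SIZE gives bytes; the
		 * division by 2 * sizeof(uint16_t) yields the per-bank size
		 * in 16-bit words.
		 */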
2508 sc->sc_flashreg_offset = 0;
2509 sc->phy.acquire = wm_get_swflag_ich8lan;
2510 sc->phy.release = wm_put_swflag_ich8lan;
2511 sc->nvm.acquire = wm_get_nvm_ich8lan;
2512 sc->nvm.release = wm_put_nvm_ich8lan;
2513 break;
2514 case WM_T_PCH_SPT:
2515 case WM_T_PCH_CNP:
2516 sc->nvm.read = wm_nvm_read_spt;
2517 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2518 sc->sc_flags |= WM_F_EEPROM_FLASH;
2519 sc->sc_flasht = sc->sc_st;
2520 sc->sc_flashh = sc->sc_sh;
2521 sc->sc_ich8_flash_base = 0;
2522 sc->sc_nvm_wordsize =
2523 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2524 * NVM_SIZE_MULTIPLIER;
2525 /* It is the size in bytes; we want words */
2526 sc->sc_nvm_wordsize /= 2;
2527 /* Assume 2 banks */
2528 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
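		/*
		 * Worked example (illustrative STRAP value): if bits [5:1]
		 * read 7, the flash is (7 + 1) * NVM_SIZE_MULTIPLIER bytes,
		 * halved above to words, and each of the two assumed banks
		 * covers half of those words.
		 */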
2529 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2530 sc->phy.acquire = wm_get_swflag_ich8lan;
2531 sc->phy.release = wm_put_swflag_ich8lan;
2532 sc->nvm.acquire = wm_get_nvm_ich8lan;
2533 sc->nvm.release = wm_put_nvm_ich8lan;
2534 break;
2535 case WM_T_I210:
2536 case WM_T_I211:
2537 /* Allow a single clear of the SW semaphore on I210 and newer */
2538 sc->sc_flags |= WM_F_WA_I210_CLSEM;
2539 if (wm_nvm_flash_presence_i210(sc)) {
2540 sc->nvm.read = wm_nvm_read_eerd;
2541 /* Don't use WM_F_LOCK_EECD because we use EERD */
2542 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2543 wm_nvm_set_addrbits_size_eecd(sc);
2544 } else {
2545 sc->nvm.read = wm_nvm_read_invm;
2546 sc->sc_flags |= WM_F_EEPROM_INVM;
2547 sc->sc_nvm_wordsize = INVM_SIZE;
2548 }
2549 sc->phy.acquire = wm_get_phy_82575;
2550 sc->phy.release = wm_put_phy_82575;
2551 sc->nvm.acquire = wm_get_nvm_80003;
2552 sc->nvm.release = wm_put_nvm_80003;
2553 break;
2554 default:
2555 break;
2556 }
2557
2558 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2559 switch (sc->sc_type) {
2560 case WM_T_82571:
2561 case WM_T_82572:
2562 reg = CSR_READ(sc, WMREG_SWSM2);
2563 if ((reg & SWSM2_LOCK) == 0) {
2564 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2565 force_clear_smbi = true;
2566 } else
2567 force_clear_smbi = false;
2568 break;
2569 case WM_T_82573:
2570 case WM_T_82574:
2571 case WM_T_82583:
2572 force_clear_smbi = true;
2573 break;
2574 default:
2575 force_clear_smbi = false;
2576 break;
2577 }
2578 if (force_clear_smbi) {
2579 reg = CSR_READ(sc, WMREG_SWSM);
2580 if ((reg & SWSM_SMBI) != 0)
2581 aprint_error_dev(sc->sc_dev,
2582 "Please update the Bootagent\n");
2583 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2584 }
2585
2586 /*
2587 * Defer printing the EEPROM type until after verifying the checksum.
2588 * This allows the EEPROM type to be printed correctly in the case
2589 * that no EEPROM is attached.
2590 */
2591 /*
2592 * Validate the EEPROM checksum. If the checksum fails, flag
2593 * this for later, so we can fail future reads from the EEPROM.
2594 */
2595 if (wm_nvm_validate_checksum(sc)) {
2596 /*
2597 * Validate again, because some PCI-e parts fail the
2598 * first check due to the link being in a sleep state.
2599 */
2600 if (wm_nvm_validate_checksum(sc))
2601 sc->sc_flags |= WM_F_EEPROM_INVALID;
2602 }
2603
2604 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2605 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2606 else {
2607 aprint_verbose_dev(sc->sc_dev, "%u words ",
2608 sc->sc_nvm_wordsize);
2609 if (sc->sc_flags & WM_F_EEPROM_INVM)
2610 aprint_verbose("iNVM");
2611 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2612 aprint_verbose("FLASH(HW)");
2613 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2614 aprint_verbose("FLASH");
2615 else {
2616 if (sc->sc_flags & WM_F_EEPROM_SPI)
2617 eetype = "SPI";
2618 else
2619 eetype = "MicroWire";
2620 aprint_verbose("(%d address bits) %s EEPROM",
2621 sc->sc_nvm_addrbits, eetype);
2622 }
2623 }
2624 wm_nvm_version(sc);
2625 aprint_verbose("\n");
2626
2627 /*
2628 * XXX This is the first call of wm_gmii_setup_phytype; the result
2629 * might be incorrect.
2630 */
2631 wm_gmii_setup_phytype(sc, 0, 0);
2632
2633 /* Check for WM_F_WOL on some chips before wm_reset() */
2634 switch (sc->sc_type) {
2635 case WM_T_ICH8:
2636 case WM_T_ICH9:
2637 case WM_T_ICH10:
2638 case WM_T_PCH:
2639 case WM_T_PCH2:
2640 case WM_T_PCH_LPT:
2641 case WM_T_PCH_SPT:
2642 case WM_T_PCH_CNP:
2643 apme_mask = WUC_APME;
2644 eeprom_data = CSR_READ(sc, WMREG_WUC);
2645 if ((eeprom_data & apme_mask) != 0)
2646 sc->sc_flags |= WM_F_WOL;
2647 break;
2648 default:
2649 break;
2650 }
2651
2652 /* Reset the chip to a known state. */
2653 wm_reset(sc);
2654
2655 /*
2656 * Check for I21[01] PLL workaround.
2657 *
2658 * Three cases:
2659 * a) Chip is I211.
2660 * b) Chip is I210 and it uses INVM (not FLASH).
2661 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2662 */
2663 if (sc->sc_type == WM_T_I211)
2664 sc->sc_flags |= WM_F_PLL_WA_I210;
2665 if (sc->sc_type == WM_T_I210) {
2666 if (!wm_nvm_flash_presence_i210(sc))
2667 sc->sc_flags |= WM_F_PLL_WA_I210;
2668 else if ((sc->sc_nvm_ver_major < 3)
2669 || ((sc->sc_nvm_ver_major == 3)
2670 && (sc->sc_nvm_ver_minor < 25))) {
2671 aprint_verbose_dev(sc->sc_dev,
2672 "ROM image version %d.%d is older than 3.25\n",
2673 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2674 sc->sc_flags |= WM_F_PLL_WA_I210;
2675 }
2676 }
2677 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2678 wm_pll_workaround_i210(sc);
2679
2680 wm_get_wakeup(sc);
2681
2682 /* Non-AMT based hardware can now take control from firmware */
2683 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2684 wm_get_hw_control(sc);
2685
2686 /*
2687 * Read the Ethernet address from the EEPROM, unless it was
2688 * found first in the device properties.
2689 */
2690 ea = prop_dictionary_get(dict, "mac-address");
2691 if (ea != NULL) {
2692 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2693 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2694 memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
2695 } else {
2696 if (wm_read_mac_addr(sc, enaddr) != 0) {
2697 aprint_error_dev(sc->sc_dev,
2698 "unable to read Ethernet address\n");
2699 goto out;
2700 }
2701 }
2702
2703 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2704 ether_sprintf(enaddr));
2705
2706 /*
2707 * Read the config info from the EEPROM, and set up various
2708 * bits in the control registers based on their contents.
2709 */
2710 pn = prop_dictionary_get(dict, "i82543-cfg1");
2711 if (pn != NULL) {
2712 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2713 cfg1 = (uint16_t) prop_number_signed_value(pn);
2714 } else {
2715 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2716 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2717 goto out;
2718 }
2719 }
2720
2721 pn = prop_dictionary_get(dict, "i82543-cfg2");
2722 if (pn != NULL) {
2723 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2724 cfg2 = (uint16_t) prop_number_signed_value(pn);
2725 } else {
2726 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2727 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2728 goto out;
2729 }
2730 }
2731
2732 /* check for WM_F_WOL */
2733 switch (sc->sc_type) {
2734 case WM_T_82542_2_0:
2735 case WM_T_82542_2_1:
2736 case WM_T_82543:
2737 /* dummy? */
2738 eeprom_data = 0;
2739 apme_mask = NVM_CFG3_APME;
2740 break;
2741 case WM_T_82544:
2742 apme_mask = NVM_CFG2_82544_APM_EN;
2743 eeprom_data = cfg2;
2744 break;
2745 case WM_T_82546:
2746 case WM_T_82546_3:
2747 case WM_T_82571:
2748 case WM_T_82572:
2749 case WM_T_82573:
2750 case WM_T_82574:
2751 case WM_T_82583:
2752 case WM_T_80003:
2753 case WM_T_82575:
2754 case WM_T_82576:
2755 apme_mask = NVM_CFG3_APME;
2756 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2757 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2758 break;
2759 case WM_T_82580:
2760 case WM_T_I350:
2761 case WM_T_I354:
2762 case WM_T_I210:
2763 case WM_T_I211:
2764 apme_mask = NVM_CFG3_APME;
2765 wm_nvm_read(sc,
2766 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2767 1, &eeprom_data);
2768 break;
2769 case WM_T_ICH8:
2770 case WM_T_ICH9:
2771 case WM_T_ICH10:
2772 case WM_T_PCH:
2773 case WM_T_PCH2:
2774 case WM_T_PCH_LPT:
2775 case WM_T_PCH_SPT:
2776 case WM_T_PCH_CNP:
2777 /* Already checked before wm_reset() */
2778 apme_mask = eeprom_data = 0;
2779 break;
2780 default: /* XXX 82540 */
2781 apme_mask = NVM_CFG3_APME;
2782 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2783 break;
2784 }
2785 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2786 if ((eeprom_data & apme_mask) != 0)
2787 sc->sc_flags |= WM_F_WOL;
2788
2789 /*
2790 * We have the EEPROM settings; now apply the special cases
2791 * where the EEPROM may be wrong or the board won't support
2792 * wake on LAN on a particular port.
2793 */
2794 switch (sc->sc_pcidevid) {
2795 case PCI_PRODUCT_INTEL_82546GB_PCIE:
2796 sc->sc_flags &= ~WM_F_WOL;
2797 break;
2798 case PCI_PRODUCT_INTEL_82546EB_FIBER:
2799 case PCI_PRODUCT_INTEL_82546GB_FIBER:
2800 /* Wake events only supported on port A for dual fiber
2801 * regardless of eeprom setting */
2802 if (sc->sc_funcid == 1)
2803 sc->sc_flags &= ~WM_F_WOL;
2804 break;
2805 case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2806 /* If quad port adapter, disable WoL on all but port A */
2807 if (sc->sc_funcid != 0)
2808 sc->sc_flags &= ~WM_F_WOL;
2809 break;
2810 case PCI_PRODUCT_INTEL_82571EB_FIBER:
2811 /* Wake events only supported on port A for dual fiber
2812 * regardless of eeprom setting */
2813 if (sc->sc_funcid == 1)
2814 sc->sc_flags &= ~WM_F_WOL;
2815 break;
2816 case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2817 case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2818 case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2819 /* If quad port adapter, disable WoL on all but port A */
2820 if (sc->sc_funcid != 0)
2821 sc->sc_flags &= ~WM_F_WOL;
2822 break;
2823 }
2824
2825 if (sc->sc_type >= WM_T_82575) {
2826 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2827 aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2828 nvmword);
2829 if ((sc->sc_type == WM_T_82575) ||
2830 (sc->sc_type == WM_T_82576)) {
2831 /* Check NVM for autonegotiation */
2832 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2833 != 0)
2834 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2835 }
2836 if ((sc->sc_type == WM_T_82575) ||
2837 (sc->sc_type == WM_T_I350)) {
2838 if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2839 sc->sc_flags |= WM_F_MAS;
2840 }
2841 }
2842 }
2843
2844 /*
2845 * XXX Need special handling for some multi-port cards
2846 * to disable a particular port.
2847 */
2848
2849 if (sc->sc_type >= WM_T_82544) {
2850 pn = prop_dictionary_get(dict, "i82543-swdpin");
2851 if (pn != NULL) {
2852 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2853 swdpin = (uint16_t) prop_number_signed_value(pn);
2854 } else {
2855 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2856 aprint_error_dev(sc->sc_dev,
2857 "unable to read SWDPIN\n");
2858 goto out;
2859 }
2860 }
2861 }
2862
2863 if (cfg1 & NVM_CFG1_ILOS)
2864 sc->sc_ctrl |= CTRL_ILOS;
2865
2866 /*
2867 * XXX
2868 * This code isn't correct because pins 2 and 3 are located
2869 * in different positions on newer chips. Check all datasheets.
2870 *
2871 * Until this is resolved, apply it only to chips <= 82580.
2872 */
2873 if (sc->sc_type <= WM_T_82580) {
2874 if (sc->sc_type >= WM_T_82544) {
2875 sc->sc_ctrl |=
2876 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2877 CTRL_SWDPIO_SHIFT;
2878 sc->sc_ctrl |=
2879 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2880 CTRL_SWDPINS_SHIFT;
2881 } else {
2882 sc->sc_ctrl |=
2883 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2884 CTRL_SWDPIO_SHIFT;
2885 }
2886 }
2887
2888 if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2889 wm_nvm_read(sc,
2890 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2891 1, &nvmword);
2892 if (nvmword & NVM_CFG3_ILOS)
2893 sc->sc_ctrl |= CTRL_ILOS;
2894 }
2895
2896 #if 0
2897 if (sc->sc_type >= WM_T_82544) {
2898 if (cfg1 & NVM_CFG1_IPS0)
2899 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2900 if (cfg1 & NVM_CFG1_IPS1)
2901 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2902 sc->sc_ctrl_ext |=
2903 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2904 CTRL_EXT_SWDPIO_SHIFT;
2905 sc->sc_ctrl_ext |=
2906 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2907 CTRL_EXT_SWDPINS_SHIFT;
2908 } else {
2909 sc->sc_ctrl_ext |=
2910 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2911 CTRL_EXT_SWDPIO_SHIFT;
2912 }
2913 #endif
2914
2915 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2916 #if 0
2917 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2918 #endif
2919
2920 if (sc->sc_type == WM_T_PCH) {
2921 uint16_t val;
2922
2923 /* Save the NVM K1 bit setting */
2924 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2925
2926 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2927 sc->sc_nvm_k1_enabled = 1;
2928 else
2929 sc->sc_nvm_k1_enabled = 0;
2930 }
2931
2932 /* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
2933 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2934 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2935 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2936 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2937 || sc->sc_type == WM_T_82573
2938 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2939 /* Copper only */
2940 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2941 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2942 || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2943 || (sc->sc_type == WM_T_I211)) {
2944 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2945 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2946 switch (link_mode) {
2947 case CTRL_EXT_LINK_MODE_1000KX:
2948 aprint_normal_dev(sc->sc_dev, "1000KX\n");
2949 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2950 break;
2951 case CTRL_EXT_LINK_MODE_SGMII:
2952 if (wm_sgmii_uses_mdio(sc)) {
2953 aprint_normal_dev(sc->sc_dev,
2954 "SGMII(MDIO)\n");
2955 sc->sc_flags |= WM_F_SGMII;
2956 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2957 break;
2958 }
2959 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2960 /*FALLTHROUGH*/
2961 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2962 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2963 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2964 if (link_mode
2965 == CTRL_EXT_LINK_MODE_SGMII) {
2966 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2967 sc->sc_flags |= WM_F_SGMII;
2968 aprint_verbose_dev(sc->sc_dev,
2969 "SGMII\n");
2970 } else {
2971 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2972 aprint_verbose_dev(sc->sc_dev,
2973 "SERDES\n");
2974 }
2975 break;
2976 }
2977 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2978 aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
2979 else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2980 aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
2981 sc->sc_flags |= WM_F_SGMII;
2982 }
2983 /* Do not change link mode for 100BaseFX */
2984 if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
2985 break;
2986
2987 /* Change current link mode setting */
2988 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2989 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2990 reg |= CTRL_EXT_LINK_MODE_SGMII;
2991 else
2992 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2993 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2994 break;
2995 case CTRL_EXT_LINK_MODE_GMII:
2996 default:
2997 aprint_normal_dev(sc->sc_dev, "Copper\n");
2998 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2999 break;
3000 }
3001
3003 if ((sc->sc_flags & WM_F_SGMII) != 0)
3004 reg |= CTRL_EXT_I2C_ENA;
3005 else
3006 reg &= ~CTRL_EXT_I2C_ENA;
3007 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3008 if ((sc->sc_flags & WM_F_SGMII) != 0) {
3009 if (!wm_sgmii_uses_mdio(sc))
3010 wm_gmii_setup_phytype(sc, 0, 0);
3011 wm_reset_mdicnfg_82580(sc);
3012 }
3013 } else if (sc->sc_type < WM_T_82543 ||
3014 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
3015 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3016 aprint_error_dev(sc->sc_dev,
3017 "WARNING: TBIMODE set on 1000BASE-T product!\n");
3018 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
3019 }
3020 } else {
3021 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
3022 aprint_error_dev(sc->sc_dev,
3023 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
3024 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3025 }
3026 }
3027
3028 if (sc->sc_type >= WM_T_PCH2)
3029 sc->sc_flags |= WM_F_EEE;
3030 else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
3031 && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
3032 /* XXX: Need special handling for I354. (not yet) */
3033 if (sc->sc_type != WM_T_I354)
3034 sc->sc_flags |= WM_F_EEE;
3035 }
3036
3037 /*
3038 * The I350 has a bug where it always strips the CRC whether
3039 * asked to or not. So ask for stripped CRC here and cope in rxeof.
3040 */
3041 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3042 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3043 sc->sc_flags |= WM_F_CRC_STRIP;
3044
3045 /* Set device properties (macflags) */
3046 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
3047
3048 if (sc->sc_flags != 0) {
3049 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
3050 aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
3051 }
3052
3053 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
3054
3055 /* Initialize the media structures accordingly. */
3056 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3057 wm_gmii_mediainit(sc, wmp->wmp_product);
3058 else
3059 wm_tbi_mediainit(sc); /* All others */
3060
3061 ifp = &sc->sc_ethercom.ec_if;
3062 xname = device_xname(sc->sc_dev);
3063 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
3064 ifp->if_softc = sc;
3065 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3066 ifp->if_extflags = IFEF_MPSAFE;
3067 ifp->if_ioctl = wm_ioctl;
3068 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3069 ifp->if_start = wm_nq_start;
3070 /*
3071 * When the number of CPUs is one and the controller can use
3072 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
3073 * That is, wm(4) uses two interrupts: one for Tx/Rx and
3074 * the other for link status changes.
3075 * In this situation, wm_nq_transmit() is disadvantageous
3076 * because of wm_select_txqueue() and pcq(9) overhead.
3077 */
3078 if (wm_is_using_multiqueue(sc))
3079 ifp->if_transmit = wm_nq_transmit;
3080 } else {
3081 ifp->if_start = wm_start;
3082 /*
3083 * wm_transmit() has the same disadvantages as wm_nq_transmit()
3084 * described above.
3085 */
3086 if (wm_is_using_multiqueue(sc))
3087 ifp->if_transmit = wm_transmit;
3088 }
3089 /* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
3090 ifp->if_init = wm_init;
3091 ifp->if_stop = wm_stop;
3092 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
3093 IFQ_SET_READY(&ifp->if_snd);
3094
3095 /* Check for jumbo frame */
3096 switch (sc->sc_type) {
3097 case WM_T_82573:
3098 /* XXX limited to 9234 if ASPM is disabled */
3099 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
3100 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
3101 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3102 break;
3103 case WM_T_82571:
3104 case WM_T_82572:
3105 case WM_T_82574:
3106 case WM_T_82583:
3107 case WM_T_82575:
3108 case WM_T_82576:
3109 case WM_T_82580:
3110 case WM_T_I350:
3111 case WM_T_I354:
3112 case WM_T_I210:
3113 case WM_T_I211:
3114 case WM_T_80003:
3115 case WM_T_ICH9:
3116 case WM_T_ICH10:
3117 case WM_T_PCH2: /* PCH2 supports 9K frame size */
3118 case WM_T_PCH_LPT:
3119 case WM_T_PCH_SPT:
3120 case WM_T_PCH_CNP:
3121 /* XXX limited to 9234 */
3122 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3123 break;
3124 case WM_T_PCH:
3125 /* XXX limited to 4096 */
3126 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3127 break;
3128 case WM_T_82542_2_0:
3129 case WM_T_82542_2_1:
3130 case WM_T_ICH8:
3131 /* No support for jumbo frame */
3132 break;
3133 default:
3134 /* ETHER_MAX_LEN_JUMBO */
3135 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3136 break;
3137 }
3138
3139 /* If we're an i82543 or greater, we can support VLANs. */
3140 if (sc->sc_type >= WM_T_82543) {
3141 sc->sc_ethercom.ec_capabilities |=
3142 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3143 sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3144 }
3145
3146 if ((sc->sc_flags & WM_F_EEE) != 0)
3147 sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
3148
3149 /*
3150 * i82543 and later can perform IPv4, TCPv4 and UDPv4 checksum
3151 * offload in both directions, and TCPv6/UDPv6 on transmit.
3152 */
3153 if (sc->sc_type >= WM_T_82543) {
3154 ifp->if_capabilities |=
3155 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3156 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3157 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3158 IFCAP_CSUM_TCPv6_Tx |
3159 IFCAP_CSUM_UDPv6_Tx;
3160 }
3161
3162 /*
3163 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
3164 *
3165 * 82541GI (8086:1076) ... no
3166 * 82572EI (8086:10b9) ... yes
3167 */
3168 if (sc->sc_type >= WM_T_82571) {
3169 ifp->if_capabilities |=
3170 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3171 }
3172
3173 /*
3174 * If we're an i82544 or greater (except i82547), we can do
3175 * TCP segmentation offload.
3176 */
3177 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
3178 ifp->if_capabilities |= IFCAP_TSOv4;
3179
3180 if (sc->sc_type >= WM_T_82571)
3181 ifp->if_capabilities |= IFCAP_TSOv6;
3182
3183 sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3184 sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3185 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3186 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3187
3188 /* Attach the interface. */
3189 if_initialize(ifp);
3190 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3191 ether_ifattach(ifp, enaddr);
3192 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3193 if_register(ifp);
3194 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3195 RND_FLAG_DEFAULT);
3196
3197 #ifdef WM_EVENT_COUNTERS
3198 /* Attach event counters. */
3199 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3200 NULL, xname, "linkintr");
3201
3202 if (sc->sc_type >= WM_T_82542_2_1) {
3203 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3204 NULL, xname, "tx_xoff");
3205 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3206 NULL, xname, "tx_xon");
3207 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3208 NULL, xname, "rx_xoff");
3209 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3210 NULL, xname, "rx_xon");
3211 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3212 NULL, xname, "rx_macctl");
3213 }
3214
3215 evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
3216 NULL, xname, "CRC Error");
3217 evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
3218 NULL, xname, "Symbol Error");
3219
3220 if (sc->sc_type >= WM_T_82543) {
3221 evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
3222 NULL, xname, "Alignment Error");
3223 evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
3224 NULL, xname, "Receive Error");
3225 evcnt_attach_dynamic(&sc->sc_ev_cexterr, EVCNT_TYPE_MISC,
3226 NULL, xname, "Carrier Extension Error");
3227 }
3228
3229 evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
3230 NULL, xname, "Missed Packets");
3231 evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
3232 NULL, xname, "Collision");
3233 evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
3234 NULL, xname, "Sequence Error");
3235 evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
3236 NULL, xname, "Receive Length Error");
3237 evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
3238 NULL, xname, "Single Collision");
3239 evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
3240 NULL, xname, "Excessive Collisions");
3241 evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
3242 NULL, xname, "Multiple Collision");
3243 evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
3244 NULL, xname, "Late Collisions");
3245 evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
3246 NULL, xname, "Defer");
3247 evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
3248 NULL, xname, "Good Packets Rx");
3249 evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
3250 NULL, xname, "Broadcast Packets Rx");
3251 evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
3252 NULL, xname, "Multicast Packets Rx");
3253 evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
3254 NULL, xname, "Good Packets Tx");
3255 evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
3256 NULL, xname, "Good Octets Rx");
3257 evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
3258 NULL, xname, "Good Octets Tx");
3259 evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
3260 NULL, xname, "Rx No Buffers");
3261 evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
3262 NULL, xname, "Rx Undersize");
3263 evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
3264 NULL, xname, "Rx Fragment");
3265 evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
3266 NULL, xname, "Rx Oversize");
3267 evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
3268 NULL, xname, "Rx Jabber");
3269 evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
3270 NULL, xname, "Total Octets Rx");
3271 evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
3272 NULL, xname, "Total Octets Tx");
3273 evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
3274 NULL, xname, "Total Packets Rx");
3275 evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
3276 NULL, xname, "Total Packets Tx");
3277 evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
3278 NULL, xname, "Multicast Packets Tx");
3279 evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
3280 NULL, xname, "Broadcast Packets Tx Count");
3281 evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
3282 NULL, xname, "Packets Rx (64 bytes)");
3283 evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
3284 NULL, xname, "Packets Rx (65-127 bytes)");
3285 evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
3286 NULL, xname, "Packets Rx (128-255 bytes)");
3287 evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
3288 NULL, xname, "Packets Rx (255-511 bytes)");
3289 evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
3290 NULL, xname, "Packets Rx (512-1023 bytes)");
3291 evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
3292 NULL, xname, "Packets Rx (1024-1522 bytes)");
3293 evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
3294 NULL, xname, "Packets Tx (64 bytes)");
3295 evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
3296 NULL, xname, "Packets Tx (65-127 bytes)");
3297 evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
3298 NULL, xname, "Packets Tx (128-255 bytes)");
3299 evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
3300 NULL, xname, "Packets Tx (256-511 bytes)");
3301 evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
3302 NULL, xname, "Packets Tx (512-1023 bytes)");
3303 evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
3304 NULL, xname, "Packets Tx (1024-1522 Bytes)");
3305 evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
3306 NULL, xname, "Interrupt Assertion");
3307 evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
3308 NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
3309 evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
3310 NULL, xname, "Intr. Cause Rx Abs Timer Expire");
3311 evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
3312 NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
3313 evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
3314 NULL, xname, "Intr. Cause Tx Abs Timer Expire");
3315 evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
3316 NULL, xname, "Intr. Cause Tx Queue Empty");
3317 evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
3318 NULL, xname, "Intr. Cause Tx Queue Min Thresh");
3319 evcnt_attach_dynamic(&sc->sc_ev_icrxdmtc, EVCNT_TYPE_MISC,
3320 NULL, xname, "Intr. Cause Rx Desc Min Thresh");
3321 evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
3322 NULL, xname, "Interrupt Cause Receiver Overrun");
3323 if (sc->sc_type >= WM_T_82543) {
3324 evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
3325 NULL, xname, "Tx with No CRS");
3326 evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
3327 NULL, xname, "TCP Segmentation Context Tx");
3328 evcnt_attach_dynamic(&sc->sc_ev_tsctfc, EVCNT_TYPE_MISC,
3329 NULL, xname, "TCP Segmentation Context Tx Fail");
3330 }
3331 if (sc->sc_type >= WM_T_82540) {
3332 evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
3333 NULL, xname, "Management Packets RX");
3334 evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
3335 NULL, xname, "Management Packets Dropped");
3336 evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
3337 NULL, xname, "Management Packets TX");
3338 }
3339 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
3340 evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
3341 NULL, xname, "BMC2OS Packets received by host");
3342 evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
3343 NULL, xname, "OS2BMC Packets transmitted by host");
3344 evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
3345 NULL, xname, "BMC2OS Packets sent by BMC");
3346 evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
3347 NULL, xname, "OS2BMC Packets received by BMC");
3348 }
3349 #endif /* WM_EVENT_COUNTERS */
3350
3351 sc->sc_txrx_use_workqueue = false;
3352
3353 if (wm_phy_need_linkdown_discard(sc)) {
3354 DPRINTF(sc, WM_DEBUG_LINK,
3355 ("%s: %s: Set linkdown discard flag\n",
3356 device_xname(sc->sc_dev), __func__));
3357 wm_set_linkdown_discard(sc);
3358 }
3359
3360 wm_init_sysctls(sc);
3361
3362 if (pmf_device_register(self, wm_suspend, wm_resume))
3363 pmf_class_network_register(self, ifp);
3364 else
3365 aprint_error_dev(self, "couldn't establish power handler\n");
3366
3367 sc->sc_flags |= WM_F_ATTACHED;
3368 out:
3369 return;
3370 }
3371
3372 /* The detach function (ca_detach) */
3373 static int
3374 wm_detach(device_t self, int flags __unused)
3375 {
3376 struct wm_softc *sc = device_private(self);
3377 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3378 int i;
3379
3380 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3381 return 0;
3382
3383 /* Stop the interface. Callouts are stopped in it. */
3384 IFNET_LOCK(ifp);
3385 sc->sc_dying = true;
3386 wm_stop(ifp, 1);
3387 IFNET_UNLOCK(ifp);
3388
3389 pmf_device_deregister(self);
3390
3391 sysctl_teardown(&sc->sc_sysctllog);
3392
3393 #ifdef WM_EVENT_COUNTERS
3394 evcnt_detach(&sc->sc_ev_linkintr);
3395
3396 if (sc->sc_type >= WM_T_82542_2_1) {
3397 evcnt_detach(&sc->sc_ev_tx_xoff);
3398 evcnt_detach(&sc->sc_ev_tx_xon);
3399 evcnt_detach(&sc->sc_ev_rx_xoff);
3400 evcnt_detach(&sc->sc_ev_rx_xon);
3401 evcnt_detach(&sc->sc_ev_rx_macctl);
3402 }
3403
3404 evcnt_detach(&sc->sc_ev_crcerrs);
3405 evcnt_detach(&sc->sc_ev_symerrc);
3406
3407 if (sc->sc_type >= WM_T_82543) {
3408 evcnt_detach(&sc->sc_ev_algnerrc);
3409 evcnt_detach(&sc->sc_ev_rxerrc);
3410 evcnt_detach(&sc->sc_ev_cexterr);
3411 }
3412 evcnt_detach(&sc->sc_ev_mpc);
3413 evcnt_detach(&sc->sc_ev_colc);
3414 evcnt_detach(&sc->sc_ev_sec);
3415 evcnt_detach(&sc->sc_ev_rlec);
3416 evcnt_detach(&sc->sc_ev_scc);
3417 evcnt_detach(&sc->sc_ev_ecol);
3418 evcnt_detach(&sc->sc_ev_mcc);
3419 evcnt_detach(&sc->sc_ev_latecol);
3420 evcnt_detach(&sc->sc_ev_dc);
3421 evcnt_detach(&sc->sc_ev_gprc);
3422 evcnt_detach(&sc->sc_ev_bprc);
3423 evcnt_detach(&sc->sc_ev_mprc);
3424 evcnt_detach(&sc->sc_ev_gptc);
3425 evcnt_detach(&sc->sc_ev_gorc);
3426 evcnt_detach(&sc->sc_ev_gotc);
3427 evcnt_detach(&sc->sc_ev_rnbc);
3428 evcnt_detach(&sc->sc_ev_ruc);
3429 evcnt_detach(&sc->sc_ev_rfc);
3430 evcnt_detach(&sc->sc_ev_roc);
3431 evcnt_detach(&sc->sc_ev_rjc);
3432 evcnt_detach(&sc->sc_ev_tor);
3433 evcnt_detach(&sc->sc_ev_tot);
3434 evcnt_detach(&sc->sc_ev_tpr);
3435 evcnt_detach(&sc->sc_ev_tpt);
3436 evcnt_detach(&sc->sc_ev_mptc);
3437 evcnt_detach(&sc->sc_ev_bptc);
3438 evcnt_detach(&sc->sc_ev_prc64);
3439 evcnt_detach(&sc->sc_ev_prc127);
3440 evcnt_detach(&sc->sc_ev_prc255);
3441 evcnt_detach(&sc->sc_ev_prc511);
3442 evcnt_detach(&sc->sc_ev_prc1023);
3443 evcnt_detach(&sc->sc_ev_prc1522);
3444 evcnt_detach(&sc->sc_ev_ptc64);
3445 evcnt_detach(&sc->sc_ev_ptc127);
3446 evcnt_detach(&sc->sc_ev_ptc255);
3447 evcnt_detach(&sc->sc_ev_ptc511);
3448 evcnt_detach(&sc->sc_ev_ptc1023);
3449 evcnt_detach(&sc->sc_ev_ptc1522);
3450 evcnt_detach(&sc->sc_ev_iac);
3451 evcnt_detach(&sc->sc_ev_icrxptc);
3452 evcnt_detach(&sc->sc_ev_icrxatc);
3453 evcnt_detach(&sc->sc_ev_ictxptc);
3454 evcnt_detach(&sc->sc_ev_ictxact);
3455 evcnt_detach(&sc->sc_ev_ictxqec);
3456 evcnt_detach(&sc->sc_ev_ictxqmtc);
3457 evcnt_detach(&sc->sc_ev_icrxdmtc);
3458 evcnt_detach(&sc->sc_ev_icrxoc);
3459 if (sc->sc_type >= WM_T_82543) {
3460 evcnt_detach(&sc->sc_ev_tncrs);
3461 evcnt_detach(&sc->sc_ev_tsctc);
3462 evcnt_detach(&sc->sc_ev_tsctfc);
3463 }
3464 if (sc->sc_type >= WM_T_82540) {
3465 evcnt_detach(&sc->sc_ev_mgtprc);
3466 evcnt_detach(&sc->sc_ev_mgtpdc);
3467 evcnt_detach(&sc->sc_ev_mgtptc);
3468 }
3469 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
3470 evcnt_detach(&sc->sc_ev_b2ogprc);
3471 evcnt_detach(&sc->sc_ev_o2bspc);
3472 evcnt_detach(&sc->sc_ev_b2ospc);
3473 evcnt_detach(&sc->sc_ev_o2bgptc);
3474 }
3475 #endif /* WM_EVENT_COUNTERS */
3476
3477 rnd_detach_source(&sc->rnd_source);
3478
3479 /* Tell the firmware about the release */
3480 mutex_enter(sc->sc_core_lock);
3481 wm_release_manageability(sc);
3482 wm_release_hw_control(sc);
3483 wm_enable_wakeup(sc);
3484 mutex_exit(sc->sc_core_lock);
3485
3486 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3487
3488 ether_ifdetach(ifp);
3489 if_detach(ifp);
3490 if_percpuq_destroy(sc->sc_ipq);
3491
3492 /* Delete all remaining media. */
3493 ifmedia_fini(&sc->sc_mii.mii_media);
3494
3495 /* Unload RX dmamaps and free mbufs */
3496 for (i = 0; i < sc->sc_nqueues; i++) {
3497 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3498 mutex_enter(rxq->rxq_lock);
3499 wm_rxdrain(rxq);
3500 mutex_exit(rxq->rxq_lock);
3501 }
3502 /* Must unlock here */
3503
3504 /* Disestablish the interrupt handler */
3505 for (i = 0; i < sc->sc_nintrs; i++) {
3506 if (sc->sc_ihs[i] != NULL) {
3507 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3508 sc->sc_ihs[i] = NULL;
3509 }
3510 }
3511 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3512
3513 /* wm_stop() ensured that the workqueues are stopped. */
3514 workqueue_destroy(sc->sc_queue_wq);
3515 workqueue_destroy(sc->sc_reset_wq);
3516
3517 for (i = 0; i < sc->sc_nqueues; i++)
3518 softint_disestablish(sc->sc_queue[i].wmq_si);
3519
3520 wm_free_txrx_queues(sc);
3521
3522 /* Unmap the registers */
3523 if (sc->sc_ss) {
3524 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3525 sc->sc_ss = 0;
3526 }
3527 if (sc->sc_ios) {
3528 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3529 sc->sc_ios = 0;
3530 }
3531 if (sc->sc_flashs) {
3532 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3533 sc->sc_flashs = 0;
3534 }
3535
3536 if (sc->sc_core_lock)
3537 mutex_obj_free(sc->sc_core_lock);
3538 if (sc->sc_ich_phymtx)
3539 mutex_obj_free(sc->sc_ich_phymtx);
3540 if (sc->sc_ich_nvmmtx)
3541 mutex_obj_free(sc->sc_ich_nvmmtx);
3542
3543 return 0;
3544 }
3545
3546 static bool
3547 wm_suspend(device_t self, const pmf_qual_t *qual)
3548 {
3549 struct wm_softc *sc = device_private(self);
3550
3551 wm_release_manageability(sc);
3552 wm_release_hw_control(sc);
3553 wm_enable_wakeup(sc);
3554
3555 return true;
3556 }
3557
3558 static bool
3559 wm_resume(device_t self, const pmf_qual_t *qual)
3560 {
3561 struct wm_softc *sc = device_private(self);
3562 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3563 pcireg_t reg;
3564 char buf[256];
3565
3566 reg = CSR_READ(sc, WMREG_WUS);
3567 if (reg != 0) {
3568 snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3569 device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3570 CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3571 }
3572
3573 if (sc->sc_type >= WM_T_PCH2)
3574 wm_resume_workarounds_pchlan(sc);
3575 IFNET_LOCK(ifp);
3576 if ((ifp->if_flags & IFF_UP) == 0) {
3577 /* >= PCH_SPT hardware workaround before reset. */
3578 if (sc->sc_type >= WM_T_PCH_SPT)
3579 wm_flush_desc_rings(sc);
3580
3581 wm_reset(sc);
3582 /* Non-AMT based hardware can now take control from firmware */
3583 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3584 wm_get_hw_control(sc);
3585 wm_init_manageability(sc);
3586 } else {
3587 /*
3588 * We called pmf_class_network_register(), so if_init() is
3589 * automatically called when IFF_UP is set. wm_reset(),
3590 * wm_get_hw_control() and wm_init_manageability() are called
3591 * via wm_init().
3592 */
3593 }
3594 IFNET_UNLOCK(ifp);
3595
3596 return true;
3597 }
3598
3599 /*
3600 * wm_watchdog:
3601 *
3602 * Watchdog checker.
3603 */
3604 static bool
3605 wm_watchdog(struct ifnet *ifp)
3606 {
3607 int qid;
3608 struct wm_softc *sc = ifp->if_softc;
3609 uint16_t hang_queue = 0; /* Max number of queues is 16 (on the 82576). */
3610
3611 for (qid = 0; qid < sc->sc_nqueues; qid++) {
3612 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3613
3614 wm_watchdog_txq(ifp, txq, &hang_queue);
3615 }
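/*
 * hang_queue is a bitmask; wm_watchdog_txq() sets bit N when Tx
 * queue N has been stuck for longer than wm_watchdog_timeout.
 */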
3616
3617 #ifdef WM_DEBUG
3618 if (sc->sc_trigger_reset) {
3619 /* debug operation, no need for atomicity or reliability */
3620 sc->sc_trigger_reset = 0;
3621 hang_queue++;
3622 }
3623 #endif
3624
3625 if (hang_queue == 0)
3626 return true;
3627
3628 if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
3629 workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
3630
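/*
 * Returning false prevents wm_tick() from rescheduling itself;
 * the queued reset work re-initializes the interface instead.
 */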
3631 return false;
3632 }
3633
3634 /*
3635 * Perform an interface watchdog reset.
3636 */
3637 static void
3638 wm_handle_reset_work(struct work *work, void *arg)
3639 {
3640 struct wm_softc * const sc = arg;
3641 struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
3642
3643 /* Don't want ioctl operations to happen */
3644 IFNET_LOCK(ifp);
3645
3646 /* reset the interface. */
3647 wm_init(ifp);
3648
3649 IFNET_UNLOCK(ifp);
3650
3651 /*
3652 * There is still some upper-layer processing that calls
3653 * ifp->if_start() directly, e.g. ALTQ or a single-CPU system.
3654 */
3655 /* Try to get more packets going. */
3656 ifp->if_start(ifp);
3657
3658 atomic_store_relaxed(&sc->sc_reset_pending, 0);
3659 }
3660
3661
3662 static void
3663 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3664 {
3665
3666 mutex_enter(txq->txq_lock);
3667 if (txq->txq_sending &&
3668 time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3669 wm_watchdog_txq_locked(ifp, txq, hang);
3670
3671 mutex_exit(txq->txq_lock);
3672 }
3673
3674 static void
3675 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3676 uint16_t *hang)
3677 {
3678 struct wm_softc *sc = ifp->if_softc;
3679 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3680
3681 KASSERT(mutex_owned(txq->txq_lock));
3682
3683 /*
3684 * Since we're using delayed interrupts, sweep up
3685 * before we report an error.
3686 */
3687 wm_txeof(txq, UINT_MAX);
3688
3689 if (txq->txq_sending)
3690 *hang |= __BIT(wmq->wmq_id);
3691
3692 if (txq->txq_free == WM_NTXDESC(txq)) {
3693 log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3694 device_xname(sc->sc_dev));
3695 } else {
3696 #ifdef WM_DEBUG
3697 int i, j;
3698 struct wm_txsoft *txs;
3699 #endif
3700 log(LOG_ERR,
3701 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3702 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3703 txq->txq_next);
3704 if_statinc(ifp, if_oerrors);
3705 #ifdef WM_DEBUG
3706 for (i = txq->txq_sdirty; i != txq->txq_snext;
3707 i = WM_NEXTTXS(txq, i)) {
3708 txs = &txq->txq_soft[i];
3709 printf("txs %d tx %d -> %d\n",
3710 i, txs->txs_firstdesc, txs->txs_lastdesc);
3711 for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3712 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3713 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3714 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3715 printf("\t %#08x%08x\n",
3716 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3717 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3718 } else {
3719 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3720 (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3721 txq->txq_descs[j].wtx_addr.wa_low);
3722 printf("\t %#04x%02x%02x%08x\n",
3723 txq->txq_descs[j].wtx_fields.wtxu_vlan,
3724 txq->txq_descs[j].wtx_fields.wtxu_options,
3725 txq->txq_descs[j].wtx_fields.wtxu_status,
3726 txq->txq_descs[j].wtx_cmdlen);
3727 }
3728 if (j == txs->txs_lastdesc)
3729 break;
3730 }
3731 }
3732 #endif
3733 }
3734 }
3735
3736 /*
3737 * wm_tick:
3738 *
3739 * One second timer, used to check link status, sweep up
3740 * completed transmit jobs, etc.
3741 */
3742 static void
3743 wm_tick(void *arg)
3744 {
3745 struct wm_softc *sc = arg;
3746 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3747 uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
3748 cexterr;
3749
3750 mutex_enter(sc->sc_core_lock);
3751
3752 if (sc->sc_core_stopping) {
3753 mutex_exit(sc->sc_core_lock);
3754 return;
3755 }
3756
3757 crcerrs = CSR_READ(sc, WMREG_CRCERRS);
3758 symerrc = CSR_READ(sc, WMREG_SYMERRC);
3759 mpc = CSR_READ(sc, WMREG_MPC);
3760 colc = CSR_READ(sc, WMREG_COLC);
3761 sec = CSR_READ(sc, WMREG_SEC);
3762 rlec = CSR_READ(sc, WMREG_RLEC);
3763
3764 WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
3765 WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
3766 WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
3767 WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
3768 WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
3769 WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
3770
3771 if (sc->sc_type >= WM_T_82542_2_1) {
3772 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3773 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3774 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3775 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3776 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3777 }
3778 WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
3779 WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
3780 WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
3781 WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
3782 WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
3783 WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
3784 WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
3785 WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
3786 WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
3787
3788 WM_EVCNT_ADD(&sc->sc_ev_gorc,
3789 CSR_READ(sc, WMREG_GORCL) +
3790 ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
3791 WM_EVCNT_ADD(&sc->sc_ev_gotc,
3792 CSR_READ(sc, WMREG_GOTCL) +
3793 ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
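/*
 * GORC/GOTC are 64-bit counts split across two 32-bit registers,
 * e.g. GORCL = 0x00001000 with GORCH = 0x2 means 0x200001000 octets.
 */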
3794
3795 WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
3796 WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
3797 WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
3798 WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
3799 WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
3800
3801 /*
3802 * The TOR(L) register includes:
3803 * - Error
3804 * - Flow control
3805 * - Broadcast rejected (this note appears in the 82574 and newer
3806 * datasheets; what "broadcast rejected" means is unclear.)
3807 */
3808 WM_EVCNT_ADD(&sc->sc_ev_tor,
3809 CSR_READ(sc, WMREG_TORL) +
3810 ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
3811 WM_EVCNT_ADD(&sc->sc_ev_tot,
3812 CSR_READ(sc, WMREG_TOTL) +
3813 ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
3814
3815 WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
3816 WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
3817 WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
3818 WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
3819 WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
3820 WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
3821 WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
3822 WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
3823 WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
3824 WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
3825 WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
3826 WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
3827 WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
3828 WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
3829 WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
3830 WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
3831 WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
3832 WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
3833 WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
3834 WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
3835 WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
3836 WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
3837 WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc, CSR_READ(sc, WMREG_ICTXQMTC));
3838 WM_EVCNT_ADD(&sc->sc_ev_icrxdmtc, CSR_READ(sc, WMREG_ICRXDMTC));
3839 WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
3840
3841 if (sc->sc_type >= WM_T_82543) {
3842 algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
3843 rxerrc = CSR_READ(sc, WMREG_RXERRC);
3844 cexterr = CSR_READ(sc, WMREG_CEXTERR);
3845 WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
3846 WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
3847 WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
3848
3849 WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
3850 WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
3851 WM_EVCNT_ADD(&sc->sc_ev_tsctfc, CSR_READ(sc, WMREG_TSCTFC));
3852 } else
3853 algnerrc = rxerrc = cexterr = 0;
3854
3855 if (sc->sc_type >= WM_T_82540) {
3856 WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
3857 WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
3858 WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
3859 }
3860 if (((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003))
3861 && ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0)) {
3862 WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, CSR_READ(sc, WMREG_B2OGPRC));
3863 WM_EVCNT_ADD(&sc->sc_ev_o2bspc, CSR_READ(sc, WMREG_O2BSPC));
3864 WM_EVCNT_ADD(&sc->sc_ev_b2ospc, CSR_READ(sc, WMREG_B2OSPC));
3865 WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, CSR_READ(sc, WMREG_O2BGPTC));
3866 }
3867 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3868 if_statadd_ref(nsr, if_collisions, colc);
3869 if_statadd_ref(nsr, if_ierrors,
3870 crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
3871 /*
3872 * WMREG_RNBC is incremented when there are no buffers available in
3873 * host memory. It does not count dropped packets, because an
3874 * Ethernet controller can still receive packets in that case if
3875 * there is space in the PHY's FIFO.
3876 *
3877 * If you want to track the number of WMREG_RNBC events, use a
3878 * dedicated EVCNT instead of if_iqdrops.
3879 */
3880 if_statadd_ref(nsr, if_iqdrops, mpc);
3881 IF_STAT_PUTREF(ifp);
3882
3883 if (sc->sc_flags & WM_F_HAS_MII)
3884 mii_tick(&sc->sc_mii);
3885 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3886 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3887 wm_serdes_tick(sc);
3888 else
3889 wm_tbi_tick(sc);
3890
3891 mutex_exit(sc->sc_core_lock);
3892
3893 if (wm_watchdog(ifp))
3894 callout_schedule(&sc->sc_tick_ch, hz);
3895 }
3896
3897 static int
3898 wm_ifflags_cb(struct ethercom *ec)
3899 {
3900 struct ifnet *ifp = &ec->ec_if;
3901 struct wm_softc *sc = ifp->if_softc;
3902 u_short iffchange;
3903 int ecchange;
3904 bool needreset = false;
3905 int rc = 0;
3906
3907 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3908 device_xname(sc->sc_dev), __func__));
3909
3910 KASSERT(IFNET_LOCKED(ifp));
3911
3912 mutex_enter(sc->sc_core_lock);
3913
3914 /*
3915 * Check for if_flags.
3916 * The main use is to prevent linkdown when opening bpf.
3917 */
3918 iffchange = ifp->if_flags ^ sc->sc_if_flags;
3919 sc->sc_if_flags = ifp->if_flags;
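/*
 * The XOR leaves a bit set for each flag that changed; e.g. if only
 * IFF_PROMISC was toggled, iffchange == IFF_PROMISC.
 */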
3920 if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3921 needreset = true;
3922 goto ec;
3923 }
3924
3925 /* iff related updates */
3926 if ((iffchange & IFF_PROMISC) != 0)
3927 wm_set_filter(sc);
3928
3929 wm_set_vlan(sc);
3930
3931 ec:
3932 /* Check for ec_capenable. */
3933 ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
3934 sc->sc_ec_capenable = ec->ec_capenable;
3935 if ((ecchange & ~ETHERCAP_EEE) != 0) {
3936 needreset = true;
3937 goto out;
3938 }
3939
3940 /* ec related updates */
3941 wm_set_eee(sc);
3942
3943 out:
3944 if (needreset)
3945 rc = ENETRESET;
3946 mutex_exit(sc->sc_core_lock);
3947
3948 return rc;
3949 }
3950
3951 static bool
3952 wm_phy_need_linkdown_discard(struct wm_softc *sc)
3953 {
3954
3955 switch (sc->sc_phytype) {
3956 case WMPHY_82577: /* ihphy */
3957 case WMPHY_82578: /* atphy */
3958 case WMPHY_82579: /* ihphy */
3959 case WMPHY_I217: /* ihphy */
3960 case WMPHY_82580: /* ihphy */
3961 case WMPHY_I350: /* ihphy */
3962 return true;
3963 default:
3964 return false;
3965 }
3966 }
3967
3968 static void
3969 wm_set_linkdown_discard(struct wm_softc *sc)
3970 {
3971
3972 for (int i = 0; i < sc->sc_nqueues; i++) {
3973 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3974
3975 mutex_enter(txq->txq_lock);
3976 txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
3977 mutex_exit(txq->txq_lock);
3978 }
3979 }
3980
3981 static void
3982 wm_clear_linkdown_discard(struct wm_softc *sc)
3983 {
3984
3985 for (int i = 0; i < sc->sc_nqueues; i++) {
3986 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3987
3988 mutex_enter(txq->txq_lock);
3989 txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
3990 mutex_exit(txq->txq_lock);
3991 }
3992 }
3993
3994 /*
3995 * wm_ioctl: [ifnet interface function]
3996 *
3997 * Handle control requests from the operator.
3998 */
3999 static int
4000 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
4001 {
4002 struct wm_softc *sc = ifp->if_softc;
4003 struct ifreq *ifr = (struct ifreq *)data;
4004 struct ifaddr *ifa = (struct ifaddr *)data;
4005 struct sockaddr_dl *sdl;
4006 int error;
4007
4008 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4009 device_xname(sc->sc_dev), __func__));
4010
4011 switch (cmd) {
4012 case SIOCADDMULTI:
4013 case SIOCDELMULTI:
4014 break;
4015 default:
4016 KASSERT(IFNET_LOCKED(ifp));
4017 }
4018
4019 switch (cmd) {
4020 case SIOCSIFMEDIA:
4021 mutex_enter(sc->sc_core_lock);
4022 /* Flow control requires full-duplex mode. */
4023 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4024 (ifr->ifr_media & IFM_FDX) == 0)
4025 ifr->ifr_media &= ~IFM_ETH_FMASK;
4026 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4027 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4028 /* We can do both TXPAUSE and RXPAUSE. */
4029 ifr->ifr_media |=
4030 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4031 }
4032 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4033 }
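/*
 * Example: selecting a full-duplex media word with the flowcontrol
 * option (IFM_FLOW) is expanded above into both IFM_ETH_TXPAUSE and
 * IFM_ETH_RXPAUSE.
 */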
4034 mutex_exit(sc->sc_core_lock);
4035 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
4036 if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
4037 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
4038 DPRINTF(sc, WM_DEBUG_LINK,
4039 ("%s: %s: Set linkdown discard flag\n",
4040 device_xname(sc->sc_dev), __func__));
4041 wm_set_linkdown_discard(sc);
4042 }
4043 }
4044 break;
4045 case SIOCINITIFADDR:
4046 mutex_enter(sc->sc_core_lock);
4047 if (ifa->ifa_addr->sa_family == AF_LINK) {
4048 sdl = satosdl(ifp->if_dl->ifa_addr);
4049 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
4050 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
4051 /* Unicast address is the first multicast entry */
4052 wm_set_filter(sc);
4053 error = 0;
4054 mutex_exit(sc->sc_core_lock);
4055 break;
4056 }
4057 mutex_exit(sc->sc_core_lock);
4058 /*FALLTHROUGH*/
4059 default:
4060 if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
4061 if (((ifp->if_flags & IFF_UP) != 0) &&
4062 ((ifr->ifr_flags & IFF_UP) == 0)) {
4063 DPRINTF(sc, WM_DEBUG_LINK,
4064 ("%s: %s: Set linkdown discard flag\n",
4065 device_xname(sc->sc_dev), __func__));
4066 wm_set_linkdown_discard(sc);
4067 }
4068 }
4069 const int s = splnet();
4070 /* It may call wm_start, so unlock here */
4071 error = ether_ioctl(ifp, cmd, data);
4072 splx(s);
4073 if (error != ENETRESET)
4074 break;
4075
4076 error = 0;
4077
4078 if (cmd == SIOCSIFCAP)
4079 error = if_init(ifp);
4080 else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
4081 mutex_enter(sc->sc_core_lock);
4082 if (sc->sc_if_flags & IFF_RUNNING) {
4083 /*
4084 * Multicast list has changed; set the
4085 * hardware filter accordingly.
4086 */
4087 wm_set_filter(sc);
4088 }
4089 mutex_exit(sc->sc_core_lock);
4090 }
4091 break;
4092 }
4093
4094 return error;
4095 }
4096
4097 /* MAC address related */
4098
4099 /*
4100 * Get the offset of the MAC address and return it.
4101 * If an error occurs, offset 0 is used.
4102 */
4103 static uint16_t
4104 wm_check_alt_mac_addr(struct wm_softc *sc)
4105 {
4106 uint16_t myea[ETHER_ADDR_LEN / 2];
4107 uint16_t offset = NVM_OFF_MACADDR;
4108
4109 /* Try to read alternative MAC address pointer */
4110 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
4111 return 0;
4112
4113 /* Check whether the pointer is valid. */
4114 if ((offset == 0x0000) || (offset == 0xffff))
4115 return 0;
4116
4117 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
4118 /*
4119 * Check whether the alternative MAC address is valid. Some cards
4120 * have a non-0xffff pointer but do not actually use an alternative
4121 * MAC address.
4122 *
4123 * Check whether the group (broadcast/multicast) bit is set or not.
4124 */
4125 if (wm_nvm_read(sc, offset, 1, myea) == 0)
4126 if (((myea[0] & 0xff) & 0x01) == 0)
4127 return offset; /* Found */
4128
4129 /* Not found */
4130 return 0;
4131 }
4132
4133 static int
4134 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4135 {
4136 uint16_t myea[ETHER_ADDR_LEN / 2];
4137 uint16_t offset = NVM_OFF_MACADDR;
4138 int do_invert = 0;
4139
4140 switch (sc->sc_type) {
4141 case WM_T_82580:
4142 case WM_T_I350:
4143 case WM_T_I354:
4144 /* EEPROM Top Level Partitioning */
4145 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
4146 break;
4147 case WM_T_82571:
4148 case WM_T_82575:
4149 case WM_T_82576:
4150 case WM_T_80003:
4151 case WM_T_I210:
4152 case WM_T_I211:
4153 offset = wm_check_alt_mac_addr(sc);
4154 if (offset == 0)
4155 if ((sc->sc_funcid & 0x01) == 1)
4156 do_invert = 1;
4157 break;
4158 default:
4159 if ((sc->sc_funcid & 0x01) == 1)
4160 do_invert = 1;
4161 break;
4162 }
4163
4164 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
4165 goto bad;
4166
4167 enaddr[0] = myea[0] & 0xff;
4168 enaddr[1] = myea[0] >> 8;
4169 enaddr[2] = myea[1] & 0xff;
4170 enaddr[3] = myea[1] >> 8;
4171 enaddr[4] = myea[2] & 0xff;
4172 enaddr[5] = myea[2] >> 8;
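/*
 * NVM words are little-endian; e.g. for station address
 * 00:11:22:33:44:55 the NVM holds 0x1100, 0x3322, 0x5544.
 */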
4173
4174 /*
4175 * Toggle the LSB of the MAC address on the second port
4176 * of some dual port cards.
4177 */
4178 if (do_invert != 0)
4179 enaddr[5] ^= 1;
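/* e.g. an NVM address ending in 0x54 becomes 0x55 on the second port */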
4180
4181 return 0;
4182
4183 bad:
4184 return -1;
4185 }
4186
4187 /*
4188 * wm_set_ral:
4189 *
4190 * Set an entry in the receive address list.
4191 */
4192 static void
4193 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4194 {
4195 uint32_t ral_lo, ral_hi, addrl, addrh;
4196 uint32_t wlock_mac;
4197 int rv;
4198
4199 if (enaddr != NULL) {
4200 ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
4201 ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
4202 ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
4203 ral_hi |= RAL_AV;
4204 } else {
4205 ral_lo = 0;
4206 ral_hi = 0;
4207 }
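/*
 * Example (illustrative): for 00:11:22:33:44:55 the packing above
 * yields ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV.
 */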
4208
4209 switch (sc->sc_type) {
4210 case WM_T_82542_2_0:
4211 case WM_T_82542_2_1:
4212 case WM_T_82543:
4213 CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
4214 CSR_WRITE_FLUSH(sc);
4215 CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
4216 CSR_WRITE_FLUSH(sc);
4217 break;
4218 case WM_T_PCH2:
4219 case WM_T_PCH_LPT:
4220 case WM_T_PCH_SPT:
4221 case WM_T_PCH_CNP:
4222 if (idx == 0) {
4223 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4224 CSR_WRITE_FLUSH(sc);
4225 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4226 CSR_WRITE_FLUSH(sc);
4227 return;
4228 }
4229 if (sc->sc_type != WM_T_PCH2) {
4230 wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
4231 FWSM_WLOCK_MAC);
4232 addrl = WMREG_SHRAL(idx - 1);
4233 addrh = WMREG_SHRAH(idx - 1);
4234 } else {
4235 wlock_mac = 0;
4236 addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
4237 addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
4238 }
4239
4240 if ((wlock_mac == 0) || (idx <= wlock_mac)) {
4241 rv = wm_get_swflag_ich8lan(sc);
4242 if (rv != 0)
4243 return;
4244 CSR_WRITE(sc, addrl, ral_lo);
4245 CSR_WRITE_FLUSH(sc);
4246 CSR_WRITE(sc, addrh, ral_hi);
4247 CSR_WRITE_FLUSH(sc);
4248 wm_put_swflag_ich8lan(sc);
4249 }
4250
4251 break;
4252 default:
4253 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4254 CSR_WRITE_FLUSH(sc);
4255 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4256 CSR_WRITE_FLUSH(sc);
4257 break;
4258 }
4259 }
4260
4261 /*
4262 * wm_mchash:
4263 *
4264 * Compute the hash of the multicast address for the 4096-bit
4265 * multicast filter.
4266 */
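/*
 * Worked example (sc_mchash_type 0, non-ICH): for 01:00:5e:00:00:01,
 * hash = ((0x00 >> 4) | (0x01 << 4)) & 0xfff = 0x010, which
 * wm_set_filter() maps to MTA word 0 (hash >> 5), bit 16 (hash & 0x1f).
 */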
4267 static uint32_t
4268 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4269 {
4270 static const int lo_shift[4] = { 4, 3, 2, 0 };
4271 static const int hi_shift[4] = { 4, 5, 6, 8 };
4272 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4273 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4274 uint32_t hash;
4275
4276 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4277 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4278 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4279 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
4280 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4281 (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4282 return (hash & 0x3ff);
4283 }
4284 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4285 (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4286
4287 return (hash & 0xfff);
4288 }
4289
4290 /*
4291 * wm_rar_count:
4292 * Return the number of entries in the receive address list.
4293 */
4294 static int
4295 wm_rar_count(struct wm_softc *sc)
4296 {
4297 int size;
4298
4299 switch (sc->sc_type) {
4300 case WM_T_ICH8:
4301 size = WM_RAL_TABSIZE_ICH8 - 1;
4302 break;
4303 case WM_T_ICH9:
4304 case WM_T_ICH10:
4305 case WM_T_PCH:
4306 size = WM_RAL_TABSIZE_ICH8;
4307 break;
4308 case WM_T_PCH2:
4309 size = WM_RAL_TABSIZE_PCH2;
4310 break;
4311 case WM_T_PCH_LPT:
4312 case WM_T_PCH_SPT:
4313 case WM_T_PCH_CNP:
4314 size = WM_RAL_TABSIZE_PCH_LPT;
4315 break;
4316 case WM_T_82575:
4317 case WM_T_I210:
4318 case WM_T_I211:
4319 size = WM_RAL_TABSIZE_82575;
4320 break;
4321 case WM_T_82576:
4322 case WM_T_82580:
4323 size = WM_RAL_TABSIZE_82576;
4324 break;
4325 case WM_T_I350:
4326 case WM_T_I354:
4327 size = WM_RAL_TABSIZE_I350;
4328 break;
4329 default:
4330 size = WM_RAL_TABSIZE;
4331 }
4332
4333 return size;
4334 }
4335
4336 /*
4337 * wm_set_filter:
4338 *
4339 * Set up the receive filter.
4340 */
4341 static void
4342 wm_set_filter(struct wm_softc *sc)
4343 {
4344 struct ethercom *ec = &sc->sc_ethercom;
4345 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4346 struct ether_multi *enm;
4347 struct ether_multistep step;
4348 bus_addr_t mta_reg;
4349 uint32_t hash, reg, bit;
4350 int i, size, ralmax, rv;
4351
4352 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4353 device_xname(sc->sc_dev), __func__));
4354 KASSERT(mutex_owned(sc->sc_core_lock));
4355
4356 if (sc->sc_type >= WM_T_82544)
4357 mta_reg = WMREG_CORDOVA_MTA;
4358 else
4359 mta_reg = WMREG_MTA;
4360
4361 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4362
4363 if (sc->sc_if_flags & IFF_BROADCAST)
4364 sc->sc_rctl |= RCTL_BAM;
4365 if (sc->sc_if_flags & IFF_PROMISC) {
4366 sc->sc_rctl |= RCTL_UPE;
4367 ETHER_LOCK(ec);
4368 ec->ec_flags |= ETHER_F_ALLMULTI;
4369 ETHER_UNLOCK(ec);
4370 goto allmulti;
4371 }
4372
4373 /*
4374 * Set the station address in the first RAL slot, and
4375 * clear the remaining slots.
4376 */
4377 size = wm_rar_count(sc);
4378 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4379
4380 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
4381 || (sc->sc_type == WM_T_PCH_CNP)) {
4382 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
4383 switch (i) {
4384 case 0:
4385 /* We can use all entries */
4386 ralmax = size;
4387 break;
4388 case 1:
4389 /* Only RAR[0] */
4390 ralmax = 1;
4391 break;
4392 default:
4393 /* Available SHRA + RAR[0] */
4394 ralmax = i + 1;
4395 }
4396 } else
4397 ralmax = size;
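/*
 * Only the slots below ralmax are cleared; higher slots are not
 * available while WLOCK_MAC protection is active (see above).
 */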
4398 for (i = 1; i < size; i++) {
4399 if (i < ralmax)
4400 wm_set_ral(sc, NULL, i);
4401 }
4402
4403 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4404 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4405 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4406 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
4407 size = WM_ICH8_MC_TABSIZE;
4408 else
4409 size = WM_MC_TABSIZE;
4410 /* Clear out the multicast table. */
4411 for (i = 0; i < size; i++) {
4412 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4413 CSR_WRITE_FLUSH(sc);
4414 }
4415
4416 ETHER_LOCK(ec);
4417 ETHER_FIRST_MULTI(step, ec, enm);
4418 while (enm != NULL) {
4419 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4420 ec->ec_flags |= ETHER_F_ALLMULTI;
4421 ETHER_UNLOCK(ec);
4422 /*
4423 * We must listen to a range of multicast addresses.
4424 * For now, just accept all multicasts, rather than
4425 * trying to set only those filter bits needed to match
4426 * the range. (At this time, the only use of address
4427 * ranges is for IP multicast routing, for which the
4428 * range is big enough to require all bits set.)
4429 */
4430 goto allmulti;
4431 }
4432
4433 hash = wm_mchash(sc, enm->enm_addrlo);
4434
4435 reg = (hash >> 5);
4436 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4437 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4438 || (sc->sc_type == WM_T_PCH2)
4439 || (sc->sc_type == WM_T_PCH_LPT)
4440 || (sc->sc_type == WM_T_PCH_SPT)
4441 || (sc->sc_type == WM_T_PCH_CNP))
4442 reg &= 0x1f;
4443 else
4444 reg &= 0x7f;
4445 bit = hash & 0x1f;
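/*
 * hash[4:0] selects the bit within a 32-bit MTA word; the higher
 * bits select the word (5 of them on ICH/PCH, whose table has 32
 * words, and 7 otherwise, for a 128-word table).
 */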
4446
4447 hash = CSR_READ(sc, mta_reg + (reg << 2));
4448 hash |= 1U << bit;
4449
4450 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
4451 /*
4452 * 82544 Errata 9: Certain register cannot be written
4453 * with particular alignments in PCI-X bus operation
4454 * (FCAH, MTA and VFTA).
4455 */
4456 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4457 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4458 CSR_WRITE_FLUSH(sc);
4459 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4460 CSR_WRITE_FLUSH(sc);
4461 } else {
4462 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4463 CSR_WRITE_FLUSH(sc);
4464 }
4465
4466 ETHER_NEXT_MULTI(step, enm);
4467 }
4468 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4469 ETHER_UNLOCK(ec);
4470
4471 goto setit;
4472
4473 allmulti:
4474 sc->sc_rctl |= RCTL_MPE;
4475
4476 setit:
4477 if (sc->sc_type >= WM_T_PCH2) {
4478 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4479 && (ifp->if_mtu > ETHERMTU))
4480 rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
4481 else
4482 rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
4483 if (rv != 0)
4484 device_printf(sc->sc_dev,
4485 "Failed to do workaround for jumbo frame.\n");
4486 }
4487
4488 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4489 }
4490
4491 /* Reset and init related */
4492
4493 static void
4494 wm_set_vlan(struct wm_softc *sc)
4495 {
4496
4497 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4498 device_xname(sc->sc_dev), __func__));
4499
4500 /* Deal with VLAN enables. */
4501 if (VLAN_ATTACHED(&sc->sc_ethercom))
4502 sc->sc_ctrl |= CTRL_VME;
4503 else
4504 sc->sc_ctrl &= ~CTRL_VME;
4505
4506 /* Write the control registers. */
4507 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4508 }
4509
4510 static void
4511 wm_set_pcie_completion_timeout(struct wm_softc *sc)
4512 {
4513 uint32_t gcr;
4514 pcireg_t ctrl2;
4515
4516 gcr = CSR_READ(sc, WMREG_GCR);
4517
4518 /* Only take action if timeout value is defaulted to 0 */
4519 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
4520 goto out;
4521
4522 if ((gcr & GCR_CAP_VER2) == 0) {
4523 gcr |= GCR_CMPL_TMOUT_10MS;
4524 goto out;
4525 }
4526
4527 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4528 sc->sc_pcixe_capoff + PCIE_DCSR2);
4529 ctrl2 |= WM_PCIE_DCSR2_16MS;
4530 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4531 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
4532
4533 out:
4534 /* Disable completion timeout resend */
4535 gcr &= ~GCR_CMPL_TMOUT_RESEND;
4536
4537 CSR_WRITE(sc, WMREG_GCR, gcr);
4538 }
4539
4540 void
4541 wm_get_auto_rd_done(struct wm_softc *sc)
4542 {
4543 int i;
4544
4545 /* Wait for eeprom to reload */
4546 switch (sc->sc_type) {
4547 case WM_T_82571:
4548 case WM_T_82572:
4549 case WM_T_82573:
4550 case WM_T_82574:
4551 case WM_T_82583:
4552 case WM_T_82575:
4553 case WM_T_82576:
4554 case WM_T_82580:
4555 case WM_T_I350:
4556 case WM_T_I354:
4557 case WM_T_I210:
4558 case WM_T_I211:
4559 case WM_T_80003:
4560 case WM_T_ICH8:
4561 case WM_T_ICH9:
4562 for (i = 0; i < 10; i++) {
4563 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4564 break;
4565 delay(1000);
4566 }
4567 if (i == 10) {
4568 log(LOG_ERR, "%s: auto read from eeprom failed to "
4569 "complete\n", device_xname(sc->sc_dev));
4570 }
4571 break;
4572 default:
4573 break;
4574 }
4575 }
4576
4577 void
4578 wm_lan_init_done(struct wm_softc *sc)
4579 {
4580 uint32_t reg = 0;
4581 int i;
4582
4583 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4584 device_xname(sc->sc_dev), __func__));
4585
4586 /* Wait for eeprom to reload */
4587 switch (sc->sc_type) {
4588 case WM_T_ICH10:
4589 case WM_T_PCH:
4590 case WM_T_PCH2:
4591 case WM_T_PCH_LPT:
4592 case WM_T_PCH_SPT:
4593 case WM_T_PCH_CNP:
4594 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4595 reg = CSR_READ(sc, WMREG_STATUS);
4596 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4597 break;
4598 delay(100);
4599 }
4600 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4601 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4602 "complete\n", device_xname(sc->sc_dev), __func__);
4603 }
4604 break;
4605 default:
4606 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4607 __func__);
4608 break;
4609 }
4610
4611 reg &= ~STATUS_LAN_INIT_DONE;
4612 CSR_WRITE(sc, WMREG_STATUS, reg);
4613 }
4614
4615 void
4616 wm_get_cfg_done(struct wm_softc *sc)
4617 {
4618 int mask;
4619 uint32_t reg;
4620 int i;
4621
4622 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4623 device_xname(sc->sc_dev), __func__));
4624
4625 /* Wait for eeprom to reload */
4626 switch (sc->sc_type) {
4627 case WM_T_82542_2_0:
4628 case WM_T_82542_2_1:
4629 /* null */
4630 break;
4631 case WM_T_82543:
4632 case WM_T_82544:
4633 case WM_T_82540:
4634 case WM_T_82545:
4635 case WM_T_82545_3:
4636 case WM_T_82546:
4637 case WM_T_82546_3:
4638 case WM_T_82541:
4639 case WM_T_82541_2:
4640 case WM_T_82547:
4641 case WM_T_82547_2:
4642 case WM_T_82573:
4643 case WM_T_82574:
4644 case WM_T_82583:
4645 /* generic */
4646 delay(10*1000);
4647 break;
4648 case WM_T_80003:
4649 case WM_T_82571:
4650 case WM_T_82572:
4651 case WM_T_82575:
4652 case WM_T_82576:
4653 case WM_T_82580:
4654 case WM_T_I350:
4655 case WM_T_I354:
4656 case WM_T_I210:
4657 case WM_T_I211:
4658 if (sc->sc_type == WM_T_82571) {
4659 /* Only 82571 shares port 0 */
4660 mask = EEMNGCTL_CFGDONE_0;
4661 } else
4662 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4663 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4664 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4665 break;
4666 delay(1000);
4667 }
4668 if (i >= WM_PHY_CFG_TIMEOUT)
4669 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
4670 device_xname(sc->sc_dev), __func__));
4671 break;
4672 case WM_T_ICH8:
4673 case WM_T_ICH9:
4674 case WM_T_ICH10:
4675 case WM_T_PCH:
4676 case WM_T_PCH2:
4677 case WM_T_PCH_LPT:
4678 case WM_T_PCH_SPT:
4679 case WM_T_PCH_CNP:
4680 delay(10*1000);
4681 if (sc->sc_type >= WM_T_ICH10)
4682 wm_lan_init_done(sc);
4683 else
4684 wm_get_auto_rd_done(sc);
4685
4686 /* Clear PHY Reset Asserted bit */
4687 reg = CSR_READ(sc, WMREG_STATUS);
4688 if ((reg & STATUS_PHYRA) != 0)
4689 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4690 break;
4691 default:
4692 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4693 __func__);
4694 break;
4695 }
4696 }
4697
4698 int
4699 wm_phy_post_reset(struct wm_softc *sc)
4700 {
4701 device_t dev = sc->sc_dev;
4702 uint16_t reg;
4703 int rv = 0;
4704
4705 /* This function is only for ICH8 and newer. */
4706 if (sc->sc_type < WM_T_ICH8)
4707 return 0;
4708
4709 if (wm_phy_resetisblocked(sc)) {
4710 /* XXX */
4711 device_printf(dev, "PHY is blocked\n");
4712 return -1;
4713 }
4714
4715 /* Allow time for h/w to get to quiescent state after reset */
4716 delay(10*1000);
4717
4718 /* Perform any necessary post-reset workarounds */
4719 if (sc->sc_type == WM_T_PCH)
4720 rv = wm_hv_phy_workarounds_ich8lan(sc);
4721 else if (sc->sc_type == WM_T_PCH2)
4722 rv = wm_lv_phy_workarounds_ich8lan(sc);
4723 if (rv != 0)
4724 return rv;
4725
4726 /* Clear the host wakeup bit after lcd reset */
4727 if (sc->sc_type >= WM_T_PCH) {
4728 wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4729 reg &= ~BM_WUC_HOST_WU_BIT;
4730 wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4731 }
4732
4733 /* Configure the LCD with the extended configuration region in NVM */
4734 if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4735 return rv;
4736
4737 /* Configure the LCD with the OEM bits in NVM */
4738 rv = wm_oem_bits_config_ich8lan(sc, true);
4739
4740 if (sc->sc_type == WM_T_PCH2) {
4741 /* Ungate automatic PHY configuration on non-managed 82579 */
4742 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4743 delay(10 * 1000);
4744 wm_gate_hw_phy_config_ich8lan(sc, false);
4745 }
4746 /* Set EEE LPI Update Timer to 200usec */
4747 rv = sc->phy.acquire(sc);
4748 if (rv)
4749 return rv;
4750 rv = wm_write_emi_reg_locked(dev,
4751 I82579_LPI_UPDATE_TIMER, 0x1387);
4752 sc->phy.release(sc);
4753 }
4754
4755 return rv;
4756 }
4757
4758 /* Only for PCH and newer */
4759 static int
4760 wm_write_smbus_addr(struct wm_softc *sc)
4761 {
4762 uint32_t strap, freq;
4763 uint16_t phy_data;
4764 int rv;
4765
4766 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4767 device_xname(sc->sc_dev), __func__));
4768 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4769
4770 strap = CSR_READ(sc, WMREG_STRAP);
4771 freq = __SHIFTOUT(strap, STRAP_FREQ);
4772
4773 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4774 if (rv != 0)
4775 return rv;
4776
4777 phy_data &= ~HV_SMB_ADDR_ADDR;
4778 phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4779 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4780
4781 if (sc->sc_phytype == WMPHY_I217) {
4782 /* Restore SMBus frequency */
4783 if (freq--) {
4784 phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4785 | HV_SMB_ADDR_FREQ_HIGH);
4786 phy_data |= __SHIFTIN((freq & 0x01) != 0,
4787 HV_SMB_ADDR_FREQ_LOW);
4788 phy_data |= __SHIFTIN((freq & 0x02) != 0,
4789 HV_SMB_ADDR_FREQ_HIGH);
4790 } else
4791 DPRINTF(sc, WM_DEBUG_INIT,
4792 ("%s: %s Unsupported SMB frequency in PHY\n",
4793 device_xname(sc->sc_dev), __func__));
4794 }
4795
4796 return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4797 phy_data);
4798 }
4799
4800 static int
4801 wm_init_lcd_from_nvm(struct wm_softc *sc)
4802 {
4803 uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4804 uint16_t phy_page = 0;
4805 int rv = 0;
4806
4807 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4808 device_xname(sc->sc_dev), __func__));
4809
4810 switch (sc->sc_type) {
4811 case WM_T_ICH8:
4812 if ((sc->sc_phytype == WMPHY_UNKNOWN)
4813 || (sc->sc_phytype != WMPHY_IGP_3))
4814 return 0;
4815
4816 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4817 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4818 sw_cfg_mask = FEXTNVM_SW_CONFIG;
4819 break;
4820 }
4821 /* FALLTHROUGH */
4822 case WM_T_PCH:
4823 case WM_T_PCH2:
4824 case WM_T_PCH_LPT:
4825 case WM_T_PCH_SPT:
4826 case WM_T_PCH_CNP:
4827 sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4828 break;
4829 default:
4830 return 0;
4831 }
4832
4833 if ((rv = sc->phy.acquire(sc)) != 0)
4834 return rv;
4835
4836 reg = CSR_READ(sc, WMREG_FEXTNVM);
4837 if ((reg & sw_cfg_mask) == 0)
4838 goto release;
4839
4840 /*
4841 * Make sure HW does not configure LCD from PHY extended configuration
4842 * before SW configuration
4843 */
4844 extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4845 if ((sc->sc_type < WM_T_PCH2)
4846 && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4847 goto release;
4848
4849 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4850 device_xname(sc->sc_dev), __func__));
4851 /* The pointer counts DWORDs; convert it to a word address. */
4852 word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4853
4854 reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4855 cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4856 if (cnf_size == 0)
4857 goto release;
4858
4859 if (((sc->sc_type == WM_T_PCH)
4860 && ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4861 || (sc->sc_type > WM_T_PCH)) {
4862 /*
4863 * HW configures the SMBus address and LEDs when the OEM and
4864 * LCD Write Enable bits are set in the NVM. When both NVM bits
4865 * are cleared, SW will configure them instead.
4866 */
4867 DPRINTF(sc, WM_DEBUG_INIT,
4868 ("%s: %s: Configure SMBus and LED\n",
4869 device_xname(sc->sc_dev), __func__));
4870 if ((rv = wm_write_smbus_addr(sc)) != 0)
4871 goto release;
4872
4873 reg = CSR_READ(sc, WMREG_LEDCTL);
4874 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4875 (uint16_t)reg);
4876 if (rv != 0)
4877 goto release;
4878 }
4879
4880 /* Configure LCD from extended configuration region. */
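/*
 * The region is laid out as (data, address) pairs of 16-bit NVM
 * words; IGPHY_PAGE_SELECT entries update phy_page so subsequent
 * register addresses land on the right PHY page.
 */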
4881 for (i = 0; i < cnf_size; i++) {
4882 uint16_t reg_data, reg_addr;
4883
4884 if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
4885 goto release;
4886
4887 if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
4888 goto release;
4889
4890 if (reg_addr == IGPHY_PAGE_SELECT)
4891 phy_page = reg_data;
4892
4893 reg_addr &= IGPHY_MAXREGADDR;
4894 reg_addr |= phy_page;
4895
4896 KASSERT(sc->phy.writereg_locked != NULL);
4897 rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4898 reg_data);
4899 }
4900
4901 release:
4902 sc->phy.release(sc);
4903 return rv;
4904 }
4905
4906 /*
4907 * wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4908 * @sc: pointer to the HW structure
4909 * @d0_state: boolean if entering d0 or d3 device state
4910 *
4911 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
4912 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
4913 * in NVM determines whether HW should configure LPLU and Gbe Disable.
4914 */
4915 int
4916 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
4917 {
4918 uint32_t mac_reg;
4919 uint16_t oem_reg;
4920 int rv;
4921
4922 if (sc->sc_type < WM_T_PCH)
4923 return 0;
4924
4925 rv = sc->phy.acquire(sc);
4926 if (rv != 0)
4927 return rv;
4928
4929 if (sc->sc_type == WM_T_PCH) {
4930 mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4931 if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
4932 goto release;
4933 }
4934
4935 mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
4936 if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
4937 goto release;
4938
4939 mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
4940
4941 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
4942 if (rv != 0)
4943 goto release;
4944 oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
4945
4946 if (d0_state) {
4947 if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
4948 oem_reg |= HV_OEM_BITS_A1KDIS;
4949 if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
4950 oem_reg |= HV_OEM_BITS_LPLU;
4951 } else {
4952 if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
4953 != 0)
4954 oem_reg |= HV_OEM_BITS_A1KDIS;
4955 if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
4956 != 0)
4957 oem_reg |= HV_OEM_BITS_LPLU;
4958 }
4959
4960 /* Set Restart auto-neg to activate the bits */
4961 if ((d0_state || (sc->sc_type != WM_T_PCH))
4962 && (wm_phy_resetisblocked(sc) == false))
4963 oem_reg |= HV_OEM_BITS_ANEGNOW;
4964
4965 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
4966
4967 release:
4968 sc->phy.release(sc);
4969
4970 return rv;
4971 }
4972
4973 /* Init hardware bits */
4974 void
4975 wm_initialize_hardware_bits(struct wm_softc *sc)
4976 {
4977 uint32_t tarc0, tarc1, reg;
4978
4979 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4980 device_xname(sc->sc_dev), __func__));
4981
4982 /* For 82571 variant, 80003 and ICHs */
4983 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4984 || (sc->sc_type >= WM_T_80003)) {
4985
4986 /* Transmit Descriptor Control 0 */
4987 reg = CSR_READ(sc, WMREG_TXDCTL(0));
4988 reg |= TXDCTL_COUNT_DESC;
4989 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
4990
4991 /* Transmit Descriptor Control 1 */
4992 reg = CSR_READ(sc, WMREG_TXDCTL(1));
4993 reg |= TXDCTL_COUNT_DESC;
4994 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
4995
4996 /* TARC0 */
4997 tarc0 = CSR_READ(sc, WMREG_TARC0);
4998 switch (sc->sc_type) {
4999 case WM_T_82571:
5000 case WM_T_82572:
5001 case WM_T_82573:
5002 case WM_T_82574:
5003 case WM_T_82583:
5004 case WM_T_80003:
5005 /* Clear bits 30..27 */
5006 tarc0 &= ~__BITS(30, 27);
5007 break;
5008 default:
5009 break;
5010 }
5011
5012 switch (sc->sc_type) {
5013 case WM_T_82571:
5014 case WM_T_82572:
5015 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
5016
5017 tarc1 = CSR_READ(sc, WMREG_TARC1);
5018 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
5019 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
5020 /* 8257[12] Errata No.7 */
5021 			tarc1 |= __BIT(22);	  /* TARC1 bit 22 */
5022
5023 /* TARC1 bit 28 */
5024 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5025 tarc1 &= ~__BIT(28);
5026 else
5027 tarc1 |= __BIT(28);
5028 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5029
5030 /*
5031 * 8257[12] Errata No.13
5032 			 * Disable Dynamic Clock Gating.
5033 */
5034 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5035 reg &= ~CTRL_EXT_DMA_DYN_CLK;
5036 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5037 break;
5038 case WM_T_82573:
5039 case WM_T_82574:
5040 case WM_T_82583:
5041 if ((sc->sc_type == WM_T_82574)
5042 || (sc->sc_type == WM_T_82583))
5043 tarc0 |= __BIT(26); /* TARC0 bit 26 */
5044
5045 /* Extended Device Control */
5046 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5047 reg &= ~__BIT(23); /* Clear bit 23 */
5048 reg |= __BIT(22); /* Set bit 22 */
5049 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5050
5051 /* Device Control */
5052 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
5053 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5054
5055 /* PCIe Control Register */
5056 /*
5057 * 82573 Errata (unknown).
5058 *
5059 * 82574 Errata 25 and 82583 Errata 12
5060 * "Dropped Rx Packets":
5061 			 * NVM Image Version 2.1.4 and newer do not have this bug.
5062 */
5063 reg = CSR_READ(sc, WMREG_GCR);
5064 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
5065 CSR_WRITE(sc, WMREG_GCR, reg);
5066
5067 if ((sc->sc_type == WM_T_82574)
5068 || (sc->sc_type == WM_T_82583)) {
5069 /*
5070 * Document says this bit must be set for
5071 * proper operation.
5072 */
5073 reg = CSR_READ(sc, WMREG_GCR);
5074 reg |= __BIT(22);
5075 CSR_WRITE(sc, WMREG_GCR, reg);
5076
5077 				/*
5078 				 * Apply a workaround for a hardware erratum
5079 				 * documented in the errata docs. It fixes an
5080 				 * issue where some error-prone or unreliable
5081 				 * PCIe completions occur, particularly with
5082 				 * ASPM enabled. Without the fix, the issue
5083 				 * can cause Tx timeouts.
5084 				 */
5085 reg = CSR_READ(sc, WMREG_GCR2);
5086 reg |= __BIT(0);
5087 CSR_WRITE(sc, WMREG_GCR2, reg);
5088 }
5089 break;
5090 case WM_T_80003:
5091 /* TARC0 */
5092 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
5093 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
5094 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
5095
5096 /* TARC1 bit 28 */
5097 tarc1 = CSR_READ(sc, WMREG_TARC1);
5098 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5099 tarc1 &= ~__BIT(28);
5100 else
5101 tarc1 |= __BIT(28);
5102 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5103 break;
5104 case WM_T_ICH8:
5105 case WM_T_ICH9:
5106 case WM_T_ICH10:
5107 case WM_T_PCH:
5108 case WM_T_PCH2:
5109 case WM_T_PCH_LPT:
5110 case WM_T_PCH_SPT:
5111 case WM_T_PCH_CNP:
5112 /* TARC0 */
5113 if (sc->sc_type == WM_T_ICH8) {
5114 /* Set TARC0 bits 29 and 28 */
5115 tarc0 |= __BITS(29, 28);
5116 } else if (sc->sc_type == WM_T_PCH_SPT) {
5117 tarc0 |= __BIT(29);
5118 /*
5119 * Drop bit 28. From Linux.
5120 * See I218/I219 spec update
5121 * "5. Buffer Overrun While the I219 is
5122 * Processing DMA Transactions"
5123 */
5124 tarc0 &= ~__BIT(28);
5125 }
5126 /* Set TARC0 bits 23,24,26,27 */
5127 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
5128
5129 /* CTRL_EXT */
5130 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5131 reg |= __BIT(22); /* Set bit 22 */
5132 /*
5133 * Enable PHY low-power state when MAC is at D3
5134 * w/o WoL
5135 */
5136 if (sc->sc_type >= WM_T_PCH)
5137 reg |= CTRL_EXT_PHYPDEN;
5138 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5139
5140 /* TARC1 */
5141 tarc1 = CSR_READ(sc, WMREG_TARC1);
5142 /* bit 28 */
5143 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5144 tarc1 &= ~__BIT(28);
5145 else
5146 tarc1 |= __BIT(28);
5147 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
5148 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5149
5150 /* Device Status */
5151 if (sc->sc_type == WM_T_ICH8) {
5152 reg = CSR_READ(sc, WMREG_STATUS);
5153 reg &= ~__BIT(31);
5154 CSR_WRITE(sc, WMREG_STATUS, reg);
5155
5156 }
5157
5158 /* IOSFPC */
5159 if (sc->sc_type == WM_T_PCH_SPT) {
5160 reg = CSR_READ(sc, WMREG_IOSFPC);
5161 reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
5162 CSR_WRITE(sc, WMREG_IOSFPC, reg);
5163 }
5164 /*
5165 			 * To work around a descriptor data corruption issue
5166 			 * during NFS v2 UDP traffic, just disable the NFS
5167 			 * filtering capability.
5168 */
5169 reg = CSR_READ(sc, WMREG_RFCTL);
5170 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
5171 CSR_WRITE(sc, WMREG_RFCTL, reg);
5172 break;
5173 default:
5174 break;
5175 }
5176 CSR_WRITE(sc, WMREG_TARC0, tarc0);
5177
5178 switch (sc->sc_type) {
5179 case WM_T_82571:
5180 case WM_T_82572:
5181 case WM_T_82573:
5182 case WM_T_80003:
5183 case WM_T_ICH8:
5184 /*
5185 * 8257[12] Errata No.52, 82573 Errata No.43 and some
5186 			 * others: set these bits to avoid the RSS Hash Value bug.
5187 */
5188 reg = CSR_READ(sc, WMREG_RFCTL);
5189 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
5190 CSR_WRITE(sc, WMREG_RFCTL, reg);
5191 break;
5192 case WM_T_82574:
5193 			/* Use extended Rx descriptors. */
5194 reg = CSR_READ(sc, WMREG_RFCTL);
5195 reg |= WMREG_RFCTL_EXSTEN;
5196 CSR_WRITE(sc, WMREG_RFCTL, reg);
5197 break;
5198 default:
5199 break;
5200 }
5201 } else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
5202 /*
5203 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
5204 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
5205 * "Certain Malformed IPv6 Extension Headers are Not Processed
5206 * Correctly by the Device"
5207 *
5208 * I354(C2000) Errata AVR53:
5209 * "Malformed IPv6 Extension Headers May Result in LAN Device
5210 * Hang"
5211 */
5212 reg = CSR_READ(sc, WMREG_RFCTL);
5213 reg |= WMREG_RFCTL_IPV6EXDIS;
5214 CSR_WRITE(sc, WMREG_RFCTL, reg);
5215 }
5216 }
5217
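/*
 * wm_rxpbs_adjust_82580:
 *
 * Translate an 82580 RXPBS register value through wm_82580_rxpbs_table[].
 * Values beyond the end of the table map to 0.
 */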
5218 static uint32_t
5219 wm_rxpbs_adjust_82580(uint32_t val)
5220 {
5221 uint32_t rv = 0;
5222
5223 if (val < __arraycount(wm_82580_rxpbs_table))
5224 rv = wm_82580_rxpbs_table[val];
5225
5226 return rv;
5227 }
5228
5229 /*
5230 * wm_reset_phy:
5231 *
5232 * generic PHY reset function.
5233 * Same as e1000_phy_hw_reset_generic()
5234 */
5235 static int
5236 wm_reset_phy(struct wm_softc *sc)
5237 {
5238 uint32_t reg;
5239 int rv;
5240
5241 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5242 device_xname(sc->sc_dev), __func__));
5243 if (wm_phy_resetisblocked(sc))
5244 return -1;
5245
5246 rv = sc->phy.acquire(sc);
5247 if (rv) {
5248 device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
5249 __func__, rv);
5250 return rv;
5251 }
5252
5253 reg = CSR_READ(sc, WMREG_CTRL);
5254 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
5255 CSR_WRITE_FLUSH(sc);
5256
5257 delay(sc->phy.reset_delay_us);
5258
5259 CSR_WRITE(sc, WMREG_CTRL, reg);
5260 CSR_WRITE_FLUSH(sc);
5261
5262 delay(150);
5263
5264 sc->phy.release(sc);
5265
5266 wm_get_cfg_done(sc);
5267 wm_phy_post_reset(sc);
5268
5269 return 0;
5270 }
5271
5272 /*
5273 * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
5274 *
5275 * In i219, the descriptor rings must be emptied before resetting the HW
5276 * or before changing the device state to D3 during runtime (runtime PM).
5277 *
5278 * Failure to do this will cause the HW to enter a unit hang state which can
5279 * only be released by PCI reset on the device.
5280 *
5281 * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
5282 */
5283 static void
5284 wm_flush_desc_rings(struct wm_softc *sc)
5285 {
5286 pcireg_t preg;
5287 uint32_t reg;
5288 struct wm_txqueue *txq;
5289 wiseman_txdesc_t *txd;
5290 int nexttx;
5291 uint32_t rctl;
5292
5293 KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
5294
5295 /* First, disable MULR fix in FEXTNVM11 */
5296 reg = CSR_READ(sc, WMREG_FEXTNVM11);
5297 reg |= FEXTNVM11_DIS_MULRFIX;
5298 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5299
5300 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5301 reg = CSR_READ(sc, WMREG_TDLEN(0));
5302 if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
5303 return;
5304
5305 /*
5306 * Remove all descriptors from the tx_ring.
5307 *
5308 * We want to clear all pending descriptors from the TX ring. Zeroing
5309 * happens when the HW reads the regs. We assign the ring itself as
5310  * the data of the next descriptor. We don't care about the data, as
5311  * we are about to reset the HW anyway.
5312 */
5313 #ifdef WM_DEBUG
5314 device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
5315 #endif
5316 reg = CSR_READ(sc, WMREG_TCTL);
5317 CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
5318
5319 txq = &sc->sc_queue[0].wmq_txq;
5320 nexttx = txq->txq_next;
5321 txd = &txq->txq_descs[nexttx];
5322 wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
5323 txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
5324 txd->wtx_fields.wtxu_status = 0;
5325 txd->wtx_fields.wtxu_options = 0;
5326 txd->wtx_fields.wtxu_vlan = 0;
5327
5328 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5329 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5330
5331 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
5332 CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
5333 CSR_WRITE_FLUSH(sc);
5334 delay(250);
5335
5336 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5337 if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
5338 return;
5339
5340 /*
5341 * Mark all descriptors in the RX ring as consumed and disable the
5342 * rx ring.
5343 */
5344 #ifdef WM_DEBUG
5345 device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
5346 #endif
5347 rctl = CSR_READ(sc, WMREG_RCTL);
5348 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5349 CSR_WRITE_FLUSH(sc);
5350 delay(150);
5351
5352 reg = CSR_READ(sc, WMREG_RXDCTL(0));
5353 /* Zero the lower 14 bits (prefetch and host thresholds) */
5354 reg &= 0xffffc000;
5355 /*
5356 * Update thresholds: prefetch threshold to 31, host threshold
5357 * to 1 and make sure the granularity is "descriptors" and not
5358 * "cache lines"
5359 */
5360 reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
5361 CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
5362
5363 /* Momentarily enable the RX ring for the changes to take effect */
5364 CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
5365 CSR_WRITE_FLUSH(sc);
5366 delay(150);
5367 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5368 }
5369
5370 /*
5371 * wm_reset:
5372 *
5373 * Reset the i82542 chip.
5374 */
5375 static void
5376 wm_reset(struct wm_softc *sc)
5377 {
5378 int phy_reset = 0;
5379 int i, error = 0;
5380 uint32_t reg;
5381 uint16_t kmreg;
5382 int rv;
5383
5384 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5385 device_xname(sc->sc_dev), __func__));
5386 KASSERT(sc->sc_type != 0);
5387
5388 /*
5389 * Allocate on-chip memory according to the MTU size.
5390 * The Packet Buffer Allocation register must be written
5391 * before the chip is reset.
5392 */
5393 switch (sc->sc_type) {
5394 case WM_T_82547:
5395 case WM_T_82547_2:
5396 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5397 PBA_22K : PBA_30K;
5398 for (i = 0; i < sc->sc_nqueues; i++) {
5399 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5400 txq->txq_fifo_head = 0;
5401 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
5402 txq->txq_fifo_size =
5403 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
5404 txq->txq_fifo_stall = 0;
5405 }
5406 break;
5407 case WM_T_82571:
5408 case WM_T_82572:
5409 case WM_T_82575: /* XXX need special handing for jumbo frames */
5410 case WM_T_80003:
5411 sc->sc_pba = PBA_32K;
5412 break;
5413 case WM_T_82573:
5414 sc->sc_pba = PBA_12K;
5415 break;
5416 case WM_T_82574:
5417 case WM_T_82583:
5418 sc->sc_pba = PBA_20K;
5419 break;
5420 case WM_T_82576:
5421 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
5422 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
5423 break;
5424 case WM_T_82580:
5425 case WM_T_I350:
5426 case WM_T_I354:
5427 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
5428 break;
5429 case WM_T_I210:
5430 case WM_T_I211:
5431 sc->sc_pba = PBA_34K;
5432 break;
5433 case WM_T_ICH8:
5434 /* Workaround for a bit corruption issue in FIFO memory */
5435 sc->sc_pba = PBA_8K;
5436 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
5437 break;
5438 case WM_T_ICH9:
5439 case WM_T_ICH10:
5440 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
5441 PBA_14K : PBA_10K;
5442 break;
5443 case WM_T_PCH:
5444 case WM_T_PCH2: /* XXX 14K? */
5445 case WM_T_PCH_LPT:
5446 case WM_T_PCH_SPT:
5447 case WM_T_PCH_CNP:
5448 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
5449 PBA_12K : PBA_26K;
5450 break;
5451 default:
5452 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5453 PBA_40K : PBA_48K;
5454 break;
5455 }
5456 /*
5457 * Only old or non-multiqueue devices have the PBA register
5458 * XXX Need special handling for 82575.
5459 */
5460 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5461 || (sc->sc_type == WM_T_82575))
5462 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
5463
5464 /* Prevent the PCI-E bus from sticking */
5465 if (sc->sc_flags & WM_F_PCIE) {
5466 int timeout = 800;
5467
5468 sc->sc_ctrl |= CTRL_GIO_M_DIS;
5469 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5470
5471 while (timeout--) {
5472 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
5473 == 0)
5474 break;
5475 delay(100);
5476 }
5477 if (timeout == 0)
5478 device_printf(sc->sc_dev,
5479 "failed to disable bus mastering\n");
5480 }
5481
5482 /* Set the completion timeout for interface */
5483 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5484 || (sc->sc_type == WM_T_82580)
5485 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5486 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
5487 wm_set_pcie_completion_timeout(sc);
5488
5489 /* Clear interrupt */
5490 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5491 if (wm_is_using_msix(sc)) {
5492 if (sc->sc_type != WM_T_82574) {
5493 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5494 CSR_WRITE(sc, WMREG_EIAC, 0);
5495 } else
5496 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5497 }
5498
5499 /* Stop the transmit and receive processes. */
5500 CSR_WRITE(sc, WMREG_RCTL, 0);
5501 sc->sc_rctl &= ~RCTL_EN;
5502 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
5503 CSR_WRITE_FLUSH(sc);
5504
5505 /* XXX set_tbi_sbp_82543() */
5506
5507 delay(10*1000);
5508
5509 /* Must acquire the MDIO ownership before MAC reset */
5510 switch (sc->sc_type) {
5511 case WM_T_82573:
5512 case WM_T_82574:
5513 case WM_T_82583:
5514 error = wm_get_hw_semaphore_82573(sc);
5515 break;
5516 default:
5517 break;
5518 }
5519
5520 /*
5521 * 82541 Errata 29? & 82547 Errata 28?
5522 * See also the description about PHY_RST bit in CTRL register
5523 * in 8254x_GBe_SDM.pdf.
5524 */
5525 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
5526 CSR_WRITE(sc, WMREG_CTRL,
5527 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
5528 CSR_WRITE_FLUSH(sc);
5529 delay(5000);
5530 }
5531
5532 switch (sc->sc_type) {
5533 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
5534 case WM_T_82541:
5535 case WM_T_82541_2:
5536 case WM_T_82547:
5537 case WM_T_82547_2:
5538 /*
5539 * On some chipsets, a reset through a memory-mapped write
5540 * cycle can cause the chip to reset before completing the
5541 		 * write cycle. This causes major headaches that can be avoided
5542 * by issuing the reset via indirect register writes through
5543 * I/O space.
5544 *
5545 * So, if we successfully mapped the I/O BAR at attach time,
5546 * use that. Otherwise, try our luck with a memory-mapped
5547 * reset.
5548 */
5549 if (sc->sc_flags & WM_F_IOH_VALID)
5550 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
5551 else
5552 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
5553 break;
5554 case WM_T_82545_3:
5555 case WM_T_82546_3:
5556 /* Use the shadow control register on these chips. */
5557 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
5558 break;
5559 case WM_T_80003:
5560 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5561 if (sc->phy.acquire(sc) != 0)
5562 break;
5563 CSR_WRITE(sc, WMREG_CTRL, reg);
5564 sc->phy.release(sc);
5565 break;
5566 case WM_T_ICH8:
5567 case WM_T_ICH9:
5568 case WM_T_ICH10:
5569 case WM_T_PCH:
5570 case WM_T_PCH2:
5571 case WM_T_PCH_LPT:
5572 case WM_T_PCH_SPT:
5573 case WM_T_PCH_CNP:
5574 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5575 if (wm_phy_resetisblocked(sc) == false) {
5576 /*
5577 * Gate automatic PHY configuration by hardware on
5578 * non-managed 82579
5579 */
5580 if ((sc->sc_type == WM_T_PCH2)
5581 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5582 == 0))
5583 wm_gate_hw_phy_config_ich8lan(sc, true);
5584
5585 reg |= CTRL_PHY_RESET;
5586 phy_reset = 1;
5587 } else
5588 device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5589 if (sc->phy.acquire(sc) != 0)
5590 break;
5591 CSR_WRITE(sc, WMREG_CTRL, reg);
5592 /* Don't insert a completion barrier when reset */
5593 delay(20*1000);
5594 /*
5595 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
5596 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
5597 * only. See also wm_get_swflag_ich8lan().
5598 */
5599 mutex_exit(sc->sc_ich_phymtx);
5600 break;
5601 case WM_T_82580:
5602 case WM_T_I350:
5603 case WM_T_I354:
5604 case WM_T_I210:
5605 case WM_T_I211:
5606 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5607 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5608 CSR_WRITE_FLUSH(sc);
5609 delay(5000);
5610 break;
5611 case WM_T_82542_2_0:
5612 case WM_T_82542_2_1:
5613 case WM_T_82543:
5614 case WM_T_82540:
5615 case WM_T_82545:
5616 case WM_T_82546:
5617 case WM_T_82571:
5618 case WM_T_82572:
5619 case WM_T_82573:
5620 case WM_T_82574:
5621 case WM_T_82575:
5622 case WM_T_82576:
5623 case WM_T_82583:
5624 default:
5625 /* Everything else can safely use the documented method. */
5626 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5627 break;
5628 }
5629
5630 /* Must release the MDIO ownership after MAC reset */
5631 switch (sc->sc_type) {
5632 case WM_T_82573:
5633 case WM_T_82574:
5634 case WM_T_82583:
5635 if (error == 0)
5636 wm_put_hw_semaphore_82573(sc);
5637 break;
5638 default:
5639 break;
5640 }
5641
5642 /* Set Phy Config Counter to 50msec */
5643 if (sc->sc_type == WM_T_PCH2) {
5644 reg = CSR_READ(sc, WMREG_FEXTNVM3);
5645 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5646 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5647 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5648 }
5649
5650 if (phy_reset != 0)
5651 wm_get_cfg_done(sc);
5652
5653 /* Reload EEPROM */
5654 switch (sc->sc_type) {
5655 case WM_T_82542_2_0:
5656 case WM_T_82542_2_1:
5657 case WM_T_82543:
5658 case WM_T_82544:
5659 delay(10);
5660 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5661 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5662 CSR_WRITE_FLUSH(sc);
5663 delay(2000);
5664 break;
5665 case WM_T_82540:
5666 case WM_T_82545:
5667 case WM_T_82545_3:
5668 case WM_T_82546:
5669 case WM_T_82546_3:
5670 delay(5*1000);
5671 /* XXX Disable HW ARPs on ASF enabled adapters */
5672 break;
5673 case WM_T_82541:
5674 case WM_T_82541_2:
5675 case WM_T_82547:
5676 case WM_T_82547_2:
5677 delay(20000);
5678 /* XXX Disable HW ARPs on ASF enabled adapters */
5679 break;
5680 case WM_T_82571:
5681 case WM_T_82572:
5682 case WM_T_82573:
5683 case WM_T_82574:
5684 case WM_T_82583:
5685 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5686 delay(10);
5687 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5688 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5689 CSR_WRITE_FLUSH(sc);
5690 }
5691 /* check EECD_EE_AUTORD */
5692 wm_get_auto_rd_done(sc);
5693 /*
5694 * Phy configuration from NVM just starts after EECD_AUTO_RD
5695 * is set.
5696 */
5697 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5698 || (sc->sc_type == WM_T_82583))
5699 delay(25*1000);
5700 break;
5701 case WM_T_82575:
5702 case WM_T_82576:
5703 case WM_T_82580:
5704 case WM_T_I350:
5705 case WM_T_I354:
5706 case WM_T_I210:
5707 case WM_T_I211:
5708 case WM_T_80003:
5709 /* check EECD_EE_AUTORD */
5710 wm_get_auto_rd_done(sc);
5711 break;
5712 case WM_T_ICH8:
5713 case WM_T_ICH9:
5714 case WM_T_ICH10:
5715 case WM_T_PCH:
5716 case WM_T_PCH2:
5717 case WM_T_PCH_LPT:
5718 case WM_T_PCH_SPT:
5719 case WM_T_PCH_CNP:
5720 break;
5721 default:
5722 panic("%s: unknown type\n", __func__);
5723 }
5724
5725 /* Check whether EEPROM is present or not */
5726 switch (sc->sc_type) {
5727 case WM_T_82575:
5728 case WM_T_82576:
5729 case WM_T_82580:
5730 case WM_T_I350:
5731 case WM_T_I354:
5732 case WM_T_ICH8:
5733 case WM_T_ICH9:
5734 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5735 /* Not found */
5736 sc->sc_flags |= WM_F_EEPROM_INVALID;
5737 if (sc->sc_type == WM_T_82575)
5738 wm_reset_init_script_82575(sc);
5739 }
5740 break;
5741 default:
5742 break;
5743 }
5744
5745 if (phy_reset != 0)
5746 wm_phy_post_reset(sc);
5747
5748 if ((sc->sc_type == WM_T_82580)
5749 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5750 /* Clear global device reset status bit */
5751 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5752 }
5753
5754 /* Clear any pending interrupt events. */
5755 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5756 reg = CSR_READ(sc, WMREG_ICR);
5757 if (wm_is_using_msix(sc)) {
5758 if (sc->sc_type != WM_T_82574) {
5759 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5760 CSR_WRITE(sc, WMREG_EIAC, 0);
5761 } else
5762 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5763 }
5764
5765 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5766 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5767 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5768 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
5769 reg = CSR_READ(sc, WMREG_KABGTXD);
5770 reg |= KABGTXD_BGSQLBIAS;
5771 CSR_WRITE(sc, WMREG_KABGTXD, reg);
5772 }
5773
5774 /* Reload sc_ctrl */
5775 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5776
5777 wm_set_eee(sc);
5778
5779 /*
5780 * For PCH, this write will make sure that any noise will be detected
5781 * as a CRC error and be dropped rather than show up as a bad packet
5782 * to the DMA engine
5783 */
5784 if (sc->sc_type == WM_T_PCH)
5785 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5786
5787 if (sc->sc_type >= WM_T_82544)
5788 CSR_WRITE(sc, WMREG_WUC, 0);
5789
5790 if (sc->sc_type < WM_T_82575)
5791 wm_disable_aspm(sc); /* Workaround for some chips */
5792
5793 wm_reset_mdicnfg_82580(sc);
5794
5795 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5796 wm_pll_workaround_i210(sc);
5797
5798 if (sc->sc_type == WM_T_80003) {
5799 /* Default to TRUE to enable the MDIC W/A */
5800 sc->sc_flags |= WM_F_80003_MDIC_WA;
5801
5802 rv = wm_kmrn_readreg(sc,
5803 KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5804 if (rv == 0) {
5805 if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5806 == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5807 sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5808 else
5809 sc->sc_flags |= WM_F_80003_MDIC_WA;
5810 }
5811 }
5812 }
5813
5814 /*
5815 * wm_add_rxbuf:
5816 *
5817  *	Add a receive buffer to the indicated descriptor.
5818 */
5819 static int
5820 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5821 {
5822 struct wm_softc *sc = rxq->rxq_sc;
5823 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5824 struct mbuf *m;
5825 int error;
5826
5827 KASSERT(mutex_owned(rxq->rxq_lock));
5828
5829 MGETHDR(m, M_DONTWAIT, MT_DATA);
5830 if (m == NULL)
5831 return ENOBUFS;
5832
5833 MCLGET(m, M_DONTWAIT);
5834 if ((m->m_flags & M_EXT) == 0) {
5835 m_freem(m);
5836 return ENOBUFS;
5837 }
5838
5839 if (rxs->rxs_mbuf != NULL)
5840 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5841
5842 rxs->rxs_mbuf = m;
5843
5844 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5845 /*
5846 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5847 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5848 */
5849 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5850 m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5851 if (error) {
5852 /* XXX XXX XXX */
5853 aprint_error_dev(sc->sc_dev,
5854 "unable to load rx DMA map %d, error = %d\n", idx, error);
5855 panic("wm_add_rxbuf");
5856 }
5857
5858 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5859 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5860
5861 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5862 if ((sc->sc_rctl & RCTL_EN) != 0)
5863 wm_init_rxdesc(rxq, idx);
5864 } else
5865 wm_init_rxdesc(rxq, idx);
5866
5867 return 0;
5868 }
5869
5870 /*
5871 * wm_rxdrain:
5872 *
5873 * Drain the receive queue.
5874 */
5875 static void
5876 wm_rxdrain(struct wm_rxqueue *rxq)
5877 {
5878 struct wm_softc *sc = rxq->rxq_sc;
5879 struct wm_rxsoft *rxs;
5880 int i;
5881
5882 KASSERT(mutex_owned(rxq->rxq_lock));
5883
5884 for (i = 0; i < WM_NRXDESC; i++) {
5885 rxs = &rxq->rxq_soft[i];
5886 if (rxs->rxs_mbuf != NULL) {
5887 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5888 m_freem(rxs->rxs_mbuf);
5889 rxs->rxs_mbuf = NULL;
5890 }
5891 }
5892 }
5893
5894 /*
5895  * Set up registers for RSS.
5896 *
5897  * XXX no VMDq support yet
5898 */
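/*
 * The redirection table is filled round-robin (qid = i % sc_nqueues),
 * so RSS hash buckets are spread evenly across the enabled queues.
 */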
5899 static void
5900 wm_init_rss(struct wm_softc *sc)
5901 {
5902 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5903 int i;
5904
5905 CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5906
5907 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5908 unsigned int qid, reta_ent;
5909
5910 qid = i % sc->sc_nqueues;
5911 switch (sc->sc_type) {
5912 case WM_T_82574:
5913 reta_ent = __SHIFTIN(qid,
5914 RETA_ENT_QINDEX_MASK_82574);
5915 break;
5916 case WM_T_82575:
5917 reta_ent = __SHIFTIN(qid,
5918 RETA_ENT_QINDEX1_MASK_82575);
5919 break;
5920 default:
5921 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5922 break;
5923 }
5924
5925 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5926 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5927 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5928 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5929 }
5930
5931 rss_getkey((uint8_t *)rss_key);
5932 for (i = 0; i < RSSRK_NUM_REGS; i++)
5933 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5934
5935 if (sc->sc_type == WM_T_82574)
5936 mrqc = MRQC_ENABLE_RSS_MQ_82574;
5937 else
5938 mrqc = MRQC_ENABLE_RSS_MQ;
5939
5940 /*
5941 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
5942 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5943 */
5944 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5945 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5946 #if 0
5947 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5948 mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5949 #endif
5950 mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5951
5952 CSR_WRITE(sc, WMREG_MRQC, mrqc);
5953 }
5954
5955 /*
5956  * Adjust the numbers of TX and RX queues the system actually uses.
5957  *
5958  * The numbers are affected by the parameters below.
5959  * - The number of hardware queues
5960 * - The number of MSI-X vectors (= "nvectors" argument)
5961 * - ncpu
5962 */
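/*
 * Worked example (illustration only): an 82576 reports 16 hardware
 * queue pairs, so with nvectors = 5 and ncpu = 8 the code below yields
 * sc_nqueues = nvectors - 1 = 4.
 */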
5963 static void
5964 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5965 {
5966 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5967
5968 if (nvectors < 2) {
5969 sc->sc_nqueues = 1;
5970 return;
5971 }
5972
5973 switch (sc->sc_type) {
5974 case WM_T_82572:
5975 hw_ntxqueues = 2;
5976 hw_nrxqueues = 2;
5977 break;
5978 case WM_T_82574:
5979 hw_ntxqueues = 2;
5980 hw_nrxqueues = 2;
5981 break;
5982 case WM_T_82575:
5983 hw_ntxqueues = 4;
5984 hw_nrxqueues = 4;
5985 break;
5986 case WM_T_82576:
5987 hw_ntxqueues = 16;
5988 hw_nrxqueues = 16;
5989 break;
5990 case WM_T_82580:
5991 case WM_T_I350:
5992 case WM_T_I354:
5993 hw_ntxqueues = 8;
5994 hw_nrxqueues = 8;
5995 break;
5996 case WM_T_I210:
5997 hw_ntxqueues = 4;
5998 hw_nrxqueues = 4;
5999 break;
6000 case WM_T_I211:
6001 hw_ntxqueues = 2;
6002 hw_nrxqueues = 2;
6003 break;
6004 /*
6005 * The below Ethernet controllers do not support MSI-X;
6006 * this driver doesn't let them use multiqueue.
6007 * - WM_T_80003
6008 * - WM_T_ICH8
6009 * - WM_T_ICH9
6010 * - WM_T_ICH10
6011 * - WM_T_PCH
6012 * - WM_T_PCH2
6013 * - WM_T_PCH_LPT
6014 */
6015 default:
6016 hw_ntxqueues = 1;
6017 hw_nrxqueues = 1;
6018 break;
6019 }
6020
6021 hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
6022
6023 /*
6024 	 * Since more queues than MSI-X vectors cannot improve scaling, we
6025 	 * limit the number of queues actually used.
6026 */
6027 if (nvectors < hw_nqueues + 1)
6028 sc->sc_nqueues = nvectors - 1;
6029 else
6030 sc->sc_nqueues = hw_nqueues;
6031
6032 /*
6033 	 * Since more queues than CPUs cannot improve scaling, we limit
6034 	 * the number of queues actually used.
6035 */
6036 if (ncpu < sc->sc_nqueues)
6037 sc->sc_nqueues = ncpu;
6038 }
6039
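/*
 * wm_is_using_msix:
 *
 * True when more than one interrupt vector is in use. wm_setup_msix()
 * sets sc_nintrs to sc_nqueues + 1, while the legacy INTx/MSI path
 * uses exactly one vector.
 */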
6040 static inline bool
6041 wm_is_using_msix(struct wm_softc *sc)
6042 {
6043
6044 return (sc->sc_nintrs > 1);
6045 }
6046
6047 static inline bool
6048 wm_is_using_multiqueue(struct wm_softc *sc)
6049 {
6050
6051 return (sc->sc_nqueues > 1);
6052 }
6053
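/*
 * wm_softint_establish_queue:
 *
 * Establish the wm_handle_queue() softint for the given queue and bind
 * it to interrupt vector intr_idx. On failure, the hardware interrupt
 * already established for that vector is torn down.
 */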
6054 static int
6055 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
6056 {
6057 struct wm_queue *wmq = &sc->sc_queue[qidx];
6058
6059 wmq->wmq_id = qidx;
6060 wmq->wmq_intr_idx = intr_idx;
6061 wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
6062 wm_handle_queue, wmq);
6063 if (wmq->wmq_si != NULL)
6064 return 0;
6065
6066 aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
6067 wmq->wmq_id);
6068 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6069 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6070 return ENOMEM;
6071 }
6072
6073 /*
6074 * Both single interrupt MSI and INTx can use this function.
6075 */
6076 static int
6077 wm_setup_legacy(struct wm_softc *sc)
6078 {
6079 pci_chipset_tag_t pc = sc->sc_pc;
6080 const char *intrstr = NULL;
6081 char intrbuf[PCI_INTRSTR_LEN];
6082 int error;
6083
6084 error = wm_alloc_txrx_queues(sc);
6085 if (error) {
6086 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6087 error);
6088 return ENOMEM;
6089 }
6090 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
6091 sizeof(intrbuf));
6092 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
6093 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
6094 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
6095 if (sc->sc_ihs[0] == NULL) {
6096 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
6097 (pci_intr_type(pc, sc->sc_intrs[0])
6098 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6099 return ENOMEM;
6100 }
6101
6102 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
6103 sc->sc_nintrs = 1;
6104
6105 return wm_softint_establish_queue(sc, 0, 0);
6106 }
6107
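/*
 * wm_setup_msix:
 *
 * Allocate the queues and establish MSI-X handlers: one Tx/Rx vector
 * per queue, with round-robin CPU affinity, plus one vector for link
 * status.
 */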
6108 static int
6109 wm_setup_msix(struct wm_softc *sc)
6110 {
6111 void *vih;
6112 kcpuset_t *affinity;
6113 int qidx, error, intr_idx, txrx_established;
6114 pci_chipset_tag_t pc = sc->sc_pc;
6115 const char *intrstr = NULL;
6116 char intrbuf[PCI_INTRSTR_LEN];
6117 char intr_xname[INTRDEVNAMEBUF];
6118
6119 if (sc->sc_nqueues < ncpu) {
6120 /*
6121 * To avoid other devices' interrupts, the affinity of Tx/Rx
6122 		 * interrupts starts from CPU#1.
6123 */
6124 sc->sc_affinity_offset = 1;
6125 } else {
6126 /*
6127 		 * In this case, this device uses all CPUs, so we align the
6128 		 * affinity cpu_index with the MSI-X vector number for readability.
6129 */
6130 sc->sc_affinity_offset = 0;
6131 }
6132
6133 error = wm_alloc_txrx_queues(sc);
6134 if (error) {
6135 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6136 error);
6137 return ENOMEM;
6138 }
6139
6140 kcpuset_create(&affinity, false);
6141 intr_idx = 0;
6142
6143 /*
6144 * TX and RX
6145 */
6146 txrx_established = 0;
6147 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6148 struct wm_queue *wmq = &sc->sc_queue[qidx];
6149 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
6150
6151 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6152 sizeof(intrbuf));
6153 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
6154 PCI_INTR_MPSAFE, true);
6155 memset(intr_xname, 0, sizeof(intr_xname));
6156 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
6157 device_xname(sc->sc_dev), qidx);
6158 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6159 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
6160 if (vih == NULL) {
6161 aprint_error_dev(sc->sc_dev,
6162 "unable to establish MSI-X(for TX and RX)%s%s\n",
6163 intrstr ? " at " : "",
6164 intrstr ? intrstr : "");
6165
6166 goto fail;
6167 }
6168 kcpuset_zero(affinity);
6169 /* Round-robin affinity */
6170 kcpuset_set(affinity, affinity_to);
6171 error = interrupt_distribute(vih, affinity, NULL);
6172 if (error == 0) {
6173 aprint_normal_dev(sc->sc_dev,
6174 "for TX and RX interrupting at %s affinity to %u\n",
6175 intrstr, affinity_to);
6176 } else {
6177 aprint_normal_dev(sc->sc_dev,
6178 "for TX and RX interrupting at %s\n", intrstr);
6179 }
6180 sc->sc_ihs[intr_idx] = vih;
6181 if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
6182 goto fail;
6183 txrx_established++;
6184 intr_idx++;
6185 }
6186
6187 /* LINK */
6188 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6189 sizeof(intrbuf));
6190 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
6191 memset(intr_xname, 0, sizeof(intr_xname));
6192 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
6193 device_xname(sc->sc_dev));
6194 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6195 IPL_NET, wm_linkintr_msix, sc, intr_xname);
6196 if (vih == NULL) {
6197 aprint_error_dev(sc->sc_dev,
6198 "unable to establish MSI-X(for LINK)%s%s\n",
6199 intrstr ? " at " : "",
6200 intrstr ? intrstr : "");
6201
6202 goto fail;
6203 }
6204 /* Keep default affinity to LINK interrupt */
6205 aprint_normal_dev(sc->sc_dev,
6206 "for LINK interrupting at %s\n", intrstr);
6207 sc->sc_ihs[intr_idx] = vih;
6208 sc->sc_link_intr_idx = intr_idx;
6209
6210 sc->sc_nintrs = sc->sc_nqueues + 1;
6211 kcpuset_destroy(affinity);
6212 return 0;
6213
6214 fail:
6215 for (qidx = 0; qidx < txrx_established; qidx++) {
6216 struct wm_queue *wmq = &sc->sc_queue[qidx];
6217 pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
6218 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6219 }
6220
6221 kcpuset_destroy(affinity);
6222 return ENOMEM;
6223 }
6224
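/*
 * wm_unset_stopping_flags:
 *
 * Clear sc_core_stopping and each queue's stopping flag, taking the
 * queue locks one by one. Called with sc_core_lock held.
 */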
6225 static void
6226 wm_unset_stopping_flags(struct wm_softc *sc)
6227 {
6228 int i;
6229
6230 KASSERT(mutex_owned(sc->sc_core_lock));
6231
6232 /* Must unset stopping flags in ascending order. */
6233 for (i = 0; i < sc->sc_nqueues; i++) {
6234 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6235 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6236
6237 mutex_enter(txq->txq_lock);
6238 txq->txq_stopping = false;
6239 mutex_exit(txq->txq_lock);
6240
6241 mutex_enter(rxq->rxq_lock);
6242 rxq->rxq_stopping = false;
6243 mutex_exit(rxq->rxq_lock);
6244 }
6245
6246 sc->sc_core_stopping = false;
6247 }
6248
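/*
 * wm_set_stopping_flags:
 *
 * Set sc_core_stopping and each queue's stopping flag, taking the
 * queue locks one by one. Called with sc_core_lock held.
 */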
6249 static void
6250 wm_set_stopping_flags(struct wm_softc *sc)
6251 {
6252 int i;
6253
6254 KASSERT(mutex_owned(sc->sc_core_lock));
6255
6256 sc->sc_core_stopping = true;
6257
6258 /* Must set stopping flags in ascending order. */
6259 for (i = 0; i < sc->sc_nqueues; i++) {
6260 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6261 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6262
6263 mutex_enter(rxq->rxq_lock);
6264 rxq->rxq_stopping = true;
6265 mutex_exit(rxq->rxq_lock);
6266
6267 mutex_enter(txq->txq_lock);
6268 txq->txq_stopping = true;
6269 mutex_exit(txq->txq_lock);
6270 }
6271 }
6272
6273 /*
6274 * Write interrupt interval value to ITR or EITR
6275 */
6276 static void
6277 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
6278 {
6279
6280 if (!wmq->wmq_set_itr)
6281 return;
6282
6283 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6284 uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
6285
6286 /*
6287 * 82575 doesn't have CNT_INGR field.
6288 		 * So, overwrite the counter field in software.
6289 */
6290 if (sc->sc_type == WM_T_82575)
6291 eitr |= __SHIFTIN(wmq->wmq_itr,
6292 EITR_COUNTER_MASK_82575);
6293 else
6294 eitr |= EITR_CNT_INGR;
6295
6296 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
6297 } else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
6298 /*
6299 		 * 82574 has both ITR and EITR. Set EITR when we use
6300 		 * the multiqueue function with MSI-X.
6301 */
6302 CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
6303 wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
6304 } else {
6305 KASSERT(wmq->wmq_id == 0);
6306 CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
6307 }
6308
6309 wmq->wmq_set_itr = false;
6310 }
6311
6312 /*
6313 * TODO
6314  * The dynamic itr calculation below is almost the same as Linux igb's;
6315  * however, it does not fit wm(4), so AIM stays disabled until we find
6316  * an appropriate itr calculation.
6317 */
6318 /*
6319  * Calculate the interrupt interval value for wm_itrs_writereg() to
6320  * write. This function does not write the ITR/EITR register itself.
6321 */
6322 static void
6323 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
6324 {
6325 #ifdef NOTYET
6326 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6327 struct wm_txqueue *txq = &wmq->wmq_txq;
6328 uint32_t avg_size = 0;
6329 uint32_t new_itr;
6330
6331 if (rxq->rxq_packets)
6332 avg_size = rxq->rxq_bytes / rxq->rxq_packets;
6333 if (txq->txq_packets)
6334 avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
6335
6336 if (avg_size == 0) {
6337 new_itr = 450; /* restore default value */
6338 goto out;
6339 }
6340
6341 /* Add 24 bytes to size to account for CRC, preamble, and gap */
6342 avg_size += 24;
6343
6344 /* Don't starve jumbo frames */
6345 avg_size = uimin(avg_size, 3000);
6346
6347 /* Give a little boost to mid-size frames */
6348 if ((avg_size > 300) && (avg_size < 1200))
6349 new_itr = avg_size / 3;
6350 else
6351 new_itr = avg_size / 2;
6352
6353 out:
6354 /*
6355 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
6356 * controllers. See sc->sc_itr_init setting in wm_init_locked().
6357 */
6358 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
6359 new_itr *= 4;
6360
6361 if (new_itr != wmq->wmq_itr) {
6362 wmq->wmq_itr = new_itr;
6363 wmq->wmq_set_itr = true;
6364 } else
6365 wmq->wmq_set_itr = false;
6366
6367 rxq->rxq_packets = 0;
6368 rxq->rxq_bytes = 0;
6369 txq->txq_packets = 0;
6370 txq->txq_bytes = 0;
6371 #endif
6372 }
6373
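/*
 * wm_init_sysctls:
 *
 * Create the per-device sysctl tree: the txrx_workqueue knob, per-queue
 * Tx/Rx state nodes and, under WM_DEBUG, the debug_flags and
 * trigger_reset nodes.
 */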
6374 static void
6375 wm_init_sysctls(struct wm_softc *sc)
6376 {
6377 struct sysctllog **log;
6378 const struct sysctlnode *rnode, *qnode, *cnode;
6379 int i, rv;
6380 const char *dvname;
6381
6382 log = &sc->sc_sysctllog;
6383 dvname = device_xname(sc->sc_dev);
6384
6385 rv = sysctl_createv(log, 0, NULL, &rnode,
6386 0, CTLTYPE_NODE, dvname,
6387 SYSCTL_DESCR("wm information and settings"),
6388 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6389 if (rv != 0)
6390 goto err;
6391
6392 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6393 CTLTYPE_BOOL, "txrx_workqueue",
6394 SYSCTL_DESCR("Use workqueue for packet processing"),
6395 NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
6396 if (rv != 0)
6397 goto teardown;
6398
6399 for (i = 0; i < sc->sc_nqueues; i++) {
6400 struct wm_queue *wmq = &sc->sc_queue[i];
6401 struct wm_txqueue *txq = &wmq->wmq_txq;
6402 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6403
6404 snprintf(sc->sc_queue[i].sysctlname,
6405 sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
6406
6407 if (sysctl_createv(log, 0, &rnode, &qnode,
6408 0, CTLTYPE_NODE,
6409 sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
6410 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
6411 break;
6412
6413 if (sysctl_createv(log, 0, &qnode, &cnode,
6414 CTLFLAG_READONLY, CTLTYPE_INT,
6415 "txq_free", SYSCTL_DESCR("TX queue free"),
6416 NULL, 0, &txq->txq_free,
6417 0, CTL_CREATE, CTL_EOL) != 0)
6418 break;
6419 if (sysctl_createv(log, 0, &qnode, &cnode,
6420 CTLFLAG_READONLY, CTLTYPE_INT,
6421 "txd_head", SYSCTL_DESCR("TX descriptor head"),
6422 wm_sysctl_tdh_handler, 0, (void *)txq,
6423 0, CTL_CREATE, CTL_EOL) != 0)
6424 break;
6425 if (sysctl_createv(log, 0, &qnode, &cnode,
6426 CTLFLAG_READONLY, CTLTYPE_INT,
6427 "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
6428 wm_sysctl_tdt_handler, 0, (void *)txq,
6429 0, CTL_CREATE, CTL_EOL) != 0)
6430 break;
6431 if (sysctl_createv(log, 0, &qnode, &cnode,
6432 CTLFLAG_READONLY, CTLTYPE_INT,
6433 "txq_next", SYSCTL_DESCR("TX queue next"),
6434 NULL, 0, &txq->txq_next,
6435 0, CTL_CREATE, CTL_EOL) != 0)
6436 break;
6437 if (sysctl_createv(log, 0, &qnode, &cnode,
6438 CTLFLAG_READONLY, CTLTYPE_INT,
6439 "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
6440 NULL, 0, &txq->txq_sfree,
6441 0, CTL_CREATE, CTL_EOL) != 0)
6442 break;
6443 if (sysctl_createv(log, 0, &qnode, &cnode,
6444 CTLFLAG_READONLY, CTLTYPE_INT,
6445 "txq_snext", SYSCTL_DESCR("TX queue snext"),
6446 NULL, 0, &txq->txq_snext,
6447 0, CTL_CREATE, CTL_EOL) != 0)
6448 break;
6449 if (sysctl_createv(log, 0, &qnode, &cnode,
6450 CTLFLAG_READONLY, CTLTYPE_INT,
6451 "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
6452 NULL, 0, &txq->txq_sdirty,
6453 0, CTL_CREATE, CTL_EOL) != 0)
6454 break;
6455 if (sysctl_createv(log, 0, &qnode, &cnode,
6456 CTLFLAG_READONLY, CTLTYPE_INT,
6457 "txq_flags", SYSCTL_DESCR("TX queue flags"),
6458 NULL, 0, &txq->txq_flags,
6459 0, CTL_CREATE, CTL_EOL) != 0)
6460 break;
6461 if (sysctl_createv(log, 0, &qnode, &cnode,
6462 CTLFLAG_READONLY, CTLTYPE_BOOL,
6463 "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
6464 NULL, 0, &txq->txq_stopping,
6465 0, CTL_CREATE, CTL_EOL) != 0)
6466 break;
6467 if (sysctl_createv(log, 0, &qnode, &cnode,
6468 CTLFLAG_READONLY, CTLTYPE_BOOL,
6469 "txq_sending", SYSCTL_DESCR("TX queue sending"),
6470 NULL, 0, &txq->txq_sending,
6471 0, CTL_CREATE, CTL_EOL) != 0)
6472 break;
6473
6474 if (sysctl_createv(log, 0, &qnode, &cnode,
6475 CTLFLAG_READONLY, CTLTYPE_INT,
6476 "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
6477 NULL, 0, &rxq->rxq_ptr,
6478 0, CTL_CREATE, CTL_EOL) != 0)
6479 break;
6480 }
6481
6482 #ifdef WM_DEBUG
6483 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6484 CTLTYPE_INT, "debug_flags",
6485 SYSCTL_DESCR(
6486 "Debug flags:\n" \
6487 "\t0x01 LINK\n" \
6488 "\t0x02 TX\n" \
6489 "\t0x04 RX\n" \
6490 "\t0x08 GMII\n" \
6491 "\t0x10 MANAGE\n" \
6492 "\t0x20 NVM\n" \
6493 "\t0x40 INIT\n" \
6494 "\t0x80 LOCK"),
6495 wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
6496 if (rv != 0)
6497 goto teardown;
6498 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6499 CTLTYPE_BOOL, "trigger_reset",
6500 SYSCTL_DESCR("Trigger an interface reset"),
6501 NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
6502 if (rv != 0)
6503 goto teardown;
6504 #endif
6505
6506 return;
6507
6508 teardown:
6509 sysctl_teardown(log);
6510 err:
6511 sc->sc_sysctllog = NULL;
6512 device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
6513 __func__, rv);
6514 }
6515
6516 /*
6517 * wm_init: [ifnet interface function]
6518 *
6519 * Initialize the interface.
6520 */
6521 static int
6522 wm_init(struct ifnet *ifp)
6523 {
6524 struct wm_softc *sc = ifp->if_softc;
6525 int ret;
6526
6527 KASSERT(IFNET_LOCKED(ifp));
6528
6529 if (sc->sc_dying)
6530 return ENXIO;
6531
6532 mutex_enter(sc->sc_core_lock);
6533 ret = wm_init_locked(ifp);
6534 mutex_exit(sc->sc_core_lock);
6535
6536 return ret;
6537 }
6538
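/*
 * wm_init_locked:
 *
 * Body of wm_init(). The caller must hold the ifnet lock and
 * sc_core_lock.
 */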
6539 static int
6540 wm_init_locked(struct ifnet *ifp)
6541 {
6542 struct wm_softc *sc = ifp->if_softc;
6543 struct ethercom *ec = &sc->sc_ethercom;
6544 int i, j, trynum, error = 0;
6545 uint32_t reg, sfp_mask = 0;
6546
6547 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6548 device_xname(sc->sc_dev), __func__));
6549 KASSERT(IFNET_LOCKED(ifp));
6550 KASSERT(mutex_owned(sc->sc_core_lock));
6551
6552 /*
6553 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
6554 	 * There is a small but measurable benefit to avoiding the adjustment
6555 * of the descriptor so that the headers are aligned, for normal mtu,
6556 * on such platforms. One possibility is that the DMA itself is
6557 * slightly more efficient if the front of the entire packet (instead
6558 * of the front of the headers) is aligned.
6559 *
6560 * Note we must always set align_tweak to 0 if we are using
6561 * jumbo frames.
6562 */
6563 #ifdef __NO_STRICT_ALIGNMENT
6564 sc->sc_align_tweak = 0;
6565 #else
6566 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
6567 sc->sc_align_tweak = 0;
6568 else
6569 sc->sc_align_tweak = 2;
6570 #endif /* __NO_STRICT_ALIGNMENT */
6571
6572 /* Cancel any pending I/O. */
6573 wm_stop_locked(ifp, false, false);
6574
6575 /* Update statistics before reset */
6576 if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
6577 if_ierrors, CSR_READ(sc, WMREG_RXERRC));
6578
6579 /* >= PCH_SPT hardware workaround before reset. */
6580 if (sc->sc_type >= WM_T_PCH_SPT)
6581 wm_flush_desc_rings(sc);
6582
6583 /* Reset the chip to a known state. */
6584 wm_reset(sc);
6585
6586 /*
6587 * AMT based hardware can now take control from firmware
6588 * Do this after reset.
6589 */
6590 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
6591 wm_get_hw_control(sc);
6592
6593 if ((sc->sc_type >= WM_T_PCH_SPT) &&
6594 pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
6595 wm_legacy_irq_quirk_spt(sc);
6596
6597 /* Init hardware bits */
6598 wm_initialize_hardware_bits(sc);
6599
6600 /* Reset the PHY. */
6601 if (sc->sc_flags & WM_F_HAS_MII)
6602 wm_gmii_reset(sc);
6603
6604 if (sc->sc_type >= WM_T_ICH8) {
6605 reg = CSR_READ(sc, WMREG_GCR);
6606 /*
6607 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
6608 * default after reset.
6609 */
6610 if (sc->sc_type == WM_T_ICH8)
6611 reg |= GCR_NO_SNOOP_ALL;
6612 else
6613 reg &= ~GCR_NO_SNOOP_ALL;
6614 CSR_WRITE(sc, WMREG_GCR, reg);
6615 }
6616
6617 if ((sc->sc_type >= WM_T_ICH8)
6618 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
6619 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
6620
6621 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6622 reg |= CTRL_EXT_RO_DIS;
6623 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6624 }
6625
6626 /* Calculate (E)ITR value */
6627 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
6628 /*
6629 * For NEWQUEUE's EITR (except for 82575).
6630 * 82575's EITR should be set same throttling value as other
6631 * old controllers' ITR because the interrupt/sec calculation
6632 * is the same, that is, 1,000,000,000 / (N * 256).
6633 *
6634 * 82574's EITR should be set same throttling value as ITR.
6635 *
6636 * For N interrupts/sec, set this value to:
6637 * 1,000,000 / N in contrast to ITR throttling value.
6638 */
6639 sc->sc_itr_init = 450;
6640 } else if (sc->sc_type >= WM_T_82543) {
6641 /*
6642 * Set up the interrupt throttling register (units of 256ns)
6643 * Note that a footnote in Intel's documentation says this
6644 * ticker runs at 1/4 the rate when the chip is in 100Mbit
6645 * or 10Mbit mode. Empirically, it appears to be the case
6646 * that that is also true for the 1024ns units of the other
6647 * interrupt-related timer registers -- so, really, we ought
6648 * to divide this value by 4 when the link speed is low.
6649 *
6650 * XXX implement this division at link speed change!
6651 */
6652
6653 /*
6654 * For N interrupts/sec, set this value to:
6655 * 1,000,000,000 / (N * 256). Note that we set the
6656 * absolute and packet timer values to this value
6657 * divided by 4 to get "simple timer" behavior.
6658 */
6659 sc->sc_itr_init = 1500; /* 2604 ints/sec */
6660 }
6661
6662 error = wm_init_txrx_queues(sc);
6663 if (error)
6664 goto out;
6665
6666 if (((sc->sc_flags & WM_F_SGMII) == 0) &&
6667 (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
6668 (sc->sc_type >= WM_T_82575))
6669 wm_serdes_power_up_link_82575(sc);
6670
6671 /* Clear out the VLAN table -- we don't use it (yet). */
6672 CSR_WRITE(sc, WMREG_VET, 0);
6673 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
6674 trynum = 10; /* Due to hw errata */
6675 else
6676 trynum = 1;
6677 for (i = 0; i < WM_VLAN_TABSIZE; i++)
6678 for (j = 0; j < trynum; j++)
6679 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
6680
6681 /*
6682 * Set up flow-control parameters.
6683 *
6684 * XXX Values could probably stand some tuning.
6685 */
6686 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
6687 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
6688 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
6689 && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
6690 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
6691 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
6692 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
6693 }
6694
6695 sc->sc_fcrtl = FCRTL_DFLT;
6696 if (sc->sc_type < WM_T_82543) {
6697 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
6698 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
6699 } else {
6700 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
6701 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
6702 }
6703
6704 if (sc->sc_type == WM_T_80003)
6705 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
6706 else
6707 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
6708
6709 /* Writes the control register. */
6710 wm_set_vlan(sc);
6711
6712 if (sc->sc_flags & WM_F_HAS_MII) {
6713 uint16_t kmreg;
6714
6715 switch (sc->sc_type) {
6716 case WM_T_80003:
6717 case WM_T_ICH8:
6718 case WM_T_ICH9:
6719 case WM_T_ICH10:
6720 case WM_T_PCH:
6721 case WM_T_PCH2:
6722 case WM_T_PCH_LPT:
6723 case WM_T_PCH_SPT:
6724 case WM_T_PCH_CNP:
6725 /*
6726 * Set the mac to wait the maximum time between each
6727 * iteration and increase the max iterations when
6728 * polling the phy; this fixes erroneous timeouts at
6729 * 10Mbps.
6730 */
6731 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
6732 0xFFFF);
6733 wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6734 &kmreg);
6735 kmreg |= 0x3F;
6736 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6737 kmreg);
6738 break;
6739 default:
6740 break;
6741 }
6742
6743 if (sc->sc_type == WM_T_80003) {
6744 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6745 reg &= ~CTRL_EXT_LINK_MODE_MASK;
6746 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6747
6748 /* Bypass RX and TX FIFOs */
6749 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
6750 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
6751 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
6752 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
6753 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
6754 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
6755 }
6756 }
6757 #if 0
6758 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
6759 #endif
6760
6761 /* Set up checksum offload parameters. */
6762 reg = CSR_READ(sc, WMREG_RXCSUM);
6763 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
6764 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6765 reg |= RXCSUM_IPOFL;
6766 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
6767 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
6768 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
6769 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
6770 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6771
6772 /* Set registers about MSI-X */
6773 if (wm_is_using_msix(sc)) {
6774 uint32_t ivar, qintr_idx;
6775 struct wm_queue *wmq;
6776 unsigned int qid;
6777
6778 if (sc->sc_type == WM_T_82575) {
6779 /* Interrupt control */
6780 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6781 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
6782 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6783
6784 /* TX and RX */
6785 for (i = 0; i < sc->sc_nqueues; i++) {
6786 wmq = &sc->sc_queue[i];
6787 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
6788 EITR_TX_QUEUE(wmq->wmq_id)
6789 | EITR_RX_QUEUE(wmq->wmq_id));
6790 }
6791 /* Link status */
6792 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
6793 EITR_OTHER);
6794 } else if (sc->sc_type == WM_T_82574) {
6795 /* Interrupt control */
6796 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6797 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
6798 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6799
6800 /*
6801 * Work around issue with spurious interrupts
6802 * in MSI-X mode.
6803 			 * At wm_initialize_hardware_bits(), sc_nintrs has not been
6804 			 * initialized yet, so re-initialize WMREG_RFCTL here.
6805 */
6806 reg = CSR_READ(sc, WMREG_RFCTL);
6807 reg |= WMREG_RFCTL_ACKDIS;
6808 CSR_WRITE(sc, WMREG_RFCTL, reg);
6809
6810 ivar = 0;
6811 /* TX and RX */
6812 for (i = 0; i < sc->sc_nqueues; i++) {
6813 wmq = &sc->sc_queue[i];
6814 qid = wmq->wmq_id;
6815 qintr_idx = wmq->wmq_intr_idx;
6816
6817 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6818 IVAR_TX_MASK_Q_82574(qid));
6819 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6820 IVAR_RX_MASK_Q_82574(qid));
6821 }
6822 /* Link status */
6823 ivar |= __SHIFTIN((IVAR_VALID_82574
6824 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
6825 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
6826 } else {
6827 /* Interrupt control */
6828 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
6829 | GPIE_EIAME | GPIE_PBA);
6830
6831 switch (sc->sc_type) {
6832 case WM_T_82580:
6833 case WM_T_I350:
6834 case WM_T_I354:
6835 case WM_T_I210:
6836 case WM_T_I211:
6837 /* TX and RX */
6838 for (i = 0; i < sc->sc_nqueues; i++) {
6839 wmq = &sc->sc_queue[i];
6840 qid = wmq->wmq_id;
6841 qintr_idx = wmq->wmq_intr_idx;
6842
6843 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
6844 ivar &= ~IVAR_TX_MASK_Q(qid);
6845 ivar |= __SHIFTIN((qintr_idx
6846 | IVAR_VALID),
6847 IVAR_TX_MASK_Q(qid));
6848 ivar &= ~IVAR_RX_MASK_Q(qid);
6849 ivar |= __SHIFTIN((qintr_idx
6850 | IVAR_VALID),
6851 IVAR_RX_MASK_Q(qid));
6852 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
6853 }
6854 break;
6855 case WM_T_82576:
6856 /* TX and RX */
6857 for (i = 0; i < sc->sc_nqueues; i++) {
6858 wmq = &sc->sc_queue[i];
6859 qid = wmq->wmq_id;
6860 qintr_idx = wmq->wmq_intr_idx;
6861
6862 ivar = CSR_READ(sc,
6863 WMREG_IVAR_Q_82576(qid));
6864 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
6865 ivar |= __SHIFTIN((qintr_idx
6866 | IVAR_VALID),
6867 IVAR_TX_MASK_Q_82576(qid));
6868 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
6869 ivar |= __SHIFTIN((qintr_idx
6870 | IVAR_VALID),
6871 IVAR_RX_MASK_Q_82576(qid));
6872 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
6873 ivar);
6874 }
6875 break;
6876 default:
6877 break;
6878 }
6879
6880 /* Link status */
6881 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
6882 IVAR_MISC_OTHER);
6883 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
6884 }
6885
6886 if (wm_is_using_multiqueue(sc)) {
6887 wm_init_rss(sc);
6888
6889 /*
6890 ** NOTE: Receive Full-Packet Checksum Offload
6891 ** is mutually exclusive with Multiqueue. However
6892 ** this is not the same as TCP/IP checksums which
6893 ** still work.
6894 */
6895 reg = CSR_READ(sc, WMREG_RXCSUM);
6896 reg |= RXCSUM_PCSD;
6897 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6898 }
6899 }
6900
6901 /* Set up the interrupt registers. */
6902 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6903
6904 /* Enable SFP module insertion interrupt if it's required */
6905 if ((sc->sc_flags & WM_F_SFP) != 0) {
6906 sc->sc_ctrl |= CTRL_EXTLINK_EN;
6907 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6908 sfp_mask = ICR_GPI(0);
6909 }
6910
6911 if (wm_is_using_msix(sc)) {
6912 uint32_t mask;
6913 struct wm_queue *wmq;
6914
6915 switch (sc->sc_type) {
6916 case WM_T_82574:
6917 mask = 0;
6918 for (i = 0; i < sc->sc_nqueues; i++) {
6919 wmq = &sc->sc_queue[i];
6920 mask |= ICR_TXQ(wmq->wmq_id);
6921 mask |= ICR_RXQ(wmq->wmq_id);
6922 }
6923 mask |= ICR_OTHER;
6924 CSR_WRITE(sc, WMREG_EIAC_82574, mask);
6925 CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
6926 break;
6927 default:
6928 if (sc->sc_type == WM_T_82575) {
6929 mask = 0;
6930 for (i = 0; i < sc->sc_nqueues; i++) {
6931 wmq = &sc->sc_queue[i];
6932 mask |= EITR_TX_QUEUE(wmq->wmq_id);
6933 mask |= EITR_RX_QUEUE(wmq->wmq_id);
6934 }
6935 mask |= EITR_OTHER;
6936 } else {
6937 mask = 0;
6938 for (i = 0; i < sc->sc_nqueues; i++) {
6939 wmq = &sc->sc_queue[i];
6940 mask |= 1 << wmq->wmq_intr_idx;
6941 }
6942 mask |= 1 << sc->sc_link_intr_idx;
6943 }
6944 CSR_WRITE(sc, WMREG_EIAC, mask);
6945 CSR_WRITE(sc, WMREG_EIAM, mask);
6946 CSR_WRITE(sc, WMREG_EIMS, mask);
6947
6948 /* For other interrupts */
6949 CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
6950 break;
6951 }
6952 } else {
6953 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
6954 ICR_RXO | ICR_RXT0 | sfp_mask;
6955 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
6956 }
6957
6958 /* Set up the inter-packet gap. */
6959 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6960
6961 if (sc->sc_type >= WM_T_82543) {
6962 for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6963 struct wm_queue *wmq = &sc->sc_queue[qidx];
6964 wm_itrs_writereg(sc, wmq);
6965 }
6966 /*
6967 * Link interrupts occur much less frequently than
6968 * TX and RX interrupts, so we don't tune the
6969 * EITR(WM_MSIX_LINKINTR_IDX) value the way
6970 * FreeBSD's if_igb does.
6971 */
6972 }
6973
6974 /* Set the VLAN EtherType. */
6975 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
6976
6977 /*
6978 * Set up the transmit control register; we start out with
6979 * a collision distance suitable for FDX, but update it when
6980 * we resolve the media type.
6981 */
6982 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
6983 | TCTL_CT(TX_COLLISION_THRESHOLD)
6984 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6985 if (sc->sc_type >= WM_T_82571)
6986 sc->sc_tctl |= TCTL_MULR;
6987 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6988
6989 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6990 /* Write TDT after TCTL.EN is set. See the documentation. */
6991 CSR_WRITE(sc, WMREG_TDT(0), 0);
6992 }
6993
6994 if (sc->sc_type == WM_T_80003) {
6995 reg = CSR_READ(sc, WMREG_TCTL_EXT);
6996 reg &= ~TCTL_EXT_GCEX_MASK;
6997 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
6998 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
6999 }
7000
7001 /* Set the media. */
7002 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
7003 goto out;
7004
7005 /* Configure for OS presence */
7006 wm_init_manageability(sc);
7007
7008 /*
7009 * Set up the receive control register; we actually program the
7010 * register when we set the receive filter. Use multicast address
7011 * offset type 0.
7012 *
7013 * Only the i82544 has the ability to strip the incoming CRC, so we
7014 * don't enable that feature.
7015 */
7016 sc->sc_mchash_type = 0;
7017 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
7018 | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
7019
7020 /* The 82574 uses the one-buffer extended Rx descriptor. */
7021 if (sc->sc_type == WM_T_82574)
7022 sc->sc_rctl |= RCTL_DTYP_ONEBUF;
7023
7024 if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
7025 sc->sc_rctl |= RCTL_SECRC;
7026
7027 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
7028 && (ifp->if_mtu > ETHERMTU)) {
7029 sc->sc_rctl |= RCTL_LPE;
7030 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7031 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
7032 }
7033
7034 if (MCLBYTES == 2048)
7035 sc->sc_rctl |= RCTL_2k;
7036 else {
7037 if (sc->sc_type >= WM_T_82543) {
7038 switch (MCLBYTES) {
7039 case 4096:
7040 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
7041 break;
7042 case 8192:
7043 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
7044 break;
7045 case 16384:
7046 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
7047 break;
7048 default:
7049 panic("wm_init: MCLBYTES %d unsupported",
7050 MCLBYTES);
7051 break;
7052 }
7053 } else
7054 panic("wm_init: i82542 requires MCLBYTES = 2048");
7055 }
7056
7057 /* Enable ECC */
7058 switch (sc->sc_type) {
7059 case WM_T_82571:
7060 reg = CSR_READ(sc, WMREG_PBA_ECC);
7061 reg |= PBA_ECC_CORR_EN;
7062 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
7063 break;
7064 case WM_T_PCH_LPT:
7065 case WM_T_PCH_SPT:
7066 case WM_T_PCH_CNP:
7067 reg = CSR_READ(sc, WMREG_PBECCSTS);
7068 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
7069 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
7070
7071 sc->sc_ctrl |= CTRL_MEHE;
7072 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7073 break;
7074 default:
7075 break;
7076 }
7077
7078 /*
7079 * Set the receive filter.
7080 *
7081 * For 82575 and 82576, the RX descriptors must be initialized after
7082 * the setting of RCTL.EN in wm_set_filter()
7083 */
7084 wm_set_filter(sc);
7085
7086 /* On 82575 and later, set RDT only if RX is enabled */
7087 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7088 int qidx;
7089 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7090 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
7091 for (i = 0; i < WM_NRXDESC; i++) {
7092 mutex_enter(rxq->rxq_lock);
7093 wm_init_rxdesc(rxq, i);
7094 mutex_exit(rxq->rxq_lock);
7096 }
7097 }
7098 }
7099
7100 wm_unset_stopping_flags(sc);
7101
7102 /* Start the one second link check clock. */
7103 callout_schedule(&sc->sc_tick_ch, hz);
7104
7105 /*
7106 * ...all done! (IFNET_LOCKED asserted above.)
7107 */
7108 ifp->if_flags |= IFF_RUNNING;
7109
7110 out:
7111 /* Save last flags for the callback */
7112 sc->sc_if_flags = ifp->if_flags;
7113 sc->sc_ec_capenable = ec->ec_capenable;
7114 if (error)
7115 log(LOG_ERR, "%s: interface not running\n",
7116 device_xname(sc->sc_dev));
7117 return error;
7118 }
7119
7120 /*
7121 * wm_stop: [ifnet interface function]
7122 *
7123 * Stop transmission on the interface.
7124 */
7125 static void
7126 wm_stop(struct ifnet *ifp, int disable)
7127 {
7128 struct wm_softc *sc = ifp->if_softc;
7129
7130 ASSERT_SLEEPABLE();
7131 KASSERT(IFNET_LOCKED(ifp));
7132
7133 mutex_enter(sc->sc_core_lock);
7134 wm_stop_locked(ifp, disable ? true : false, true);
7135 mutex_exit(sc->sc_core_lock);
7136
7137 /*
7138 * After wm_set_stopping_flags(), it is guaranteed that
7139 * wm_handle_queue_work() does not call workqueue_enqueue().
7140 * However, workqueue_wait() cannot be called in wm_stop_locked()
7141 * because it can sleep,
7142 * so call workqueue_wait() here.
7143 */
7144 for (int i = 0; i < sc->sc_nqueues; i++)
7145 workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
7146 workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
7147 }
7148
7149 static void
7150 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
7151 {
7152 struct wm_softc *sc = ifp->if_softc;
7153 struct wm_txsoft *txs;
7154 int i, qidx;
7155
7156 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7157 device_xname(sc->sc_dev), __func__));
7158 KASSERT(IFNET_LOCKED(ifp));
7159 KASSERT(mutex_owned(sc->sc_core_lock));
7160
7161 wm_set_stopping_flags(sc);
7162
7163 if (sc->sc_flags & WM_F_HAS_MII) {
7164 /* Down the MII. */
7165 mii_down(&sc->sc_mii);
7166 } else {
7167 #if 0
7168 /* Should we clear PHY's status properly? */
7169 wm_reset(sc);
7170 #endif
7171 }
7172
7173 /* Stop the transmit and receive processes. */
7174 CSR_WRITE(sc, WMREG_TCTL, 0);
7175 CSR_WRITE(sc, WMREG_RCTL, 0);
7176 sc->sc_rctl &= ~RCTL_EN;
7177
7178 /*
7179 * Clear the interrupt mask to ensure the device cannot assert its
7180 * interrupt line.
7181 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
7182 * service any currently pending or shared interrupt.
7183 */
7184 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7185 sc->sc_icr = 0;
7186 if (wm_is_using_msix(sc)) {
7187 if (sc->sc_type != WM_T_82574) {
7188 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
7189 CSR_WRITE(sc, WMREG_EIAC, 0);
7190 } else
7191 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
7192 }
7193
7194 /*
7195 * Stop callouts after interrupts are disabled; if we have
7196 * to wait for them, we will be releasing the CORE_LOCK
7197 * briefly, which will unblock interrupts on the current CPU.
7198 */
7199
7200 /* Stop the one second clock. */
7201 if (wait)
7202 callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
7203 else
7204 callout_stop(&sc->sc_tick_ch);
7205
7206 /* Stop the 82547 Tx FIFO stall check timer. */
7207 if (sc->sc_type == WM_T_82547) {
7208 if (wait)
7209 callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
7210 else
7211 callout_stop(&sc->sc_txfifo_ch);
7212 }
7213
7214 /* Release any queued transmit buffers. */
7215 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7216 struct wm_queue *wmq = &sc->sc_queue[qidx];
7217 struct wm_txqueue *txq = &wmq->wmq_txq;
7218 struct mbuf *m;
7219
7220 mutex_enter(txq->txq_lock);
7221 txq->txq_sending = false; /* Ensure watchdog disabled */
7222 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7223 txs = &txq->txq_soft[i];
7224 if (txs->txs_mbuf != NULL) {
7225 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7226 m_freem(txs->txs_mbuf);
7227 txs->txs_mbuf = NULL;
7228 }
7229 }
7230 /* Drain txq_interq */
7231 while ((m = pcq_get(txq->txq_interq)) != NULL)
7232 m_freem(m);
7233 mutex_exit(txq->txq_lock);
7234 }
7235
7236 /* Mark the interface as down and cancel the watchdog timer. */
7237 ifp->if_flags &= ~IFF_RUNNING;
7238 sc->sc_if_flags = ifp->if_flags;
7239
7240 if (disable) {
7241 for (i = 0; i < sc->sc_nqueues; i++) {
7242 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7243 mutex_enter(rxq->rxq_lock);
7244 wm_rxdrain(rxq);
7245 mutex_exit(rxq->rxq_lock);
7246 }
7247 }
7248
7249 #if 0 /* notyet */
7250 if (sc->sc_type >= WM_T_82544)
7251 CSR_WRITE(sc, WMREG_WUC, 0);
7252 #endif
7253 }
7254
7255 static void
7256 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
7257 {
7258 struct mbuf *m;
7259 int i;
7260
7261 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
7262 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
7263 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
7264 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
7265 m->m_data, m->m_len, m->m_flags);
7266 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
7267 i, i == 1 ? "" : "s");
7268 }
7269
7270 /*
7271 * wm_82547_txfifo_stall:
7272 *
7273 * Callout used to wait for the 82547 Tx FIFO to drain,
7274 * reset the FIFO pointers, and restart packet transmission.
7275 */
7276 static void
7277 wm_82547_txfifo_stall(void *arg)
7278 {
7279 struct wm_softc *sc = arg;
7280 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7281
7282 mutex_enter(txq->txq_lock);
7283
7284 if (txq->txq_stopping)
7285 goto out;
7286
7287 if (txq->txq_fifo_stall) {
7288 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
7289 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
7290 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
7291 /*
7292 * Packets have drained. Stop transmitter, reset
7293 * FIFO pointers, restart transmitter, and kick
7294 * the packet queue.
7295 */
7296 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
7297 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
7298 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
7299 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
7300 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
7301 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
7302 CSR_WRITE(sc, WMREG_TCTL, tctl);
7303 CSR_WRITE_FLUSH(sc);
7304
7305 txq->txq_fifo_head = 0;
7306 txq->txq_fifo_stall = 0;
7307 wm_start_locked(&sc->sc_ethercom.ec_if);
7308 } else {
7309 /*
7310 * Still waiting for packets to drain; try again in
7311 * another tick.
7312 */
7313 callout_schedule(&sc->sc_txfifo_ch, 1);
7314 }
7315 }
7316
7317 out:
7318 mutex_exit(txq->txq_lock);
7319 }
7320
7321 /*
7322 * wm_82547_txfifo_bugchk:
7323 *
7324 * Check for bug condition in the 82547 Tx FIFO. We need to
7325 * prevent enqueueing a packet that would wrap around the end
7326 * of the Tx FIFO ring buffer, otherwise the chip will croak.
7327 *
7328 * We do this by checking the amount of space before the end
7329 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
7330 * the Tx FIFO, wait for all remaining packets to drain, reset
7331 * the internal FIFO pointers to the beginning, and restart
7332 * transmission on the interface.
7333 */
7334 #define WM_FIFO_HDR 0x10
7335 #define WM_82547_PAD_LEN 0x3e0
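/*
 * Worked example of the check below (derived from this code, not from
 * the i82547 documentation): a 1514-byte frame consumes
 * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO space,
 * and in half-duplex mode we stall once len >= WM_82547_PAD_LEN (992)
 * plus the space remaining before the end of the FIFO.
 */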
7336 static int
7337 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
7338 {
7339 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7340 int space = txq->txq_fifo_size - txq->txq_fifo_head;
7341 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
7342
7343 /* Just return if already stalled. */
7344 if (txq->txq_fifo_stall)
7345 return 1;
7346
7347 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7348 /* Stall only occurs in half-duplex mode. */
7349 goto send_packet;
7350 }
7351
7352 if (len >= WM_82547_PAD_LEN + space) {
7353 txq->txq_fifo_stall = 1;
7354 callout_schedule(&sc->sc_txfifo_ch, 1);
7355 return 1;
7356 }
7357
7358 send_packet:
7359 txq->txq_fifo_head += len;
7360 if (txq->txq_fifo_head >= txq->txq_fifo_size)
7361 txq->txq_fifo_head -= txq->txq_fifo_size;
7362
7363 return 0;
7364 }
7365
7366 static int
7367 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7368 {
7369 int error;
7370
7371 /*
7372 * Allocate the control data structures, and create and load the
7373 * DMA map for it.
7374 *
7375 * NOTE: All Tx descriptors must be in the same 4G segment of
7376 * memory. So must Rx descriptors. We simplify by allocating
7377 * both sets within the same 4G segment.
7378 */
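/*
 * The 4G constraint is enforced below by passing 0x100000000 as the
 * boundary argument to bus_dmamem_alloc().
 */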
7379 if (sc->sc_type < WM_T_82544)
7380 WM_NTXDESC(txq) = WM_NTXDESC_82542;
7381 else
7382 WM_NTXDESC(txq) = WM_NTXDESC_82544;
7383 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7384 txq->txq_descsize = sizeof(nq_txdesc_t);
7385 else
7386 txq->txq_descsize = sizeof(wiseman_txdesc_t);
7387
7388 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
7389 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
7390 1, &txq->txq_desc_rseg, 0)) != 0) {
7391 aprint_error_dev(sc->sc_dev,
7392 "unable to allocate TX control data, error = %d\n",
7393 error);
7394 goto fail_0;
7395 }
7396
7397 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
7398 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
7399 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
7400 aprint_error_dev(sc->sc_dev,
7401 "unable to map TX control data, error = %d\n", error);
7402 goto fail_1;
7403 }
7404
7405 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
7406 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
7407 aprint_error_dev(sc->sc_dev,
7408 "unable to create TX control data DMA map, error = %d\n",
7409 error);
7410 goto fail_2;
7411 }
7412
7413 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
7414 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
7415 aprint_error_dev(sc->sc_dev,
7416 "unable to load TX control data DMA map, error = %d\n",
7417 error);
7418 goto fail_3;
7419 }
7420
7421 return 0;
7422
7423 fail_3:
7424 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7425 fail_2:
7426 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7427 WM_TXDESCS_SIZE(txq));
7428 fail_1:
7429 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7430 fail_0:
7431 return error;
7432 }
7433
7434 static void
7435 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7436 {
7437
7438 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
7439 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7440 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7441 WM_TXDESCS_SIZE(txq));
7442 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7443 }
7444
7445 static int
7446 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7447 {
7448 int error;
7449 size_t rxq_descs_size;
7450
7451 /*
7452 * Allocate the control data structures, and create and load the
7453 * DMA map for it.
7454 *
7455 * NOTE: All Tx descriptors must be in the same 4G segment of
7456 * memory. So must Rx descriptors. We simplify by allocating
7457 * both sets within the same 4G segment.
7458 */
7459 rxq->rxq_ndesc = WM_NRXDESC;
7460 if (sc->sc_type == WM_T_82574)
7461 rxq->rxq_descsize = sizeof(ext_rxdesc_t);
7462 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7463 rxq->rxq_descsize = sizeof(nq_rxdesc_t);
7464 else
7465 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
7466 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
7467
7468 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
7469 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
7470 1, &rxq->rxq_desc_rseg, 0)) != 0) {
7471 aprint_error_dev(sc->sc_dev,
7472 "unable to allocate RX control data, error = %d\n",
7473 error);
7474 goto fail_0;
7475 }
7476
7477 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
7478 rxq->rxq_desc_rseg, rxq_descs_size,
7479 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
7480 aprint_error_dev(sc->sc_dev,
7481 "unable to map RX control data, error = %d\n", error);
7482 goto fail_1;
7483 }
7484
7485 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
7486 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
7487 aprint_error_dev(sc->sc_dev,
7488 "unable to create RX control data DMA map, error = %d\n",
7489 error);
7490 goto fail_2;
7491 }
7492
7493 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
7494 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
7495 aprint_error_dev(sc->sc_dev,
7496 "unable to load RX control data DMA map, error = %d\n",
7497 error);
7498 goto fail_3;
7499 }
7500
7501 return 0;
7502
7503 fail_3:
7504 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7505 fail_2:
7506 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7507 rxq_descs_size);
7508 fail_1:
7509 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7510 fail_0:
7511 return error;
7512 }
7513
7514 static void
7515 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7516 {
7517
7518 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
7519 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7520 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7521 rxq->rxq_descsize * rxq->rxq_ndesc);
7522 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7523 }
7524
7525
7526 static int
7527 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7528 {
7529 int i, error;
7530
7531 /* Create the transmit buffer DMA maps. */
7532 WM_TXQUEUELEN(txq) =
7533 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
7534 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
7535 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7536 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
7537 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
7538 &txq->txq_soft[i].txs_dmamap)) != 0) {
7539 aprint_error_dev(sc->sc_dev,
7540 "unable to create Tx DMA map %d, error = %d\n",
7541 i, error);
7542 goto fail;
7543 }
7544 }
7545
7546 return 0;
7547
7548 fail:
7549 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7550 if (txq->txq_soft[i].txs_dmamap != NULL)
7551 bus_dmamap_destroy(sc->sc_dmat,
7552 txq->txq_soft[i].txs_dmamap);
7553 }
7554 return error;
7555 }
7556
7557 static void
7558 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7559 {
7560 int i;
7561
7562 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7563 if (txq->txq_soft[i].txs_dmamap != NULL)
7564 bus_dmamap_destroy(sc->sc_dmat,
7565 txq->txq_soft[i].txs_dmamap);
7566 }
7567 }
7568
7569 static int
7570 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7571 {
7572 int i, error;
7573
7574 /* Create the receive buffer DMA maps. */
7575 for (i = 0; i < rxq->rxq_ndesc; i++) {
7576 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
7577 MCLBYTES, 0, 0,
7578 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
7579 aprint_error_dev(sc->sc_dev,
7580 "unable to create Rx DMA map %d error = %d\n",
7581 i, error);
7582 goto fail;
7583 }
7584 rxq->rxq_soft[i].rxs_mbuf = NULL;
7585 }
7586
7587 return 0;
7588
7589 fail:
7590 for (i = 0; i < rxq->rxq_ndesc; i++) {
7591 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7592 bus_dmamap_destroy(sc->sc_dmat,
7593 rxq->rxq_soft[i].rxs_dmamap);
7594 }
7595 return error;
7596 }
7597
7598 static void
7599 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7600 {
7601 int i;
7602
7603 for (i = 0; i < rxq->rxq_ndesc; i++) {
7604 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7605 bus_dmamap_destroy(sc->sc_dmat,
7606 rxq->rxq_soft[i].rxs_dmamap);
7607 }
7608 }
7609
7610 /*
7611 * wm_alloc_txrx_queues:
7612 * Allocate {tx,rx}descs and {tx,rx} buffers
7613 */
7614 static int
7615 wm_alloc_txrx_queues(struct wm_softc *sc)
7616 {
7617 int i, error, tx_done, rx_done;
7618
7619 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
7620 KM_SLEEP);
7621 if (sc->sc_queue == NULL) {
7622 aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
7623 error = ENOMEM;
7624 goto fail_0;
7625 }
7626
7627 /* For transmission */
7628 error = 0;
7629 tx_done = 0;
7630 for (i = 0; i < sc->sc_nqueues; i++) {
7631 #ifdef WM_EVENT_COUNTERS
7632 int j;
7633 const char *xname;
7634 #endif
7635 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7636 txq->txq_sc = sc;
7637 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7638
7639 error = wm_alloc_tx_descs(sc, txq);
7640 if (error)
7641 break;
7642 error = wm_alloc_tx_buffer(sc, txq);
7643 if (error) {
7644 wm_free_tx_descs(sc, txq);
7645 break;
7646 }
7647 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
7648 if (txq->txq_interq == NULL) {
7649 wm_free_tx_descs(sc, txq);
7650 wm_free_tx_buffer(sc, txq);
7651 error = ENOMEM;
7652 break;
7653 }
7654
7655 #ifdef WM_EVENT_COUNTERS
7656 xname = device_xname(sc->sc_dev);
7657
7658 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
7659 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
7660 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
7661 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
7662 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
7663 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
7664 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
7665 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
7666 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
7667 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
7668 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
7669
7670 for (j = 0; j < WM_NTXSEGS; j++) {
7671 snprintf(txq->txq_txseg_evcnt_names[j],
7672 sizeof(txq->txq_txseg_evcnt_names[j]),
7673 "txq%02dtxseg%d", i, j);
7674 evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
7675 EVCNT_TYPE_MISC,
7676 NULL, xname, txq->txq_txseg_evcnt_names[j]);
7677 }
7678
7679 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
7680 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
7681 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
7682 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
7683 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
7684 WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
7685 #endif /* WM_EVENT_COUNTERS */
7686
7687 tx_done++;
7688 }
7689 if (error)
7690 goto fail_1;
7691
7692 /* For receive */
7693 error = 0;
7694 rx_done = 0;
7695 for (i = 0; i < sc->sc_nqueues; i++) {
7696 #ifdef WM_EVENT_COUNTERS
7697 const char *xname;
7698 #endif
7699 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7700 rxq->rxq_sc = sc;
7701 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7702
7703 error = wm_alloc_rx_descs(sc, rxq);
7704 if (error)
7705 break;
7706
7707 error = wm_alloc_rx_buffer(sc, rxq);
7708 if (error) {
7709 wm_free_rx_descs(sc, rxq);
7710 break;
7711 }
7712
7713 #ifdef WM_EVENT_COUNTERS
7714 xname = device_xname(sc->sc_dev);
7715
7716 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
7717 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
7718
7719 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
7720 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
7721 #endif /* WM_EVENT_COUNTERS */
7722
7723 rx_done++;
7724 }
7725 if (error)
7726 goto fail_2;
7727
7728 return 0;
7729
7730 fail_2:
7731 for (i = 0; i < rx_done; i++) {
7732 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7733 wm_free_rx_buffer(sc, rxq);
7734 wm_free_rx_descs(sc, rxq);
7735 if (rxq->rxq_lock)
7736 mutex_obj_free(rxq->rxq_lock);
7737 }
7738 fail_1:
7739 for (i = 0; i < tx_done; i++) {
7740 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7741 pcq_destroy(txq->txq_interq);
7742 wm_free_tx_buffer(sc, txq);
7743 wm_free_tx_descs(sc, txq);
7744 if (txq->txq_lock)
7745 mutex_obj_free(txq->txq_lock);
7746 }
7747
7748 kmem_free(sc->sc_queue,
7749 sizeof(struct wm_queue) * sc->sc_nqueues);
7750 fail_0:
7751 return error;
7752 }
7753
7754 /*
7755 * wm_free_txrx_queues:
7756 * Free {tx,rx}descs and {tx,rx} buffers
7757 */
7758 static void
7759 wm_free_txrx_queues(struct wm_softc *sc)
7760 {
7761 int i;
7762
7763 for (i = 0; i < sc->sc_nqueues; i++) {
7764 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7765
7766 #ifdef WM_EVENT_COUNTERS
7767 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
7768 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
7769 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
7770 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
7771 #endif /* WM_EVENT_COUNTERS */
7772
7773 wm_free_rx_buffer(sc, rxq);
7774 wm_free_rx_descs(sc, rxq);
7775 if (rxq->rxq_lock)
7776 mutex_obj_free(rxq->rxq_lock);
7777 }
7778
7779 for (i = 0; i < sc->sc_nqueues; i++) {
7780 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7781 struct mbuf *m;
7782 #ifdef WM_EVENT_COUNTERS
7783 int j;
7784
7785 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
7786 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
7787 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
7788 WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
7789 WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
7790 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
7791 WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
7792 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
7793 WM_Q_EVCNT_DETACH(txq, tso, txq, i);
7794 WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
7795 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
7796
7797 for (j = 0; j < WM_NTXSEGS; j++)
7798 evcnt_detach(&txq->txq_ev_txseg[j]);
7799
7800 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
7801 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
7802 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
7803 WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
7804 WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
7805 WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
7806 #endif /* WM_EVENT_COUNTERS */
7807
7808 /* Drain txq_interq */
7809 while ((m = pcq_get(txq->txq_interq)) != NULL)
7810 m_freem(m);
7811 pcq_destroy(txq->txq_interq);
7812
7813 wm_free_tx_buffer(sc, txq);
7814 wm_free_tx_descs(sc, txq);
7815 if (txq->txq_lock)
7816 mutex_obj_free(txq->txq_lock);
7817 }
7818
7819 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
7820 }
7821
7822 static void
7823 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7824 {
7825
7826 KASSERT(mutex_owned(txq->txq_lock));
7827
7828 /* Initialize the transmit descriptor ring. */
7829 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
7830 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
7831 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7832 txq->txq_free = WM_NTXDESC(txq);
7833 txq->txq_next = 0;
7834 }
7835
7836 static void
7837 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7838 struct wm_txqueue *txq)
7839 {
7840
7841 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7842 device_xname(sc->sc_dev), __func__));
7843 KASSERT(mutex_owned(txq->txq_lock));
7844
7845 if (sc->sc_type < WM_T_82543) {
7846 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
7847 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
7848 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
7849 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
7850 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
7851 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
7852 } else {
7853 int qid = wmq->wmq_id;
7854
7855 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
7856 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
7857 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
7858 CSR_WRITE(sc, WMREG_TDH(qid), 0);
7859
7860 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7861 /*
7862 * Don't write TDT before TCTL.EN is set.
7863 * See the documentation.
7864 */
7865 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
7866 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
7867 | TXDCTL_WTHRESH(0));
7868 else {
7869 /* XXX should update with AIM? */
7870 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
7871 if (sc->sc_type >= WM_T_82540) {
7872 /* Should be the same */
7873 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
7874 }
7875
7876 CSR_WRITE(sc, WMREG_TDT(qid), 0);
7877 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
7878 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
7879 }
7880 }
7881 }
7882
7883 static void
7884 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7885 {
7886 int i;
7887
7888 KASSERT(mutex_owned(txq->txq_lock));
7889
7890 /* Initialize the transmit job descriptors. */
7891 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
7892 txq->txq_soft[i].txs_mbuf = NULL;
7893 txq->txq_sfree = WM_TXQUEUELEN(txq);
7894 txq->txq_snext = 0;
7895 txq->txq_sdirty = 0;
7896 }
7897
7898 static void
7899 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7900 struct wm_txqueue *txq)
7901 {
7902
7903 KASSERT(mutex_owned(txq->txq_lock));
7904
7905 /*
7906 * Set up some register offsets that are different between
7907 * the i82542 and the i82543 and later chips.
7908 */
7909 if (sc->sc_type < WM_T_82543)
7910 txq->txq_tdt_reg = WMREG_OLD_TDT;
7911 else
7912 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
7913
7914 wm_init_tx_descs(sc, txq);
7915 wm_init_tx_regs(sc, wmq, txq);
7916 wm_init_tx_buffer(sc, txq);
7917
7918 /* Clear all flags except WM_TXQ_LINKDOWN_DISCARD */
7919 txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
7920
7921 txq->txq_sending = false;
7922 }
7923
7924 static void
7925 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7926 struct wm_rxqueue *rxq)
7927 {
7928
7929 KASSERT(mutex_owned(rxq->rxq_lock));
7930
7931 /*
7932 * Initialize the receive descriptor and receive job
7933 * descriptor rings.
7934 */
7935 if (sc->sc_type < WM_T_82543) {
7936 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
7937 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
7938 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
7939 rxq->rxq_descsize * rxq->rxq_ndesc);
7940 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
7941 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
7942 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
7943
7944 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
7945 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
7946 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
7947 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
7948 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
7949 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
7950 } else {
7951 int qid = wmq->wmq_id;
7952
7953 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
7954 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
7955 CSR_WRITE(sc, WMREG_RDLEN(qid),
7956 rxq->rxq_descsize * rxq->rxq_ndesc);
7957
7958 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7959 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
7960 panic("%s: MCLBYTES %d unsupported for 82575 "
7961 "or higher\n", __func__, MCLBYTES);
7962
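/*
 * The BSIZEPKT field of SRRCTL is expressed in units of
 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes; assuming the usual 1KB
 * units, MCLBYTES = 2048 programs a buffer size field of 2.
 */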
7963 /*
7964 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
7965 * only.
7966 */
7967 CSR_WRITE(sc, WMREG_SRRCTL(qid),
7968 SRRCTL_DESCTYPE_ADV_ONEBUF
7969 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
7970 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
7971 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
7972 | RXDCTL_WTHRESH(1));
7973 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7974 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7975 } else {
7976 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7977 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7978 /* XXX should update with AIM? */
7979 CSR_WRITE(sc, WMREG_RDTR,
7980 (wmq->wmq_itr / 4) | RDTR_FPD);
7981 /* MUST be the same */
7982 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
7983 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
7984 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
7985 }
7986 }
7987 }
7988
7989 static int
7990 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7991 {
7992 struct wm_rxsoft *rxs;
7993 int error, i;
7994
7995 KASSERT(mutex_owned(rxq->rxq_lock));
7996
7997 for (i = 0; i < rxq->rxq_ndesc; i++) {
7998 rxs = &rxq->rxq_soft[i];
7999 if (rxs->rxs_mbuf == NULL) {
8000 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
8001 log(LOG_ERR, "%s: unable to allocate or map "
8002 "rx buffer %d, error = %d\n",
8003 device_xname(sc->sc_dev), i, error);
8004 /*
8005 * XXX Should attempt to run with fewer receive
8006 * XXX buffers instead of just failing.
8007 */
8008 wm_rxdrain(rxq);
8009 return ENOMEM;
8010 }
8011 } else {
8012 /*
8013 * For 82575 and 82576, the RX descriptors must be
8014 * initialized after the setting of RCTL.EN in
8015 * wm_set_filter()
8016 */
8017 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
8018 wm_init_rxdesc(rxq, i);
8019 }
8020 }
8021 rxq->rxq_ptr = 0;
8022 rxq->rxq_discard = 0;
8023 WM_RXCHAIN_RESET(rxq);
8024
8025 return 0;
8026 }
8027
8028 static int
8029 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8030 struct wm_rxqueue *rxq)
8031 {
8032
8033 KASSERT(mutex_owned(rxq->rxq_lock));
8034
8035 /*
8036 * Set up some register offsets that are different between
8037 * the i82542 and the i82543 and later chips.
8038 */
8039 if (sc->sc_type < WM_T_82543)
8040 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
8041 else
8042 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
8043
8044 wm_init_rx_regs(sc, wmq, rxq);
8045 return wm_init_rx_buffer(sc, rxq);
8046 }
8047
8048 /*
8049 * wm_init_txrx_queues:
8050 * Initialize {tx,rx}descs and {tx,rx} buffers
8051 */
8052 static int
8053 wm_init_txrx_queues(struct wm_softc *sc)
8054 {
8055 int i, error = 0;
8056
8057 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8058 device_xname(sc->sc_dev), __func__));
8059
8060 for (i = 0; i < sc->sc_nqueues; i++) {
8061 struct wm_queue *wmq = &sc->sc_queue[i];
8062 struct wm_txqueue *txq = &wmq->wmq_txq;
8063 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8064
8065 /*
8066 * TODO
8067 * Currently, we use a constant value instead of AIM.
8068 * Furthermore, the interrupt interval of multiqueue, which uses
8069 * polling mode, is less than the default value.
8070 * More tuning and AIM are required.
8071 */
8072 if (wm_is_using_multiqueue(sc))
8073 wmq->wmq_itr = 50;
8074 else
8075 wmq->wmq_itr = sc->sc_itr_init;
8076 wmq->wmq_set_itr = true;
8077
8078 mutex_enter(txq->txq_lock);
8079 wm_init_tx_queue(sc, wmq, txq);
8080 mutex_exit(txq->txq_lock);
8081
8082 mutex_enter(rxq->rxq_lock);
8083 error = wm_init_rx_queue(sc, wmq, rxq);
8084 mutex_exit(rxq->rxq_lock);
8085 if (error)
8086 break;
8087 }
8088
8089 return error;
8090 }
8091
8092 /*
8093 * wm_tx_offload:
8094 *
8095 * Set up TCP/IP checksumming parameters for the
8096 * specified packet.
8097 */
8098 static void
8099 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8100 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
8101 {
8102 struct mbuf *m0 = txs->txs_mbuf;
8103 struct livengood_tcpip_ctxdesc *t;
8104 uint32_t ipcs, tucs, cmd, cmdlen, seg;
8105 uint32_t ipcse;
8106 struct ether_header *eh;
8107 int offset, iphl;
8108 uint8_t fields;
8109
8110 /*
8111 * XXX It would be nice if the mbuf pkthdr had offset
8112 * fields for the protocol headers.
8113 */
8114
8115 eh = mtod(m0, struct ether_header *);
8116 switch (htons(eh->ether_type)) {
8117 case ETHERTYPE_IP:
8118 case ETHERTYPE_IPV6:
8119 offset = ETHER_HDR_LEN;
8120 break;
8121
8122 case ETHERTYPE_VLAN:
8123 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8124 break;
8125
8126 default:
8127 /* Don't support this protocol or encapsulation. */
8128 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8129 txq->txq_last_hw_ipcs = 0;
8130 txq->txq_last_hw_tucs = 0;
8131 *fieldsp = 0;
8132 *cmdp = 0;
8133 return;
8134 }
8135
8136 if ((m0->m_pkthdr.csum_flags &
8137 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8138 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8139 } else
8140 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8141
8142 ipcse = offset + iphl - 1;
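/*
 * Example: for an untagged IPv4 frame with a minimal 20-byte IP
 * header, offset = ETHER_HDR_LEN (14) and iphl = 20, so ipcse = 33,
 * the offset of the last byte covered by the IP header checksum.
 */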
8143
8144 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
8145 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
8146 seg = 0;
8147 fields = 0;
8148
8149 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8150 int hlen = offset + iphl;
8151 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8152
8153 if (__predict_false(m0->m_len <
8154 (hlen + sizeof(struct tcphdr)))) {
8155 /*
8156 * TCP/IP headers are not in the first mbuf; we need
8157 * to do this the slow and painful way. Let's just
8158 * hope this doesn't happen very often.
8159 */
8160 struct tcphdr th;
8161
8162 WM_Q_EVCNT_INCR(txq, tsopain);
8163
8164 m_copydata(m0, hlen, sizeof(th), &th);
8165 if (v4) {
8166 struct ip ip;
8167
8168 m_copydata(m0, offset, sizeof(ip), &ip);
8169 ip.ip_len = 0;
8170 m_copyback(m0,
8171 offset + offsetof(struct ip, ip_len),
8172 sizeof(ip.ip_len), &ip.ip_len);
8173 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8174 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8175 } else {
8176 struct ip6_hdr ip6;
8177
8178 m_copydata(m0, offset, sizeof(ip6), &ip6);
8179 ip6.ip6_plen = 0;
8180 m_copyback(m0,
8181 offset + offsetof(struct ip6_hdr, ip6_plen),
8182 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8183 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8184 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8185 }
8186 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8187 sizeof(th.th_sum), &th.th_sum);
8188
8189 hlen += th.th_off << 2;
8190 } else {
8191 /*
8192 * TCP/IP headers are in the first mbuf; we can do
8193 * this the easy way.
8194 */
8195 struct tcphdr *th;
8196
8197 if (v4) {
8198 struct ip *ip =
8199 (void *)(mtod(m0, char *) + offset);
8200 th = (void *)(mtod(m0, char *) + hlen);
8201
8202 ip->ip_len = 0;
8203 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8204 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8205 } else {
8206 struct ip6_hdr *ip6 =
8207 (void *)(mtod(m0, char *) + offset);
8208 th = (void *)(mtod(m0, char *) + hlen);
8209
8210 ip6->ip6_plen = 0;
8211 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8212 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8213 }
8214 hlen += th->th_off << 2;
8215 }
8216
8217 if (v4) {
8218 WM_Q_EVCNT_INCR(txq, tso);
8219 cmdlen |= WTX_TCPIP_CMD_IP;
8220 } else {
8221 WM_Q_EVCNT_INCR(txq, tso6);
8222 ipcse = 0;
8223 }
8224 cmd |= WTX_TCPIP_CMD_TSE;
8225 cmdlen |= WTX_TCPIP_CMD_TSE |
8226 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
8227 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
8228 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
8229 }
8230
8231 /*
8232 * NOTE: Even if we're not using the IP or TCP/UDP checksum
8233 * offload feature, if we load the context descriptor, we
8234 * MUST provide valid values for IPCSS and TUCSS fields.
8235 */
8236
8237 ipcs = WTX_TCPIP_IPCSS(offset) |
8238 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
8239 WTX_TCPIP_IPCSE(ipcse);
8240 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
8241 WM_Q_EVCNT_INCR(txq, ipsum);
8242 fields |= WTX_IXSM;
8243 }
8244
8245 offset += iphl;
8246
8247 if (m0->m_pkthdr.csum_flags &
8248 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
8249 WM_Q_EVCNT_INCR(txq, tusum);
8250 fields |= WTX_TXSM;
8251 tucs = WTX_TCPIP_TUCSS(offset) |
8252 WTX_TCPIP_TUCSO(offset +
8253 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
8254 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8255 } else if ((m0->m_pkthdr.csum_flags &
8256 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
8257 WM_Q_EVCNT_INCR(txq, tusum6);
8258 fields |= WTX_TXSM;
8259 tucs = WTX_TCPIP_TUCSS(offset) |
8260 WTX_TCPIP_TUCSO(offset +
8261 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
8262 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8263 } else {
8264 /* Just initialize it to a valid TCP context. */
8265 tucs = WTX_TCPIP_TUCSS(offset) |
8266 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
8267 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8268 }
8269
8270 *cmdp = cmd;
8271 *fieldsp = fields;
8272
8273 /*
8274 * We don't have to write context descriptor for every packet
8275 * except for 82574. For 82574, we must write context descriptor
8276 * for every packet when we use two descriptor queues.
8277 *
8278 * The 82574L can only remember the *last* context used
8279 * regardless of the queue it was used for. We cannot reuse
8280 * contexts on this hardware platform and must generate a new
8281 * context every time. 82574L hardware spec, section 7.2.6,
8282 * second note.
8283 */
8284 if (sc->sc_nqueues < 2) {
8285 /*
8286 * Setting up a new checksum offload context for every
8287 * frame takes a lot of processing time for the hardware.
8288 * This also reduces performance a lot for small-sized
8289 * frames, so avoid it if the driver can use a previously
8290 * configured checksum offload context.
8291 * For TSO, in theory we could reuse the same TSO context
8292 * only if the frame has the same type (IP/TCP) and the same
8293 * MSS. However, checking whether a frame has the same
8294 * IP/TCP structure is hard, so just ignore that and always
8295 * re-establish a new TSO context.
8296 */
8297 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
8298 == 0) {
8299 if (txq->txq_last_hw_cmd == cmd &&
8300 txq->txq_last_hw_fields == fields &&
8301 txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
8302 txq->txq_last_hw_tucs == (tucs & 0xffff)) {
8303 WM_Q_EVCNT_INCR(txq, skipcontext);
8304 return;
8305 }
8306 }
8307
8308 txq->txq_last_hw_cmd = cmd;
8309 txq->txq_last_hw_fields = fields;
8310 txq->txq_last_hw_ipcs = (ipcs & 0xffff);
8311 txq->txq_last_hw_tucs = (tucs & 0xffff);
8312 }
8313
8314 /* Fill in the context descriptor. */
8315 t = (struct livengood_tcpip_ctxdesc *)
8316 &txq->txq_descs[txq->txq_next];
8317 t->tcpip_ipcs = htole32(ipcs);
8318 t->tcpip_tucs = htole32(tucs);
8319 t->tcpip_cmdlen = htole32(cmdlen);
8320 t->tcpip_seg = htole32(seg);
8321 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8322
8323 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8324 txs->txs_ndesc++;
8325 }
8326
8327 static inline int
8328 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
8329 {
8330 struct wm_softc *sc = ifp->if_softc;
8331 u_int cpuid = cpu_index(curcpu());
8332
8333 /*
8334 * Currently, a simple distribution strategy.
8335 * TODO:
8336 * Distribute by flowid (RSS hash value).
8337 */
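/*
 * Example (hypothetical values): with ncpu = 8, sc_nqueues = 4 and
 * sc_affinity_offset = 0, CPUs 0-7 map to queues 0,1,2,3,0,1,2,3.
 */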
8338 return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
8339 }
8340
8341 static inline bool
8342 wm_linkdown_discard(struct wm_txqueue *txq)
8343 {
8344
8345 if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
8346 return true;
8347
8348 return false;
8349 }
8350
8351 /*
8352 * wm_start: [ifnet interface function]
8353 *
8354 * Start packet transmission on the interface.
8355 */
8356 static void
8357 wm_start(struct ifnet *ifp)
8358 {
8359 struct wm_softc *sc = ifp->if_softc;
8360 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8361
8362 KASSERT(if_is_mpsafe(ifp));
8363 /*
8364 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8365 */
8366
8367 mutex_enter(txq->txq_lock);
8368 if (!txq->txq_stopping)
8369 wm_start_locked(ifp);
8370 mutex_exit(txq->txq_lock);
8371 }
8372
8373 static void
8374 wm_start_locked(struct ifnet *ifp)
8375 {
8376 struct wm_softc *sc = ifp->if_softc;
8377 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8378
8379 wm_send_common_locked(ifp, txq, false);
8380 }
8381
8382 static int
8383 wm_transmit(struct ifnet *ifp, struct mbuf *m)
8384 {
8385 int qid;
8386 struct wm_softc *sc = ifp->if_softc;
8387 struct wm_txqueue *txq;
8388
8389 qid = wm_select_txqueue(ifp, m);
8390 txq = &sc->sc_queue[qid].wmq_txq;
8391
8392 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8393 m_freem(m);
8394 WM_Q_EVCNT_INCR(txq, pcqdrop);
8395 return ENOBUFS;
8396 }
8397
8398 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8399 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8400 if (m->m_flags & M_MCAST)
8401 if_statinc_ref(nsr, if_omcasts);
8402 IF_STAT_PUTREF(ifp);
8403
8404 if (mutex_tryenter(txq->txq_lock)) {
8405 if (!txq->txq_stopping)
8406 wm_transmit_locked(ifp, txq);
8407 mutex_exit(txq->txq_lock);
8408 }
8409
8410 return 0;
8411 }
8412
8413 static void
8414 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8415 {
8416
8417 wm_send_common_locked(ifp, txq, true);
8418 }
8419
8420 static void
8421 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8422 bool is_transmit)
8423 {
8424 struct wm_softc *sc = ifp->if_softc;
8425 struct mbuf *m0;
8426 struct wm_txsoft *txs;
8427 bus_dmamap_t dmamap;
8428 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
8429 bus_addr_t curaddr;
8430 bus_size_t seglen, curlen;
8431 uint32_t cksumcmd;
8432 uint8_t cksumfields;
8433 bool remap = true;
8434
8435 KASSERT(mutex_owned(txq->txq_lock));
8436 KASSERT(!txq->txq_stopping);
8437
8438 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8439 return;
8440
8441 if (__predict_false(wm_linkdown_discard(txq))) {
8442 do {
8443 if (is_transmit)
8444 m0 = pcq_get(txq->txq_interq);
8445 else
8446 IFQ_DEQUEUE(&ifp->if_snd, m0);
8447 /*
8448 * Increment the successful-transmit counter, as in the
8449 * case where the packet is discarded by a link-down PHY.
8450 */
8451 if (m0 != NULL) {
8452 if_statinc(ifp, if_opackets);
8453 m_freem(m0);
8454 }
8455 } while (m0 != NULL);
8456 return;
8457 }
8458
8459 /* Remember the previous number of free descriptors. */
8460 ofree = txq->txq_free;
8461
8462 /*
8463 * Loop through the send queue, setting up transmit descriptors
8464 * until we drain the queue, or use up all available transmit
8465 * descriptors.
8466 */
8467 for (;;) {
8468 m0 = NULL;
8469
8470 /* Get a work queue entry. */
8471 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8472 wm_txeof(txq, UINT_MAX);
8473 if (txq->txq_sfree == 0) {
8474 DPRINTF(sc, WM_DEBUG_TX,
8475 ("%s: TX: no free job descriptors\n",
8476 device_xname(sc->sc_dev)));
8477 WM_Q_EVCNT_INCR(txq, txsstall);
8478 break;
8479 }
8480 }
8481
8482 /* Grab a packet off the queue. */
8483 if (is_transmit)
8484 m0 = pcq_get(txq->txq_interq);
8485 else
8486 IFQ_DEQUEUE(&ifp->if_snd, m0);
8487 if (m0 == NULL)
8488 break;
8489
8490 DPRINTF(sc, WM_DEBUG_TX,
8491 ("%s: TX: have packet to transmit: %p\n",
8492 device_xname(sc->sc_dev), m0));
8493
8494 txs = &txq->txq_soft[txq->txq_snext];
8495 dmamap = txs->txs_dmamap;
8496
8497 use_tso = (m0->m_pkthdr.csum_flags &
8498 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
8499
8500 /*
8501 * So says the Linux driver:
8502 * The controller does a simple calculation to make sure
8503 * there is enough room in the FIFO before initiating the
8504 * DMA for each buffer. The calc is:
8505 * 4 = ceil(buffer len / MSS)
8506 * To make sure we don't overrun the FIFO, adjust the max
8507 * buffer len if the MSS drops.
8508 */
8509 dmamap->dm_maxsegsz =
8510 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
8511 ? m0->m_pkthdr.segsz << 2
8512 : WTX_MAX_LEN;
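/*
 * Example: with an MSS (segsz) of 1460, each DMA segment is capped
 * at 1460 << 2 = 5840 bytes (assuming that is below WTX_MAX_LEN);
 * larger MSS values fall back to WTX_MAX_LEN.
 */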
8513
8514 /*
8515 * Load the DMA map. If this fails, the packet either
8516 * didn't fit in the allotted number of segments, or we
8517 * were short on resources. For the too-many-segments
8518 * case, we simply report an error and drop the packet,
8519 * since we can't sanely copy a jumbo packet to a single
8520 * buffer.
8521 */
8522 retry:
8523 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8524 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8525 if (__predict_false(error)) {
8526 if (error == EFBIG) {
8527 if (remap == true) {
8528 struct mbuf *m;
8529
8530 remap = false;
8531 m = m_defrag(m0, M_NOWAIT);
8532 if (m != NULL) {
8533 WM_Q_EVCNT_INCR(txq, defrag);
8534 m0 = m;
8535 goto retry;
8536 }
8537 }
8538 WM_Q_EVCNT_INCR(txq, toomanyseg);
8539 log(LOG_ERR, "%s: Tx packet consumes too many "
8540 "DMA segments, dropping...\n",
8541 device_xname(sc->sc_dev));
8542 wm_dump_mbuf_chain(sc, m0);
8543 m_freem(m0);
8544 continue;
8545 }
8546 /* Short on resources, just stop for now. */
8547 DPRINTF(sc, WM_DEBUG_TX,
8548 ("%s: TX: dmamap load failed: %d\n",
8549 device_xname(sc->sc_dev), error));
8550 break;
8551 }
8552
8553 segs_needed = dmamap->dm_nsegs;
8554 if (use_tso) {
8555 /* For sentinel descriptor; see below. */
8556 segs_needed++;
8557 }
8558
8559 /*
8560 * Ensure we have enough descriptors free to describe
8561 * the packet. Note, we always reserve one descriptor
8562 * at the end of the ring due to the semantics of the
8563 * TDT register, plus one more in the event we need
8564 * to load offload context.
8565 */
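/*
 * Example: a packet mapped to 5 DMA segments proceeds only when
 * txq_free >= 7 (5 segments plus the 2 reserved descriptors).
 */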
8566 if (segs_needed > txq->txq_free - 2) {
8567 /*
8568 * Not enough free descriptors to transmit this
8569 * packet. We haven't committed anything yet,
8570 * so just unload the DMA map, put the packet
8571 * back on the queue, and punt. Notify the upper
8572 * layer that there are no more slots left.
8573 */
8574 DPRINTF(sc, WM_DEBUG_TX,
8575 ("%s: TX: need %d (%d) descriptors, have %d\n",
8576 device_xname(sc->sc_dev), dmamap->dm_nsegs,
8577 segs_needed, txq->txq_free - 1));
8578 txq->txq_flags |= WM_TXQ_NO_SPACE;
8579 bus_dmamap_unload(sc->sc_dmat, dmamap);
8580 WM_Q_EVCNT_INCR(txq, txdstall);
8581 break;
8582 }
8583
8584 /*
8585 * Check for 82547 Tx FIFO bug. We need to do this
8586 * once we know we can transmit the packet, since we
8587 * do some internal FIFO space accounting here.
8588 */
8589 if (sc->sc_type == WM_T_82547 &&
8590 wm_82547_txfifo_bugchk(sc, m0)) {
8591 DPRINTF(sc, WM_DEBUG_TX,
8592 ("%s: TX: 82547 Tx FIFO bug detected\n",
8593 device_xname(sc->sc_dev)));
8594 txq->txq_flags |= WM_TXQ_NO_SPACE;
8595 bus_dmamap_unload(sc->sc_dmat, dmamap);
8596 WM_Q_EVCNT_INCR(txq, fifo_stall);
8597 break;
8598 }
8599
8600 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8601
8602 DPRINTF(sc, WM_DEBUG_TX,
8603 ("%s: TX: packet has %d (%d) DMA segments\n",
8604 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8605
8606 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8607
8608 /*
8609 * Store a pointer to the packet so that we can free it
8610 * later.
8611 *
8612 * Initially, we consider the number of descriptors the
8613 * packet uses the number of DMA segments. This may be
8614 * incremented by 1 if we do checksum offload (a descriptor
8615 * is used to set the checksum context).
8616 */
8617 txs->txs_mbuf = m0;
8618 txs->txs_firstdesc = txq->txq_next;
8619 txs->txs_ndesc = segs_needed;
8620
8621 /* Set up offload parameters for this packet. */
8622 if (m0->m_pkthdr.csum_flags &
8623 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8624 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8625 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8626 wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
8627 } else {
8628 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8629 txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
8630 cksumcmd = 0;
8631 cksumfields = 0;
8632 }
8633
8634 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
8635
8636 /* Sync the DMA map. */
8637 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8638 BUS_DMASYNC_PREWRITE);
8639
8640 /* Initialize the transmit descriptor. */
8641 for (nexttx = txq->txq_next, seg = 0;
8642 seg < dmamap->dm_nsegs; seg++) {
8643 for (seglen = dmamap->dm_segs[seg].ds_len,
8644 curaddr = dmamap->dm_segs[seg].ds_addr;
8645 seglen != 0;
8646 curaddr += curlen, seglen -= curlen,
8647 nexttx = WM_NEXTTX(txq, nexttx)) {
8648 curlen = seglen;
8649
8650 /*
8651 * So says the Linux driver:
8652 * Work around for premature descriptor
8653 * write-backs in TSO mode. Append a
8654 * 4-byte sentinel descriptor.
8655 */
8656 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
8657 curlen > 8)
8658 curlen -= 4;
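/*
 * The 4 bytes trimmed here are emitted as one extra
 * descriptor on the next pass of this loop; that is
 * the sentinel slot reserved via segs_needed++ above.
 */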
8659
8660 wm_set_dma_addr(
8661 &txq->txq_descs[nexttx].wtx_addr, curaddr);
8662 txq->txq_descs[nexttx].wtx_cmdlen
8663 = htole32(cksumcmd | curlen);
8664 txq->txq_descs[nexttx].wtx_fields.wtxu_status
8665 = 0;
8666 txq->txq_descs[nexttx].wtx_fields.wtxu_options
8667 = cksumfields;
8668 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
8669 lasttx = nexttx;
8670
8671 DPRINTF(sc, WM_DEBUG_TX,
8672 ("%s: TX: desc %d: low %#" PRIx64 ", "
8673 "len %#04zx\n",
8674 device_xname(sc->sc_dev), nexttx,
8675 (uint64_t)curaddr, curlen));
8676 }
8677 }
8678
8679 KASSERT(lasttx != -1);
8680
8681 /*
8682 * Set up the command byte on the last descriptor of
8683 * the packet. If we're in the interrupt delay window,
8684 * delay the interrupt.
8685 */
8686 txq->txq_descs[lasttx].wtx_cmdlen |=
8687 htole32(WTX_CMD_EOP | WTX_CMD_RS);
8688
8689 /*
8690 * If VLANs are enabled and the packet has a VLAN tag, set
8691 * up the descriptor to encapsulate the packet for us.
8692 *
8693 * This is only valid on the last descriptor of the packet.
8694 */
8695 if (vlan_has_tag(m0)) {
8696 txq->txq_descs[lasttx].wtx_cmdlen |=
8697 htole32(WTX_CMD_VLE);
8698 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
8699 = htole16(vlan_get_tag(m0));
8700 }
8701
8702 txs->txs_lastdesc = lasttx;
8703
8704 DPRINTF(sc, WM_DEBUG_TX,
8705 ("%s: TX: desc %d: cmdlen 0x%08x\n",
8706 device_xname(sc->sc_dev),
8707 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8708
8709 /* Sync the descriptors we're using. */
8710 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8711 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8712
8713 /* Give the packet to the chip. */
8714 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8715
8716 DPRINTF(sc, WM_DEBUG_TX,
8717 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8718
8719 DPRINTF(sc, WM_DEBUG_TX,
8720 ("%s: TX: finished transmitting packet, job %d\n",
8721 device_xname(sc->sc_dev), txq->txq_snext));
8722
8723 /* Advance the tx pointer. */
8724 txq->txq_free -= txs->txs_ndesc;
8725 txq->txq_next = nexttx;
8726
8727 txq->txq_sfree--;
8728 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8729
8730 /* Pass the packet to any BPF listeners. */
8731 bpf_mtap(ifp, m0, BPF_D_OUT);
8732 }
8733
8734 if (m0 != NULL) {
8735 txq->txq_flags |= WM_TXQ_NO_SPACE;
8736 WM_Q_EVCNT_INCR(txq, descdrop);
8737 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8738 __func__));
8739 m_freem(m0);
8740 }
8741
8742 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8743 /* No more slots; notify upper layer. */
8744 txq->txq_flags |= WM_TXQ_NO_SPACE;
8745 }
8746
8747 if (txq->txq_free != ofree) {
8748 /* Set a watchdog timer in case the chip flakes out. */
8749 txq->txq_lastsent = time_uptime;
8750 txq->txq_sending = true;
8751 }
8752 }
8753
8754 /*
8755 * wm_nq_tx_offload:
8756 *
8757 * Set up TCP/IP checksumming parameters for the
8758 * specified packet, for NEWQUEUE devices
8759 */
8760 static void
8761 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8762 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
8763 {
8764 struct mbuf *m0 = txs->txs_mbuf;
8765 uint32_t vl_len, mssidx, cmdc;
8766 struct ether_header *eh;
8767 int offset, iphl;
8768
8769 /*
8770 * XXX It would be nice if the mbuf pkthdr had offset
8771 * fields for the protocol headers.
8772 */
8773 *cmdlenp = 0;
8774 *fieldsp = 0;
8775
8776 eh = mtod(m0, struct ether_header *);
8777 switch (htons(eh->ether_type)) {
8778 case ETHERTYPE_IP:
8779 case ETHERTYPE_IPV6:
8780 offset = ETHER_HDR_LEN;
8781 break;
8782
8783 case ETHERTYPE_VLAN:
8784 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8785 break;
8786
8787 default:
8788 /* Don't support this protocol or encapsulation. */
8789 *do_csum = false;
8790 return;
8791 }
8792 *do_csum = true;
8793 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
8794 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
8795
8796 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
8797 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
8798
8799 if ((m0->m_pkthdr.csum_flags &
8800 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8801 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8802 } else {
8803 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8804 }
8805 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
8806 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
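/*
 * Example: an untagged frame with a minimal IPv4 header packs
 * MACLEN = ETHER_HDR_LEN (14) and IPLEN = 20 into vl_len here.
 */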
8807
8808 if (vlan_has_tag(m0)) {
8809 vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
8810 << NQTXC_VLLEN_VLAN_SHIFT);
8811 *cmdlenp |= NQTX_CMD_VLE;
8812 }
8813
8814 mssidx = 0;
8815
8816 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8817 int hlen = offset + iphl;
8818 int tcp_hlen;
8819 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8820
8821 if (__predict_false(m0->m_len <
8822 (hlen + sizeof(struct tcphdr)))) {
8823 /*
8824 * TCP/IP headers are not in the first mbuf; we need
8825 * to do this the slow and painful way. Let's just
8826 * hope this doesn't happen very often.
8827 */
8828 struct tcphdr th;
8829
8830 WM_Q_EVCNT_INCR(txq, tsopain);
8831
8832 m_copydata(m0, hlen, sizeof(th), &th);
8833 if (v4) {
8834 struct ip ip;
8835
8836 m_copydata(m0, offset, sizeof(ip), &ip);
8837 ip.ip_len = 0;
8838 m_copyback(m0,
8839 offset + offsetof(struct ip, ip_len),
8840 sizeof(ip.ip_len), &ip.ip_len);
8841 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8842 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8843 } else {
8844 struct ip6_hdr ip6;
8845
8846 m_copydata(m0, offset, sizeof(ip6), &ip6);
8847 ip6.ip6_plen = 0;
8848 m_copyback(m0,
8849 offset + offsetof(struct ip6_hdr, ip6_plen),
8850 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8851 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8852 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8853 }
8854 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8855 sizeof(th.th_sum), &th.th_sum);
8856
8857 tcp_hlen = th.th_off << 2;
8858 } else {
8859 /*
8860 * TCP/IP headers are in the first mbuf; we can do
8861 * this the easy way.
8862 */
8863 struct tcphdr *th;
8864
8865 if (v4) {
8866 struct ip *ip =
8867 (void *)(mtod(m0, char *) + offset);
8868 th = (void *)(mtod(m0, char *) + hlen);
8869
8870 ip->ip_len = 0;
8871 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8872 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8873 } else {
8874 struct ip6_hdr *ip6 =
8875 (void *)(mtod(m0, char *) + offset);
8876 th = (void *)(mtod(m0, char *) + hlen);
8877
8878 ip6->ip6_plen = 0;
8879 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8880 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8881 }
8882 tcp_hlen = th->th_off << 2;
8883 }
8884 hlen += tcp_hlen;
8885 *cmdlenp |= NQTX_CMD_TSE;
8886
8887 if (v4) {
8888 WM_Q_EVCNT_INCR(txq, tso);
8889 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
8890 } else {
8891 WM_Q_EVCNT_INCR(txq, tso6);
8892 *fieldsp |= NQTXD_FIELDS_TUXSM;
8893 }
		*fieldsp |= ((m0->m_pkthdr.len - hlen)
		    << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen)
		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8896 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
8897 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
8898 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
8899 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
8900 } else {
8901 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
8902 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8903 }
8904
8905 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
8906 *fieldsp |= NQTXD_FIELDS_IXSM;
8907 cmdc |= NQTXC_CMD_IP4;
8908 }
8909
8910 if (m0->m_pkthdr.csum_flags &
8911 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
8912 WM_Q_EVCNT_INCR(txq, tusum);
8913 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
8914 cmdc |= NQTXC_CMD_TCP;
8915 else
8916 cmdc |= NQTXC_CMD_UDP;
8917
8918 cmdc |= NQTXC_CMD_IP4;
8919 *fieldsp |= NQTXD_FIELDS_TUXSM;
8920 }
8921 if (m0->m_pkthdr.csum_flags &
8922 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
8923 WM_Q_EVCNT_INCR(txq, tusum6);
8924 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
8925 cmdc |= NQTXC_CMD_TCP;
8926 else
8927 cmdc |= NQTXC_CMD_UDP;
8928
8929 cmdc |= NQTXC_CMD_IP6;
8930 *fieldsp |= NQTXD_FIELDS_TUXSM;
8931 }
8932
8933 /*
8934 * We don't have to write context descriptor for every packet to
8935 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
8936 * I210 and I211. It is enough to write once per a Tx queue for these
8937 * controllers.
8938 * It would be overhead to write context descriptor for every packet,
8939 * however it does not cause problems.
8940 */
8941 /* Fill in the context descriptor. */
8942 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
8943 htole32(vl_len);
8944 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
8945 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
8946 htole32(cmdc);
8947 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
8948 htole32(mssidx);
8949 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8950 DPRINTF(sc, WM_DEBUG_TX,
8951 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
8952 txq->txq_next, 0, vl_len));
8953 DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
8954 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8955 txs->txs_ndesc++;
8956 }
8957
8958 /*
8959 * wm_nq_start: [ifnet interface function]
8960 *
8961 * Start packet transmission on the interface for NEWQUEUE devices
8962 */
8963 static void
8964 wm_nq_start(struct ifnet *ifp)
8965 {
8966 struct wm_softc *sc = ifp->if_softc;
8967 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8968
8969 KASSERT(if_is_mpsafe(ifp));
8970 /*
8971 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8972 */
8973
8974 mutex_enter(txq->txq_lock);
8975 if (!txq->txq_stopping)
8976 wm_nq_start_locked(ifp);
8977 mutex_exit(txq->txq_lock);
8978 }
8979
8980 static void
8981 wm_nq_start_locked(struct ifnet *ifp)
8982 {
8983 struct wm_softc *sc = ifp->if_softc;
8984 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8985
8986 wm_nq_send_common_locked(ifp, txq, false);
8987 }
8988
8989 static int
8990 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
8991 {
8992 int qid;
8993 struct wm_softc *sc = ifp->if_softc;
8994 struct wm_txqueue *txq;
8995
8996 qid = wm_select_txqueue(ifp, m);
8997 txq = &sc->sc_queue[qid].wmq_txq;
8998
8999 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
9000 m_freem(m);
9001 WM_Q_EVCNT_INCR(txq, pcqdrop);
9002 return ENOBUFS;
9003 }
9004
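	/*
	 * Account for the outgoing bytes and multicasts here; this
	 * multiqueue path bypasses the common if_transmit() accounting
	 * used for the if_snd path.
	 */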
9005 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
9006 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
9007 if (m->m_flags & M_MCAST)
9008 if_statinc_ref(nsr, if_omcasts);
9009 IF_STAT_PUTREF(ifp);
9010
9011 /*
9012 * The situations which this mutex_tryenter() fails at running time
9013 * are below two patterns.
9014 * (1) contention with interrupt handler(wm_txrxintr_msix())
9015 * (2) contention with deferred if_start softint(wm_handle_queue())
9016 * In the case of (1), the last packet enqueued to txq->txq_interq is
9017 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
9018 * In the case of (2), the last packet enqueued to txq->txq_interq is
9019 * also dequeued by wm_deferred_start_locked(). So, it does not get
9020 * stuck, either.
9021 */
9022 if (mutex_tryenter(txq->txq_lock)) {
9023 if (!txq->txq_stopping)
9024 wm_nq_transmit_locked(ifp, txq);
9025 mutex_exit(txq->txq_lock);
9026 }
9027
9028 return 0;
9029 }
9030
9031 static void
9032 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
9033 {
9034
9035 wm_nq_send_common_locked(ifp, txq, true);
9036 }
9037
9038 static void
9039 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
9040 bool is_transmit)
9041 {
9042 struct wm_softc *sc = ifp->if_softc;
9043 struct mbuf *m0;
9044 struct wm_txsoft *txs;
9045 bus_dmamap_t dmamap;
9046 int error, nexttx, lasttx = -1, seg, segs_needed;
9047 bool do_csum, sent;
9048 bool remap = true;
9049
9050 KASSERT(mutex_owned(txq->txq_lock));
9051 KASSERT(!txq->txq_stopping);
9052
9053 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
9054 return;
9055
9056 if (__predict_false(wm_linkdown_discard(txq))) {
9057 do {
9058 if (is_transmit)
9059 m0 = pcq_get(txq->txq_interq);
9060 else
9061 IFQ_DEQUEUE(&ifp->if_snd, m0);
9062 /*
9063 * increment successed packet counter as in the case
9064 * which the packet is discarded by link down PHY.
9065 */
9066 if (m0 != NULL) {
9067 if_statinc(ifp, if_opackets);
9068 m_freem(m0);
9069 }
9070 } while (m0 != NULL);
9071 return;
9072 }
9073
9074 sent = false;
9075
9076 /*
9077 * Loop through the send queue, setting up transmit descriptors
9078 * until we drain the queue, or use up all available transmit
9079 * descriptors.
9080 */
9081 for (;;) {
9082 m0 = NULL;
9083
9084 /* Get a work queue entry. */
9085 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
9086 wm_txeof(txq, UINT_MAX);
9087 if (txq->txq_sfree == 0) {
9088 DPRINTF(sc, WM_DEBUG_TX,
9089 ("%s: TX: no free job descriptors\n",
9090 device_xname(sc->sc_dev)));
9091 WM_Q_EVCNT_INCR(txq, txsstall);
9092 break;
9093 }
9094 }
9095
9096 /* Grab a packet off the queue. */
9097 if (is_transmit)
9098 m0 = pcq_get(txq->txq_interq);
9099 else
9100 IFQ_DEQUEUE(&ifp->if_snd, m0);
9101 if (m0 == NULL)
9102 break;
9103
9104 DPRINTF(sc, WM_DEBUG_TX,
9105 ("%s: TX: have packet to transmit: %p\n",
9106 device_xname(sc->sc_dev), m0));
9107
9108 txs = &txq->txq_soft[txq->txq_snext];
9109 dmamap = txs->txs_dmamap;
9110
9111 /*
9112 * Load the DMA map. If this fails, the packet either
9113 * didn't fit in the allotted number of segments, or we
9114 * were short on resources. For the too-many-segments
9115 * case, we simply report an error and drop the packet,
9116 * since we can't sanely copy a jumbo packet to a single
9117 * buffer.
9118 */
9119 retry:
9120 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
9121 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
9122 if (__predict_false(error)) {
9123 if (error == EFBIG) {
9124 if (remap == true) {
9125 struct mbuf *m;
9126
9127 remap = false;
9128 m = m_defrag(m0, M_NOWAIT);
9129 if (m != NULL) {
9130 WM_Q_EVCNT_INCR(txq, defrag);
9131 m0 = m;
9132 goto retry;
9133 }
9134 }
9135 WM_Q_EVCNT_INCR(txq, toomanyseg);
9136 log(LOG_ERR, "%s: Tx packet consumes too many "
9137 "DMA segments, dropping...\n",
9138 device_xname(sc->sc_dev));
9139 wm_dump_mbuf_chain(sc, m0);
9140 m_freem(m0);
9141 continue;
9142 }
9143 /* Short on resources, just stop for now. */
9144 DPRINTF(sc, WM_DEBUG_TX,
9145 ("%s: TX: dmamap load failed: %d\n",
9146 device_xname(sc->sc_dev), error));
9147 break;
9148 }
9149
9150 segs_needed = dmamap->dm_nsegs;
9151
9152 /*
9153 * Ensure we have enough descriptors free to describe
9154 * the packet. Note, we always reserve one descriptor
9155 * at the end of the ring due to the semantics of the
9156 * TDT register, plus one more in the event we need
9157 * to load offload context.
9158 */
9159 if (segs_needed > txq->txq_free - 2) {
9160 /*
9161 * Not enough free descriptors to transmit this
9162 * packet. We haven't committed anything yet,
9163 * so just unload the DMA map, put the packet
9164 * pack on the queue, and punt. Notify the upper
9165 * layer that there are no more slots left.
9166 */
9167 DPRINTF(sc, WM_DEBUG_TX,
9168 ("%s: TX: need %d (%d) descriptors, have %d\n",
9169 device_xname(sc->sc_dev), dmamap->dm_nsegs,
9170 segs_needed, txq->txq_free - 1));
9171 txq->txq_flags |= WM_TXQ_NO_SPACE;
9172 bus_dmamap_unload(sc->sc_dmat, dmamap);
9173 WM_Q_EVCNT_INCR(txq, txdstall);
9174 break;
9175 }
9176
9177 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
9178
9179 DPRINTF(sc, WM_DEBUG_TX,
9180 ("%s: TX: packet has %d (%d) DMA segments\n",
9181 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
9182
9183 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
9184
9185 /*
9186 * Store a pointer to the packet so that we can free it
9187 * later.
9188 *
9189 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments. This may be
9191 * incremented by 1 if we do checksum offload (a descriptor
9192 * is used to set the checksum context).
9193 */
9194 txs->txs_mbuf = m0;
9195 txs->txs_firstdesc = txq->txq_next;
9196 txs->txs_ndesc = segs_needed;
9197
9198 /* Set up offload parameters for this packet. */
9199 uint32_t cmdlen, fields, dcmdlen;
9200 if (m0->m_pkthdr.csum_flags &
9201 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
9202 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9203 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
9204 wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
9205 &do_csum);
9206 } else {
9207 do_csum = false;
9208 cmdlen = 0;
9209 fields = 0;
9210 }
9211
9212 /* Sync the DMA map. */
9213 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
9214 BUS_DMASYNC_PREWRITE);
9215
9216 /* Initialize the first transmit descriptor. */
9217 nexttx = txq->txq_next;
9218 if (!do_csum) {
9219 /* Set up a legacy descriptor */
9220 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
9221 dmamap->dm_segs[0].ds_addr);
9222 txq->txq_descs[nexttx].wtx_cmdlen =
9223 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
9224 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
9225 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
9226 if (vlan_has_tag(m0)) {
9227 txq->txq_descs[nexttx].wtx_cmdlen |=
9228 htole32(WTX_CMD_VLE);
9229 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
9230 htole16(vlan_get_tag(m0));
9231 } else
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
				    0;
9233
9234 dcmdlen = 0;
9235 } else {
9236 /* Set up an advanced data descriptor */
9237 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9238 htole64(dmamap->dm_segs[0].ds_addr);
9239 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
9240 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9241 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
9242 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
9243 htole32(fields);
9244 DPRINTF(sc, WM_DEBUG_TX,
9245 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
9246 device_xname(sc->sc_dev), nexttx,
9247 (uint64_t)dmamap->dm_segs[0].ds_addr));
9248 DPRINTF(sc, WM_DEBUG_TX,
9249 ("\t 0x%08x%08x\n", fields,
9250 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
9251 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
9252 }
9253
9254 lasttx = nexttx;
9255 nexttx = WM_NEXTTX(txq, nexttx);
9256 /*
9257 * Fill in the next descriptors. Legacy or advanced format
9258 * is the same here.
9259 */
9260 for (seg = 1; seg < dmamap->dm_nsegs;
9261 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
9262 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9263 htole64(dmamap->dm_segs[seg].ds_addr);
9264 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9265 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
9266 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
9267 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
9268 lasttx = nexttx;
9269
9270 DPRINTF(sc, WM_DEBUG_TX,
9271 ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
9272 device_xname(sc->sc_dev), nexttx,
9273 (uint64_t)dmamap->dm_segs[seg].ds_addr,
9274 dmamap->dm_segs[seg].ds_len));
9275 }
9276
9277 KASSERT(lasttx != -1);
9278
9279 /*
9280 * Set up the command byte on the last descriptor of
9281 * the packet. If we're in the interrupt delay window,
9282 * delay the interrupt.
9283 */
9284 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
9285 (NQTX_CMD_EOP | NQTX_CMD_RS));
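		/*
		 * The EOP and RS bits occupy the same positions in legacy
		 * and advanced descriptors (asserted above), so writing
		 * them through the legacy view works for both formats.
		 */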
9286 txq->txq_descs[lasttx].wtx_cmdlen |=
9287 htole32(WTX_CMD_EOP | WTX_CMD_RS);
9288
9289 txs->txs_lastdesc = lasttx;
9290
9291 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
9292 device_xname(sc->sc_dev),
9293 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9294
9295 /* Sync the descriptors we're using. */
9296 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9297 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
9298
9299 /* Give the packet to the chip. */
9300 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9301 sent = true;
9302
9303 DPRINTF(sc, WM_DEBUG_TX,
9304 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
9305
9306 DPRINTF(sc, WM_DEBUG_TX,
9307 ("%s: TX: finished transmitting packet, job %d\n",
9308 device_xname(sc->sc_dev), txq->txq_snext));
9309
9310 /* Advance the tx pointer. */
9311 txq->txq_free -= txs->txs_ndesc;
9312 txq->txq_next = nexttx;
9313
9314 txq->txq_sfree--;
9315 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9316
9317 /* Pass the packet to any BPF listeners. */
9318 bpf_mtap(ifp, m0, BPF_D_OUT);
9319 }
9320
9321 if (m0 != NULL) {
9322 txq->txq_flags |= WM_TXQ_NO_SPACE;
9323 WM_Q_EVCNT_INCR(txq, descdrop);
9324 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
9325 __func__));
9326 m_freem(m0);
9327 }
9328
9329 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9330 /* No more slots; notify upper layer. */
9331 txq->txq_flags |= WM_TXQ_NO_SPACE;
9332 }
9333
9334 if (sent) {
9335 /* Set a watchdog timer in case the chip flakes out. */
9336 txq->txq_lastsent = time_uptime;
9337 txq->txq_sending = true;
9338 }
9339 }
9340
9341 static void
9342 wm_deferred_start_locked(struct wm_txqueue *txq)
9343 {
9344 struct wm_softc *sc = txq->txq_sc;
9345 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9346 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
9347 int qid = wmq->wmq_id;
9348
9349 KASSERT(mutex_owned(txq->txq_lock));
9350 KASSERT(!txq->txq_stopping);
9351
9352 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX needed for ALTQ or single-CPU systems */
9354 if (qid == 0)
9355 wm_nq_start_locked(ifp);
9356 wm_nq_transmit_locked(ifp, txq);
9357 } else {
		/* XXX needed for ALTQ or single-CPU systems */
9359 if (qid == 0)
9360 wm_start_locked(ifp);
9361 wm_transmit_locked(ifp, txq);
9362 }
9363 }
9364
9365 /* Interrupt */
9366
9367 /*
9368 * wm_txeof:
9369 *
9370 * Helper; handle transmit interrupts.
9371 */
9372 static bool
9373 wm_txeof(struct wm_txqueue *txq, u_int limit)
9374 {
9375 struct wm_softc *sc = txq->txq_sc;
9376 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9377 struct wm_txsoft *txs;
9378 int count = 0;
9379 int i;
9380 uint8_t status;
9381 bool more = false;
9382
9383 KASSERT(mutex_owned(txq->txq_lock));
9384
9385 if (txq->txq_stopping)
9386 return false;
9387
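	/* About to reclaim Tx descriptors, so clear the no-space flag. */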
9388 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
9389
9390 /*
9391 * Go through the Tx list and free mbufs for those
9392 * frames which have been transmitted.
9393 */
9394 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
9395 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
9396 txs = &txq->txq_soft[i];
9397
9398 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
9399 device_xname(sc->sc_dev), i));
9400
9401 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
9402 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9403
9404 status =
9405 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
9406 if ((status & WTX_ST_DD) == 0) {
9407 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
9408 BUS_DMASYNC_PREREAD);
9409 break;
9410 }
9411
9412 if (limit-- == 0) {
9413 more = true;
9414 DPRINTF(sc, WM_DEBUG_TX,
9415 ("%s: TX: loop limited, job %d is not processed\n",
9416 device_xname(sc->sc_dev), i));
9417 break;
9418 }
9419
9420 count++;
9421 DPRINTF(sc, WM_DEBUG_TX,
9422 ("%s: TX: job %d done: descs %d..%d\n",
9423 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
9424 txs->txs_lastdesc));
9425
9426 /*
9427 * XXX We should probably be using the statistics
9428 * XXX registers, but I don't know if they exist
9429 * XXX on chips before the i82544.
9430 */
9431
9432 #ifdef WM_EVENT_COUNTERS
9433 if (status & WTX_ST_TU)
9434 WM_Q_EVCNT_INCR(txq, underrun);
9435 #endif /* WM_EVENT_COUNTERS */
9436
9437 /*
9438 * 82574 and newer's document says the status field has neither
9439 * EC (Excessive Collision) bit nor LC (Late Collision) bit
9440 * (reserved). Refer "PCIe GbE Controller Open Source Software
9441 * Developer's Manual", 82574 datasheet and newer.
9442 *
9443 * XXX I saw the LC bit was set on I218 even though the media
9444 * was full duplex, so the bit might be used for other
9445 * meaning ...(I have no document).
9446 */
9447
9448 if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
9449 && ((sc->sc_type < WM_T_82574)
9450 || (sc->sc_type == WM_T_80003))) {
9451 if_statinc(ifp, if_oerrors);
9452 if (status & WTX_ST_LC)
9453 log(LOG_WARNING, "%s: late collision\n",
9454 device_xname(sc->sc_dev));
9455 else if (status & WTX_ST_EC) {
9456 if_statadd(ifp, if_collisions,
9457 TX_COLLISION_THRESHOLD + 1);
9458 log(LOG_WARNING, "%s: excessive collisions\n",
9459 device_xname(sc->sc_dev));
9460 }
9461 } else
9462 if_statinc(ifp, if_opackets);
9463
9464 txq->txq_packets++;
9465 txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
9466
9467 txq->txq_free += txs->txs_ndesc;
9468 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
9469 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
9470 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
9471 m_freem(txs->txs_mbuf);
9472 txs->txs_mbuf = NULL;
9473 }
9474
9475 /* Update the dirty transmit buffer pointer. */
9476 txq->txq_sdirty = i;
9477 DPRINTF(sc, WM_DEBUG_TX,
9478 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
9479
9480 if (count != 0)
9481 rnd_add_uint32(&sc->rnd_source, count);
9482
9483 /*
9484 * If there are no more pending transmissions, cancel the watchdog
9485 * timer.
9486 */
9487 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
9488 txq->txq_sending = false;
9489
9490 return more;
9491 }
9492
9493 static inline uint32_t
9494 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
9495 {
9496 struct wm_softc *sc = rxq->rxq_sc;
9497
9498 if (sc->sc_type == WM_T_82574)
9499 return EXTRXC_STATUS(
9500 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9501 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9502 return NQRXC_STATUS(
9503 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9504 else
9505 return rxq->rxq_descs[idx].wrx_status;
9506 }
9507
9508 static inline uint32_t
9509 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
9510 {
9511 struct wm_softc *sc = rxq->rxq_sc;
9512
9513 if (sc->sc_type == WM_T_82574)
9514 return EXTRXC_ERROR(
9515 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9516 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9517 return NQRXC_ERROR(
9518 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9519 else
9520 return rxq->rxq_descs[idx].wrx_errors;
9521 }
9522
9523 static inline uint16_t
9524 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
9525 {
9526 struct wm_softc *sc = rxq->rxq_sc;
9527
9528 if (sc->sc_type == WM_T_82574)
9529 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
9530 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9531 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
9532 else
9533 return rxq->rxq_descs[idx].wrx_special;
9534 }
9535
9536 static inline int
9537 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
9538 {
9539 struct wm_softc *sc = rxq->rxq_sc;
9540
9541 if (sc->sc_type == WM_T_82574)
9542 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
9543 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9544 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
9545 else
9546 return rxq->rxq_descs[idx].wrx_len;
9547 }
9548
9549 #ifdef WM_DEBUG
9550 static inline uint32_t
9551 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
9552 {
9553 struct wm_softc *sc = rxq->rxq_sc;
9554
9555 if (sc->sc_type == WM_T_82574)
9556 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
9557 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9558 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
9559 else
9560 return 0;
9561 }
9562
9563 static inline uint8_t
9564 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
9565 {
9566 struct wm_softc *sc = rxq->rxq_sc;
9567
9568 if (sc->sc_type == WM_T_82574)
9569 return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
9570 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9571 return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
9572 else
9573 return 0;
9574 }
9575 #endif /* WM_DEBUG */
9576
9577 static inline bool
9578 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
9579 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9580 {
9581
9582 if (sc->sc_type == WM_T_82574)
9583 return (status & ext_bit) != 0;
9584 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9585 return (status & nq_bit) != 0;
9586 else
9587 return (status & legacy_bit) != 0;
9588 }
9589
9590 static inline bool
9591 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
9592 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9593 {
9594
9595 if (sc->sc_type == WM_T_82574)
9596 return (error & ext_bit) != 0;
9597 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9598 return (error & nq_bit) != 0;
9599 else
9600 return (error & legacy_bit) != 0;
9601 }
9602
9603 static inline bool
9604 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
9605 {
9606
9607 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9608 WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
9609 return true;
9610 else
9611 return false;
9612 }
9613
9614 static inline bool
9615 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
9616 {
9617 struct wm_softc *sc = rxq->rxq_sc;
9618
9619 /* XXX missing error bit for newqueue? */
9620 if (wm_rxdesc_is_set_error(sc, errors,
9621 WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
9622 EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
9623 | EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
9624 NQRXC_ERROR_RXE)) {
9625 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
9626 EXTRXC_ERROR_SE, 0))
9627 log(LOG_WARNING, "%s: symbol error\n",
9628 device_xname(sc->sc_dev));
9629 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
9630 EXTRXC_ERROR_SEQ, 0))
9631 log(LOG_WARNING, "%s: receive sequence error\n",
9632 device_xname(sc->sc_dev));
9633 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
9634 EXTRXC_ERROR_CE, 0))
9635 log(LOG_WARNING, "%s: CRC error\n",
9636 device_xname(sc->sc_dev));
9637 return true;
9638 }
9639
9640 return false;
9641 }
9642
9643 static inline bool
9644 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
9645 {
9646 struct wm_softc *sc = rxq->rxq_sc;
9647
9648 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
9649 NQRXC_STATUS_DD)) {
9650 /* We have processed all of the receive descriptors. */
9651 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
9652 return false;
9653 }
9654
9655 return true;
9656 }
9657
9658 static inline bool
9659 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
9660 uint16_t vlantag, struct mbuf *m)
9661 {
9662
9663 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9664 WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
9665 vlan_set_tag(m, le16toh(vlantag));
9666 }
9667
9668 return true;
9669 }
9670
9671 static inline void
9672 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
9673 uint32_t errors, struct mbuf *m)
9674 {
9675 struct wm_softc *sc = rxq->rxq_sc;
9676
9677 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
9678 if (wm_rxdesc_is_set_status(sc, status,
9679 WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
9680 WM_Q_EVCNT_INCR(rxq, ipsum);
9681 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
9682 if (wm_rxdesc_is_set_error(sc, errors,
9683 WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
9684 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
9685 }
9686 if (wm_rxdesc_is_set_status(sc, status,
9687 WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
9688 /*
9689 * Note: we don't know if this was TCP or UDP,
9690 * so we just set both bits, and expect the
9691 * upper layers to deal.
9692 */
9693 WM_Q_EVCNT_INCR(rxq, tusum);
9694 m->m_pkthdr.csum_flags |=
9695 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9696 M_CSUM_TCPv6 | M_CSUM_UDPv6;
9697 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
9698 EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
9699 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
9700 }
9701 }
9702 }
9703
9704 /*
9705 * wm_rxeof:
9706 *
9707 * Helper; handle receive interrupts.
9708 */
9709 static bool
9710 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
9711 {
9712 struct wm_softc *sc = rxq->rxq_sc;
9713 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9714 struct wm_rxsoft *rxs;
9715 struct mbuf *m;
9716 int i, len;
9717 int count = 0;
9718 uint32_t status, errors;
9719 uint16_t vlantag;
9720 bool more = false;
9721
9722 KASSERT(mutex_owned(rxq->rxq_lock));
9723
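	/* Walk the Rx ring from where we left off (rxq->rxq_ptr). */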
9724 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
9725 rxs = &rxq->rxq_soft[i];
9726
9727 DPRINTF(sc, WM_DEBUG_RX,
9728 ("%s: RX: checking descriptor %d\n",
9729 device_xname(sc->sc_dev), i));
9730 wm_cdrxsync(rxq, i,
9731 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9732
9733 status = wm_rxdesc_get_status(rxq, i);
9734 errors = wm_rxdesc_get_errors(rxq, i);
9735 len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
9736 vlantag = wm_rxdesc_get_vlantag(rxq, i);
9737 #ifdef WM_DEBUG
9738 uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
9739 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
9740 #endif
9741
9742 if (!wm_rxdesc_dd(rxq, i, status))
9743 break;
9744
9745 if (limit-- == 0) {
9746 more = true;
9747 DPRINTF(sc, WM_DEBUG_RX,
9748 ("%s: RX: loop limited, descriptor %d is not processed\n",
9749 device_xname(sc->sc_dev), i));
9750 break;
9751 }
9752
9753 count++;
9754 if (__predict_false(rxq->rxq_discard)) {
9755 DPRINTF(sc, WM_DEBUG_RX,
9756 ("%s: RX: discarding contents of descriptor %d\n",
9757 device_xname(sc->sc_dev), i));
9758 wm_init_rxdesc(rxq, i);
9759 if (wm_rxdesc_is_eop(rxq, status)) {
9760 /* Reset our state. */
9761 DPRINTF(sc, WM_DEBUG_RX,
9762 ("%s: RX: resetting rxdiscard -> 0\n",
9763 device_xname(sc->sc_dev)));
9764 rxq->rxq_discard = 0;
9765 }
9766 continue;
9767 }
9768
9769 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9770 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
9771
9772 m = rxs->rxs_mbuf;
9773
9774 /*
9775 * Add a new receive buffer to the ring, unless of
9776 * course the length is zero. Treat the latter as a
9777 * failed mapping.
9778 */
9779 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
9780 /*
9781 * Failed, throw away what we've done so
9782 * far, and discard the rest of the packet.
9783 */
9784 if_statinc(ifp, if_ierrors);
9785 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9786 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
9787 wm_init_rxdesc(rxq, i);
9788 if (!wm_rxdesc_is_eop(rxq, status))
9789 rxq->rxq_discard = 1;
9790 if (rxq->rxq_head != NULL)
9791 m_freem(rxq->rxq_head);
9792 WM_RXCHAIN_RESET(rxq);
9793 DPRINTF(sc, WM_DEBUG_RX,
9794 ("%s: RX: Rx buffer allocation failed, "
9795 "dropping packet%s\n", device_xname(sc->sc_dev),
9796 rxq->rxq_discard ? " (discard)" : ""));
9797 continue;
9798 }
9799
9800 m->m_len = len;
9801 rxq->rxq_len += len;
9802 DPRINTF(sc, WM_DEBUG_RX,
9803 ("%s: RX: buffer at %p len %d\n",
9804 device_xname(sc->sc_dev), m->m_data, len));
9805
9806 /* If this is not the end of the packet, keep looking. */
9807 if (!wm_rxdesc_is_eop(rxq, status)) {
9808 WM_RXCHAIN_LINK(rxq, m);
9809 DPRINTF(sc, WM_DEBUG_RX,
9810 ("%s: RX: not yet EOP, rxlen -> %d\n",
9811 device_xname(sc->sc_dev), rxq->rxq_len));
9812 continue;
9813 }
9814
9815 /*
9816 * Okay, we have the entire packet now. The chip is
9817 * configured to include the FCS except I35[04], I21[01].
9818 * (not all chips can be configured to strip it), so we need
9819 * to trim it. Those chips have an eratta, the RCTL_SECRC bit
9820 * in RCTL register is always set, so we don't trim it.
9821 * PCH2 and newer chip also not include FCS when jumbo
9822 * frame is used to do workaround an errata.
9823 * May need to adjust length of previous mbuf in the
9824 * chain if the current mbuf is too short.
9825 */
9826 if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
9827 if (m->m_len < ETHER_CRC_LEN) {
9828 rxq->rxq_tail->m_len
9829 -= (ETHER_CRC_LEN - m->m_len);
9830 m->m_len = 0;
9831 } else
9832 m->m_len -= ETHER_CRC_LEN;
9833 len = rxq->rxq_len - ETHER_CRC_LEN;
9834 } else
9835 len = rxq->rxq_len;
9836
9837 WM_RXCHAIN_LINK(rxq, m);
9838
9839 *rxq->rxq_tailp = NULL;
9840 m = rxq->rxq_head;
9841
9842 WM_RXCHAIN_RESET(rxq);
9843
9844 DPRINTF(sc, WM_DEBUG_RX,
9845 ("%s: RX: have entire packet, len -> %d\n",
9846 device_xname(sc->sc_dev), len));
9847
9848 /* If an error occurred, update stats and drop the packet. */
9849 if (wm_rxdesc_has_errors(rxq, errors)) {
9850 m_freem(m);
9851 continue;
9852 }
9853
9854 /* No errors. Receive the packet. */
9855 m_set_rcvif(m, ifp);
9856 m->m_pkthdr.len = len;
9857 /*
9858 * TODO
9859 * should be save rsshash and rsstype to this mbuf.
9860 */
9861 DPRINTF(sc, WM_DEBUG_RX,
9862 ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
9863 device_xname(sc->sc_dev), rsstype, rsshash));
9864
9865 /*
9866 * If VLANs are enabled, VLAN packets have been unwrapped
9867 * for us. Associate the tag with the packet.
9868 */
9869 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
9870 continue;
9871
9872 /* Set up checksum info for this packet. */
9873 wm_rxdesc_ensure_checksum(rxq, status, errors, m);
9874
9875 rxq->rxq_packets++;
9876 rxq->rxq_bytes += len;
9877 /* Pass it on. */
9878 if_percpuq_enqueue(sc->sc_ipq, m);
9879
9880 if (rxq->rxq_stopping)
9881 break;
9882 }
9883 rxq->rxq_ptr = i;
9884
9885 if (count != 0)
9886 rnd_add_uint32(&sc->rnd_source, count);
9887
9888 DPRINTF(sc, WM_DEBUG_RX,
9889 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
9890
9891 return more;
9892 }
9893
9894 /*
9895 * wm_linkintr_gmii:
9896 *
9897 * Helper; handle link interrupts for GMII.
9898 */
9899 static void
9900 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
9901 {
9902 device_t dev = sc->sc_dev;
9903 uint32_t status, reg;
9904 bool link;
9905 int rv;
9906
9907 KASSERT(mutex_owned(sc->sc_core_lock));
9908
9909 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
9910 __func__));
9911
9912 if ((icr & ICR_LSC) == 0) {
9913 if (icr & ICR_RXSEQ)
9914 DPRINTF(sc, WM_DEBUG_LINK,
9915 ("%s: LINK Receive sequence error\n",
9916 device_xname(dev)));
9917 return;
9918 }
9919
9920 /* Link status changed */
9921 status = CSR_READ(sc, WMREG_STATUS);
9922 link = status & STATUS_LU;
9923 if (link) {
9924 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9925 device_xname(dev),
9926 (status & STATUS_FD) ? "FDX" : "HDX"));
9927 if (wm_phy_need_linkdown_discard(sc)) {
9928 DPRINTF(sc, WM_DEBUG_LINK,
9929 ("%s: linkintr: Clear linkdown discard flag\n",
9930 device_xname(dev)));
9931 wm_clear_linkdown_discard(sc);
9932 }
9933 } else {
9934 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9935 device_xname(dev)));
9936 if (wm_phy_need_linkdown_discard(sc)) {
9937 DPRINTF(sc, WM_DEBUG_LINK,
9938 ("%s: linkintr: Set linkdown discard flag\n",
9939 device_xname(dev)));
9940 wm_set_linkdown_discard(sc);
9941 }
9942 }
9943 if ((sc->sc_type == WM_T_ICH8) && (link == false))
9944 wm_gig_downshift_workaround_ich8lan(sc);
9945
9946 if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
9947 wm_kmrn_lock_loss_workaround_ich8lan(sc);
9948
9949 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
9950 device_xname(dev)));
9951 mii_pollstat(&sc->sc_mii);
9952 if (sc->sc_type == WM_T_82543) {
9953 int miistatus, active;
9954
9955 /*
9956 * With 82543, we need to force speed and
9957 * duplex on the MAC equal to what the PHY
9958 * speed and duplex configuration is.
9959 */
9960 miistatus = sc->sc_mii.mii_media_status;
9961
9962 if (miistatus & IFM_ACTIVE) {
9963 active = sc->sc_mii.mii_media_active;
9964 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9965 switch (IFM_SUBTYPE(active)) {
9966 case IFM_10_T:
9967 sc->sc_ctrl |= CTRL_SPEED_10;
9968 break;
9969 case IFM_100_TX:
9970 sc->sc_ctrl |= CTRL_SPEED_100;
9971 break;
9972 case IFM_1000_T:
9973 sc->sc_ctrl |= CTRL_SPEED_1000;
9974 break;
9975 default:
9976 /*
9977 * Fiber?
				 * Should not get here.
9979 */
9980 device_printf(dev, "unknown media (%x)\n",
9981 active);
9982 break;
9983 }
9984 if (active & IFM_FDX)
9985 sc->sc_ctrl |= CTRL_FD;
9986 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9987 }
9988 } else if (sc->sc_type == WM_T_PCH) {
9989 wm_k1_gig_workaround_hv(sc,
9990 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9991 }
9992
9993 /*
9994 * When connected at 10Mbps half-duplex, some parts are excessively
9995 * aggressive resulting in many collisions. To avoid this, increase
9996 * the IPG and reduce Rx latency in the PHY.
9997 */
9998 if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
9999 && link) {
10000 uint32_t tipg_reg;
10001 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
10002 bool fdx;
10003 uint16_t emi_addr, emi_val;
10004
10005 tipg_reg = CSR_READ(sc, WMREG_TIPG);
10006 tipg_reg &= ~TIPG_IPGT_MASK;
10007 fdx = status & STATUS_FD;
10008
10009 if (!fdx && (speed == STATUS_SPEED_10)) {
10010 tipg_reg |= 0xff;
10011 /* Reduce Rx latency in analog PHY */
10012 emi_val = 0;
10013 } else if ((sc->sc_type >= WM_T_PCH_SPT) &&
10014 fdx && speed != STATUS_SPEED_1000) {
10015 tipg_reg |= 0xc;
10016 emi_val = 1;
10017 } else {
10018 /* Roll back the default values */
10019 tipg_reg |= 0x08;
10020 emi_val = 1;
10021 }
10022
10023 CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
10024
10025 rv = sc->phy.acquire(sc);
10026 if (rv)
10027 return;
10028
10029 if (sc->sc_type == WM_T_PCH2)
10030 emi_addr = I82579_RX_CONFIG;
10031 else
10032 emi_addr = I217_RX_CONFIG;
10033 rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
10034
10035 if (sc->sc_type >= WM_T_PCH_LPT) {
10036 uint16_t phy_reg;
10037
10038 sc->phy.readreg_locked(dev, 2,
10039 I217_PLL_CLOCK_GATE_REG, &phy_reg);
10040 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
10041 if (speed == STATUS_SPEED_100
10042 || speed == STATUS_SPEED_10)
10043 phy_reg |= 0x3e8;
10044 else
10045 phy_reg |= 0xfa;
10046 sc->phy.writereg_locked(dev, 2,
10047 I217_PLL_CLOCK_GATE_REG, phy_reg);
10048
10049 if (speed == STATUS_SPEED_1000) {
10050 sc->phy.readreg_locked(dev, 2,
10051 HV_PM_CTRL, &phy_reg);
10052
10053 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
10054
10055 sc->phy.writereg_locked(dev, 2,
10056 HV_PM_CTRL, phy_reg);
10057 }
10058 }
10059 sc->phy.release(sc);
10060
10061 if (rv)
10062 return;
10063
10064 if (sc->sc_type >= WM_T_PCH_SPT) {
10065 uint16_t data, ptr_gap;
10066
10067 if (speed == STATUS_SPEED_1000) {
10068 rv = sc->phy.acquire(sc);
10069 if (rv)
10070 return;
10071
10072 rv = sc->phy.readreg_locked(dev, 2,
10073 I82579_UNKNOWN1, &data);
10074 if (rv) {
10075 sc->phy.release(sc);
10076 return;
10077 }
10078
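				/*
				 * Extract the 10-bit pointer-gap field
				 * (bits 11:2) and raise it to at least
				 * 0x18 at 1000Mbps.
				 */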
10079 ptr_gap = (data & (0x3ff << 2)) >> 2;
10080 if (ptr_gap < 0x18) {
10081 data &= ~(0x3ff << 2);
10082 data |= (0x18 << 2);
10083 rv = sc->phy.writereg_locked(dev,
10084 2, I82579_UNKNOWN1, data);
10085 }
10086 sc->phy.release(sc);
10087 if (rv)
10088 return;
10089 } else {
10090 rv = sc->phy.acquire(sc);
10091 if (rv)
10092 return;
10093
10094 rv = sc->phy.writereg_locked(dev, 2,
10095 I82579_UNKNOWN1, 0xc023);
10096 sc->phy.release(sc);
10097 if (rv)
10098 return;
10099
10100 }
10101 }
10102 }
10103
10104 /*
10105 * I217 Packet Loss issue:
10106 * ensure that FEXTNVM4 Beacon Duration is set correctly
10107 * on power up.
10108 * Set the Beacon Duration for I217 to 8 usec
10109 */
10110 if (sc->sc_type >= WM_T_PCH_LPT) {
10111 reg = CSR_READ(sc, WMREG_FEXTNVM4);
10112 reg &= ~FEXTNVM4_BEACON_DURATION;
10113 reg |= FEXTNVM4_BEACON_DURATION_8US;
10114 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
10115 }
10116
10117 /* Work-around I218 hang issue */
10118 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
10119 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
10120 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
10121 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
10122 wm_k1_workaround_lpt_lp(sc, link);
10123
10124 if (sc->sc_type >= WM_T_PCH_LPT) {
10125 /*
10126 * Set platform power management values for Latency
10127 * Tolerance Reporting (LTR)
10128 */
10129 wm_platform_pm_pch_lpt(sc,
10130 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
10131 }
10132
10133 /* Clear link partner's EEE ability */
10134 sc->eee_lp_ability = 0;
10135
10136 /* FEXTNVM6 K1-off workaround */
10137 if (sc->sc_type == WM_T_PCH_SPT) {
10138 reg = CSR_READ(sc, WMREG_FEXTNVM6);
10139 if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
10140 reg |= FEXTNVM6_K1_OFF_ENABLE;
10141 else
10142 reg &= ~FEXTNVM6_K1_OFF_ENABLE;
10143 CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
10144 }
10145
10146 if (!link)
10147 return;
10148
10149 switch (sc->sc_type) {
10150 case WM_T_PCH2:
10151 wm_k1_workaround_lv(sc);
10152 /* FALLTHROUGH */
10153 case WM_T_PCH:
10154 if (sc->sc_phytype == WMPHY_82578)
10155 wm_link_stall_workaround_hv(sc);
10156 break;
10157 default:
10158 break;
10159 }
10160
10161 /* Enable/Disable EEE after link up */
10162 if (sc->sc_phytype > WMPHY_82579)
10163 wm_set_eee_pchlan(sc);
10164 }
10165
10166 /*
10167 * wm_linkintr_tbi:
10168 *
10169 * Helper; handle link interrupts for TBI mode.
10170 */
10171 static void
10172 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
10173 {
10174 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10175 uint32_t status;
10176
10177 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10178 __func__));
10179
10180 status = CSR_READ(sc, WMREG_STATUS);
10181 if (icr & ICR_LSC) {
10182 wm_check_for_link(sc);
10183 if (status & STATUS_LU) {
10184 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
10185 device_xname(sc->sc_dev),
10186 (status & STATUS_FD) ? "FDX" : "HDX"));
10187 /*
10188 * NOTE: CTRL will update TFCE and RFCE automatically,
10189 * so we should update sc->sc_ctrl
10190 */
10191
10192 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
10193 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10194 sc->sc_fcrtl &= ~FCRTL_XONE;
10195 if (status & STATUS_FD)
10196 sc->sc_tctl |=
10197 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10198 else
10199 sc->sc_tctl |=
10200 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10201 if (sc->sc_ctrl & CTRL_TFCE)
10202 sc->sc_fcrtl |= FCRTL_XONE;
10203 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10204 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
10205 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
10206 sc->sc_tbi_linkup = 1;
10207 if_link_state_change(ifp, LINK_STATE_UP);
10208 } else {
10209 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10210 device_xname(sc->sc_dev)));
10211 sc->sc_tbi_linkup = 0;
10212 if_link_state_change(ifp, LINK_STATE_DOWN);
10213 }
10214 /* Update LED */
10215 wm_tbi_serdes_set_linkled(sc);
10216 } else if (icr & ICR_RXSEQ)
10217 DPRINTF(sc, WM_DEBUG_LINK,
10218 ("%s: LINK: Receive sequence error\n",
10219 device_xname(sc->sc_dev)));
10220 }
10221
10222 /*
10223 * wm_linkintr_serdes:
10224 *
 * Helper; handle link interrupts for SERDES mode.
10226 */
10227 static void
10228 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
10229 {
10230 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10231 struct mii_data *mii = &sc->sc_mii;
10232 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10233 uint32_t pcs_adv, pcs_lpab, reg;
10234
10235 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10236 __func__));
10237
10238 if (icr & ICR_LSC) {
10239 /* Check PCS */
10240 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10241 if ((reg & PCS_LSTS_LINKOK) != 0) {
10242 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
10243 device_xname(sc->sc_dev)));
10244 mii->mii_media_status |= IFM_ACTIVE;
10245 sc->sc_tbi_linkup = 1;
10246 if_link_state_change(ifp, LINK_STATE_UP);
10247 } else {
10248 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10249 device_xname(sc->sc_dev)));
10250 mii->mii_media_status |= IFM_NONE;
10251 sc->sc_tbi_linkup = 0;
10252 if_link_state_change(ifp, LINK_STATE_DOWN);
10253 wm_tbi_serdes_set_linkled(sc);
10254 return;
10255 }
10256 mii->mii_media_active |= IFM_1000_SX;
10257 if ((reg & PCS_LSTS_FDX) != 0)
10258 mii->mii_media_active |= IFM_FDX;
10259 else
10260 mii->mii_media_active |= IFM_HDX;
10261 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
10262 /* Check flow */
10263 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10264 if ((reg & PCS_LSTS_AN_COMP) == 0) {
10265 DPRINTF(sc, WM_DEBUG_LINK,
10266 ("XXX LINKOK but not ACOMP\n"));
10267 return;
10268 }
10269 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
10270 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
10271 DPRINTF(sc, WM_DEBUG_LINK,
10272 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
10273 if ((pcs_adv & TXCW_SYM_PAUSE)
10274 && (pcs_lpab & TXCW_SYM_PAUSE)) {
10275 mii->mii_media_active |= IFM_FLOW
10276 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
10277 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
10278 && (pcs_adv & TXCW_ASYM_PAUSE)
10279 && (pcs_lpab & TXCW_SYM_PAUSE)
10280 && (pcs_lpab & TXCW_ASYM_PAUSE))
10281 mii->mii_media_active |= IFM_FLOW
10282 | IFM_ETH_TXPAUSE;
10283 else if ((pcs_adv & TXCW_SYM_PAUSE)
10284 && (pcs_adv & TXCW_ASYM_PAUSE)
10285 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
10286 && (pcs_lpab & TXCW_ASYM_PAUSE))
10287 mii->mii_media_active |= IFM_FLOW
10288 | IFM_ETH_RXPAUSE;
10289 }
10290 /* Update LED */
10291 wm_tbi_serdes_set_linkled(sc);
10292 } else
10293 DPRINTF(sc, WM_DEBUG_LINK,
10294 ("%s: LINK: Receive sequence error\n",
10295 device_xname(sc->sc_dev)));
10296 }
10297
10298 /*
10299 * wm_linkintr:
10300 *
10301 * Helper; handle link interrupts.
10302 */
10303 static void
10304 wm_linkintr(struct wm_softc *sc, uint32_t icr)
10305 {
10306
10307 KASSERT(mutex_owned(sc->sc_core_lock));
10308
10309 if (sc->sc_flags & WM_F_HAS_MII)
10310 wm_linkintr_gmii(sc, icr);
10311 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
10312 && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
10313 wm_linkintr_serdes(sc, icr);
10314 else
10315 wm_linkintr_tbi(sc, icr);
10316 }
10317
10318
10319 static inline void
10320 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
10321 {
10322
10323 if (wmq->wmq_txrx_use_workqueue) {
10324 if (!wmq->wmq_wq_enqueued) {
10325 wmq->wmq_wq_enqueued = true;
10326 workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
10327 curcpu());
10328 }
10329 } else
10330 softint_schedule(wmq->wmq_si);
10331 }
10332
10333 static inline void
10334 wm_legacy_intr_disable(struct wm_softc *sc)
10335 {
10336
10337 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
10338 }
10339
10340 static inline void
10341 wm_legacy_intr_enable(struct wm_softc *sc)
10342 {
10343
10344 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
10345 }
10346
10347 /*
10348 * wm_intr_legacy:
10349 *
10350 * Interrupt service routine for INTx and MSI.
10351 */
10352 static int
10353 wm_intr_legacy(void *arg)
10354 {
10355 struct wm_softc *sc = arg;
10356 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10357 struct wm_queue *wmq = &sc->sc_queue[0];
10358 struct wm_txqueue *txq = &wmq->wmq_txq;
10359 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10360 u_int txlimit = sc->sc_tx_intr_process_limit;
10361 u_int rxlimit = sc->sc_rx_intr_process_limit;
10362 uint32_t icr, rndval = 0;
10363 bool more = false;
10364
10365 icr = CSR_READ(sc, WMREG_ICR);
10366 if ((icr & sc->sc_icr) == 0)
10367 return 0;
10368
10369 DPRINTF(sc, WM_DEBUG_TX,
10370 ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
10371 if (rndval == 0)
10372 rndval = icr;
10373
10374 mutex_enter(txq->txq_lock);
10375
10376 if (txq->txq_stopping) {
10377 mutex_exit(txq->txq_lock);
10378 return 1;
10379 }
10380
10381 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10382 if (icr & ICR_TXDW) {
10383 DPRINTF(sc, WM_DEBUG_TX,
10384 ("%s: TX: got TXDW interrupt\n",
10385 device_xname(sc->sc_dev)));
10386 WM_Q_EVCNT_INCR(txq, txdw);
10387 }
10388 #endif
10389 if (txlimit > 0) {
10390 more |= wm_txeof(txq, txlimit);
10391 if (!IF_IS_EMPTY(&ifp->if_snd))
10392 more = true;
10393 } else
10394 more = true;
10395 mutex_exit(txq->txq_lock);
10396
10397 mutex_enter(rxq->rxq_lock);
10398
10399 if (rxq->rxq_stopping) {
10400 mutex_exit(rxq->rxq_lock);
10401 return 1;
10402 }
10403
10404 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10405 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
10406 DPRINTF(sc, WM_DEBUG_RX,
10407 ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
10408 device_xname(sc->sc_dev),
10409 icr & (ICR_RXDMT0 | ICR_RXT0)));
10410 WM_Q_EVCNT_INCR(rxq, intr);
10411 }
10412 #endif
10413 if (rxlimit > 0) {
10414 /*
10415 * wm_rxeof() does *not* call upper layer functions directly,
		 * as if_percpuq_enqueue() just calls softint_schedule().
10417 * So, we can call wm_rxeof() in interrupt context.
10418 */
10419 more = wm_rxeof(rxq, rxlimit);
10420 } else
10421 more = true;
10422
10423 mutex_exit(rxq->rxq_lock);
10424
10425 mutex_enter(sc->sc_core_lock);
10426
10427 if (sc->sc_core_stopping) {
10428 mutex_exit(sc->sc_core_lock);
10429 return 1;
10430 }
10431
10432 if (icr & (ICR_LSC | ICR_RXSEQ)) {
10433 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10434 wm_linkintr(sc, icr);
10435 }
10436 if ((icr & ICR_GPI(0)) != 0)
10437 device_printf(sc->sc_dev, "got module interrupt\n");
10438
10439 mutex_exit(sc->sc_core_lock);
10440
10441 if (icr & ICR_RXO) {
10442 #if defined(WM_DEBUG)
10443 log(LOG_WARNING, "%s: Receive overrun\n",
10444 device_xname(sc->sc_dev));
10445 #endif /* defined(WM_DEBUG) */
10446 }
10447
10448 rnd_add_uint32(&sc->rnd_source, rndval);
10449
10450 if (more) {
10451 /* Try to get more packets going. */
10452 wm_legacy_intr_disable(sc);
10453 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10454 wm_sched_handle_queue(sc, wmq);
10455 }
10456
10457 return 1;
10458 }
10459
10460 static inline void
10461 wm_txrxintr_disable(struct wm_queue *wmq)
10462 {
10463 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10464
10465 if (__predict_false(!wm_is_using_msix(sc))) {
10466 wm_legacy_intr_disable(sc);
10467 return;
10468 }
10469
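	/*
	 * The 82574 masks per-queue interrupts via IMC; the 82575 uses
	 * EIMC with EITR queue bits; newer chips use one EIMC bit per
	 * MSI-X vector.
	 */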
10470 if (sc->sc_type == WM_T_82574)
10471 CSR_WRITE(sc, WMREG_IMC,
10472 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
10473 else if (sc->sc_type == WM_T_82575)
10474 CSR_WRITE(sc, WMREG_EIMC,
10475 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10476 else
10477 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
10478 }
10479
10480 static inline void
10481 wm_txrxintr_enable(struct wm_queue *wmq)
10482 {
10483 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10484
10485 wm_itrs_calculate(sc, wmq);
10486
10487 if (__predict_false(!wm_is_using_msix(sc))) {
10488 wm_legacy_intr_enable(sc);
10489 return;
10490 }
10491
10492 /*
10493 * ICR_OTHER which is disabled in wm_linkintr_msix() is enabled here.
10494 * There is no need to care about which of RXQ(0) and RXQ(1) enable
10495 * ICR_OTHER in first, because each RXQ/TXQ interrupt is disabled
10496 * while each wm_handle_queue(wmq) is runnig.
10497 */
10498 if (sc->sc_type == WM_T_82574)
10499 CSR_WRITE(sc, WMREG_IMS,
10500 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
10501 else if (sc->sc_type == WM_T_82575)
10502 CSR_WRITE(sc, WMREG_EIMS,
10503 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10504 else
10505 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
10506 }
10507
10508 static int
10509 wm_txrxintr_msix(void *arg)
10510 {
10511 struct wm_queue *wmq = arg;
10512 struct wm_txqueue *txq = &wmq->wmq_txq;
10513 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10514 struct wm_softc *sc = txq->txq_sc;
10515 u_int txlimit = sc->sc_tx_intr_process_limit;
10516 u_int rxlimit = sc->sc_rx_intr_process_limit;
10517 bool txmore;
10518 bool rxmore;
10519
10520 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
10521
10522 DPRINTF(sc, WM_DEBUG_TX,
10523 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
10524
10525 wm_txrxintr_disable(wmq);
10526
10527 mutex_enter(txq->txq_lock);
10528
10529 if (txq->txq_stopping) {
10530 mutex_exit(txq->txq_lock);
10531 return 1;
10532 }
10533
10534 WM_Q_EVCNT_INCR(txq, txdw);
10535 if (txlimit > 0) {
10536 txmore = wm_txeof(txq, txlimit);
10537 /* wm_deferred start() is done in wm_handle_queue(). */
10538 } else
10539 txmore = true;
10540 mutex_exit(txq->txq_lock);
10541
10542 DPRINTF(sc, WM_DEBUG_RX,
10543 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
10544 mutex_enter(rxq->rxq_lock);
10545
10546 if (rxq->rxq_stopping) {
10547 mutex_exit(rxq->rxq_lock);
10548 return 1;
10549 }
10550
10551 WM_Q_EVCNT_INCR(rxq, intr);
10552 if (rxlimit > 0) {
10553 rxmore = wm_rxeof(rxq, rxlimit);
10554 } else
10555 rxmore = true;
10556 mutex_exit(rxq->rxq_lock);
10557
10558 wm_itrs_writereg(sc, wmq);
10559
10560 if (txmore || rxmore) {
10561 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10562 wm_sched_handle_queue(sc, wmq);
10563 } else
10564 wm_txrxintr_enable(wmq);
10565
10566 return 1;
10567 }
10568
10569 static void
10570 wm_handle_queue(void *arg)
10571 {
10572 struct wm_queue *wmq = arg;
10573 struct wm_txqueue *txq = &wmq->wmq_txq;
10574 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10575 struct wm_softc *sc = txq->txq_sc;
10576 u_int txlimit = sc->sc_tx_process_limit;
10577 u_int rxlimit = sc->sc_rx_process_limit;
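	/*
	 * Note: these are the softint/workqueue process limits, which may
	 * differ from the *_intr_process_limit values used in interrupt
	 * context (wm_txrxintr_msix()).
	 */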
10578 bool txmore;
10579 bool rxmore;
10580
10581 mutex_enter(txq->txq_lock);
10582 if (txq->txq_stopping) {
10583 mutex_exit(txq->txq_lock);
10584 return;
10585 }
10586 txmore = wm_txeof(txq, txlimit);
10587 wm_deferred_start_locked(txq);
10588 mutex_exit(txq->txq_lock);
10589
10590 mutex_enter(rxq->rxq_lock);
10591 if (rxq->rxq_stopping) {
10592 mutex_exit(rxq->rxq_lock);
10593 return;
10594 }
10595 WM_Q_EVCNT_INCR(rxq, defer);
10596 rxmore = wm_rxeof(rxq, rxlimit);
10597 mutex_exit(rxq->rxq_lock);
10598
10599 if (txmore || rxmore) {
10600 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10601 wm_sched_handle_queue(sc, wmq);
10602 } else
10603 wm_txrxintr_enable(wmq);
10604 }
10605
10606 static void
10607 wm_handle_queue_work(struct work *wk, void *context)
10608 {
10609 struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
10610
10611 /*
10612 * Some qemu environment workaround. They don't stop interrupt
10613 * immediately.
10614 */
10615 wmq->wmq_wq_enqueued = false;
10616 wm_handle_queue(wmq);
10617 }
10618
10619 /*
10620 * wm_linkintr_msix:
10621 *
10622 * Interrupt service routine for link status change for MSI-X.
10623 */
10624 static int
10625 wm_linkintr_msix(void *arg)
10626 {
10627 struct wm_softc *sc = arg;
10628 uint32_t reg;
	bool has_rxo = false;
10630
10631 reg = CSR_READ(sc, WMREG_ICR);
10632 mutex_enter(sc->sc_core_lock);
10633 DPRINTF(sc, WM_DEBUG_LINK,
10634 ("%s: LINK: got link intr. ICR = %08x\n",
10635 device_xname(sc->sc_dev), reg));
10636
10637 if (sc->sc_core_stopping)
10638 goto out;
10639
10640 if ((reg & ICR_LSC) != 0) {
10641 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10642 wm_linkintr(sc, ICR_LSC);
10643 }
10644 if ((reg & ICR_GPI(0)) != 0)
10645 device_printf(sc->sc_dev, "got module interrupt\n");
10646
10647 /*
10648 * XXX 82574 MSI-X mode workaround
10649 *
10650 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER
10651 * MSI-X vector, furthermore it does not cause neigher ICR_RXQ(0) nor
10652 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
10653 * interrupts by writing WMREG_ICS to process receive packets.
10654 */
10655 if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
10656 #if defined(WM_DEBUG)
10657 log(LOG_WARNING, "%s: Receive overrun\n",
10658 device_xname(sc->sc_dev));
10659 #endif /* defined(WM_DEBUG) */
10660
10661 has_rxo = true;
10662 /*
10663 * The RXO interrupt is very high rate when receive traffic is
10664 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
10665 * interrupts. ICR_OTHER will be enabled at the end of
10666 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
10667 * ICR_RXQ(1) interrupts.
10668 */
10669 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
10670
10671 CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
10672 }
10675
10676 out:
10677 mutex_exit(sc->sc_core_lock);
10678
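	/*
	 * Re-arm the link interrupt. On the 82574, keep ICR_OTHER masked
	 * while an RXO storm is being handled (see above).
	 */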
10679 if (sc->sc_type == WM_T_82574) {
10680 if (!has_rxo)
10681 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
10682 else
10683 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
10684 } else if (sc->sc_type == WM_T_82575)
10685 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
10686 else
10687 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
10688
10689 return 1;
10690 }
10691
10692 /*
10693 * Media related.
10694 * GMII, SGMII, TBI (and SERDES)
10695 */
10696
10697 /* Common */
10698
10699 /*
10700 * wm_tbi_serdes_set_linkled:
10701 *
10702 * Update the link LED on TBI and SERDES devices.
10703 */
10704 static void
10705 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
10706 {
10707
10708 if (sc->sc_tbi_linkup)
10709 sc->sc_ctrl |= CTRL_SWDPIN(0);
10710 else
10711 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
10712
10713 /* 82540 or newer devices are active low */
10714 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
10715
10716 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10717 }
10718
10719 /* GMII related */
10720
10721 /*
10722 * wm_gmii_reset:
10723 *
10724 * Reset the PHY.
10725 */
10726 static void
10727 wm_gmii_reset(struct wm_softc *sc)
10728 {
10729 uint32_t reg;
10730 int rv;
10731
10732 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10733 device_xname(sc->sc_dev), __func__));
10734
10735 rv = sc->phy.acquire(sc);
10736 if (rv != 0) {
10737 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10738 __func__);
10739 return;
10740 }
10741
10742 switch (sc->sc_type) {
10743 case WM_T_82542_2_0:
10744 case WM_T_82542_2_1:
10745 /* null */
10746 break;
10747 case WM_T_82543:
10748 /*
10749 * With 82543, we need to force speed and duplex on the MAC
10750 * equal to what the PHY speed and duplex configuration is.
10751 * In addition, we need to perform a hardware reset on the PHY
10752 * to take it out of reset.
10753 */
10754 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10755 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10756
10757 /* The PHY reset pin is active-low. */
10758 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10759 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
10760 CTRL_EXT_SWDPIN(4));
10761 reg |= CTRL_EXT_SWDPIO(4);
10762
10763 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10764 CSR_WRITE_FLUSH(sc);
10765 delay(10*1000);
10766
10767 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
10768 CSR_WRITE_FLUSH(sc);
10769 delay(150);
10770 #if 0
10771 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
10772 #endif
10773 delay(20*1000); /* XXX extra delay to get PHY ID? */
10774 break;
10775 case WM_T_82544: /* Reset 10000us */
10776 case WM_T_82540:
10777 case WM_T_82545:
10778 case WM_T_82545_3:
10779 case WM_T_82546:
10780 case WM_T_82546_3:
10781 case WM_T_82541:
10782 case WM_T_82541_2:
10783 case WM_T_82547:
10784 case WM_T_82547_2:
10785 case WM_T_82571: /* Reset 100us */
10786 case WM_T_82572:
10787 case WM_T_82573:
10788 case WM_T_82574:
10789 case WM_T_82575:
10790 case WM_T_82576:
10791 case WM_T_82580:
10792 case WM_T_I350:
10793 case WM_T_I354:
10794 case WM_T_I210:
10795 case WM_T_I211:
10796 case WM_T_82583:
10797 case WM_T_80003:
10798 /* Generic reset */
10799 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10800 CSR_WRITE_FLUSH(sc);
10801 delay(20000);
10802 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10803 CSR_WRITE_FLUSH(sc);
10804 delay(20000);
10805
10806 if ((sc->sc_type == WM_T_82541)
10807 || (sc->sc_type == WM_T_82541_2)
10808 || (sc->sc_type == WM_T_82547)
10809 || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
10811 /* XXX add code to set LED after phy reset */
10812 }
10813 break;
10814 case WM_T_ICH8:
10815 case WM_T_ICH9:
10816 case WM_T_ICH10:
10817 case WM_T_PCH:
10818 case WM_T_PCH2:
10819 case WM_T_PCH_LPT:
10820 case WM_T_PCH_SPT:
10821 case WM_T_PCH_CNP:
10822 /* Generic reset */
10823 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10824 CSR_WRITE_FLUSH(sc);
10825 delay(100);
10826 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10827 CSR_WRITE_FLUSH(sc);
10828 delay(150);
10829 break;
10830 default:
10831 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
10832 __func__);
10833 break;
10834 }
10835
10836 sc->phy.release(sc);
10837
10838 /* get_cfg_done */
10839 wm_get_cfg_done(sc);
10840
10841 /* Extra setup */
10842 switch (sc->sc_type) {
10843 case WM_T_82542_2_0:
10844 case WM_T_82542_2_1:
10845 case WM_T_82543:
10846 case WM_T_82544:
10847 case WM_T_82540:
10848 case WM_T_82545:
10849 case WM_T_82545_3:
10850 case WM_T_82546:
10851 case WM_T_82546_3:
10852 case WM_T_82541_2:
10853 case WM_T_82547_2:
10854 case WM_T_82571:
10855 case WM_T_82572:
10856 case WM_T_82573:
10857 case WM_T_82574:
10858 case WM_T_82583:
10859 case WM_T_82575:
10860 case WM_T_82576:
10861 case WM_T_82580:
10862 case WM_T_I350:
10863 case WM_T_I354:
10864 case WM_T_I210:
10865 case WM_T_I211:
10866 case WM_T_80003:
10867 /* Null */
10868 break;
10869 case WM_T_82541:
10870 case WM_T_82547:
10871 		/* XXX Actively configure the LED after PHY reset */
10872 break;
10873 case WM_T_ICH8:
10874 case WM_T_ICH9:
10875 case WM_T_ICH10:
10876 case WM_T_PCH:
10877 case WM_T_PCH2:
10878 case WM_T_PCH_LPT:
10879 case WM_T_PCH_SPT:
10880 case WM_T_PCH_CNP:
10881 wm_phy_post_reset(sc);
10882 break;
10883 default:
10884 panic("%s: unknown type\n", __func__);
10885 break;
10886 }
10887 }
10888
10889 /*
10890 * Set up sc_phytype and mii_{read|write}reg.
10891 *
10892  * To identify the PHY type, the correct read/write functions must be
10893  * selected, and selecting them requires the PCI ID or the MAC type
10894  * because the PHY registers can't be accessed yet.
10895  *
10896  * On the first call of this function, the PHY ID is not known yet, so
10897  * check the PCI ID or the MAC type. The list of PCI IDs may not be
10898  * perfect, so the result might be incorrect.
10899  *
10900  * On the second call, the PHY OUI and model are used to identify the
10901  * PHY type. The table of known PHYs may be incomplete, but the result
10902  * is more reliable than the first call's.
10903  *
10904  * If the newly detected result differs from the previous assumption,
10905  * a diagnostic message is printed.
10906 */
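/*
 * Illustrative call sequence (a sketch; the zero arguments on the
 * first call are an assumption based on the phy_oui/phy_model checks
 * below, and the second call is the one made from wm_gmii_mediainit()):
 *
 *	wm_gmii_setup_phytype(sc, 0, 0);	// 1st: PCI ID/MAC type
 *	// ... mii_attach() probes the PHY ...
 *	child = LIST_FIRST(&mii->mii_phys);
 *	wm_gmii_setup_phytype(sc, child->mii_mpd_oui,	// 2nd: PHY ID
 *	    child->mii_mpd_model);
 */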
10907 static void
10908 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
10909 uint16_t phy_model)
10910 {
10911 device_t dev = sc->sc_dev;
10912 struct mii_data *mii = &sc->sc_mii;
10913 uint16_t new_phytype = WMPHY_UNKNOWN;
10914 uint16_t doubt_phytype = WMPHY_UNKNOWN;
10915 mii_readreg_t new_readreg;
10916 mii_writereg_t new_writereg;
10917 bool dodiag = true;
10918
10919 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10920 device_xname(sc->sc_dev), __func__));
10921
10922 /*
10923 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
10924 	 * incorrect, so don't print diagnostic output on the second call.
10925 */
10926 if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
10927 dodiag = false;
10928
10929 if (mii->mii_readreg == NULL) {
10930 /*
10931 * This is the first call of this function. For ICH and PCH
10932 * variants, it's difficult to determine the PHY access method
10933 * by sc_type, so use the PCI product ID for some devices.
10934 */
10935
10936 switch (sc->sc_pcidevid) {
10937 case PCI_PRODUCT_INTEL_PCH_M_LM:
10938 case PCI_PRODUCT_INTEL_PCH_M_LC:
10939 /* 82577 */
10940 new_phytype = WMPHY_82577;
10941 break;
10942 case PCI_PRODUCT_INTEL_PCH_D_DM:
10943 case PCI_PRODUCT_INTEL_PCH_D_DC:
10944 /* 82578 */
10945 new_phytype = WMPHY_82578;
10946 break;
10947 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
10948 case PCI_PRODUCT_INTEL_PCH2_LV_V:
10949 /* 82579 */
10950 new_phytype = WMPHY_82579;
10951 break;
10952 case PCI_PRODUCT_INTEL_82801H_82567V_3:
10953 case PCI_PRODUCT_INTEL_82801I_BM:
10954 case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
10955 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
10956 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
10957 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
10958 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
10959 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
10960 /* ICH8, 9, 10 with 82567 */
10961 new_phytype = WMPHY_BM;
10962 break;
10963 default:
10964 break;
10965 }
10966 } else {
10967 /* It's not the first call. Use PHY OUI and model */
10968 switch (phy_oui) {
10969 case MII_OUI_ATTANSIC: /* atphy(4) */
10970 switch (phy_model) {
10971 case MII_MODEL_ATTANSIC_AR8021:
10972 new_phytype = WMPHY_82578;
10973 break;
10974 default:
10975 break;
10976 }
10977 break;
10978 case MII_OUI_xxMARVELL:
10979 switch (phy_model) {
10980 case MII_MODEL_xxMARVELL_I210:
10981 new_phytype = WMPHY_I210;
10982 break;
10983 case MII_MODEL_xxMARVELL_E1011:
10984 case MII_MODEL_xxMARVELL_E1000_3:
10985 case MII_MODEL_xxMARVELL_E1000_5:
10986 case MII_MODEL_xxMARVELL_E1112:
10987 new_phytype = WMPHY_M88;
10988 break;
10989 case MII_MODEL_xxMARVELL_E1149:
10990 new_phytype = WMPHY_BM;
10991 break;
10992 case MII_MODEL_xxMARVELL_E1111:
10993 case MII_MODEL_xxMARVELL_I347:
10994 case MII_MODEL_xxMARVELL_E1512:
10995 case MII_MODEL_xxMARVELL_E1340M:
10996 case MII_MODEL_xxMARVELL_E1543:
10997 new_phytype = WMPHY_M88;
10998 break;
10999 case MII_MODEL_xxMARVELL_I82563:
11000 new_phytype = WMPHY_GG82563;
11001 break;
11002 default:
11003 break;
11004 }
11005 break;
11006 case MII_OUI_INTEL:
11007 switch (phy_model) {
11008 case MII_MODEL_INTEL_I82577:
11009 new_phytype = WMPHY_82577;
11010 break;
11011 case MII_MODEL_INTEL_I82579:
11012 new_phytype = WMPHY_82579;
11013 break;
11014 case MII_MODEL_INTEL_I217:
11015 new_phytype = WMPHY_I217;
11016 break;
11017 case MII_MODEL_INTEL_I82580:
11018 new_phytype = WMPHY_82580;
11019 break;
11020 case MII_MODEL_INTEL_I350:
11021 new_phytype = WMPHY_I350;
11022 break;
11023 default:
11024 break;
11025 }
11026 break;
11027 case MII_OUI_yyINTEL:
11028 switch (phy_model) {
11029 case MII_MODEL_yyINTEL_I82562G:
11030 case MII_MODEL_yyINTEL_I82562EM:
11031 case MII_MODEL_yyINTEL_I82562ET:
11032 new_phytype = WMPHY_IFE;
11033 break;
11034 case MII_MODEL_yyINTEL_IGP01E1000:
11035 new_phytype = WMPHY_IGP;
11036 break;
11037 case MII_MODEL_yyINTEL_I82566:
11038 new_phytype = WMPHY_IGP_3;
11039 break;
11040 default:
11041 break;
11042 }
11043 break;
11044 default:
11045 break;
11046 }
11047
11048 if (dodiag) {
11049 if (new_phytype == WMPHY_UNKNOWN)
11050 aprint_verbose_dev(dev,
11051 "%s: Unknown PHY model. OUI=%06x, "
11052 "model=%04x\n", __func__, phy_oui,
11053 phy_model);
11054
11055 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11056 && (sc->sc_phytype != new_phytype)) {
11057 aprint_error_dev(dev, "Previously assumed PHY "
11058 				    "type(%u) was incorrect. PHY type from PHY "
11059 "ID = %u\n", sc->sc_phytype, new_phytype);
11060 }
11061 }
11062 }
11063
11064 /* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
11065 if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
11066 /* SGMII */
11067 new_readreg = wm_sgmii_readreg;
11068 new_writereg = wm_sgmii_writereg;
11069 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11070 /* BM2 (phyaddr == 1) */
11071 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11072 && (new_phytype != WMPHY_BM)
11073 && (new_phytype != WMPHY_UNKNOWN))
11074 doubt_phytype = new_phytype;
11075 new_phytype = WMPHY_BM;
11076 new_readreg = wm_gmii_bm_readreg;
11077 new_writereg = wm_gmii_bm_writereg;
11078 } else if (sc->sc_type >= WM_T_PCH) {
11079 /* All PCH* use _hv_ */
11080 new_readreg = wm_gmii_hv_readreg;
11081 new_writereg = wm_gmii_hv_writereg;
11082 } else if (sc->sc_type >= WM_T_ICH8) {
11083 /* non-82567 ICH8, 9 and 10 */
11084 new_readreg = wm_gmii_i82544_readreg;
11085 new_writereg = wm_gmii_i82544_writereg;
11086 } else if (sc->sc_type >= WM_T_80003) {
11087 /* 80003 */
11088 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11089 && (new_phytype != WMPHY_GG82563)
11090 && (new_phytype != WMPHY_UNKNOWN))
11091 doubt_phytype = new_phytype;
11092 new_phytype = WMPHY_GG82563;
11093 new_readreg = wm_gmii_i80003_readreg;
11094 new_writereg = wm_gmii_i80003_writereg;
11095 } else if (sc->sc_type >= WM_T_I210) {
11096 /* I210 and I211 */
11097 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11098 && (new_phytype != WMPHY_I210)
11099 && (new_phytype != WMPHY_UNKNOWN))
11100 doubt_phytype = new_phytype;
11101 new_phytype = WMPHY_I210;
11102 new_readreg = wm_gmii_gs40g_readreg;
11103 new_writereg = wm_gmii_gs40g_writereg;
11104 } else if (sc->sc_type >= WM_T_82580) {
11105 /* 82580, I350 and I354 */
11106 new_readreg = wm_gmii_82580_readreg;
11107 new_writereg = wm_gmii_82580_writereg;
11108 } else if (sc->sc_type >= WM_T_82544) {
11109 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
11110 new_readreg = wm_gmii_i82544_readreg;
11111 new_writereg = wm_gmii_i82544_writereg;
11112 } else {
11113 new_readreg = wm_gmii_i82543_readreg;
11114 new_writereg = wm_gmii_i82543_writereg;
11115 }
11116
11117 if (new_phytype == WMPHY_BM) {
11118 /* All BM use _bm_ */
11119 new_readreg = wm_gmii_bm_readreg;
11120 new_writereg = wm_gmii_bm_writereg;
11121 }
11122 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
11123 /* All PCH* use _hv_ */
11124 new_readreg = wm_gmii_hv_readreg;
11125 new_writereg = wm_gmii_hv_writereg;
11126 }
11127
11128 /* Diag output */
11129 if (dodiag) {
11130 if (doubt_phytype != WMPHY_UNKNOWN)
11131 aprint_error_dev(dev, "Assumed new PHY type was "
11132 "incorrect. old = %u, new = %u\n", sc->sc_phytype,
11133 new_phytype);
11134 else if ((sc->sc_phytype != WMPHY_UNKNOWN)
11135 && (sc->sc_phytype != new_phytype))
11136 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
11137 "was incorrect. New PHY type = %u\n",
11138 sc->sc_phytype, new_phytype);
11139
11140 if ((mii->mii_readreg != NULL) &&
11141 (new_phytype == WMPHY_UNKNOWN))
11142 aprint_error_dev(dev, "PHY type is still unknown.\n");
11143
11144 if ((mii->mii_readreg != NULL) &&
11145 (mii->mii_readreg != new_readreg))
11146 aprint_error_dev(dev, "Previously assumed PHY "
11147 "read/write function was incorrect.\n");
11148 }
11149
11150 /* Update now */
11151 sc->sc_phytype = new_phytype;
11152 mii->mii_readreg = new_readreg;
11153 mii->mii_writereg = new_writereg;
11154 if (new_readreg == wm_gmii_hv_readreg) {
11155 sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
11156 sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
11157 } else if (new_readreg == wm_sgmii_readreg) {
11158 sc->phy.readreg_locked = wm_sgmii_readreg_locked;
11159 sc->phy.writereg_locked = wm_sgmii_writereg_locked;
11160 } else if (new_readreg == wm_gmii_i82544_readreg) {
11161 sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
11162 sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
11163 }
11164 }
11165
11166 /*
11167 * wm_get_phy_id_82575:
11168 *
11169 * Return PHY ID. Return -1 if it failed.
11170 */
11171 static int
11172 wm_get_phy_id_82575(struct wm_softc *sc)
11173 {
11174 uint32_t reg;
11175 int phyid = -1;
11176
11177 /* XXX */
11178 if ((sc->sc_flags & WM_F_SGMII) == 0)
11179 return -1;
11180
11181 if (wm_sgmii_uses_mdio(sc)) {
11182 switch (sc->sc_type) {
11183 case WM_T_82575:
11184 case WM_T_82576:
11185 reg = CSR_READ(sc, WMREG_MDIC);
11186 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
11187 break;
11188 case WM_T_82580:
11189 case WM_T_I350:
11190 case WM_T_I354:
11191 case WM_T_I210:
11192 case WM_T_I211:
11193 reg = CSR_READ(sc, WMREG_MDICNFG);
11194 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
11195 break;
11196 default:
11197 return -1;
11198 }
11199 }
11200
11201 return phyid;
11202 }
11203
11204 /*
11205 * wm_gmii_mediainit:
11206 *
11207 * Initialize media for use on 1000BASE-T devices.
11208 */
11209 static void
11210 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
11211 {
11212 device_t dev = sc->sc_dev;
11213 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11214 struct mii_data *mii = &sc->sc_mii;
11215
11216 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11217 device_xname(sc->sc_dev), __func__));
11218
11219 /* We have GMII. */
11220 sc->sc_flags |= WM_F_HAS_MII;
11221
11222 if (sc->sc_type == WM_T_80003)
11223 sc->sc_tipg = TIPG_1000T_80003_DFLT;
11224 else
11225 sc->sc_tipg = TIPG_1000T_DFLT;
11226
11227 /*
11228 * Let the chip set speed/duplex on its own based on
11229 * signals from the PHY.
11230 * XXXbouyer - I'm not sure this is right for the 80003,
11231 * the em driver only sets CTRL_SLU here - but it seems to work.
11232 */
11233 sc->sc_ctrl |= CTRL_SLU;
11234 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11235
11236 /* Initialize our media structures and probe the GMII. */
11237 mii->mii_ifp = ifp;
11238
11239 mii->mii_statchg = wm_gmii_statchg;
11240
11241 	/* Get PHY control from SMBus to PCIe */
11242 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
11243 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
11244 || (sc->sc_type == WM_T_PCH_CNP))
11245 wm_init_phy_workarounds_pchlan(sc);
11246
11247 wm_gmii_reset(sc);
11248
11249 sc->sc_ethercom.ec_mii = &sc->sc_mii;
11250 ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
11251 wm_gmii_mediastatus, sc->sc_core_lock);
11252
11253 /* Setup internal SGMII PHY for SFP */
11254 wm_sgmii_sfp_preconfig(sc);
11255
11256 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
11257 || (sc->sc_type == WM_T_82580)
11258 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
11259 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
11260 if ((sc->sc_flags & WM_F_SGMII) == 0) {
11261 /* Attach only one port */
11262 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
11263 MII_OFFSET_ANY, MIIF_DOPAUSE);
11264 } else {
11265 int i, id;
11266 uint32_t ctrl_ext;
11267
11268 id = wm_get_phy_id_82575(sc);
11269 if (id != -1) {
11270 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
11271 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
11272 }
11273 if ((id == -1)
11274 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
11275 /* Power on sgmii phy if it is disabled */
11276 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11277 CSR_WRITE(sc, WMREG_CTRL_EXT,
11278 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
11279 CSR_WRITE_FLUSH(sc);
11280 delay(300*1000); /* XXX too long */
11281
11282 /*
11283 				 * Try PHY addresses 1 through 7.
11284 				 *
11285 				 * I2C access to an absent PHY fails with the
11286 				 * I2CCMD register's ERROR bit set, so suppress
11287 				 * error messages while scanning.
11288 */
11289 sc->phy.no_errprint = true;
11290 for (i = 1; i < 8; i++)
11291 mii_attach(sc->sc_dev, &sc->sc_mii,
11292 0xffffffff, i, MII_OFFSET_ANY,
11293 MIIF_DOPAUSE);
11294 sc->phy.no_errprint = false;
11295
11296 /* Restore previous sfp cage power state */
11297 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11298 }
11299 }
11300 } else
11301 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11302 MII_OFFSET_ANY, MIIF_DOPAUSE);
11303
11304 /*
11305  * If the MAC is PCH2, PCH_LPT, PCH_SPT or PCH_CNP and no MII PHY was
11306  * detected, call wm_set_mdio_slow_mode_hv() as a workaround and retry.
11307 */
11308 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
11309 || (sc->sc_type == WM_T_PCH_SPT)
11310 || (sc->sc_type == WM_T_PCH_CNP))
11311 && (LIST_FIRST(&mii->mii_phys) == NULL)) {
11312 wm_set_mdio_slow_mode_hv(sc);
11313 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11314 MII_OFFSET_ANY, MIIF_DOPAUSE);
11315 }
11316
11317 /*
11318 * (For ICH8 variants)
11319 * If PHY detection failed, use BM's r/w function and retry.
11320 */
11321 if (LIST_FIRST(&mii->mii_phys) == NULL) {
11322 /* if failed, retry with *_bm_* */
11323 aprint_verbose_dev(dev, "Assumed PHY access function "
11324 "(type = %d) might be incorrect. Use BM and retry.\n",
11325 sc->sc_phytype);
11326 sc->sc_phytype = WMPHY_BM;
11327 mii->mii_readreg = wm_gmii_bm_readreg;
11328 mii->mii_writereg = wm_gmii_bm_writereg;
11329
11330 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11331 MII_OFFSET_ANY, MIIF_DOPAUSE);
11332 }
11333
11334 if (LIST_FIRST(&mii->mii_phys) == NULL) {
11335 		/* No PHY was found */
11336 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
11337 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
11338 sc->sc_phytype = WMPHY_NONE;
11339 } else {
11340 struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
11341
11342 /*
11343 		 * A PHY was found. Check the PHY type again with this second
11344 		 * call of wm_gmii_setup_phytype().
11345 */
11346 wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
11347 child->mii_mpd_model);
11348
11349 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
11350 }
11351 }
11352
11353 /*
11354 * wm_gmii_mediachange: [ifmedia interface function]
11355 *
11356 * Set hardware to newly-selected media on a 1000BASE-T device.
11357 */
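/*
 * Outline of the CTRL register handling below: CTRL_SLU is always
 * set; with autonegotiation (or on chips newer than the 82543) the
 * FRCSPD/FRCFDX force bits are cleared so the MAC follows the PHY,
 * while forced media clear CTRL_ASDE and set FRCSPD/FRCFDX plus the
 * requested speed and duplex bits.
 */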
11358 static int
11359 wm_gmii_mediachange(struct ifnet *ifp)
11360 {
11361 struct wm_softc *sc = ifp->if_softc;
11362 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11363 uint32_t reg;
11364 int rc;
11365
11366 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11367 device_xname(sc->sc_dev), __func__));
11368
11369 KASSERT(mutex_owned(sc->sc_core_lock));
11370
11371 if ((sc->sc_if_flags & IFF_UP) == 0)
11372 return 0;
11373
11374 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
11375 if ((sc->sc_type == WM_T_82580)
11376 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
11377 || (sc->sc_type == WM_T_I211)) {
11378 reg = CSR_READ(sc, WMREG_PHPM);
11379 reg &= ~PHPM_GO_LINK_D;
11380 CSR_WRITE(sc, WMREG_PHPM, reg);
11381 }
11382
11383 /* Disable D0 LPLU. */
11384 wm_lplu_d0_disable(sc);
11385
11386 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
11387 sc->sc_ctrl |= CTRL_SLU;
11388 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11389 || (sc->sc_type > WM_T_82543)) {
11390 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
11391 } else {
11392 sc->sc_ctrl &= ~CTRL_ASDE;
11393 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11394 if (ife->ifm_media & IFM_FDX)
11395 sc->sc_ctrl |= CTRL_FD;
11396 switch (IFM_SUBTYPE(ife->ifm_media)) {
11397 case IFM_10_T:
11398 sc->sc_ctrl |= CTRL_SPEED_10;
11399 break;
11400 case IFM_100_TX:
11401 sc->sc_ctrl |= CTRL_SPEED_100;
11402 break;
11403 case IFM_1000_T:
11404 sc->sc_ctrl |= CTRL_SPEED_1000;
11405 break;
11406 case IFM_NONE:
11407 /* There is no specific setting for IFM_NONE */
11408 break;
11409 default:
11410 panic("wm_gmii_mediachange: bad media 0x%x",
11411 ife->ifm_media);
11412 }
11413 }
11414 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11415 CSR_WRITE_FLUSH(sc);
11416
11417 if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
11418 wm_serdes_mediachange(ifp);
11419
11420 if (sc->sc_type <= WM_T_82543)
11421 wm_gmii_reset(sc);
11422 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
11423 && ((sc->sc_flags & WM_F_SGMII) != 0)) {
11424 		/* Allow time for the SFP cage to power up the PHY */
11425 delay(300 * 1000);
11426 wm_gmii_reset(sc);
11427 }
11428
11429 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
11430 return 0;
11431 return rc;
11432 }
11433
11434 /*
11435 * wm_gmii_mediastatus: [ifmedia interface function]
11436 *
11437 * Get the current interface media status on a 1000BASE-T device.
11438 */
11439 static void
11440 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11441 {
11442 struct wm_softc *sc = ifp->if_softc;
11443
11444 KASSERT(mutex_owned(sc->sc_core_lock));
11445
11446 ether_mediastatus(ifp, ifmr);
11447 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
11448 | sc->sc_flowflags;
11449 }
11450
11451 #define MDI_IO CTRL_SWDPIN(2)
11452 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
11453 #define MDI_CLK CTRL_SWDPIN(3)
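/*
 * The 82543 has no MDIC register; MII management frames are
 * bit-banged through the software-definable pins above. A minimal
 * sketch of how one bit is clocked out below (10us per phase):
 *
 *	v = (data & bit) ? (v | MDI_IO) : (v & ~MDI_IO);
 *	CSR_WRITE(sc, WMREG_CTRL, v);		// data valid, clock low
 *	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);	// rising edge latches it
 *	CSR_WRITE(sc, WMREG_CTRL, v);		// clock low again
 */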
11454
11455 static void
11456 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
11457 {
11458 uint32_t i, v;
11459
11460 v = CSR_READ(sc, WMREG_CTRL);
11461 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11462 v |= MDI_DIR | CTRL_SWDPIO(3);
11463
11464 for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
11465 if (data & i)
11466 v |= MDI_IO;
11467 else
11468 v &= ~MDI_IO;
11469 CSR_WRITE(sc, WMREG_CTRL, v);
11470 CSR_WRITE_FLUSH(sc);
11471 delay(10);
11472 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11473 CSR_WRITE_FLUSH(sc);
11474 delay(10);
11475 CSR_WRITE(sc, WMREG_CTRL, v);
11476 CSR_WRITE_FLUSH(sc);
11477 delay(10);
11478 }
11479 }
11480
11481 static uint16_t
11482 wm_i82543_mii_recvbits(struct wm_softc *sc)
11483 {
11484 uint32_t v, i;
11485 uint16_t data = 0;
11486
11487 v = CSR_READ(sc, WMREG_CTRL);
11488 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11489 v |= CTRL_SWDPIO(3);
11490
11491 CSR_WRITE(sc, WMREG_CTRL, v);
11492 CSR_WRITE_FLUSH(sc);
11493 delay(10);
11494 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11495 CSR_WRITE_FLUSH(sc);
11496 delay(10);
11497 CSR_WRITE(sc, WMREG_CTRL, v);
11498 CSR_WRITE_FLUSH(sc);
11499 delay(10);
11500
11501 for (i = 0; i < 16; i++) {
11502 data <<= 1;
11503 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11504 CSR_WRITE_FLUSH(sc);
11505 delay(10);
11506 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
11507 data |= 1;
11508 CSR_WRITE(sc, WMREG_CTRL, v);
11509 CSR_WRITE_FLUSH(sc);
11510 delay(10);
11511 }
11512
11513 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11514 CSR_WRITE_FLUSH(sc);
11515 delay(10);
11516 CSR_WRITE(sc, WMREG_CTRL, v);
11517 CSR_WRITE_FLUSH(sc);
11518 delay(10);
11519
11520 return data;
11521 }
11522
11523 #undef MDI_IO
11524 #undef MDI_DIR
11525 #undef MDI_CLK
11526
11527 /*
11528 * wm_gmii_i82543_readreg: [mii interface function]
11529 *
11530 * Read a PHY register on the GMII (i82543 version).
11531 */
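/*
 * The 14-bit command frame sent below follows the IEEE 802.3
 * clause 22 layout (after a preamble of 32 ones):
 *
 *	bits 13-12: MII_COMMAND_START
 *	bits 11-10: MII_COMMAND_READ
 *	bits 9-5:   PHY address
 *	bits 4-0:   register address
 */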
11532 static int
11533 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
11534 {
11535 struct wm_softc *sc = device_private(dev);
11536
11537 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11538 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
11539 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
11540 *val = wm_i82543_mii_recvbits(sc) & 0xffff;
11541
11542 DPRINTF(sc, WM_DEBUG_GMII,
11543 ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
11544 device_xname(dev), phy, reg, *val));
11545
11546 return 0;
11547 }
11548
11549 /*
11550 * wm_gmii_i82543_writereg: [mii interface function]
11551 *
11552 * Write a PHY register on the GMII (i82543 version).
11553 */
11554 static int
11555 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
11556 {
11557 struct wm_softc *sc = device_private(dev);
11558
11559 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11560 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
11561 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
11562 (MII_COMMAND_START << 30), 32);
11563
11564 return 0;
11565 }
11566
11567 /*
11568 * wm_gmii_mdic_readreg: [mii interface function]
11569 *
11570 * Read a PHY register on the GMII.
11571 */
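/*
 * A minimal sketch of the MDIC read transaction performed below
 * (polling and error handling elided):
 *
 *	CSR_WRITE(sc, WMREG_MDIC,
 *	    MDIC_OP_READ | MDIC_PHYADD(phy) | MDIC_REGADD(reg));
 *	// ... poll until MDIC_READY is set ...
 *	mdic = CSR_READ(sc, WMREG_MDIC);
 *	if ((mdic & MDIC_E) == 0)
 *		*val = MDIC_DATA(mdic);
 */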
11572 static int
11573 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
11574 {
11575 struct wm_softc *sc = device_private(dev);
11576 uint32_t mdic = 0;
11577 int i;
11578
11579 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11580 && (reg > MII_ADDRMASK)) {
11581 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11582 __func__, sc->sc_phytype, reg);
11583 reg &= MII_ADDRMASK;
11584 }
11585
11586 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
11587 MDIC_REGADD(reg));
11588
11589 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11590 delay(50);
11591 mdic = CSR_READ(sc, WMREG_MDIC);
11592 if (mdic & MDIC_READY)
11593 break;
11594 }
11595
11596 if ((mdic & MDIC_READY) == 0) {
11597 DPRINTF(sc, WM_DEBUG_GMII,
11598 ("%s: MDIC read timed out: phy %d reg %d\n",
11599 device_xname(dev), phy, reg));
11600 return ETIMEDOUT;
11601 } else if (mdic & MDIC_E) {
11602 /* This is normal if no PHY is present. */
11603 DPRINTF(sc, WM_DEBUG_GMII,
11604 ("%s: MDIC read error: phy %d reg %d\n",
11605 device_xname(sc->sc_dev), phy, reg));
11606 return -1;
11607 } else
11608 *val = MDIC_DATA(mdic);
11609
11610 /*
11611 * Allow some time after each MDIC transaction to avoid
11612 * reading duplicate data in the next MDIC transaction.
11613 */
11614 if (sc->sc_type == WM_T_PCH2)
11615 delay(100);
11616
11617 return 0;
11618 }
11619
11620 /*
11621 * wm_gmii_mdic_writereg: [mii interface function]
11622 *
11623 * Write a PHY register on the GMII.
11624 */
11625 static int
11626 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
11627 {
11628 struct wm_softc *sc = device_private(dev);
11629 uint32_t mdic = 0;
11630 int i;
11631
11632 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11633 && (reg > MII_ADDRMASK)) {
11634 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11635 __func__, sc->sc_phytype, reg);
11636 reg &= MII_ADDRMASK;
11637 }
11638
11639 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
11640 MDIC_REGADD(reg) | MDIC_DATA(val));
11641
11642 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11643 delay(50);
11644 mdic = CSR_READ(sc, WMREG_MDIC);
11645 if (mdic & MDIC_READY)
11646 break;
11647 }
11648
11649 if ((mdic & MDIC_READY) == 0) {
11650 DPRINTF(sc, WM_DEBUG_GMII,
11651 ("%s: MDIC write timed out: phy %d reg %d\n",
11652 device_xname(dev), phy, reg));
11653 return ETIMEDOUT;
11654 } else if (mdic & MDIC_E) {
11655 DPRINTF(sc, WM_DEBUG_GMII,
11656 ("%s: MDIC write error: phy %d reg %d\n",
11657 device_xname(dev), phy, reg));
11658 return -1;
11659 }
11660
11661 /*
11662 * Allow some time after each MDIC transaction to avoid
11663 * reading duplicate data in the next MDIC transaction.
11664 */
11665 if (sc->sc_type == WM_T_PCH2)
11666 delay(100);
11667
11668 return 0;
11669 }
11670
11671 /*
11672 * wm_gmii_i82544_readreg: [mii interface function]
11673 *
11674 * Read a PHY register on the GMII.
11675 */
11676 static int
11677 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
11678 {
11679 struct wm_softc *sc = device_private(dev);
11680 int rv;
11681
11682 rv = sc->phy.acquire(sc);
11683 if (rv != 0) {
11684 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11685 return rv;
11686 }
11687
11688 rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
11689
11690 sc->phy.release(sc);
11691
11692 return rv;
11693 }
11694
11695 static int
11696 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11697 {
11698 struct wm_softc *sc = device_private(dev);
11699 int rv;
11700
11701 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11702 switch (sc->sc_phytype) {
11703 case WMPHY_IGP:
11704 case WMPHY_IGP_2:
11705 case WMPHY_IGP_3:
11706 rv = wm_gmii_mdic_writereg(dev, phy,
11707 IGPHY_PAGE_SELECT, reg);
11708 if (rv != 0)
11709 return rv;
11710 break;
11711 default:
11712 #ifdef WM_DEBUG
11713 device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
11714 __func__, sc->sc_phytype, reg);
11715 #endif
11716 break;
11717 }
11718 }
11719
11720 return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11721 }
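/*
 * Illustrative example of the IGP paging scheme above (the register
 * number is hypothetical): a request for reg 0x25 (> MII_ADDRMASK)
 * first writes 0x25 to IGPHY_PAGE_SELECT and then reads offset 0x05
 * (reg & MII_ADDRMASK) via MDIC; registers at or below
 * BME1000_MAX_MULTI_PAGE_REG skip the page-select write.
 */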
11722
11723 /*
11724 * wm_gmii_i82544_writereg: [mii interface function]
11725 *
11726 * Write a PHY register on the GMII.
11727 */
11728 static int
11729 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
11730 {
11731 struct wm_softc *sc = device_private(dev);
11732 int rv;
11733
11734 rv = sc->phy.acquire(sc);
11735 if (rv != 0) {
11736 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11737 return rv;
11738 }
11739
11740 rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
11741 sc->phy.release(sc);
11742
11743 return rv;
11744 }
11745
11746 static int
11747 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11748 {
11749 struct wm_softc *sc = device_private(dev);
11750 int rv;
11751
11752 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11753 switch (sc->sc_phytype) {
11754 case WMPHY_IGP:
11755 case WMPHY_IGP_2:
11756 case WMPHY_IGP_3:
11757 rv = wm_gmii_mdic_writereg(dev, phy,
11758 IGPHY_PAGE_SELECT, reg);
11759 if (rv != 0)
11760 return rv;
11761 break;
11762 default:
11763 #ifdef WM_DEBUG
11764 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x\n",
11765 __func__, sc->sc_phytype, reg);
11766 #endif
11767 break;
11768 }
11769 }
11770
11771 return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11772 }
11773
11774 /*
11775 * wm_gmii_i80003_readreg: [mii interface function]
11776 *
11777  * Read a PHY register on the Kumeran bus.
11778 * This could be handled by the PHY layer if we didn't have to lock the
11779 * resource ...
11780 */
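/*
 * Sketch of the GG82563 paging used below: the page is taken from
 * reg >> GG82563_PAGE_SHIFT and written to a page-select register
 * first. Because registers 30 and 31 overlap the page-select
 * registers themselves, offsets at or above GG82563_MIN_ALT_REG must
 * be reached through GG82563_PHY_PAGE_SELECT_ALT instead.
 */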
11781 static int
11782 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
11783 {
11784 struct wm_softc *sc = device_private(dev);
11785 int page_select;
11786 uint16_t temp, temp2;
11787 int rv;
11788
11789 if (phy != 1) /* Only one PHY on kumeran bus */
11790 return -1;
11791
11792 rv = sc->phy.acquire(sc);
11793 if (rv != 0) {
11794 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11795 return rv;
11796 }
11797
11798 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11799 page_select = GG82563_PHY_PAGE_SELECT;
11800 else {
11801 /*
11802 * Use Alternative Page Select register to access registers
11803 * 30 and 31.
11804 */
11805 page_select = GG82563_PHY_PAGE_SELECT_ALT;
11806 }
11807 temp = reg >> GG82563_PAGE_SHIFT;
11808 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11809 goto out;
11810
11811 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11812 /*
11813 		 * Wait an extra 200us to work around a bug in the ready bit
11814 		 * of the MDIC register.
11815 */
11816 delay(200);
11817 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11818 if ((rv != 0) || (temp2 != temp)) {
11819 device_printf(dev, "%s failed\n", __func__);
11820 rv = -1;
11821 goto out;
11822 }
11823 delay(200);
11824 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11825 delay(200);
11826 } else
11827 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11828
11829 out:
11830 sc->phy.release(sc);
11831 return rv;
11832 }
11833
11834 /*
11835 * wm_gmii_i80003_writereg: [mii interface function]
11836 *
11837  * Write a PHY register on the Kumeran bus.
11838 * This could be handled by the PHY layer if we didn't have to lock the
11839 * resource ...
11840 */
11841 static int
11842 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
11843 {
11844 struct wm_softc *sc = device_private(dev);
11845 int page_select, rv;
11846 uint16_t temp, temp2;
11847
11848 if (phy != 1) /* Only one PHY on kumeran bus */
11849 return -1;
11850
11851 rv = sc->phy.acquire(sc);
11852 if (rv != 0) {
11853 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11854 return rv;
11855 }
11856
11857 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11858 page_select = GG82563_PHY_PAGE_SELECT;
11859 else {
11860 /*
11861 * Use Alternative Page Select register to access registers
11862 * 30 and 31.
11863 */
11864 page_select = GG82563_PHY_PAGE_SELECT_ALT;
11865 }
11866 temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
11867 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11868 goto out;
11869
11870 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11871 /*
11872 		 * Wait an extra 200us to work around a bug in the ready bit
11873 		 * of the MDIC register.
11874 */
11875 delay(200);
11876 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11877 if ((rv != 0) || (temp2 != temp)) {
11878 device_printf(dev, "%s failed\n", __func__);
11879 rv = -1;
11880 goto out;
11881 }
11882 delay(200);
11883 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11884 delay(200);
11885 } else
11886 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11887
11888 out:
11889 sc->phy.release(sc);
11890 return rv;
11891 }
11892
11893 /*
11894 * wm_gmii_bm_readreg: [mii interface function]
11895 *
11896  * Read a PHY register on the BM PHY (82567).
11897 * This could be handled by the PHY layer if we didn't have to lock the
11898 * resource ...
11899 */
11900 static int
11901 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
11902 {
11903 struct wm_softc *sc = device_private(dev);
11904 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11905 int rv;
11906
11907 rv = sc->phy.acquire(sc);
11908 if (rv != 0) {
11909 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11910 return rv;
11911 }
11912
11913 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11914 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11915 || (reg == 31)) ? 1 : phy;
11916 /* Page 800 works differently than the rest so it has its own func */
11917 if (page == BM_WUC_PAGE) {
11918 rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11919 goto release;
11920 }
11921
11922 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11923 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11924 && (sc->sc_type != WM_T_82583))
11925 rv = wm_gmii_mdic_writereg(dev, phy,
11926 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11927 else
11928 rv = wm_gmii_mdic_writereg(dev, phy,
11929 BME1000_PHY_PAGE_SELECT, page);
11930 if (rv != 0)
11931 goto release;
11932 }
11933
11934 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11935
11936 release:
11937 sc->phy.release(sc);
11938 return rv;
11939 }
11940
11941 /*
11942 * wm_gmii_bm_writereg: [mii interface function]
11943 *
11944  * Write a PHY register on the BM PHY (82567).
11945 * This could be handled by the PHY layer if we didn't have to lock the
11946 * resource ...
11947 */
11948 static int
11949 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
11950 {
11951 struct wm_softc *sc = device_private(dev);
11952 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11953 int rv;
11954
11955 rv = sc->phy.acquire(sc);
11956 if (rv != 0) {
11957 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11958 return rv;
11959 }
11960
11961 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11962 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11963 || (reg == 31)) ? 1 : phy;
11964 /* Page 800 works differently than the rest so it has its own func */
11965 if (page == BM_WUC_PAGE) {
11966 rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
11967 goto release;
11968 }
11969
11970 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11971 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11972 && (sc->sc_type != WM_T_82583))
11973 rv = wm_gmii_mdic_writereg(dev, phy,
11974 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11975 else
11976 rv = wm_gmii_mdic_writereg(dev, phy,
11977 BME1000_PHY_PAGE_SELECT, page);
11978 if (rv != 0)
11979 goto release;
11980 }
11981
11982 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11983
11984 release:
11985 sc->phy.release(sc);
11986 return rv;
11987 }
11988
11989 /*
11990 * wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
11991 * @dev: pointer to the HW structure
11992 * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
11993 *
11994 * Assumes semaphore already acquired and phy_reg points to a valid memory
11995 * address to store contents of the BM_WUC_ENABLE_REG register.
11996 */
11997 static int
11998 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11999 {
12000 #ifdef WM_DEBUG
12001 struct wm_softc *sc = device_private(dev);
12002 #endif
12003 uint16_t temp;
12004 int rv;
12005
12006 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12007 device_xname(dev), __func__));
12008
12009 if (!phy_regp)
12010 return -1;
12011
12012 /* All page select, port ctrl and wakeup registers use phy address 1 */
12013
12014 /* Select Port Control Registers page */
12015 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12016 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12017 if (rv != 0)
12018 return rv;
12019
12020 /* Read WUCE and save it */
12021 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
12022 if (rv != 0)
12023 return rv;
12024
12025 /* Enable both PHY wakeup mode and Wakeup register page writes.
12026 * Prevent a power state change by disabling ME and Host PHY wakeup.
12027 */
12028 temp = *phy_regp;
12029 temp |= BM_WUC_ENABLE_BIT;
12030 temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
12031
12032 if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
12033 return rv;
12034
12035 /* Select Host Wakeup Registers page - caller now able to write
12036 * registers on the Wakeup registers page
12037 */
12038 return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12039 BM_WUC_PAGE << IGP3_PAGE_SHIFT);
12040 }
12041
12042 /*
12043 * wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
12044 * @dev: pointer to the HW structure
12045 * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
12046 *
12047 * Restore BM_WUC_ENABLE_REG to its original value.
12048 *
12049 * Assumes semaphore already acquired and *phy_reg is the contents of the
12050 * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
12051 * caller.
12052 */
12053 static int
12054 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12055 {
12056 #ifdef WM_DEBUG
12057 struct wm_softc *sc = device_private(dev);
12058 #endif
12059
12060 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12061 device_xname(dev), __func__));
12062
12063 if (!phy_regp)
12064 return -1;
12065
12066 /* Select Port Control Registers page */
12067 wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12068 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12069
12070 /* Restore 769.17 to its original value */
12071 wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
12072
12073 return 0;
12074 }
12075
12076 /*
12077 * wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
12078 * @sc: pointer to the HW structure
12079 * @offset: register offset to be read or written
12080 * @val: pointer to the data to read or write
12081 * @rd: determines if operation is read or write
12082 * @page_set: BM_WUC_PAGE already set and access enabled
12083 *
12084  * Read the PHY register at offset and store the retrieved information
12085  * in *val, or write *val to the PHY register at offset. Note that the
12086  * procedure to access the PHY wakeup registers differs from that for
12087  * the other PHY registers. It works as follows:
12088 * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
12089  * 2) Set page to 800 for host (801 if we were the manageability engine)
12090 * 3) Write the address using the address opcode (0x11)
12091 * 4) Read or write the data using the data opcode (0x12)
12092 * 5) Restore 769.17.2 to its original value
12093 *
12094 * Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
12095 * step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
12096 *
12097 * Assumes semaphore is already acquired. When page_set==TRUE, assumes
12098 * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
12099 * is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
12100 */
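/*
 * A condensed sketch of a read with page_set == false (error
 * handling omitted):
 *
 *	uint16_t wuce, data;
 *	wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);	// steps 1-2
 *	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
 *	wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, &data);
 *	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);	// step 5
 */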
12101 static int
12102 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
12103 bool page_set)
12104 {
12105 struct wm_softc *sc = device_private(dev);
12106 uint16_t regnum = BM_PHY_REG_NUM(offset);
12107 uint16_t page = BM_PHY_REG_PAGE(offset);
12108 uint16_t wuce;
12109 int rv = 0;
12110
12111 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12112 device_xname(dev), __func__));
12113 /* XXX Gig must be disabled for MDIO accesses to page 800 */
12114 if ((sc->sc_type == WM_T_PCH)
12115 && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
12116 device_printf(dev,
12117 "Attempting to access page %d while gig enabled.\n", page);
12118 }
12119
12120 if (!page_set) {
12121 /* Enable access to PHY wakeup registers */
12122 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
12123 if (rv != 0) {
12124 device_printf(dev,
12125 "%s: Could not enable PHY wakeup reg access\n",
12126 __func__);
12127 return rv;
12128 }
12129 }
12130 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
12131 device_xname(sc->sc_dev), __func__, page, regnum));
12132
12133 /*
12134 	 * 3) and 4) Access the PHY wakeup register.
12135 	 * See the procedure described in the function comment above.
12136 */
12137
12138 /* Write the Wakeup register page offset value using opcode 0x11 */
12139 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
12140 if (rv != 0)
12141 return rv;
12142
12143 if (rd) {
12144 /* Read the Wakeup register page value using opcode 0x12 */
12145 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
12146 } else {
12147 /* Write the Wakeup register page value using opcode 0x12 */
12148 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
12149 }
12150 if (rv != 0)
12151 return rv;
12152
12153 if (!page_set)
12154 rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
12155
12156 return rv;
12157 }
12158
12159 /*
12160 * wm_gmii_hv_readreg: [mii interface function]
12161 *
12162  * Read a PHY register on the HV (PCH and newer) PHY.
12163 * This could be handled by the PHY layer if we didn't have to lock the
12164 * resource ...
12165 */
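/*
 * HV register arguments encode both a page and a register number:
 * page = BM_PHY_REG_PAGE(reg) and regnum = BM_PHY_REG_NUM(reg), as
 * decoded in the locked variant below. Accesses to pages at or above
 * HV_INTC_FC_PAGE_START are redirected to PHY address 1.
 */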
12166 static int
12167 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
12168 {
12169 struct wm_softc *sc = device_private(dev);
12170 int rv;
12171
12172 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12173 device_xname(dev), __func__));
12174
12175 rv = sc->phy.acquire(sc);
12176 if (rv != 0) {
12177 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12178 return rv;
12179 }
12180
12181 rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
12182 sc->phy.release(sc);
12183 return rv;
12184 }
12185
12186 static int
12187 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12188 {
12189 uint16_t page = BM_PHY_REG_PAGE(reg);
12190 uint16_t regnum = BM_PHY_REG_NUM(reg);
12191 int rv;
12192
12193 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12194
12195 /* Page 800 works differently than the rest so it has its own func */
12196 if (page == BM_WUC_PAGE)
12197 return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
12198
12199 /*
12200 	 * Pages lower than 768 work differently than the rest and are not
12201 	 * handled here.
12202 */
12203 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12204 device_printf(dev, "gmii_hv_readreg!!!\n");
12205 return -1;
12206 }
12207
12208 /*
12209 * XXX I21[789] documents say that the SMBus Address register is at
12210 * PHY address 01, Page 0 (not 768), Register 26.
12211 */
12212 if (page == HV_INTC_FC_PAGE_START)
12213 page = 0;
12214
12215 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12216 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12217 page << BME1000_PAGE_SHIFT);
12218 if (rv != 0)
12219 return rv;
12220 }
12221
12222 return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
12223 }
12224
12225 /*
12226 * wm_gmii_hv_writereg: [mii interface function]
12227 *
12228  * Write a PHY register on the HV (PCH and newer) PHY.
12229 * This could be handled by the PHY layer if we didn't have to lock the
12230 * resource ...
12231 */
12232 static int
12233 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
12234 {
12235 struct wm_softc *sc = device_private(dev);
12236 int rv;
12237
12238 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12239 device_xname(dev), __func__));
12240
12241 rv = sc->phy.acquire(sc);
12242 if (rv != 0) {
12243 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12244 return rv;
12245 }
12246
12247 rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
12248 sc->phy.release(sc);
12249
12250 return rv;
12251 }
12252
12253 static int
12254 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12255 {
12256 struct wm_softc *sc = device_private(dev);
12257 uint16_t page = BM_PHY_REG_PAGE(reg);
12258 uint16_t regnum = BM_PHY_REG_NUM(reg);
12259 int rv;
12260
12261 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12262
12263 /* Page 800 works differently than the rest so it has its own func */
12264 if (page == BM_WUC_PAGE)
12265 return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
12266 false);
12267
12268 /*
12269 	 * Pages lower than 768 work differently than the rest and are not
12270 	 * handled here.
12271 */
12272 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12273 device_printf(dev, "gmii_hv_writereg!!!\n");
12274 return -1;
12275 }
12276
12277 {
12278 /*
12279 * XXX I21[789] documents say that the SMBus Address register
12280 * is at PHY address 01, Page 0 (not 768), Register 26.
12281 */
12282 if (page == HV_INTC_FC_PAGE_START)
12283 page = 0;
12284
12285 /*
12286 * XXX Workaround MDIO accesses being disabled after entering
12287 * IEEE Power Down (whenever bit 11 of the PHY control
12288 * register is set)
12289 */
12290 if (sc->sc_phytype == WMPHY_82578) {
12291 struct mii_softc *child;
12292
12293 child = LIST_FIRST(&sc->sc_mii.mii_phys);
12294 if ((child != NULL) && (child->mii_mpd_rev >= 1)
12295 && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
12296 && ((val & (1 << 11)) != 0)) {
12297 device_printf(dev, "XXX need workaround\n");
12298 }
12299 }
12300
12301 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12302 rv = wm_gmii_mdic_writereg(dev, 1,
12303 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12304 if (rv != 0)
12305 return rv;
12306 }
12307 }
12308
12309 return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
12310 }
12311
12312 /*
12313 * wm_gmii_82580_readreg: [mii interface function]
12314 *
12315 * Read a PHY register on the 82580 and I350.
12316 * This could be handled by the PHY layer if we didn't have to lock the
12317 * resource ...
12318 */
12319 static int
12320 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
12321 {
12322 struct wm_softc *sc = device_private(dev);
12323 int rv;
12324
12325 rv = sc->phy.acquire(sc);
12326 if (rv != 0) {
12327 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12328 return rv;
12329 }
12330
12331 #ifdef DIAGNOSTIC
12332 if (reg > MII_ADDRMASK) {
12333 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12334 __func__, sc->sc_phytype, reg);
12335 reg &= MII_ADDRMASK;
12336 }
12337 #endif
12338 rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
12339
12340 sc->phy.release(sc);
12341 return rv;
12342 }
12343
12344 /*
12345 * wm_gmii_82580_writereg: [mii interface function]
12346 *
12347 * Write a PHY register on the 82580 and I350.
12348 * This could be handled by the PHY layer if we didn't have to lock the
12349 * resource ...
12350 */
12351 static int
12352 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
12353 {
12354 struct wm_softc *sc = device_private(dev);
12355 int rv;
12356
12357 rv = sc->phy.acquire(sc);
12358 if (rv != 0) {
12359 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12360 return rv;
12361 }
12362
12363 #ifdef DIAGNOSTIC
12364 if (reg > MII_ADDRMASK) {
12365 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12366 __func__, sc->sc_phytype, reg);
12367 reg &= MII_ADDRMASK;
12368 }
12369 #endif
12370 rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
12371
12372 sc->phy.release(sc);
12373 return rv;
12374 }
12375
12376 /*
12377 * wm_gmii_gs40g_readreg: [mii interface function]
12378 *
12379  * Read a PHY register on the I210 and I211.
12380 * This could be handled by the PHY layer if we didn't have to lock the
12381 * resource ...
12382 */
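/*
 * GS40G register arguments carry the page in the upper bits:
 * page = reg >> GS40G_PAGE_SHIFT, offset = reg & GS40G_OFFSET_MASK,
 * as decoded below.
 */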
12383 static int
12384 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
12385 {
12386 struct wm_softc *sc = device_private(dev);
12387 int page, offset;
12388 int rv;
12389
12390 /* Acquire semaphore */
12391 rv = sc->phy.acquire(sc);
12392 if (rv != 0) {
12393 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12394 return rv;
12395 }
12396
12397 /* Page select */
12398 page = reg >> GS40G_PAGE_SHIFT;
12399 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12400 if (rv != 0)
12401 goto release;
12402
12403 /* Read reg */
12404 offset = reg & GS40G_OFFSET_MASK;
12405 rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
12406
12407 release:
12408 sc->phy.release(sc);
12409 return rv;
12410 }
12411
12412 /*
12413 * wm_gmii_gs40g_writereg: [mii interface function]
12414 *
12415 * Write a PHY register on the I210 and I211.
12416 * This could be handled by the PHY layer if we didn't have to lock the
12417 * resource ...
12418 */
12419 static int
12420 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
12421 {
12422 struct wm_softc *sc = device_private(dev);
12423 uint16_t page;
12424 int offset, rv;
12425
12426 /* Acquire semaphore */
12427 rv = sc->phy.acquire(sc);
12428 if (rv != 0) {
12429 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12430 return rv;
12431 }
12432
12433 /* Page select */
12434 page = reg >> GS40G_PAGE_SHIFT;
12435 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12436 if (rv != 0)
12437 goto release;
12438
12439 /* Write reg */
12440 offset = reg & GS40G_OFFSET_MASK;
12441 rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
12442
12443 release:
12444 /* Release semaphore */
12445 sc->phy.release(sc);
12446 return rv;
12447 }
12448
12449 /*
12450 * wm_gmii_statchg: [mii interface function]
12451 *
12452 * Callback from MII layer when media changes.
12453 */
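/*
 * Outline of the flow-control handling below: the negotiated flags
 * from the MII layer are cached in sc_flowflags; IFM_ETH_TXPAUSE
 * turns on CTRL_TFCE and FCRTL_XONE, and IFM_ETH_RXPAUSE turns on
 * CTRL_RFCE.
 */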
12454 static void
12455 wm_gmii_statchg(struct ifnet *ifp)
12456 {
12457 struct wm_softc *sc = ifp->if_softc;
12458 struct mii_data *mii = &sc->sc_mii;
12459
12460 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
12461 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12462 sc->sc_fcrtl &= ~FCRTL_XONE;
12463
12464 /* Get flow control negotiation result. */
12465 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
12466 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
12467 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
12468 mii->mii_media_active &= ~IFM_ETH_FMASK;
12469 }
12470
12471 if (sc->sc_flowflags & IFM_FLOW) {
12472 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
12473 sc->sc_ctrl |= CTRL_TFCE;
12474 sc->sc_fcrtl |= FCRTL_XONE;
12475 }
12476 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
12477 sc->sc_ctrl |= CTRL_RFCE;
12478 }
12479
12480 if (mii->mii_media_active & IFM_FDX) {
12481 DPRINTF(sc, WM_DEBUG_LINK,
12482 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
12483 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12484 } else {
12485 DPRINTF(sc, WM_DEBUG_LINK,
12486 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
12487 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12488 }
12489
12490 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12491 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12492 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
12493 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
12494 if (sc->sc_type == WM_T_80003) {
12495 switch (IFM_SUBTYPE(mii->mii_media_active)) {
12496 case IFM_1000_T:
12497 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12498 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
12499 sc->sc_tipg = TIPG_1000T_80003_DFLT;
12500 break;
12501 default:
12502 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12503 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
12504 sc->sc_tipg = TIPG_10_100_80003_DFLT;
12505 break;
12506 }
12507 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
12508 }
12509 }
12510
12511 /* kumeran related (80003, ICH* and PCH*) */
12512
12513 /*
12514 * wm_kmrn_readreg:
12515 *
12516  * Read a Kumeran register.
12517 */
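/*
 * A minimal sketch of the Kumeran read performed below: the offset
 * is shifted into the KUMCTRLSTA offset field, KUMCTRLSTA_REN
 * requests a read, and the data comes back in the low bits of the
 * same register:
 *
 *	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
 *	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
 *	    KUMCTRLSTA_REN);
 *	delay(2);
 *	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
 */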
12518 static int
12519 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
12520 {
12521 int rv;
12522
12523 if (sc->sc_type == WM_T_80003)
12524 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12525 else
12526 rv = sc->phy.acquire(sc);
12527 if (rv != 0) {
12528 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12529 __func__);
12530 return rv;
12531 }
12532
12533 rv = wm_kmrn_readreg_locked(sc, reg, val);
12534
12535 if (sc->sc_type == WM_T_80003)
12536 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12537 else
12538 sc->phy.release(sc);
12539
12540 return rv;
12541 }
12542
12543 static int
12544 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
12545 {
12546
12547 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12548 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
12549 KUMCTRLSTA_REN);
12550 CSR_WRITE_FLUSH(sc);
12551 delay(2);
12552
12553 *val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
12554
12555 return 0;
12556 }
12557
12558 /*
12559 * wm_kmrn_writereg:
12560 *
12561  * Write a Kumeran register.
12562 */
12563 static int
12564 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
12565 {
12566 int rv;
12567
12568 if (sc->sc_type == WM_T_80003)
12569 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12570 else
12571 rv = sc->phy.acquire(sc);
12572 if (rv != 0) {
12573 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12574 __func__);
12575 return rv;
12576 }
12577
12578 rv = wm_kmrn_writereg_locked(sc, reg, val);
12579
12580 if (sc->sc_type == WM_T_80003)
12581 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12582 else
12583 sc->phy.release(sc);
12584
12585 return rv;
12586 }
12587
12588 static int
12589 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
12590 {
12591
12592 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12593 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
12594
12595 return 0;
12596 }
12597
12598 /*
12599  * EMI register related (82579, WMPHY_I217 (PCH2 and newer)).
12600 * This access method is different from IEEE MMD.
12601 */
12602 static int
12603 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
12604 {
12605 struct wm_softc *sc = device_private(dev);
12606 int rv;
12607
12608 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
12609 if (rv != 0)
12610 return rv;
12611
12612 if (rd)
12613 rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
12614 else
12615 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
12616 return rv;
12617 }
12618
12619 static int
12620 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
12621 {
12622
12623 return wm_access_emi_reg_locked(dev, reg, val, true);
12624 }
12625
12626 static int
12627 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
12628 {
12629
12630 return wm_access_emi_reg_locked(dev, reg, &val, false);
12631 }
12632
12633 /* SGMII related */
12634
12635 /*
12636 * wm_sgmii_uses_mdio
12637 *
12638 * Check whether the transaction is to the internal PHY or the external
12639 * MDIO interface. Return true if it's MDIO.
12640 */
12641 static bool
12642 wm_sgmii_uses_mdio(struct wm_softc *sc)
12643 {
12644 uint32_t reg;
12645 bool ismdio = false;
12646
12647 switch (sc->sc_type) {
12648 case WM_T_82575:
12649 case WM_T_82576:
12650 reg = CSR_READ(sc, WMREG_MDIC);
12651 ismdio = ((reg & MDIC_DEST) != 0);
12652 break;
12653 case WM_T_82580:
12654 case WM_T_I350:
12655 case WM_T_I354:
12656 case WM_T_I210:
12657 case WM_T_I211:
12658 reg = CSR_READ(sc, WMREG_MDICNFG);
12659 ismdio = ((reg & MDICNFG_DEST) != 0);
12660 break;
12661 default:
12662 break;
12663 }
12664
12665 return ismdio;
12666 }
12667
12668 /* Setup internal SGMII PHY for SFP */
12669 static void
12670 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
12671 {
12672 uint16_t id1, id2, phyreg;
12673 int i, rv;
12674
12675 if (((sc->sc_flags & WM_F_SGMII) == 0)
12676 || ((sc->sc_flags & WM_F_SFP) == 0))
12677 return;
12678
12679 for (i = 0; i < MII_NPHY; i++) {
12680 sc->phy.no_errprint = true;
12681 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
12682 if (rv != 0)
12683 continue;
12684 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
12685 if (rv != 0)
12686 continue;
12687 if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
12688 continue;
12689 sc->phy.no_errprint = false;
12690
12691 sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
12692 phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
12693 phyreg |= ESSR_SGMII_WOC_COPPER;
12694 sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
12695 break;
12696 }
12698 }
12699
12700 /*
12701 * wm_sgmii_readreg: [mii interface function]
12702 *
12703  * Read a PHY register on the SGMII.
12704 * This could be handled by the PHY layer if we didn't have to lock the
12705 * resource ...
12706 */
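/*
 * A minimal sketch of the I2CCMD read performed below. The I2C
 * interface returns the two data bytes swapped, so they are swapped
 * back before being handed to the caller:
 *
 *	CSR_WRITE(sc, WMREG_I2CCMD, (reg << I2CCMD_REG_ADDR_SHIFT) |
 *	    (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ);
 *	// ... poll I2CCMD_READY, check I2CCMD_ERROR ...
 *	*val = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
 */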
12707 static int
12708 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
12709 {
12710 struct wm_softc *sc = device_private(dev);
12711 int rv;
12712
12713 rv = sc->phy.acquire(sc);
12714 if (rv != 0) {
12715 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12716 return rv;
12717 }
12718
12719 rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
12720
12721 sc->phy.release(sc);
12722 return rv;
12723 }
12724
12725 static int
12726 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12727 {
12728 struct wm_softc *sc = device_private(dev);
12729 uint32_t i2ccmd;
12730 int i, rv = 0;
12731
12732 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12733 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12734 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12735
12736 /* Poll the ready bit */
12737 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12738 delay(50);
12739 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12740 if (i2ccmd & I2CCMD_READY)
12741 break;
12742 }
12743 if ((i2ccmd & I2CCMD_READY) == 0) {
12744 device_printf(dev, "I2CCMD Read did not complete\n");
12745 rv = ETIMEDOUT;
12746 }
12747 if ((i2ccmd & I2CCMD_ERROR) != 0) {
12748 if (!sc->phy.no_errprint)
12749 device_printf(dev, "I2CCMD Error bit set\n");
12750 rv = EIO;
12751 }
12752
12753 *val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
12754
12755 return rv;
12756 }
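
/*
 * A worked example of the byte swap above (a sketch, not driver code):
 * the I2C interface returns the 16-bit PHY register value with its
 * bytes swapped, so if the PHY register holds 0x1234, the I2CCMD data
 * field reads back as 0x3412 and the expression reassembles it:
 *
 *	uint32_t i2ccmd = 0x3412;		// data field from I2CCMD
 *	uint16_t val = (uint16_t)((i2ccmd >> 8) & 0x00ff)
 *	    | ((i2ccmd << 8) & 0xff00);		// val == 0x1234
 */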
12757
12758 /*
12759 * wm_sgmii_writereg: [mii interface function]
12760 *
12761 * Write a PHY register on the SGMII.
12762 * This could be handled by the PHY layer if we didn't have to lock the
12763 * resource ...
12764 */
12765 static int
12766 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
12767 {
12768 struct wm_softc *sc = device_private(dev);
12769 int rv;
12770
12771 rv = sc->phy.acquire(sc);
12772 if (rv != 0) {
12773 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12774 return rv;
12775 }
12776
12777 rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
12778
12779 sc->phy.release(sc);
12780
12781 return rv;
12782 }
12783
12784 static int
12785 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12786 {
12787 struct wm_softc *sc = device_private(dev);
12788 uint32_t i2ccmd;
12789 uint16_t swapdata;
12790 int rv = 0;
12791 int i;
12792
12793 /* Swap the data bytes for the I2C interface */
12794 swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
12795 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12796 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
12797 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12798
12799 /* Poll the ready bit */
12800 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12801 delay(50);
12802 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12803 if (i2ccmd & I2CCMD_READY)
12804 break;
12805 }
12806 if ((i2ccmd & I2CCMD_READY) == 0) {
12807 device_printf(dev, "I2CCMD Write did not complete\n");
12808 rv = ETIMEDOUT;
12809 }
12810 if ((i2ccmd & I2CCMD_ERROR) != 0) {
12811 device_printf(dev, "I2CCMD Error bit set\n");
12812 rv = EIO;
12813 }
12814
12815 return rv;
12816 }
12817
12818 /* TBI related */
12819
12820 static bool
12821 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
12822 {
12823 bool sig;
12824
12825 sig = ctrl & CTRL_SWDPIN(1);
12826
12827 /*
12828 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
12829 * detect a signal, 1 if they don't.
12830 */
12831 if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
12832 sig = !sig;
12833
12834 return sig;
12835 }
12836
12837 /*
12838 * wm_tbi_mediainit:
12839 *
12840 * Initialize media for use on 1000BASE-X devices.
12841 */
12842 static void
12843 wm_tbi_mediainit(struct wm_softc *sc)
12844 {
12845 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12846 const char *sep = "";
12847
12848 if (sc->sc_type < WM_T_82543)
12849 sc->sc_tipg = TIPG_WM_DFLT;
12850 else
12851 sc->sc_tipg = TIPG_LG_DFLT;
12852
12853 sc->sc_tbi_serdes_anegticks = 5;
12854
12855 /* Initialize our media structures */
12856 sc->sc_mii.mii_ifp = ifp;
12857 sc->sc_ethercom.ec_mii = &sc->sc_mii;
12858
12859 ifp->if_baudrate = IF_Gbps(1);
12860 if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
12861 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12862 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12863 wm_serdes_mediachange, wm_serdes_mediastatus,
12864 sc->sc_core_lock);
12865 } else {
12866 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12867 wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
12868 }
12869
12870 /*
12871 * SWD Pins:
12872 *
12873 * 0 = Link LED (output)
12874 * 1 = Loss Of Signal (input)
12875 */
12876 sc->sc_ctrl |= CTRL_SWDPIO(0);
12877
12878 /* XXX Perhaps this is only for TBI */
12879 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12880 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
12881
12882 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
12883 sc->sc_ctrl &= ~CTRL_LRST;
12884
12885 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12886
12887 #define ADD(ss, mm, dd) \
12888 do { \
12889 aprint_normal("%s%s", sep, ss); \
12890 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
12891 sep = ", "; \
12892 } while (/*CONSTCOND*/0)
12893
12894 aprint_normal_dev(sc->sc_dev, "");
12895
12896 if (sc->sc_type == WM_T_I354) {
12897 uint32_t status;
12898
12899 status = CSR_READ(sc, WMREG_STATUS);
12900 if (((status & STATUS_2P5_SKU) != 0)
12901 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12902 ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
12903 } else
12904 ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
12905 } else if (sc->sc_type == WM_T_82545) {
12906 /* Only 82545 is LX (XXX except SFP) */
12907 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12908 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12909 } else if (sc->sc_sfptype != 0) {
12910 /* XXX wm(4) fiber/serdes don't use ifm_data */
12911 switch (sc->sc_sfptype) {
12912 default:
12913 case SFF_SFP_ETH_FLAGS_1000SX:
12914 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12915 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12916 break;
12917 case SFF_SFP_ETH_FLAGS_1000LX:
12918 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12919 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12920 break;
12921 case SFF_SFP_ETH_FLAGS_1000CX:
12922 ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
12923 ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
12924 break;
12925 case SFF_SFP_ETH_FLAGS_1000T:
12926 ADD("1000baseT", IFM_1000_T, 0);
12927 ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
12928 break;
12929 case SFF_SFP_ETH_FLAGS_100FX:
12930 ADD("100baseFX", IFM_100_FX, ANAR_TX);
12931 ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
12932 break;
12933 }
12934 } else {
12935 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12936 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12937 }
12938 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
12939 aprint_normal("\n");
12940
12941 #undef ADD
12942
12943 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
12944 }
12945
12946 /*
12947 * wm_tbi_mediachange: [ifmedia interface function]
12948 *
12949 * Set hardware to newly-selected media on a 1000BASE-X device.
12950 */
12951 static int
12952 wm_tbi_mediachange(struct ifnet *ifp)
12953 {
12954 struct wm_softc *sc = ifp->if_softc;
12955 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12956 uint32_t status, ctrl;
12957 bool signal;
12958 int i;
12959
12960 KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
12961 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12962 /* XXX need some work for >= 82571 and < 82575 */
12963 if (sc->sc_type < WM_T_82575)
12964 return 0;
12965 }
12966
12967 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12968 || (sc->sc_type >= WM_T_82575))
12969 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12970
12971 sc->sc_ctrl &= ~CTRL_LRST;
12972 sc->sc_txcw = TXCW_ANE;
12973 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12974 sc->sc_txcw |= TXCW_FD | TXCW_HD;
12975 else if (ife->ifm_media & IFM_FDX)
12976 sc->sc_txcw |= TXCW_FD;
12977 else
12978 sc->sc_txcw |= TXCW_HD;
12979
12980 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
12981 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
12982
12983 DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
12984 device_xname(sc->sc_dev), sc->sc_txcw));
12985 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12986 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12987 CSR_WRITE_FLUSH(sc);
12988 delay(1000);
12989
12990 ctrl = CSR_READ(sc, WMREG_CTRL);
12991 signal = wm_tbi_havesignal(sc, ctrl);
12992
12993 DPRINTF(sc, WM_DEBUG_LINK,
12994 ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
12995
12996 if (signal) {
12997 /* Have signal; wait for the link to come up. */
12998 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
12999 delay(10000);
13000 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
13001 break;
13002 }
13003
13004 DPRINTF(sc, WM_DEBUG_LINK,
13005 ("%s: i = %d after waiting for link\n",
13006 device_xname(sc->sc_dev), i));
13007
13008 status = CSR_READ(sc, WMREG_STATUS);
13009 DPRINTF(sc, WM_DEBUG_LINK,
13010 ("%s: status after final read = 0x%x, STATUS_LU = %#"
13011 __PRIxBIT "\n",
13012 device_xname(sc->sc_dev), status, STATUS_LU));
13013 if (status & STATUS_LU) {
13014 /* Link is up. */
13015 DPRINTF(sc, WM_DEBUG_LINK,
13016 ("%s: LINK: set media -> link up %s\n",
13017 device_xname(sc->sc_dev),
13018 (status & STATUS_FD) ? "FDX" : "HDX"));
13019
13020 /*
13021 * NOTE: The hardware updates TFCE and RFCE in CTRL
13022 * automatically, so re-read CTRL to keep sc->sc_ctrl in sync.
13023 */
13024 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
13025 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
13026 sc->sc_fcrtl &= ~FCRTL_XONE;
13027 if (status & STATUS_FD)
13028 sc->sc_tctl |=
13029 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
13030 else
13031 sc->sc_tctl |=
13032 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
13033 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
13034 sc->sc_fcrtl |= FCRTL_XONE;
13035 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
13036 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
13037 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
13038 sc->sc_tbi_linkup = 1;
13039 } else {
13040 if (i == WM_LINKUP_TIMEOUT)
13041 wm_check_for_link(sc);
13042 /* Link is down. */
13043 DPRINTF(sc, WM_DEBUG_LINK,
13044 ("%s: LINK: set media -> link down\n",
13045 device_xname(sc->sc_dev)));
13046 sc->sc_tbi_linkup = 0;
13047 }
13048 } else {
13049 DPRINTF(sc, WM_DEBUG_LINK,
13050 ("%s: LINK: set media -> no signal\n",
13051 device_xname(sc->sc_dev)));
13052 sc->sc_tbi_linkup = 0;
13053 }
13054
13055 wm_tbi_serdes_set_linkled(sc);
13056
13057 return 0;
13058 }
13059
13060 /*
13061 * wm_tbi_mediastatus: [ifmedia interface function]
13062 *
13063 * Get the current interface media status on a 1000BASE-X device.
13064 */
13065 static void
13066 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13067 {
13068 struct wm_softc *sc = ifp->if_softc;
13069 uint32_t ctrl, status;
13070
13071 ifmr->ifm_status = IFM_AVALID;
13072 ifmr->ifm_active = IFM_ETHER;
13073
13074 status = CSR_READ(sc, WMREG_STATUS);
13075 if ((status & STATUS_LU) == 0) {
13076 ifmr->ifm_active |= IFM_NONE;
13077 return;
13078 }
13079
13080 ifmr->ifm_status |= IFM_ACTIVE;
13081 /* Only 82545 is LX */
13082 if (sc->sc_type == WM_T_82545)
13083 ifmr->ifm_active |= IFM_1000_LX;
13084 else
13085 ifmr->ifm_active |= IFM_1000_SX;
13086 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
13087 ifmr->ifm_active |= IFM_FDX;
13088 else
13089 ifmr->ifm_active |= IFM_HDX;
13090 ctrl = CSR_READ(sc, WMREG_CTRL);
13091 if (ctrl & CTRL_RFCE)
13092 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
13093 if (ctrl & CTRL_TFCE)
13094 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
13095 }
13096
13097 /* XXX TBI only */
13098 static int
13099 wm_check_for_link(struct wm_softc *sc)
13100 {
13101 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
13102 uint32_t rxcw;
13103 uint32_t ctrl;
13104 uint32_t status;
13105 bool signal;
13106
13107 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
13108 device_xname(sc->sc_dev), __func__));
13109
13110 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
13111 /* XXX need some work for >= 82571 */
13112 if (sc->sc_type >= WM_T_82571) {
13113 sc->sc_tbi_linkup = 1;
13114 return 0;
13115 }
13116 }
13117
13118 rxcw = CSR_READ(sc, WMREG_RXCW);
13119 ctrl = CSR_READ(sc, WMREG_CTRL);
13120 status = CSR_READ(sc, WMREG_STATUS);
13121 signal = wm_tbi_havesignal(sc, ctrl);
13122
13123 DPRINTF(sc, WM_DEBUG_LINK,
13124 ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
13125 device_xname(sc->sc_dev), __func__, signal,
13126 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
13127
13128 /*
13129 * SWDPIN LU RXCW
13130 * 0 0 0
13131 * 0 0 1 (should not happen)
13132 * 0 1 0 (should not happen)
13133 * 0 1 1 (should not happen)
13134 * 1 0 0 Disable autonegotiation and force link-up
13135 * 1 0 1 Got /C/ but the link is not up yet
13136 * 1 1 0 (link up)
13137 * 1 1 1 If IFM_AUTO, go back to autonegotiation
13138 */
13140 if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
13141 DPRINTF(sc, WM_DEBUG_LINK,
13142 ("%s: %s: force linkup and fullduplex\n",
13143 device_xname(sc->sc_dev), __func__));
13144 sc->sc_tbi_linkup = 0;
13145 /* Disable auto-negotiation in the TXCW register */
13146 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
13147
13148 /*
13149 * Force link-up and also force full-duplex.
13150 *
13151 * NOTE: The hardware has updated TFCE and RFCE in CTRL
13152 * automatically, so use the just-read ctrl value for sc->sc_ctrl.
13153 */
13154 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
13155 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13156 } else if (((status & STATUS_LU) != 0)
13157 && ((rxcw & RXCW_C) != 0)
13158 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
13159 sc->sc_tbi_linkup = 1;
13160 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
13161 device_xname(sc->sc_dev), __func__));
13162 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13163 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
13164 } else if (signal && ((rxcw & RXCW_C) != 0)) {
13165 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
13166 device_xname(sc->sc_dev), __func__));
13167 } else {
13168 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
13169 device_xname(sc->sc_dev), __func__, rxcw, ctrl,
13170 status));
13171 }
13172
13173 return 0;
13174 }
13175
13176 /*
13177 * wm_tbi_tick:
13178 *
13179 * Check the link on TBI devices.
13180 * This function acts as mii_tick().
13181 */
13182 static void
13183 wm_tbi_tick(struct wm_softc *sc)
13184 {
13185 struct mii_data *mii = &sc->sc_mii;
13186 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13187 uint32_t status;
13188
13189 KASSERT(mutex_owned(sc->sc_core_lock));
13190
13191 status = CSR_READ(sc, WMREG_STATUS);
13192
13193 /* XXX is this needed? */
13194 (void)CSR_READ(sc, WMREG_RXCW);
13195 (void)CSR_READ(sc, WMREG_CTRL);
13196
13197 /* Set link status */
13198 if ((status & STATUS_LU) == 0) {
13199 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
13200 device_xname(sc->sc_dev)));
13201 sc->sc_tbi_linkup = 0;
13202 } else if (sc->sc_tbi_linkup == 0) {
13203 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
13204 device_xname(sc->sc_dev),
13205 (status & STATUS_FD) ? "FDX" : "HDX"));
13206 sc->sc_tbi_linkup = 1;
13207 sc->sc_tbi_serdes_ticks = 0;
13208 }
13209
13210 if ((sc->sc_if_flags & IFF_UP) == 0)
13211 goto setled;
13212
13213 if ((status & STATUS_LU) == 0) {
13214 sc->sc_tbi_linkup = 0;
13215 /* If the timer expired, retry autonegotiation */
13216 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13217 && (++sc->sc_tbi_serdes_ticks
13218 >= sc->sc_tbi_serdes_anegticks)) {
13219 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13220 device_xname(sc->sc_dev), __func__));
13221 sc->sc_tbi_serdes_ticks = 0;
13222 /*
13223 * Reset the link, and let autonegotiation do
13224 * its thing
13225 */
13226 sc->sc_ctrl |= CTRL_LRST;
13227 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13228 CSR_WRITE_FLUSH(sc);
13229 delay(1000);
13230 sc->sc_ctrl &= ~CTRL_LRST;
13231 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13232 CSR_WRITE_FLUSH(sc);
13233 delay(1000);
13234 CSR_WRITE(sc, WMREG_TXCW,
13235 sc->sc_txcw & ~TXCW_ANE);
13236 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13237 }
13238 }
13239
13240 setled:
13241 wm_tbi_serdes_set_linkled(sc);
13242 }
13243
13244 /* SERDES related */
13245 static void
13246 wm_serdes_power_up_link_82575(struct wm_softc *sc)
13247 {
13248 uint32_t reg;
13249
13250 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13251 && ((sc->sc_flags & WM_F_SGMII) == 0))
13252 return;
13253
13254 /* Enable PCS to turn on link */
13255 reg = CSR_READ(sc, WMREG_PCS_CFG);
13256 reg |= PCS_CFG_PCS_EN;
13257 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
13258
13259 /* Power up the laser */
13260 reg = CSR_READ(sc, WMREG_CTRL_EXT);
13261 reg &= ~CTRL_EXT_SWDPIN(3);
13262 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13263
13264 /* Flush the write to verify completion */
13265 CSR_WRITE_FLUSH(sc);
13266 delay(1000);
13267 }
13268
13269 static int
13270 wm_serdes_mediachange(struct ifnet *ifp)
13271 {
13272 struct wm_softc *sc = ifp->if_softc;
13273 bool pcs_autoneg = true; /* XXX */
13274 uint32_t ctrl_ext, pcs_lctl, reg;
13275
13276 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13277 && ((sc->sc_flags & WM_F_SGMII) == 0))
13278 return 0;
13279
13280 /* XXX Currently, this function is not called on 8257[12] */
13281 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
13282 || (sc->sc_type >= WM_T_82575))
13283 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
13284
13285 /* Power on the sfp cage if present */
13286 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13287 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13288 ctrl_ext |= CTRL_EXT_I2C_ENA;
13289 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13290
13291 sc->sc_ctrl |= CTRL_SLU;
13292
13293 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
13294 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
13295
13296 reg = CSR_READ(sc, WMREG_CONNSW);
13297 reg |= CONNSW_ENRGSRC;
13298 CSR_WRITE(sc, WMREG_CONNSW, reg);
13299 }
13300
13301 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
13302 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
13303 case CTRL_EXT_LINK_MODE_SGMII:
13304 /* SGMII mode lets the phy handle forcing speed/duplex */
13305 pcs_autoneg = true;
13306 /* The autoneg timeout should be disabled for SGMII mode */
13307 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
13308 break;
13309 case CTRL_EXT_LINK_MODE_1000KX:
13310 pcs_autoneg = false;
13311 /* FALLTHROUGH */
13312 default:
13313 if ((sc->sc_type == WM_T_82575)
13314 || (sc->sc_type == WM_T_82576)) {
13315 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
13316 pcs_autoneg = false;
13317 }
13318 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
13319 | CTRL_FRCFDX;
13320
13321 /* Set speed of 1000/Full if speed/duplex is forced */
13322 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
13323 }
13324 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13325
13326 pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
13327 PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
13328
13329 if (pcs_autoneg) {
13330 /* Set PCS register for autoneg */
13331 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
13332
13333 /* Disable force flow control for autoneg */
13334 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
13335
13336 /* Configure flow control advertisement for autoneg */
13337 reg = CSR_READ(sc, WMREG_PCS_ANADV);
13338 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
13339 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
13340 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
13341 } else
13342 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
13343
13344 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
13345
13346 return 0;
13347 }
13348
13349 static void
13350 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13351 {
13352 struct wm_softc *sc = ifp->if_softc;
13353 struct mii_data *mii = &sc->sc_mii;
13354 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13355 uint32_t pcs_adv, pcs_lpab, reg;
13356
13357 ifmr->ifm_status = IFM_AVALID;
13358 ifmr->ifm_active = IFM_ETHER;
13359
13360 /* Check PCS */
13361 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13362 if ((reg & PCS_LSTS_LINKOK) == 0) {
13363 ifmr->ifm_active |= IFM_NONE;
13364 sc->sc_tbi_linkup = 0;
13365 goto setled;
13366 }
13367
13368 sc->sc_tbi_linkup = 1;
13369 ifmr->ifm_status |= IFM_ACTIVE;
13370 if (sc->sc_type == WM_T_I354) {
13371 uint32_t status;
13372
13373 status = CSR_READ(sc, WMREG_STATUS);
13374 if (((status & STATUS_2P5_SKU) != 0)
13375 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
13376 ifmr->ifm_active |= IFM_2500_KX;
13377 } else
13378 ifmr->ifm_active |= IFM_1000_KX;
13379 } else {
13380 switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
13381 case PCS_LSTS_SPEED_10:
13382 ifmr->ifm_active |= IFM_10_T; /* XXX */
13383 break;
13384 case PCS_LSTS_SPEED_100:
13385 ifmr->ifm_active |= IFM_100_FX; /* XXX */
13386 break;
13387 case PCS_LSTS_SPEED_1000:
13388 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13389 break;
13390 default:
13391 device_printf(sc->sc_dev, "Unknown speed\n");
13392 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13393 break;
13394 }
13395 }
13396 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
13397 if ((reg & PCS_LSTS_FDX) != 0)
13398 ifmr->ifm_active |= IFM_FDX;
13399 else
13400 ifmr->ifm_active |= IFM_HDX;
13401 mii->mii_media_active &= ~IFM_ETH_FMASK;
13402 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
13403 /* Check flow */
13404 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13405 if ((reg & PCS_LSTS_AN_COMP) == 0) {
13406 DPRINTF(sc, WM_DEBUG_LINK,
13407 ("XXX LINKOK but not ACOMP\n"));
13408 goto setled;
13409 }
13410 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
13411 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
13412 DPRINTF(sc, WM_DEBUG_LINK,
13413 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
13414 if ((pcs_adv & TXCW_SYM_PAUSE)
13415 && (pcs_lpab & TXCW_SYM_PAUSE)) {
13416 mii->mii_media_active |= IFM_FLOW
13417 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
13418 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
13419 && (pcs_adv & TXCW_ASYM_PAUSE)
13420 && (pcs_lpab & TXCW_SYM_PAUSE)
13421 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13422 mii->mii_media_active |= IFM_FLOW
13423 | IFM_ETH_TXPAUSE;
13424 } else if ((pcs_adv & TXCW_SYM_PAUSE)
13425 && (pcs_adv & TXCW_ASYM_PAUSE)
13426 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
13427 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13428 mii->mii_media_active |= IFM_FLOW
13429 | IFM_ETH_RXPAUSE;
13430 }
13431 }
13432 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
13433 | (mii->mii_media_active & IFM_ETH_FMASK);
13434 setled:
13435 wm_tbi_serdes_set_linkled(sc);
13436 }
13437
13438 /*
13439 * wm_serdes_tick:
13440 *
13441 * Check the link on serdes devices.
13442 */
13443 static void
13444 wm_serdes_tick(struct wm_softc *sc)
13445 {
13446 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
13447 struct mii_data *mii = &sc->sc_mii;
13448 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13449 uint32_t reg;
13450
13451 KASSERT(mutex_owned(sc->sc_core_lock));
13452
13453 mii->mii_media_status = IFM_AVALID;
13454 mii->mii_media_active = IFM_ETHER;
13455
13456 /* Check PCS */
13457 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13458 if ((reg & PCS_LSTS_LINKOK) != 0) {
13459 mii->mii_media_status |= IFM_ACTIVE;
13460 sc->sc_tbi_linkup = 1;
13461 sc->sc_tbi_serdes_ticks = 0;
13462 mii->mii_media_active |= IFM_1000_SX; /* XXX */
13463 if ((reg & PCS_LSTS_FDX) != 0)
13464 mii->mii_media_active |= IFM_FDX;
13465 else
13466 mii->mii_media_active |= IFM_HDX;
13467 } else {
13468 mii->mii_media_status |= IFM_NONE;
13469 sc->sc_tbi_linkup = 0;
13470 /* If the timer expired, retry autonegotiation */
13471 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13472 && (++sc->sc_tbi_serdes_ticks
13473 >= sc->sc_tbi_serdes_anegticks)) {
13474 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13475 device_xname(sc->sc_dev), __func__));
13476 sc->sc_tbi_serdes_ticks = 0;
13477 /* XXX */
13478 wm_serdes_mediachange(ifp);
13479 }
13480 }
13481
13482 wm_tbi_serdes_set_linkled(sc);
13483 }
13484
13485 /* SFP related */
13486
13487 static int
13488 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
13489 {
13490 uint32_t i2ccmd;
13491 int i;
13492
13493 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
13494 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13495
13496 /* Poll the ready bit */
13497 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13498 delay(50);
13499 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13500 if (i2ccmd & I2CCMD_READY)
13501 break;
13502 }
13503 if ((i2ccmd & I2CCMD_READY) == 0)
13504 return -1;
13505 if ((i2ccmd & I2CCMD_ERROR) != 0)
13506 return -1;
13507
13508 *data = i2ccmd & 0x00ff;
13509
13510 return 0;
13511 }
13512
13513 static uint32_t
13514 wm_sfp_get_media_type(struct wm_softc *sc)
13515 {
13516 uint32_t ctrl_ext;
13517 uint8_t val = 0;
13518 int timeout = 3;
13519 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
13520 int rv = -1;
13521
13522 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13523 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13524 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
13525 CSR_WRITE_FLUSH(sc);
13526
13527 /* Read SFP module data */
13528 while (timeout) {
13529 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
13530 if (rv == 0)
13531 break;
13532 delay(100*1000); /* XXX too big */
13533 timeout--;
13534 }
13535 if (rv != 0)
13536 goto out;
13537
13538 switch (val) {
13539 case SFF_SFP_ID_SFF:
13540 aprint_normal_dev(sc->sc_dev,
13541 "Module/Connector soldered to board\n");
13542 break;
13543 case SFF_SFP_ID_SFP:
13544 sc->sc_flags |= WM_F_SFP;
13545 break;
13546 case SFF_SFP_ID_UNKNOWN:
13547 goto out;
13548 default:
13549 break;
13550 }
13551
13552 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
13553 if (rv != 0)
13554 goto out;
13555
13556 sc->sc_sfptype = val;
13557 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
13558 mediatype = WM_MEDIATYPE_SERDES;
13559 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
13560 sc->sc_flags |= WM_F_SGMII;
13561 mediatype = WM_MEDIATYPE_COPPER;
13562 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
13563 sc->sc_flags |= WM_F_SGMII;
13564 mediatype = WM_MEDIATYPE_SERDES;
13565 } else {
13566 device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
13567 __func__, sc->sc_sfptype);
13568 sc->sc_sfptype = 0; /* XXX unknown */
13569 }
13570
13571 out:
13572 /* Restore I2C interface setting */
13573 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13574
13575 return mediatype;
13576 }
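
/*
 * A worked example of the decoding above (illustrative only): a common
 * 1000BASE-SX module reports SFF_SFP_ID_SFP at the identifier offset
 * and sets the 1000SX bit in its Ethernet compliance byte, so the
 * function sets WM_F_SFP, records the type in sc_sfptype and returns
 * WM_MEDIATYPE_SERDES.
 */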
13577
13578 /*
13579 * NVM related.
13580 * Microwire, SPI (w/wo EERD) and Flash.
13581 */
13582
13583 /* Both SPI and Microwire */
13584
13585 /*
13586 * wm_eeprom_sendbits:
13587 *
13588 * Send a series of bits to the EEPROM.
13589 */
13590 static void
13591 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
13592 {
13593 uint32_t reg;
13594 int x;
13595
13596 reg = CSR_READ(sc, WMREG_EECD);
13597
13598 for (x = nbits; x > 0; x--) {
13599 if (bits & (1U << (x - 1)))
13600 reg |= EECD_DI;
13601 else
13602 reg &= ~EECD_DI;
13603 CSR_WRITE(sc, WMREG_EECD, reg);
13604 CSR_WRITE_FLUSH(sc);
13605 delay(2);
13606 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13607 CSR_WRITE_FLUSH(sc);
13608 delay(2);
13609 CSR_WRITE(sc, WMREG_EECD, reg);
13610 CSR_WRITE_FLUSH(sc);
13611 delay(2);
13612 }
13613 }
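
/*
 * A concrete example of the bit-banging above (a sketch, not driver
 * code): bits go out MSB first on DI and each bit is latched by a SK
 * low-high-low pulse, so shifting out the 3-bit Microwire READ opcode
 * (assuming the usual 110b encoding of UWIRE_OPC_READ) proceeds as:
 *
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
 *		bit 2: DI = 1, pulse SK
 *		bit 1: DI = 1, pulse SK
 *		bit 0: DI = 0, pulse SK
 */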
13614
13615 /*
13616 * wm_eeprom_recvbits:
13617 *
13618 * Receive a series of bits from the EEPROM.
13619 */
13620 static void
13621 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
13622 {
13623 uint32_t reg, val;
13624 int x;
13625
13626 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
13627
13628 val = 0;
13629 for (x = nbits; x > 0; x--) {
13630 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13631 CSR_WRITE_FLUSH(sc);
13632 delay(2);
13633 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
13634 val |= (1U << (x - 1));
13635 CSR_WRITE(sc, WMREG_EECD, reg);
13636 CSR_WRITE_FLUSH(sc);
13637 delay(2);
13638 }
13639 *valp = val;
13640 }
13641
13642 /* Microwire */
13643
13644 /*
13645 * wm_nvm_read_uwire:
13646 *
13647 * Read a word from the EEPROM using the MicroWire protocol.
13648 */
13649 static int
13650 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13651 {
13652 uint32_t reg, val;
13653 int i, rv;
13654
13655 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13656 device_xname(sc->sc_dev), __func__));
13657
13658 rv = sc->nvm.acquire(sc);
13659 if (rv != 0)
13660 return rv;
13661
13662 for (i = 0; i < wordcnt; i++) {
13663 /* Clear SK and DI. */
13664 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
13665 CSR_WRITE(sc, WMREG_EECD, reg);
13666
13667 /*
13668 * XXX: workaround for a bug in qemu-0.12.x and prior
13669 * and Xen.
13670 *
13671 * We use this workaround only for 82540 because qemu's
13672 * e1000 act as 82540.
13673 */
13674 if (sc->sc_type == WM_T_82540) {
13675 reg |= EECD_SK;
13676 CSR_WRITE(sc, WMREG_EECD, reg);
13677 reg &= ~EECD_SK;
13678 CSR_WRITE(sc, WMREG_EECD, reg);
13679 CSR_WRITE_FLUSH(sc);
13680 delay(2);
13681 }
13682 /* XXX: end of workaround */
13683
13684 /* Set CHIP SELECT. */
13685 reg |= EECD_CS;
13686 CSR_WRITE(sc, WMREG_EECD, reg);
13687 CSR_WRITE_FLUSH(sc);
13688 delay(2);
13689
13690 /* Shift in the READ command. */
13691 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
13692
13693 /* Shift in address. */
13694 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
13695
13696 /* Shift out the data. */
13697 wm_eeprom_recvbits(sc, &val, 16);
13698 data[i] = val & 0xffff;
13699
13700 /* Clear CHIP SELECT. */
13701 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
13702 CSR_WRITE(sc, WMREG_EECD, reg);
13703 CSR_WRITE_FLUSH(sc);
13704 delay(2);
13705 }
13706
13707 sc->nvm.release(sc);
13708 return 0;
13709 }
13710
13711 /* SPI */
13712
13713 /*
13714 * Set SPI and FLASH related information from the EECD register.
13715 * For 82541 and 82547, the word size is taken from EEPROM.
13716 */
13717 static int
13718 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
13719 {
13720 int size;
13721 uint32_t reg;
13722 uint16_t data;
13723
13724 reg = CSR_READ(sc, WMREG_EECD);
13725 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
13726
13727 /* Read the size of NVM from EECD by default */
13728 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13729 switch (sc->sc_type) {
13730 case WM_T_82541:
13731 case WM_T_82541_2:
13732 case WM_T_82547:
13733 case WM_T_82547_2:
13734 /* Set dummy value to access EEPROM */
13735 sc->sc_nvm_wordsize = 64;
13736 if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
13737 aprint_error_dev(sc->sc_dev,
13738 "%s: failed to read EEPROM size\n", __func__);
13739 }
13740 reg = data;
13741 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13742 if (size == 0)
13743 size = 6; /* 64 word size */
13744 else
13745 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
13746 break;
13747 case WM_T_80003:
13748 case WM_T_82571:
13749 case WM_T_82572:
13750 case WM_T_82573: /* SPI case */
13751 case WM_T_82574: /* SPI case */
13752 case WM_T_82583: /* SPI case */
13753 size += NVM_WORD_SIZE_BASE_SHIFT;
13754 if (size > 14)
13755 size = 14;
13756 break;
13757 case WM_T_82575:
13758 case WM_T_82576:
13759 case WM_T_82580:
13760 case WM_T_I350:
13761 case WM_T_I354:
13762 case WM_T_I210:
13763 case WM_T_I211:
13764 size += NVM_WORD_SIZE_BASE_SHIFT;
13765 if (size > 15)
13766 size = 15;
13767 break;
13768 default:
13769 aprint_error_dev(sc->sc_dev,
13770 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
13771 return -1;
13773 }
13774
13775 sc->sc_nvm_wordsize = 1 << size;
13776
13777 return 0;
13778 }
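
/*
 * A worked example of the size computation above (illustrative only,
 * assuming the usual NVM_WORD_SIZE_BASE_SHIFT of 6): a raw EECD size
 * field of 2 yields size = 2 + 6 = 8, so sc_nvm_wordsize becomes
 * 1 << 8 = 256 words (512 bytes). The clamps to 14 and 15 cap the
 * word size at 16K and 32K words, respectively.
 */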
13779
13780 /*
13781 * wm_nvm_ready_spi:
13782 *
13783 * Wait for a SPI EEPROM to be ready for commands.
13784 */
13785 static int
13786 wm_nvm_ready_spi(struct wm_softc *sc)
13787 {
13788 uint32_t val;
13789 int usec;
13790
13791 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13792 device_xname(sc->sc_dev), __func__));
13793
13794 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
13795 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
13796 wm_eeprom_recvbits(sc, &val, 8);
13797 if ((val & SPI_SR_RDY) == 0)
13798 break;
13799 }
13800 if (usec >= SPI_MAX_RETRIES) {
13801 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
13802 return -1;
13803 }
13804 return 0;
13805 }
13806
13807 /*
13808 * wm_nvm_read_spi:
13809 *
13810 * Read a word from the EEPROM using the SPI protocol.
13811 */
13812 static int
13813 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13814 {
13815 uint32_t reg, val;
13816 int i;
13817 uint8_t opc;
13818 int rv;
13819
13820 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13821 device_xname(sc->sc_dev), __func__));
13822
13823 rv = sc->nvm.acquire(sc);
13824 if (rv != 0)
13825 return rv;
13826
13827 /* Clear SK and CS. */
13828 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
13829 CSR_WRITE(sc, WMREG_EECD, reg);
13830 CSR_WRITE_FLUSH(sc);
13831 delay(2);
13832
13833 if ((rv = wm_nvm_ready_spi(sc)) != 0)
13834 goto out;
13835
13836 /* Toggle CS to flush commands. */
13837 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
13838 CSR_WRITE_FLUSH(sc);
13839 delay(2);
13840 CSR_WRITE(sc, WMREG_EECD, reg);
13841 CSR_WRITE_FLUSH(sc);
13842 delay(2);
13843
13844 opc = SPI_OPC_READ;
13845 if (sc->sc_nvm_addrbits == 8 && word >= 128)
13846 opc |= SPI_OPC_A8;
13847
13848 wm_eeprom_sendbits(sc, opc, 8);
13849 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
13850
13851 for (i = 0; i < wordcnt; i++) {
13852 wm_eeprom_recvbits(sc, &val, 16);
13853 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
13854 }
13855
13856 /* Raise CS and clear SK. */
13857 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
13858 CSR_WRITE(sc, WMREG_EECD, reg);
13859 CSR_WRITE_FLUSH(sc);
13860 delay(2);
13861
13862 out:
13863 sc->nvm.release(sc);
13864 return rv;
13865 }
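
/*
 * A worked example of the SPI addressing above (a sketch): parts with
 * 8-bit addressing carry the ninth address bit in the opcode, so
 * reading word 0x90 (byte address 0x120) becomes:
 *
 *	opc = SPI_OPC_READ | SPI_OPC_A8;	// word >= 128
 *	wm_eeprom_sendbits(sc, opc, 8);
 *	wm_eeprom_sendbits(sc, 0x90 << 1, 8);	// low 8 bits: 0x20
 */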
13866
13867 /* Reading via the EERD register */
13868
13869 static int
13870 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
13871 {
13872 uint32_t attempts = 100000;
13873 uint32_t i, reg = 0;
13874 int32_t done = -1;
13875
13876 for (i = 0; i < attempts; i++) {
13877 reg = CSR_READ(sc, rw);
13878
13879 if (reg & EERD_DONE) {
13880 done = 0;
13881 break;
13882 }
13883 delay(5);
13884 }
13885
13886 return done;
13887 }
13888
13889 static int
13890 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
13891 {
13892 int i, eerd = 0;
13893 int rv;
13894
13895 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13896 device_xname(sc->sc_dev), __func__));
13897
13898 rv = sc->nvm.acquire(sc);
13899 if (rv != 0)
13900 return rv;
13901
13902 for (i = 0; i < wordcnt; i++) {
13903 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
13904 CSR_WRITE(sc, WMREG_EERD, eerd);
13905 rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
13906 if (rv != 0) {
13907 aprint_error_dev(sc->sc_dev, "EERD polling failed: "
13908 "offset=%d, wordcnt=%d\n", offset, wordcnt);
13909 break;
13910 }
13911 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
13912 }
13913
13914 sc->nvm.release(sc);
13915 return rv;
13916 }
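
/*
 * The EERD register packs the whole transaction into one register
 * (illustrative summary): software writes the word address together
 * with the START bit, and hardware sets DONE once the word is
 * available in the data field. Reading word 0x10, for example:
 *
 *	CSR_WRITE(sc, WMREG_EERD, (0x10 << EERD_ADDR_SHIFT) | EERD_START);
 *	// ... poll until EERD_DONE is set ...
 *	data = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
 */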
13917
13918 /* Flash */
13919
13920 static int
13921 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
13922 {
13923 uint32_t eecd;
13924 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
13925 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
13926 uint32_t nvm_dword = 0;
13927 uint8_t sig_byte = 0;
13928 int rv;
13929
13930 switch (sc->sc_type) {
13931 case WM_T_PCH_SPT:
13932 case WM_T_PCH_CNP:
13933 bank1_offset = sc->sc_ich8_flash_bank_size * 2;
13934 act_offset = ICH_NVM_SIG_WORD * 2;
13935
13936 /* Set bank to 0 in case flash read fails. */
13937 *bank = 0;
13938
13939 /* Check bank 0 */
13940 rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
13941 if (rv != 0)
13942 return rv;
13943 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13944 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13945 *bank = 0;
13946 return 0;
13947 }
13948
13949 /* Check bank 1 */
13950 rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
13951 &nvm_dword);
if (rv != 0)
return rv;
13952 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13953 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13954 *bank = 1;
13955 return 0;
13956 }
13957 aprint_error_dev(sc->sc_dev,
13958 "%s: no valid NVM bank present (%u)\n", __func__, *bank);
13959 return -1;
13960 case WM_T_ICH8:
13961 case WM_T_ICH9:
13962 eecd = CSR_READ(sc, WMREG_EECD);
13963 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
13964 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
13965 return 0;
13966 }
13967 /* FALLTHROUGH */
13968 default:
13969 /* Default to 0 */
13970 *bank = 0;
13971
13972 /* Check bank 0 */
13973 wm_read_ich8_byte(sc, act_offset, &sig_byte);
13974 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13975 *bank = 0;
13976 return 0;
13977 }
13978
13979 /* Check bank 1 */
13980 wm_read_ich8_byte(sc, act_offset + bank1_offset,
13981 &sig_byte);
13982 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13983 *bank = 1;
13984 return 0;
13985 }
13986 }
13987
13988 DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
13989 device_xname(sc->sc_dev)));
13990 return -1;
13991 }
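
/*
 * Illustrative note on the signature test above: the byte extracted
 * from each bank's signature word carries a validity pattern in its
 * top bits, so masking with ICH_NVM_VALID_SIG_MASK and comparing
 * against ICH_NVM_SIG_VALUE accepts a byte of the form 10xxxxxxb
 * (assuming the usual encoding) while rejecting erased (0xff) or
 * zeroed bytes.
 */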
13992
13993 /******************************************************************************
13994 * This function does initial flash setup so that a new read/write/erase cycle
13995 * can be started.
13996 *
13997 * sc - The pointer to the hw structure
13998 ****************************************************************************/
13999 static int32_t
14000 wm_ich8_cycle_init(struct wm_softc *sc)
14001 {
14002 uint16_t hsfsts;
14003 int32_t error = 1;
14004 int32_t i = 0;
14005
14006 if (sc->sc_type >= WM_T_PCH_SPT)
14007 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
14008 else
14009 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14010
14011 /* Check the Flash Descriptor Valid bit in the HW status */
14012 if ((hsfsts & HSFSTS_FLDVAL) == 0)
14013 return error;
14014
14015 /* Clear FCERR and DAEL in the HW status by writing 1s */
14017 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
14018
14019 if (sc->sc_type >= WM_T_PCH_SPT)
14020 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
14021 else
14022 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14023
14024 /*
14025 * Either we should have a hardware SPI cycle-in-progress bit to
14026 * check against before starting a new cycle, or the FDONE bit
14027 * should be changed in the hardware so that it reads 1 after a
14028 * hardware reset, which could then indicate whether a cycle is in
14029 * progress or has completed. We should also have some software
14030 * semaphore mechanism guarding FDONE or the cycle-in-progress bit
14031 * so that two threads' accesses are serialized, or some other way
14032 * to keep two threads from starting a cycle at the same time.
14033 */
14034
14035 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14036 /*
14037 * There is no cycle running at present, so we can start a
14038 * cycle
14039 */
14040
14041 /* Begin by setting Flash Cycle Done. */
14042 hsfsts |= HSFSTS_DONE;
14043 if (sc->sc_type >= WM_T_PCH_SPT)
14044 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14045 hsfsts & 0xffffUL);
14046 else
14047 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14048 error = 0;
14049 } else {
14050 /*
14051 * Otherwise poll for a while so the current cycle has a
14052 * chance to end before giving up.
14053 */
14054 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
14055 if (sc->sc_type >= WM_T_PCH_SPT)
14056 hsfsts = ICH8_FLASH_READ32(sc,
14057 ICH_FLASH_HSFSTS) & 0xffffUL;
14058 else
14059 hsfsts = ICH8_FLASH_READ16(sc,
14060 ICH_FLASH_HSFSTS);
14061 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14062 error = 0;
14063 break;
14064 }
14065 delay(1);
14066 }
14067 if (error == 0) {
14068 /*
14069 * The previous cycle ended before we timed out;
14070 * now set the Flash Cycle Done.
14071 */
14072 hsfsts |= HSFSTS_DONE;
14073 if (sc->sc_type >= WM_T_PCH_SPT)
14074 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14075 hsfsts & 0xffffUL);
14076 else
14077 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
14078 hsfsts);
14079 }
14080 }
14081 return error;
14082 }
14083
14084 /******************************************************************************
14085 * This function starts a flash cycle and waits for its completion
14086 *
14087 * sc - The pointer to the hw structure
14088 ****************************************************************************/
14089 static int32_t
14090 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
14091 {
14092 uint16_t hsflctl;
14093 uint16_t hsfsts;
14094 int32_t error = 1;
14095 uint32_t i = 0;
14096
14097 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
14098 if (sc->sc_type >= WM_T_PCH_SPT)
14099 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
14100 else
14101 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14102 hsflctl |= HSFCTL_GO;
14103 if (sc->sc_type >= WM_T_PCH_SPT)
14104 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14105 (uint32_t)hsflctl << 16);
14106 else
14107 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14108
14109 /* Wait till FDONE bit is set to 1 */
14110 do {
14111 if (sc->sc_type >= WM_T_PCH_SPT)
14112 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14113 & 0xffffUL;
14114 else
14115 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14116 if (hsfsts & HSFSTS_DONE)
14117 break;
14118 delay(1);
14119 i++;
14120 } while (i < timeout);
14121 if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
14122 error = 0;
14123
14124 return error;
14125 }
14126
14127 /******************************************************************************
14128 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
14129 *
14130 * sc - The pointer to the hw structure
14131 * index - The index of the byte or word to read.
14132 * size - Size of data to read, 1=byte 2=word, 4=dword
14133 * data - Pointer to the word to store the value read.
14134 *****************************************************************************/
14135 static int32_t
14136 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
14137 uint32_t size, uint32_t *data)
14138 {
14139 uint16_t hsfsts;
14140 uint16_t hsflctl;
14141 uint32_t flash_linear_address;
14142 uint32_t flash_data = 0;
14143 int32_t error = 1;
14144 int32_t count = 0;
14145
14146 if (size < 1 || size > 4 || data == NULL ||
14147 index > ICH_FLASH_LINEAR_ADDR_MASK)
14148 return error;
14149
14150 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
14151 sc->sc_ich8_flash_base;
14152
14153 do {
14154 delay(1);
14155 /* Steps */
14156 error = wm_ich8_cycle_init(sc);
14157 if (error)
14158 break;
14159
14160 if (sc->sc_type >= WM_T_PCH_SPT)
14161 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14162 >> 16;
14163 else
14164 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14165 /* The byte count field holds size - 1: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes */
14166 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
14167 & HSFCTL_BCOUNT_MASK;
14168 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
14169 if (sc->sc_type >= WM_T_PCH_SPT) {
14170 /*
14171 * In SPT, this register is in LAN memory space, not
14172 * flash. Therefore, only 32-bit access is supported.
14173 */
14174 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14175 (uint32_t)hsflctl << 16);
14176 } else
14177 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14178
14179 /*
14180 * Write the last 24 bits of index into Flash Linear address
14181 * field in Flash Address
14182 */
14183 /* TODO: check the index against the size of the flash */
14184
14185 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
14186
14187 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
14188
14189 /*
14190 * If the cycle succeeded, read the result out of Flash Data0
14191 * (least significant byte first). Otherwise, if FCERR is set,
14192 * clear it and retry the whole sequence up to
14193 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
14194 */
14195 if (error == 0) {
14196 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
14197 if (size == 1)
14198 *data = (uint8_t)(flash_data & 0x000000FF);
14199 else if (size == 2)
14200 *data = (uint16_t)(flash_data & 0x0000FFFF);
14201 else if (size == 4)
14202 *data = (uint32_t)flash_data;
14203 break;
14204 } else {
14205 /*
14206 * If we've gotten here, then things are probably
14207 * completely hosed, but if the error condition is
14208 * detected, it won't hurt to give it another try...
14209 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
14210 */
14211 if (sc->sc_type >= WM_T_PCH_SPT)
14212 hsfsts = ICH8_FLASH_READ32(sc,
14213 ICH_FLASH_HSFSTS) & 0xffffUL;
14214 else
14215 hsfsts = ICH8_FLASH_READ16(sc,
14216 ICH_FLASH_HSFSTS);
14217
14218 if (hsfsts & HSFSTS_ERR) {
14219 /* Repeat for some time before giving up. */
14220 continue;
14221 } else if ((hsfsts & HSFSTS_DONE) == 0)
14222 break;
14223 }
14224 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
14225
14226 return error;
14227 }
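
/*
 * The read sequence above in short (illustrative summary): prepare the
 * cycle (wm_ich8_cycle_init), program the byte count and the READ
 * cycle type into HSFCTL, write the linear flash address to FADDR, set
 * the GO bit (wm_ich8_flash_cycle) and pull the result out of FDATA0.
 * A two-byte read at NVM byte index 0x10 thus turns into a flash read
 * at sc_ich8_flash_base + 0x10.
 */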
14228
14229 /******************************************************************************
14230 * Reads a single byte from the NVM using the ICH8 flash access registers.
14231 *
14232 * sc - pointer to the wm_softc structure
14233 * index - The index of the byte to read.
14234 * data - Pointer to a byte to store the value read.
14235 *****************************************************************************/
14236 static int32_t
14237 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
14238 {
14239 int32_t status;
14240 uint32_t word = 0;
14241
14242 status = wm_read_ich8_data(sc, index, 1, &word);
14243 if (status == 0)
14244 *data = (uint8_t)word;
14245 else
14246 *data = 0;
14247
14248 return status;
14249 }
14250
14251 /******************************************************************************
14252 * Reads a word from the NVM using the ICH8 flash access registers.
14253 *
14254 * sc - pointer to the wm_softc structure
14255 * index - The starting byte index of the word to read.
14256 * data - Pointer to a word to store the value read.
14257 *****************************************************************************/
14258 static int32_t
14259 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
14260 {
14261 int32_t status;
14262 uint32_t word = 0;
14263
14264 status = wm_read_ich8_data(sc, index, 2, &word);
14265 if (status == 0)
14266 *data = (uint16_t)word;
14267 else
14268 *data = 0;
14269
14270 return status;
14271 }
14272
14273 /******************************************************************************
14274 * Reads a dword from the NVM using the ICH8 flash access registers.
14275 *
14276 * sc - pointer to the wm_softc structure
14277 * index - The starting byte index of the dword to read.
14278 * data - Pointer to a dword to store the value read.
14279 *****************************************************************************/
14280 static int32_t
14281 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
14282 {
14283 int32_t status;
14284
14285 status = wm_read_ich8_data(sc, index, 4, data);
14286 return status;
14287 }
14288
14289 /******************************************************************************
14290 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
14291 * register.
14292 *
14293 * sc - Struct containing variables accessed by shared code
14294 * offset - offset of word in the EEPROM to read
14295 * data - word read from the EEPROM
14296 * words - number of words to read
14297 *****************************************************************************/
14298 static int
14299 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
14300 {
14301 int rv;
14302 uint32_t flash_bank = 0;
14303 uint32_t act_offset = 0;
14304 uint32_t bank_offset = 0;
14305 uint16_t word = 0;
14306 uint16_t i = 0;
14307
14308 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14309 device_xname(sc->sc_dev), __func__));
14310
14311 rv = sc->nvm.acquire(sc);
14312 if (rv != 0)
14313 return rv;
14314
14315 /*
14316 * We need to know which is the valid flash bank. In the event
14317 * that we didn't allocate eeprom_shadow_ram, we may not be
14318 * managing flash_bank. So it cannot be trusted and needs
14319 * to be updated with each read.
14320 */
14321 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14322 if (rv) {
14323 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14324 device_xname(sc->sc_dev)));
14325 flash_bank = 0;
14326 }
14327
14328 /*
14329 * Adjust offset appropriately if we're on bank 1 - adjust for word
14330 * size
14331 */
14332 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14333
14334 for (i = 0; i < words; i++) {
14335 /* The NVM part needs a byte offset, hence * 2 */
14336 act_offset = bank_offset + ((offset + i) * 2);
14337 rv = wm_read_ich8_word(sc, act_offset, &word);
14338 if (rv) {
14339 aprint_error_dev(sc->sc_dev,
14340 "%s: failed to read NVM\n", __func__);
14341 break;
14342 }
14343 data[i] = word;
14344 }
14345
14346 sc->nvm.release(sc);
14347 return rv;
14348 }
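
/*
 * A worked example of the offset arithmetic above (illustrative only):
 * with a flash bank size of 0x800 words, bank 1 starts at byte offset
 * 0x800 * 2 = 0x1000, so reading NVM word 5 from bank 1 accesses flash
 * byte offset 0x1000 + 5 * 2 = 0x100a.
 */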
14349
14350 /******************************************************************************
14351 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
14352 * register.
14353 *
14354 * sc - Struct containing variables accessed by shared code
14355 * offset - offset of word in the EEPROM to read
14356 * data - word read from the EEPROM
14357 * words - number of words to read
14358 *****************************************************************************/
14359 static int
14360 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
14361 {
14362 int rv;
14363 uint32_t flash_bank = 0;
14364 uint32_t act_offset = 0;
14365 uint32_t bank_offset = 0;
14366 uint32_t dword = 0;
14367 uint16_t i = 0;
14368
14369 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14370 device_xname(sc->sc_dev), __func__));
14371
14372 rv = sc->nvm.acquire(sc);
14373 if (rv != 0)
14374 return rv;
14375
14376 /*
14377 * We need to know which is the valid flash bank. In the event
14378 * that we didn't allocate eeprom_shadow_ram, we may not be
14379 * managing flash_bank. So it cannot be trusted and needs
14380 * to be updated with each read.
14381 */
14382 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14383 if (rv) {
14384 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14385 device_xname(sc->sc_dev)));
14386 flash_bank = 0;
14387 }
14388
14389 /*
14390 * Adjust offset appropriately if we're on bank 1 - adjust for word
14391 * size
14392 */
14393 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14394
14395 for (i = 0; i < words; i++) {
14396 /* The NVM part needs a byte offset, hence * 2 */
14397 act_offset = bank_offset + ((offset + i) * 2);
14398 /* but we must read dword aligned, so mask ... */
14399 rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
14400 if (rv) {
14401 aprint_error_dev(sc->sc_dev,
14402 "%s: failed to read NVM\n", __func__);
14403 break;
14404 }
14405 /* ... and pick out low or high word */
14406 if ((act_offset & 0x2) == 0)
14407 data[i] = (uint16_t)(dword & 0xFFFF);
14408 else
14409 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
14410 }
14411
14412 sc->nvm.release(sc);
14413 return rv;
14414 }
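
/*
 * A worked example of the alignment handling above (illustrative
 * only): the SPT flash interface reads only 32-bit aligned dwords, so
 * for NVM word 3 in bank 0, act_offset is 6; the read targets dword
 * offset 6 & ~3 = 4, and since bit 1 of act_offset is set, the result
 * is taken from the dword's upper 16 bits.
 */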
14415
14416 /* iNVM */
14417
14418 static int
14419 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
14420 {
14421 int32_t rv = 0;
14422 uint32_t invm_dword;
14423 uint16_t i;
14424 uint8_t record_type, word_address;
14425
14426 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14427 device_xname(sc->sc_dev), __func__));
14428
14429 for (i = 0; i < INVM_SIZE; i++) {
14430 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
14431 /* Get record type */
14432 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
14433 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
14434 break;
14435 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
14436 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
14437 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
14438 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
14439 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
14440 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
14441 if (word_address == address) {
14442 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
14443 rv = 0;
14444 break;
14445 }
14446 }
14447 }
14448
14449 return rv;
14450 }
14451
14452 static int
14453 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
14454 {
14455 int i, rv;
14456
14457 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14458 device_xname(sc->sc_dev), __func__));
14459
14460 rv = sc->nvm.acquire(sc);
14461 if (rv != 0)
14462 return rv;
14463
14464 for (i = 0; i < words; i++) {
14465 switch (offset + i) {
14466 case NVM_OFF_MACADDR:
14467 case NVM_OFF_MACADDR1:
14468 case NVM_OFF_MACADDR2:
14469 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14470 if (rv != 0) {
14471 data[i] = 0xffff;
14472 rv = -1;
14473 }
14474 break;
14475 case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
14476 rv = wm_nvm_read_word_invm(sc, offset, data);
14477 if (rv != 0) {
14478 *data = INVM_DEFAULT_AL;
14479 rv = 0;
14480 }
14481 break;
14482 case NVM_OFF_CFG2:
14483 rv = wm_nvm_read_word_invm(sc, offset, data);
14484 if (rv != 0) {
14485 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
14486 rv = 0;
14487 }
14488 break;
14489 case NVM_OFF_CFG4:
14490 rv = wm_nvm_read_word_invm(sc, offset, data);
14491 if (rv != 0) {
14492 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
14493 rv = 0;
14494 }
14495 break;
14496 case NVM_OFF_LED_1_CFG:
14497 rv = wm_nvm_read_word_invm(sc, offset, data);
14498 if (rv != 0) {
14499 *data = NVM_LED_1_CFG_DEFAULT_I211;
14500 rv = 0;
14501 }
14502 break;
14503 case NVM_OFF_LED_0_2_CFG:
14504 rv = wm_nvm_read_word_invm(sc, offset, data);
14505 if (rv != 0) {
14506 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
14507 rv = 0;
14508 }
14509 break;
14510 case NVM_OFF_ID_LED_SETTINGS:
14511 rv = wm_nvm_read_word_invm(sc, offset, data);
14512 if (rv != 0) {
14513 *data = ID_LED_RESERVED_FFFF;
14514 rv = 0;
14515 }
14516 break;
14517 default:
14518 DPRINTF(sc, WM_DEBUG_NVM,
14519 ("NVM word 0x%02x is not mapped.\n", offset));
14520 *data = NVM_RESERVED_WORD;
14521 break;
14522 }
14523 }
14524
14525 sc->nvm.release(sc);
14526 return rv;
14527 }
14528
14529 /* Lock, detecting NVM type, validate checksum, version and read */
14530
14531 static int
14532 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
14533 {
14534 uint32_t eecd = 0;
14535
14536 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
14537 || sc->sc_type == WM_T_82583) {
14538 eecd = CSR_READ(sc, WMREG_EECD);
14539
14540 /* Isolate bits 15 & 16 */
14541 eecd = ((eecd >> 15) & 0x03);
14542
14543 /* If both bits are set, device is Flash type */
14544 if (eecd == 0x03)
14545 return 0;
14546 }
14547 return 1;
14548 }
14549
14550 static int
14551 wm_nvm_flash_presence_i210(struct wm_softc *sc)
14552 {
14553 uint32_t eec;
14554
14555 eec = CSR_READ(sc, WMREG_EEC);
14556 if ((eec & EEC_FLASH_DETECTED) != 0)
14557 return 1;
14558
14559 return 0;
14560 }
14561
14562 /*
14563 * wm_nvm_validate_checksum
14564 *
14565 * The checksum is defined as the sum of the first 64 (16-bit) words.
14566 */
14567 static int
14568 wm_nvm_validate_checksum(struct wm_softc *sc)
14569 {
14570 uint16_t checksum;
14571 uint16_t eeprom_data;
14572 #ifdef WM_DEBUG
14573 uint16_t csum_wordaddr, valid_checksum;
14574 #endif
14575 int i;
14576
14577 checksum = 0;
14578
14579 /* Don't check for I211 */
14580 if (sc->sc_type == WM_T_I211)
14581 return 0;
14582
14583 #ifdef WM_DEBUG
14584 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
14585 || (sc->sc_type == WM_T_PCH_CNP)) {
14586 csum_wordaddr = NVM_OFF_COMPAT;
14587 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
14588 } else {
14589 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
14590 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
14591 }
14592
14593 /* Dump EEPROM image for debug */
14594 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
14595 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
14596 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
14597 /* XXX PCH_SPT? */
14598 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
14599 if ((eeprom_data & valid_checksum) == 0)
14600 DPRINTF(sc, WM_DEBUG_NVM,
14601 ("%s: NVM need to be updated (%04x != %04x)\n",
14602 device_xname(sc->sc_dev), eeprom_data,
14603 valid_checksum));
14604 }
14605
14606 if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
14607 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
14608 for (i = 0; i < NVM_SIZE; i++) {
14609 if (wm_nvm_read(sc, i, 1, &eeprom_data))
14610 printf("XXXX ");
14611 else
14612 printf("%04hx ", eeprom_data);
14613 if (i % 8 == 7)
14614 printf("\n");
14615 }
14616 }
14617
14618 #endif /* WM_DEBUG */
14619
14620 for (i = 0; i < NVM_SIZE; i++) {
14621 if (wm_nvm_read(sc, i, 1, &eeprom_data))
14622 return -1;
14623 checksum += eeprom_data;
14624 }
14625
14626 if (checksum != (uint16_t) NVM_CHECKSUM) {
14627 #ifdef WM_DEBUG
14628 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
14629 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
14630 #endif
14631 }
14632
14633 return 0;
14634 }
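
/*
 * A small illustration of the checksum rule (a sketch, not driver
 * code): the 16-bit sum of words 0..NVM_SIZE-1 must equal
 * NVM_CHECKSUM, so an image is typically finalized by storing the
 * difference in the last checksummed word:
 *
 *	uint16_t sum = 0;
 *	for (int i = 0; i < NVM_SIZE - 1; i++)
 *		sum += image[i];
 *	image[NVM_SIZE - 1] = (uint16_t)(NVM_CHECKSUM - sum);
 */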
14635
14636 static void
14637 wm_nvm_version_invm(struct wm_softc *sc)
14638 {
14639 uint32_t dword;
14640
14641 /*
14642 * Linux's code to decode the version is very strange, so we don't
14643 * follow that algorithm and just use word 61 as the document says.
14644 * Perhaps it's not perfect though...
14645 *
14646 * Example:
14647 *
14648 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
14649 */
14650 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
14651 dword = __SHIFTOUT(dword, INVM_VER_1);
14652 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
14653 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
14654 }
14655
14656 static void
14657 wm_nvm_version(struct wm_softc *sc)
14658 {
14659 uint16_t major, minor, build, patch;
14660 uint16_t uid0, uid1;
14661 uint16_t nvm_data;
14662 uint16_t off;
14663 bool check_version = false;
14664 bool check_optionrom = false;
14665 bool have_build = false;
14666 bool have_uid = true;
14667
14668 /*
14669 * Version format:
14670 *
14671 * XYYZ
14672 * X0YZ
14673 * X0YY
14674 *
14675 * Example:
14676 *
14677 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
14678 * 82571 0x50a6 5.10.6?
14679 * 82572 0x506a 5.6.10?
14680 * 82572EI 0x5069 5.6.9?
14681 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
14682 * 0x2013 2.1.3?
14683 * 82583 0x10a0 1.10.0? (document says it's default value)
14684 * ICH8+82567 0x0040 0.4.0?
14685 * ICH9+82566 0x1040 1.4.0?
14686 *ICH10+82567 0x0043 0.4.3?
14687 * PCH+82577 0x00c1 0.12.1?
14688 * PCH2+82579 0x00d3 0.13.3?
14689 * 0x00d4 0.13.4?
14690 * LPT+I218 0x0023 0.2.3?
14691 * SPT+I219 0x0084 0.8.4?
14692 * CNP+I219 0x0054 0.5.4?
14693 */
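	/*
	 * Decode sketch (assuming the masks match the examples above):
	 * major is the top nibble; when a build number is present, minor
	 * is bits 11:4 and build is the low nibble, otherwise minor is the
	 * low byte. Minor is converted nibble-wise to decimal, so e.g.
	 * 0x50a2 -> 5.10.2.
	 */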
14694
14695 /*
14696 * XXX
14697 	 * QEMU's e1000e emulation (82574L) has an SPI ROM of only 64 words.
14698 	 * I've never seen real 82574 hardware with such a small SPI ROM.
14699 */
14700 if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
14701 || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
14702 have_uid = false;
14703
14704 switch (sc->sc_type) {
14705 case WM_T_82571:
14706 case WM_T_82572:
14707 case WM_T_82574:
14708 case WM_T_82583:
14709 check_version = true;
14710 check_optionrom = true;
14711 have_build = true;
14712 break;
14713 case WM_T_ICH8:
14714 case WM_T_ICH9:
14715 case WM_T_ICH10:
14716 case WM_T_PCH:
14717 case WM_T_PCH2:
14718 case WM_T_PCH_LPT:
14719 case WM_T_PCH_SPT:
14720 case WM_T_PCH_CNP:
14721 check_version = true;
14722 have_build = true;
14723 have_uid = false;
14724 break;
14725 case WM_T_82575:
14726 case WM_T_82576:
14727 case WM_T_82580:
14728 if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
14729 check_version = true;
14730 break;
14731 case WM_T_I211:
14732 wm_nvm_version_invm(sc);
14733 have_uid = false;
14734 goto printver;
14735 case WM_T_I210:
14736 if (!wm_nvm_flash_presence_i210(sc)) {
14737 wm_nvm_version_invm(sc);
14738 have_uid = false;
14739 goto printver;
14740 }
14741 /* FALLTHROUGH */
14742 case WM_T_I350:
14743 case WM_T_I354:
14744 check_version = true;
14745 check_optionrom = true;
14746 break;
14747 default:
14748 return;
14749 }
14750 if (check_version
14751 && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
14752 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
14753 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
14754 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
14755 build = nvm_data & NVM_BUILD_MASK;
14756 have_build = true;
14757 } else
14758 minor = nvm_data & 0x00ff;
14759
14760 		/* Convert minor to decimal: high nibble = tens, low nibble = units */
14761 minor = (minor / 16) * 10 + (minor % 16);
14762 sc->sc_nvm_ver_major = major;
14763 sc->sc_nvm_ver_minor = minor;
14764
14765 printver:
14766 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
14767 sc->sc_nvm_ver_minor);
14768 if (have_build) {
14769 sc->sc_nvm_ver_build = build;
14770 aprint_verbose(".%d", build);
14771 }
14772 }
14773
14774 	/* Assume the Option ROM area is above NVM_SIZE */
14775 if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
14776 && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
14777 /* Option ROM Version */
14778 if ((off != 0x0000) && (off != 0xffff)) {
14779 int rv;
14780
14781 off += NVM_COMBO_VER_OFF;
14782 rv = wm_nvm_read(sc, off + 1, 1, &uid1);
14783 rv |= wm_nvm_read(sc, off, 1, &uid0);
14784 if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
14785 && (uid1 != 0) && (uid1 != 0xffff)) {
14786 				/* 8-bit major, 16-bit build, 8-bit patch */
14787 major = uid0 >> 8;
14788 build = (uid0 << 8) | (uid1 >> 8);
14789 patch = uid1 & 0x00ff;
14790 aprint_verbose(", option ROM Version %d.%d.%d",
14791 major, build, patch);
14792 }
14793 }
14794 }
14795
14796 if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
14797 aprint_verbose(", Image Unique ID %08x",
14798 ((uint32_t)uid1 << 16) | uid0);
14799 }
14800
14801 /*
14802 * wm_nvm_read:
14803 *
14804 * Read data from the serial EEPROM.
14805 */
14806 static int
14807 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14808 {
14809 int rv;
14810
14811 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14812 device_xname(sc->sc_dev), __func__));
14813
14814 if (sc->sc_flags & WM_F_EEPROM_INVALID)
14815 return -1;
14816
14817 rv = sc->nvm.read(sc, word, wordcnt, data);
14818
14819 return rv;
14820 }
14821
14822 /*
14823 * Hardware semaphores.
14824  * Very complex...
14825 */
14826
14827 static int
14828 wm_get_null(struct wm_softc *sc)
14829 {
14830
14831 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14832 device_xname(sc->sc_dev), __func__));
14833 return 0;
14834 }
14835
14836 static void
14837 wm_put_null(struct wm_softc *sc)
14838 {
14839
14840 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14841 device_xname(sc->sc_dev), __func__));
14842 return;
14843 }
14844
14845 static int
14846 wm_get_eecd(struct wm_softc *sc)
14847 {
14848 uint32_t reg;
14849 int x;
14850
14851 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14852 device_xname(sc->sc_dev), __func__));
14853
14854 reg = CSR_READ(sc, WMREG_EECD);
14855
14856 /* Request EEPROM access. */
14857 reg |= EECD_EE_REQ;
14858 CSR_WRITE(sc, WMREG_EECD, reg);
14859
14860 	/* ... and wait for it to be granted (up to 1000 * 5us = 5ms). */
14861 for (x = 0; x < 1000; x++) {
14862 reg = CSR_READ(sc, WMREG_EECD);
14863 if (reg & EECD_EE_GNT)
14864 break;
14865 delay(5);
14866 }
14867 if ((reg & EECD_EE_GNT) == 0) {
14868 aprint_error_dev(sc->sc_dev,
14869 "could not acquire EEPROM GNT\n");
14870 reg &= ~EECD_EE_REQ;
14871 CSR_WRITE(sc, WMREG_EECD, reg);
14872 return -1;
14873 }
14874
14875 return 0;
14876 }
14877
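/*
 * Bit-bang helpers for the EEPROM serial clock (SK) line. SPI parts need
 * only ~1us of settling time; Microwire parts need ~50us.
 */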
14878 static void
14879 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
14880 {
14881
14882 *eecd |= EECD_SK;
14883 CSR_WRITE(sc, WMREG_EECD, *eecd);
14884 CSR_WRITE_FLUSH(sc);
14885 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14886 delay(1);
14887 else
14888 delay(50);
14889 }
14890
14891 static void
14892 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
14893 {
14894
14895 *eecd &= ~EECD_SK;
14896 CSR_WRITE(sc, WMREG_EECD, *eecd);
14897 CSR_WRITE_FLUSH(sc);
14898 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14899 delay(1);
14900 else
14901 delay(50);
14902 }
14903
14904 static void
14905 wm_put_eecd(struct wm_softc *sc)
14906 {
14907 uint32_t reg;
14908
14909 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14910 device_xname(sc->sc_dev), __func__));
14911
14912 /* Stop nvm */
14913 reg = CSR_READ(sc, WMREG_EECD);
14914 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
14915 /* Pull CS high */
14916 reg |= EECD_CS;
14917 wm_nvm_eec_clock_lower(sc, ®);
14918 } else {
14919 /* CS on Microwire is active-high */
14920 reg &= ~(EECD_CS | EECD_DI);
14921 CSR_WRITE(sc, WMREG_EECD, reg);
14922 wm_nvm_eec_clock_raise(sc, ®);
14923 wm_nvm_eec_clock_lower(sc, ®);
14924 }
14925
14926 reg = CSR_READ(sc, WMREG_EECD);
14927 reg &= ~EECD_EE_REQ;
14928 CSR_WRITE(sc, WMREG_EECD, reg);
14929
14930 return;
14931 }
14932
14933 /*
14934 * Get hardware semaphore.
14935 * Same as e1000_get_hw_semaphore_generic()
14936 */
14937 static int
14938 wm_get_swsm_semaphore(struct wm_softc *sc)
14939 {
14940 int32_t timeout;
14941 uint32_t swsm;
14942
14943 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14944 device_xname(sc->sc_dev), __func__));
14945 KASSERT(sc->sc_nvm_wordsize > 0);
14946
14947 retry:
14948 /* Get the SW semaphore. */
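	/* The retry count scales with the NVM size, as in e1000. */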
14949 timeout = sc->sc_nvm_wordsize + 1;
14950 while (timeout) {
14951 swsm = CSR_READ(sc, WMREG_SWSM);
14952
14953 if ((swsm & SWSM_SMBI) == 0)
14954 break;
14955
14956 delay(50);
14957 timeout--;
14958 }
14959
14960 if (timeout == 0) {
14961 if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
14962 /*
14963 * In rare circumstances, the SW semaphore may already
14964 * be held unintentionally. Clear the semaphore once
14965 * before giving up.
14966 */
14967 sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
14968 wm_put_swsm_semaphore(sc);
14969 goto retry;
14970 }
14971 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
14972 return -1;
14973 }
14974
14975 /* Get the FW semaphore. */
14976 timeout = sc->sc_nvm_wordsize + 1;
14977 while (timeout) {
14978 swsm = CSR_READ(sc, WMREG_SWSM);
14979 swsm |= SWSM_SWESMBI;
14980 CSR_WRITE(sc, WMREG_SWSM, swsm);
14981 /* If we managed to set the bit we got the semaphore. */
14982 swsm = CSR_READ(sc, WMREG_SWSM);
14983 if (swsm & SWSM_SWESMBI)
14984 break;
14985
14986 delay(50);
14987 timeout--;
14988 }
14989
14990 if (timeout == 0) {
14991 aprint_error_dev(sc->sc_dev,
14992 "could not acquire SWSM SWESMBI\n");
14993 /* Release semaphores */
14994 wm_put_swsm_semaphore(sc);
14995 return -1;
14996 }
14997 return 0;
14998 }
14999
15000 /*
15001 * Put hardware semaphore.
15002 * Same as e1000_put_hw_semaphore_generic()
15003 */
15004 static void
15005 wm_put_swsm_semaphore(struct wm_softc *sc)
15006 {
15007 uint32_t swsm;
15008
15009 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15010 device_xname(sc->sc_dev), __func__));
15011
15012 swsm = CSR_READ(sc, WMREG_SWSM);
15013 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
15014 CSR_WRITE(sc, WMREG_SWSM, swsm);
15015 }
15016
15017 /*
15018 * Get SW/FW semaphore.
15019 * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
15020 */
15021 static int
15022 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15023 {
15024 uint32_t swfw_sync;
15025 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
15026 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
15027 int timeout;
15028
15029 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15030 device_xname(sc->sc_dev), __func__));
15031
15032 if (sc->sc_type == WM_T_80003)
15033 timeout = 50;
15034 else
15035 timeout = 200;
15036
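	/* Each attempt waits 5ms, so this retries for ~250ms or ~1s total. */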
15037 while (timeout) {
15038 if (wm_get_swsm_semaphore(sc)) {
15039 aprint_error_dev(sc->sc_dev,
15040 "%s: failed to get semaphore\n",
15041 __func__);
15042 return -1;
15043 }
15044 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15045 if ((swfw_sync & (swmask | fwmask)) == 0) {
15046 swfw_sync |= swmask;
15047 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15048 wm_put_swsm_semaphore(sc);
15049 return 0;
15050 }
15051 wm_put_swsm_semaphore(sc);
15052 delay(5000);
15053 timeout--;
15054 }
15055 device_printf(sc->sc_dev,
15056 "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
15057 mask, swfw_sync);
15058 return -1;
15059 }
15060
15061 static void
15062 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15063 {
15064 uint32_t swfw_sync;
15065
15066 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15067 device_xname(sc->sc_dev), __func__));
15068
15069 while (wm_get_swsm_semaphore(sc) != 0)
15070 continue;
15071
15072 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15073 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
15074 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15075
15076 wm_put_swsm_semaphore(sc);
15077 }
15078
15079 static int
15080 wm_get_nvm_80003(struct wm_softc *sc)
15081 {
15082 int rv;
15083
15084 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
15085 device_xname(sc->sc_dev), __func__));
15086
15087 if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
15088 aprint_error_dev(sc->sc_dev,
15089 "%s: failed to get semaphore(SWFW)\n", __func__);
15090 return rv;
15091 }
15092
15093 if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15094 && (rv = wm_get_eecd(sc)) != 0) {
15095 aprint_error_dev(sc->sc_dev,
15096 "%s: failed to get semaphore(EECD)\n", __func__);
15097 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15098 return rv;
15099 }
15100
15101 return 0;
15102 }
15103
15104 static void
15105 wm_put_nvm_80003(struct wm_softc *sc)
15106 {
15107
15108 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15109 device_xname(sc->sc_dev), __func__));
15110
15111 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15112 wm_put_eecd(sc);
15113 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15114 }
15115
15116 static int
15117 wm_get_nvm_82571(struct wm_softc *sc)
15118 {
15119 int rv;
15120
15121 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15122 device_xname(sc->sc_dev), __func__));
15123
15124 if ((rv = wm_get_swsm_semaphore(sc)) != 0)
15125 return rv;
15126
15127 switch (sc->sc_type) {
15128 case WM_T_82573:
15129 break;
15130 default:
15131 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15132 rv = wm_get_eecd(sc);
15133 break;
15134 }
15135
15136 if (rv != 0) {
15137 aprint_error_dev(sc->sc_dev,
15138 "%s: failed to get semaphore\n",
15139 __func__);
15140 wm_put_swsm_semaphore(sc);
15141 }
15142
15143 return rv;
15144 }
15145
15146 static void
15147 wm_put_nvm_82571(struct wm_softc *sc)
15148 {
15149
15150 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15151 device_xname(sc->sc_dev), __func__));
15152
15153 switch (sc->sc_type) {
15154 case WM_T_82573:
15155 break;
15156 default:
15157 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15158 wm_put_eecd(sc);
15159 break;
15160 }
15161
15162 wm_put_swsm_semaphore(sc);
15163 }
15164
15165 static int
15166 wm_get_phy_82575(struct wm_softc *sc)
15167 {
15168
15169 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15170 device_xname(sc->sc_dev), __func__));
15171 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15172 }
15173
15174 static void
15175 wm_put_phy_82575(struct wm_softc *sc)
15176 {
15177
15178 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15179 device_xname(sc->sc_dev), __func__));
15180 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15181 }
15182
15183 static int
15184 wm_get_swfwhw_semaphore(struct wm_softc *sc)
15185 {
15186 uint32_t ext_ctrl;
15187 	int timeout;
15188
15189 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15190 device_xname(sc->sc_dev), __func__));
15191
15192 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15193 for (timeout = 0; timeout < 200; timeout++) {
15194 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15195 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15196 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15197
15198 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15199 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15200 return 0;
15201 delay(5000);
15202 }
15203 device_printf(sc->sc_dev,
15204 "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
15205 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15206 return -1;
15207 }
15208
15209 static void
15210 wm_put_swfwhw_semaphore(struct wm_softc *sc)
15211 {
15212 uint32_t ext_ctrl;
15213
15214 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15215 device_xname(sc->sc_dev), __func__));
15216
15217 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15218 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15219 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15220
15221 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15222 }
15223
15224 static int
15225 wm_get_swflag_ich8lan(struct wm_softc *sc)
15226 {
15227 uint32_t ext_ctrl;
15228 int timeout;
15229
15230 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15231 device_xname(sc->sc_dev), __func__));
15232 mutex_enter(sc->sc_ich_phymtx);
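	/* First wait for any current owner (FW or other SW) to release it. */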
15233 for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
15234 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15235 if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
15236 break;
15237 delay(1000);
15238 }
15239 if (timeout >= WM_PHY_CFG_TIMEOUT) {
15240 device_printf(sc->sc_dev,
15241 "SW has already locked the resource\n");
15242 goto out;
15243 }
15244
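	/* Claim ownership, then read back to confirm that the bit stuck. */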
15245 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15246 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15247 for (timeout = 0; timeout < 1000; timeout++) {
15248 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15249 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15250 break;
15251 delay(1000);
15252 }
15253 if (timeout >= 1000) {
15254 device_printf(sc->sc_dev, "failed to acquire semaphore\n");
15255 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15256 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15257 goto out;
15258 }
15259 return 0;
15260
15261 out:
15262 mutex_exit(sc->sc_ich_phymtx);
15263 return -1;
15264 }
15265
15266 static void
15267 wm_put_swflag_ich8lan(struct wm_softc *sc)
15268 {
15269 uint32_t ext_ctrl;
15270
15271 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15272 device_xname(sc->sc_dev), __func__));
15273 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15274 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
15275 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15276 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15277 } else
15278 device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
15279
15280 mutex_exit(sc->sc_ich_phymtx);
15281 }
15282
15283 static int
15284 wm_get_nvm_ich8lan(struct wm_softc *sc)
15285 {
15286
15287 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15288 device_xname(sc->sc_dev), __func__));
15289 mutex_enter(sc->sc_ich_nvmmtx);
15290
15291 return 0;
15292 }
15293
15294 static void
15295 wm_put_nvm_ich8lan(struct wm_softc *sc)
15296 {
15297
15298 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15299 device_xname(sc->sc_dev), __func__));
15300 mutex_exit(sc->sc_ich_nvmmtx);
15301 }
15302
15303 static int
15304 wm_get_hw_semaphore_82573(struct wm_softc *sc)
15305 {
15306 int i = 0;
15307 uint32_t reg;
15308
15309 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15310 device_xname(sc->sc_dev), __func__));
15311
15312 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15313 do {
15314 CSR_WRITE(sc, WMREG_EXTCNFCTR,
15315 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
15316 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15317 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
15318 break;
15319 delay(2*1000);
15320 i++;
15321 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
15322
15323 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
15324 wm_put_hw_semaphore_82573(sc);
15325 log(LOG_ERR, "%s: Driver can't access the PHY\n",
15326 device_xname(sc->sc_dev));
15327 return -1;
15328 }
15329
15330 return 0;
15331 }
15332
15333 static void
15334 wm_put_hw_semaphore_82573(struct wm_softc *sc)
15335 {
15336 uint32_t reg;
15337
15338 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15339 device_xname(sc->sc_dev), __func__));
15340
15341 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15342 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15343 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15344 }
15345
15346 /*
15347 * Management mode and power management related subroutines.
15348 * BMC, AMT, suspend/resume and EEE.
15349 */
15350
15351 #ifdef WM_WOL
15352 static int
15353 wm_check_mng_mode(struct wm_softc *sc)
15354 {
15355 int rv;
15356
15357 switch (sc->sc_type) {
15358 case WM_T_ICH8:
15359 case WM_T_ICH9:
15360 case WM_T_ICH10:
15361 case WM_T_PCH:
15362 case WM_T_PCH2:
15363 case WM_T_PCH_LPT:
15364 case WM_T_PCH_SPT:
15365 case WM_T_PCH_CNP:
15366 rv = wm_check_mng_mode_ich8lan(sc);
15367 break;
15368 case WM_T_82574:
15369 case WM_T_82583:
15370 rv = wm_check_mng_mode_82574(sc);
15371 break;
15372 case WM_T_82571:
15373 case WM_T_82572:
15374 case WM_T_82573:
15375 case WM_T_80003:
15376 rv = wm_check_mng_mode_generic(sc);
15377 break;
15378 default:
15379 		/* Nothing to do */
15380 rv = 0;
15381 break;
15382 }
15383
15384 return rv;
15385 }
15386
15387 static int
15388 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
15389 {
15390 uint32_t fwsm;
15391
15392 fwsm = CSR_READ(sc, WMREG_FWSM);
15393
15394 if (((fwsm & FWSM_FW_VALID) != 0)
15395 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15396 return 1;
15397
15398 return 0;
15399 }
15400
15401 static int
15402 wm_check_mng_mode_82574(struct wm_softc *sc)
15403 {
15404 uint16_t data;
15405
15406 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15407
15408 if ((data & NVM_CFG2_MNGM_MASK) != 0)
15409 return 1;
15410
15411 return 0;
15412 }
15413
15414 static int
15415 wm_check_mng_mode_generic(struct wm_softc *sc)
15416 {
15417 uint32_t fwsm;
15418
15419 fwsm = CSR_READ(sc, WMREG_FWSM);
15420
15421 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
15422 return 1;
15423
15424 return 0;
15425 }
15426 #endif /* WM_WOL */
15427
15428 static int
15429 wm_enable_mng_pass_thru(struct wm_softc *sc)
15430 {
15431 uint32_t manc, fwsm, factps;
15432
15433 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
15434 return 0;
15435
15436 manc = CSR_READ(sc, WMREG_MANC);
15437
15438 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
15439 device_xname(sc->sc_dev), manc));
15440 if ((manc & MANC_RECV_TCO_EN) == 0)
15441 return 0;
15442
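	/*
	 * TCO reception is on; decide per-family whether management
	 * pass-through to the host is enabled.
	 */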
15443 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
15444 fwsm = CSR_READ(sc, WMREG_FWSM);
15445 factps = CSR_READ(sc, WMREG_FACTPS);
15446 if (((factps & FACTPS_MNGCG) == 0)
15447 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15448 return 1;
15449 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
15450 uint16_t data;
15451
15452 factps = CSR_READ(sc, WMREG_FACTPS);
15453 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15454 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
15455 device_xname(sc->sc_dev), factps, data));
15456 if (((factps & FACTPS_MNGCG) == 0)
15457 && ((data & NVM_CFG2_MNGM_MASK)
15458 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
15459 return 1;
15460 } else if (((manc & MANC_SMBUS_EN) != 0)
15461 && ((manc & MANC_ASF_EN) == 0))
15462 return 1;
15463
15464 return 0;
15465 }
15466
15467 static bool
15468 wm_phy_resetisblocked(struct wm_softc *sc)
15469 {
15470 bool blocked = false;
15471 uint32_t reg;
15472 int i = 0;
15473
15474 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15475 device_xname(sc->sc_dev), __func__));
15476
15477 switch (sc->sc_type) {
15478 case WM_T_ICH8:
15479 case WM_T_ICH9:
15480 case WM_T_ICH10:
15481 case WM_T_PCH:
15482 case WM_T_PCH2:
15483 case WM_T_PCH_LPT:
15484 case WM_T_PCH_SPT:
15485 case WM_T_PCH_CNP:
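		/*
		 * Poll FWSM up to 30 times, 10ms apart, waiting for the
		 * firmware to permit a PHY reset.
		 */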
15486 do {
15487 reg = CSR_READ(sc, WMREG_FWSM);
15488 if ((reg & FWSM_RSPCIPHY) == 0) {
15489 blocked = true;
15490 delay(10*1000);
15491 continue;
15492 }
15493 blocked = false;
15494 } while (blocked && (i++ < 30));
15495 		return blocked;
15497 case WM_T_82571:
15498 case WM_T_82572:
15499 case WM_T_82573:
15500 case WM_T_82574:
15501 case WM_T_82583:
15502 case WM_T_80003:
15503 reg = CSR_READ(sc, WMREG_MANC);
15504 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
15509 default:
15510 /* No problem */
15511 break;
15512 }
15513
15514 return false;
15515 }
15516
15517 static void
15518 wm_get_hw_control(struct wm_softc *sc)
15519 {
15520 uint32_t reg;
15521
15522 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15523 device_xname(sc->sc_dev), __func__));
15524
15525 if (sc->sc_type == WM_T_82573) {
15526 reg = CSR_READ(sc, WMREG_SWSM);
15527 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
15528 } else if (sc->sc_type >= WM_T_82571) {
15529 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15530 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
15531 }
15532 }
15533
15534 static void
15535 wm_release_hw_control(struct wm_softc *sc)
15536 {
15537 uint32_t reg;
15538
15539 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15540 device_xname(sc->sc_dev), __func__));
15541
15542 if (sc->sc_type == WM_T_82573) {
15543 reg = CSR_READ(sc, WMREG_SWSM);
15544 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
15545 } else if (sc->sc_type >= WM_T_82571) {
15546 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15547 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
15548 }
15549 }
15550
15551 static void
15552 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
15553 {
15554 uint32_t reg;
15555
15556 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15557 device_xname(sc->sc_dev), __func__));
15558
15559 if (sc->sc_type < WM_T_PCH2)
15560 return;
15561
15562 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15563
15564 if (gate)
15565 reg |= EXTCNFCTR_GATE_PHY_CFG;
15566 else
15567 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
15568
15569 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15570 }
15571
15572 static int
15573 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
15574 {
15575 uint32_t fwsm, reg;
15576 int rv;
15577
15578 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15579 device_xname(sc->sc_dev), __func__));
15580
15581 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
15582 wm_gate_hw_phy_config_ich8lan(sc, true);
15583
15584 /* Disable ULP */
15585 wm_ulp_disable(sc);
15586
15587 /* Acquire PHY semaphore */
15588 rv = sc->phy.acquire(sc);
15589 if (rv != 0) {
15590 DPRINTF(sc, WM_DEBUG_INIT,
15591 ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
15592 return rv;
15593 }
15594
15595 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
15596 * inaccessible and resetting the PHY is not blocked, toggle the
15597 * LANPHYPC Value bit to force the interconnect to PCIe mode.
15598 */
15599 fwsm = CSR_READ(sc, WMREG_FWSM);
15600 switch (sc->sc_type) {
15601 case WM_T_PCH_LPT:
15602 case WM_T_PCH_SPT:
15603 case WM_T_PCH_CNP:
15604 if (wm_phy_is_accessible_pchlan(sc))
15605 break;
15606
15607 /* Before toggling LANPHYPC, see if PHY is accessible by
15608 * forcing MAC to SMBus mode first.
15609 */
15610 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15611 reg |= CTRL_EXT_FORCE_SMBUS;
15612 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15613 #if 0
15614 /* XXX Isn't this required??? */
15615 CSR_WRITE_FLUSH(sc);
15616 #endif
15617 /* Wait 50 milliseconds for MAC to finish any retries
15618 * that it might be trying to perform from previous
15619 * attempts to acknowledge any phy read requests.
15620 */
15621 delay(50 * 1000);
15622 /* FALLTHROUGH */
15623 case WM_T_PCH2:
15624 if (wm_phy_is_accessible_pchlan(sc) == true)
15625 break;
15626 /* FALLTHROUGH */
15627 case WM_T_PCH:
15628 if (sc->sc_type == WM_T_PCH)
15629 if ((fwsm & FWSM_FW_VALID) != 0)
15630 break;
15631
15632 if (wm_phy_resetisblocked(sc) == true) {
15633 device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
15634 break;
15635 }
15636
15637 /* Toggle LANPHYPC Value bit */
15638 wm_toggle_lanphypc_pch_lpt(sc);
15639
15640 if (sc->sc_type >= WM_T_PCH_LPT) {
15641 if (wm_phy_is_accessible_pchlan(sc) == true)
15642 break;
15643
15644 /* Toggling LANPHYPC brings the PHY out of SMBus mode
15645 * so ensure that the MAC is also out of SMBus mode
15646 */
15647 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15648 reg &= ~CTRL_EXT_FORCE_SMBUS;
15649 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15650
15651 if (wm_phy_is_accessible_pchlan(sc) == true)
15652 break;
15653 rv = -1;
15654 }
15655 break;
15656 default:
15657 break;
15658 }
15659
15660 /* Release semaphore */
15661 sc->phy.release(sc);
15662
15663 if (rv == 0) {
15664 /* Check to see if able to reset PHY. Print error if not */
15665 if (wm_phy_resetisblocked(sc)) {
15666 device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
15667 goto out;
15668 }
15669
15670 /* Reset the PHY before any access to it. Doing so, ensures
15671 * that the PHY is in a known good state before we read/write
15672 * PHY registers. The generic reset is sufficient here,
15673 * because we haven't determined the PHY type yet.
15674 */
15675 if (wm_reset_phy(sc) != 0)
15676 goto out;
15677
15678 /* On a successful reset, possibly need to wait for the PHY
15679 * to quiesce to an accessible state before returning control
15680 * to the calling function. If the PHY does not quiesce, then
15681 * return E1000E_BLK_PHY_RESET, as this is the condition that
15682 * the PHY is in.
15683 */
15684 if (wm_phy_resetisblocked(sc))
15685 device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
15686 }
15687
15688 out:
15689 /* Ungate automatic PHY configuration on non-managed 82579 */
15690 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
15691 delay(10*1000);
15692 wm_gate_hw_phy_config_ich8lan(sc, false);
15693 }
15694
15695 return 0;
15696 }
15697
15698 static void
15699 wm_init_manageability(struct wm_softc *sc)
15700 {
15701
15702 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15703 device_xname(sc->sc_dev), __func__));
15704 KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
15705
15706 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15707 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
15708 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15709
15710 /* Disable hardware interception of ARP */
15711 manc &= ~MANC_ARP_EN;
15712
15713 /* Enable receiving management packets to the host */
15714 if (sc->sc_type >= WM_T_82571) {
15715 manc |= MANC_EN_MNG2HOST;
15716 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
15717 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
15718 }
15719
15720 CSR_WRITE(sc, WMREG_MANC, manc);
15721 }
15722 }
15723
15724 static void
15725 wm_release_manageability(struct wm_softc *sc)
15726 {
15727
15728 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15729 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15730
15731 manc |= MANC_ARP_EN;
15732 if (sc->sc_type >= WM_T_82571)
15733 manc &= ~MANC_EN_MNG2HOST;
15734
15735 CSR_WRITE(sc, WMREG_MANC, manc);
15736 }
15737 }
15738
15739 static void
15740 wm_get_wakeup(struct wm_softc *sc)
15741 {
15742
15743 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
15744 switch (sc->sc_type) {
15745 case WM_T_82573:
15746 case WM_T_82583:
15747 sc->sc_flags |= WM_F_HAS_AMT;
15748 /* FALLTHROUGH */
15749 case WM_T_80003:
15750 case WM_T_82575:
15751 case WM_T_82576:
15752 case WM_T_82580:
15753 case WM_T_I350:
15754 case WM_T_I354:
15755 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
15756 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
15757 /* FALLTHROUGH */
15758 case WM_T_82541:
15759 case WM_T_82541_2:
15760 case WM_T_82547:
15761 case WM_T_82547_2:
15762 case WM_T_82571:
15763 case WM_T_82572:
15764 case WM_T_82574:
15765 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15766 break;
15767 case WM_T_ICH8:
15768 case WM_T_ICH9:
15769 case WM_T_ICH10:
15770 case WM_T_PCH:
15771 case WM_T_PCH2:
15772 case WM_T_PCH_LPT:
15773 case WM_T_PCH_SPT:
15774 case WM_T_PCH_CNP:
15775 sc->sc_flags |= WM_F_HAS_AMT;
15776 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15777 break;
15778 default:
15779 break;
15780 }
15781
15782 /* 1: HAS_MANAGE */
15783 if (wm_enable_mng_pass_thru(sc) != 0)
15784 sc->sc_flags |= WM_F_HAS_MANAGE;
15785
15786 /*
15787 	 * Note that the WOL flag is set after the EEPROM stuff is reset.
15789 */
15790 }
15791
15792 /*
15793 * Unconfigure Ultra Low Power mode.
15794 * Only for I217 and newer (see below).
15795 */
15796 static int
15797 wm_ulp_disable(struct wm_softc *sc)
15798 {
15799 uint32_t reg;
15800 uint16_t phyreg;
15801 int i = 0, rv;
15802
15803 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15804 device_xname(sc->sc_dev), __func__));
15805 /* Exclude old devices */
15806 if ((sc->sc_type < WM_T_PCH_LPT)
15807 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
15808 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
15809 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
15810 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
15811 return 0;
15812
15813 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
15814 /* Request ME un-configure ULP mode in the PHY */
15815 reg = CSR_READ(sc, WMREG_H2ME);
15816 reg &= ~H2ME_ULP;
15817 reg |= H2ME_ENFORCE_SETTINGS;
15818 CSR_WRITE(sc, WMREG_H2ME, reg);
15819
15820 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
15821 while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
15822 if (i++ == 30) {
15823 device_printf(sc->sc_dev, "%s timed out\n",
15824 __func__);
15825 return -1;
15826 }
15827 delay(10 * 1000);
15828 }
15829 reg = CSR_READ(sc, WMREG_H2ME);
15830 reg &= ~H2ME_ENFORCE_SETTINGS;
15831 CSR_WRITE(sc, WMREG_H2ME, reg);
15832
15833 return 0;
15834 }
15835
15836 /* Acquire semaphore */
15837 rv = sc->phy.acquire(sc);
15838 if (rv != 0) {
15839 DPRINTF(sc, WM_DEBUG_INIT,
15840 ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
15841 return rv;
15842 }
15843
15844 /* Toggle LANPHYPC */
15845 wm_toggle_lanphypc_pch_lpt(sc);
15846
15847 /* Unforce SMBus mode in PHY */
15848 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
15849 if (rv != 0) {
15850 uint32_t reg2;
15851
15852 aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
15853 __func__);
15854 reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
15855 reg2 |= CTRL_EXT_FORCE_SMBUS;
15856 CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
15857 delay(50 * 1000);
15858
15859 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
15860 &phyreg);
15861 if (rv != 0)
15862 goto release;
15863 }
15864 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
15865 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
15866
15867 /* Unforce SMBus mode in MAC */
15868 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15869 reg &= ~CTRL_EXT_FORCE_SMBUS;
15870 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15871
15872 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
15873 if (rv != 0)
15874 goto release;
15875 phyreg |= HV_PM_CTRL_K1_ENA;
15876 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
15877
15878 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
15879 &phyreg);
15880 if (rv != 0)
15881 goto release;
15882 phyreg &= ~(I218_ULP_CONFIG1_IND
15883 | I218_ULP_CONFIG1_STICKY_ULP
15884 | I218_ULP_CONFIG1_RESET_TO_SMBUS
15885 | I218_ULP_CONFIG1_WOL_HOST
15886 | I218_ULP_CONFIG1_INBAND_EXIT
15887 | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
15888 | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
15889 | I218_ULP_CONFIG1_DIS_SMB_PERST);
15890 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15891 phyreg |= I218_ULP_CONFIG1_START;
15892 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15893
15894 reg = CSR_READ(sc, WMREG_FEXTNVM7);
15895 reg &= ~FEXTNVM7_DIS_SMB_PERST;
15896 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
15897
15898 release:
15899 /* Release semaphore */
15900 sc->phy.release(sc);
15901 wm_gmii_reset(sc);
15902 delay(50 * 1000);
15903
15904 return rv;
15905 }
15906
15907 /* WOL in the newer chipset interfaces (pchlan) */
15908 static int
15909 wm_enable_phy_wakeup(struct wm_softc *sc)
15910 {
15911 device_t dev = sc->sc_dev;
15912 uint32_t mreg, moff;
15913 uint16_t wuce, wuc, wufc, preg;
15914 int i, rv;
15915
15916 KASSERT(sc->sc_type >= WM_T_PCH);
15917
15918 /* Copy MAC RARs to PHY RARs */
15919 wm_copy_rx_addrs_to_phy_ich8lan(sc);
15920
15921 /* Activate PHY wakeup */
15922 rv = sc->phy.acquire(sc);
15923 if (rv != 0) {
15924 device_printf(dev, "%s: failed to acquire semaphore\n",
15925 __func__);
15926 return rv;
15927 }
15928
15929 /*
15930 * Enable access to PHY wakeup registers.
15931 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
15932 */
15933 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
15934 if (rv != 0) {
15935 device_printf(dev,
15936 "%s: Could not enable PHY wakeup reg access\n", __func__);
15937 goto release;
15938 }
15939
15940 /* Copy MAC MTA to PHY MTA */
15941 for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
15942 uint16_t lo, hi;
15943
15944 mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
15945 lo = (uint16_t)(mreg & 0xffff);
15946 hi = (uint16_t)((mreg >> 16) & 0xffff);
15947 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
15948 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
15949 }
15950
15951 /* Configure PHY Rx Control register */
15952 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
15953 mreg = CSR_READ(sc, WMREG_RCTL);
15954 if (mreg & RCTL_UPE)
15955 preg |= BM_RCTL_UPE;
15956 if (mreg & RCTL_MPE)
15957 preg |= BM_RCTL_MPE;
15958 preg &= ~(BM_RCTL_MO_MASK);
15959 moff = __SHIFTOUT(mreg, RCTL_MO);
15960 if (moff != 0)
15961 preg |= moff << BM_RCTL_MO_SHIFT;
15962 if (mreg & RCTL_BAM)
15963 preg |= BM_RCTL_BAM;
15964 if (mreg & RCTL_PMCF)
15965 preg |= BM_RCTL_PMCF;
15966 mreg = CSR_READ(sc, WMREG_CTRL);
15967 if (mreg & CTRL_RFCE)
15968 preg |= BM_RCTL_RFCE;
15969 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
15970
15971 wuc = WUC_APME | WUC_PME_EN;
15972 wufc = WUFC_MAG;
15973 /* Enable PHY wakeup in MAC register */
15974 CSR_WRITE(sc, WMREG_WUC,
15975 WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
15976 CSR_WRITE(sc, WMREG_WUFC, wufc);
15977
15978 /* Configure and enable PHY wakeup in PHY registers */
15979 wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
15980 wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
15981
15982 wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
15983 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15984
15985 release:
15986 sc->phy.release(sc);
15987
15988 	return rv;
15989 }
15990
15991 /* Power down workaround on D3 */
15992 static void
15993 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
15994 {
15995 uint32_t reg;
15996 uint16_t phyreg;
15997 int i;
15998
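	/*
	 * Try the power-down sequence up to twice, issuing a PHY reset
	 * between attempts.
	 */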
15999 for (i = 0; i < 2; i++) {
16000 /* Disable link */
16001 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16002 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16003 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16004
16005 /*
16006 * Call gig speed drop workaround on Gig disable before
16007 * accessing any PHY registers
16008 */
16009 if (sc->sc_type == WM_T_ICH8)
16010 wm_gig_downshift_workaround_ich8lan(sc);
16011
16012 /* Write VR power-down enable */
16013 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16014 phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16015 phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
16016 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
16017
16018 /* Read it back and test */
16019 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16020 phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16021 if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
16022 break;
16023
16024 /* Issue PHY reset and repeat at most one more time */
16025 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
16026 }
16027 }
16028
16029 /*
16030 * wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
16031 * @sc: pointer to the HW structure
16032 *
16033 * During S0 to Sx transition, it is possible the link remains at gig
16034 * instead of negotiating to a lower speed. Before going to Sx, set
16035 * 'Gig Disable' to force link speed negotiation to a lower speed based on
16036 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
16037 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
16038 * needs to be written.
16039  * Parts that support (and are linked to a partner which supports) EEE in
16040 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
16041 * than 10Mbps w/o EEE.
16042 */
16043 static void
16044 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
16045 {
16046 device_t dev = sc->sc_dev;
16047 struct ethercom *ec = &sc->sc_ethercom;
16048 uint32_t phy_ctrl;
16049 int rv;
16050
16051 phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
16052 phy_ctrl |= PHY_CTRL_GBE_DIS;
16053
16054 KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
16055
16056 if (sc->sc_phytype == WMPHY_I217) {
16057 uint16_t devid = sc->sc_pcidevid;
16058
16059 if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
16060 (devid == PCI_PRODUCT_INTEL_I218_V) ||
16061 (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
16062 (devid == PCI_PRODUCT_INTEL_I218_V3) ||
16063 (sc->sc_type >= WM_T_PCH_SPT))
16064 CSR_WRITE(sc, WMREG_FEXTNVM6,
16065 CSR_READ(sc, WMREG_FEXTNVM6)
16066 & ~FEXTNVM6_REQ_PLL_CLK);
16067
16068 if (sc->phy.acquire(sc) != 0)
16069 goto out;
16070
16071 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16072 uint16_t eee_advert;
16073
16074 rv = wm_read_emi_reg_locked(dev,
16075 I217_EEE_ADVERTISEMENT, &eee_advert);
16076 if (rv)
16077 goto release;
16078
16079 /*
16080 * Disable LPLU if both link partners support 100BaseT
16081 * EEE and 100Full is advertised on both ends of the
16082 * link, and enable Auto Enable LPI since there will
16083 * be no driver to enable LPI while in Sx.
16084 */
16085 if ((eee_advert & AN_EEEADVERT_100_TX) &&
16086 (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
16087 uint16_t anar, phy_reg;
16088
16089 sc->phy.readreg_locked(dev, 2, MII_ANAR,
16090 &anar);
16091 if (anar & ANAR_TX_FD) {
16092 phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
16093 PHY_CTRL_NOND0A_LPLU);
16094
16095 /* Set Auto Enable LPI after link up */
16096 sc->phy.readreg_locked(dev, 2,
16097 I217_LPI_GPIO_CTRL, &phy_reg);
16098 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16099 sc->phy.writereg_locked(dev, 2,
16100 I217_LPI_GPIO_CTRL, phy_reg);
16101 }
16102 }
16103 }
16104
16105 /*
16106 * For i217 Intel Rapid Start Technology support,
16107 * when the system is going into Sx and no manageability engine
16108 * is present, the driver must configure proxy to reset only on
16109 * power good. LPI (Low Power Idle) state must also reset only
16110 * on power good, as well as the MTA (Multicast table array).
16111 * The SMBus release must also be disabled on LCD reset.
16112 */
16113
16114 /*
16115 * Enable MTA to reset for Intel Rapid Start Technology
16116 * Support
16117 */
16118
16119 release:
16120 sc->phy.release(sc);
16121 }
16122 out:
16123 CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
16124
16125 if (sc->sc_type == WM_T_ICH8)
16126 wm_gig_downshift_workaround_ich8lan(sc);
16127
16128 if (sc->sc_type >= WM_T_PCH) {
16129 wm_oem_bits_config_ich8lan(sc, false);
16130
16131 /* Reset PHY to activate OEM bits on 82577/8 */
16132 if (sc->sc_type == WM_T_PCH)
16133 wm_reset_phy(sc);
16134
16135 if (sc->phy.acquire(sc) != 0)
16136 return;
16137 wm_write_smbus_addr(sc);
16138 sc->phy.release(sc);
16139 }
16140 }
16141
16142 /*
16143 * wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
16144 * @sc: pointer to the HW structure
16145 *
16146 * During Sx to S0 transitions on non-managed devices or managed devices
16147 * on which PHY resets are not blocked, if the PHY registers cannot be
16148  * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
16149 * the PHY.
16150 * On i217, setup Intel Rapid Start Technology.
16151 */
16152 static int
16153 wm_resume_workarounds_pchlan(struct wm_softc *sc)
16154 {
16155 device_t dev = sc->sc_dev;
16156 int rv;
16157
16158 if (sc->sc_type < WM_T_PCH2)
16159 return 0;
16160
16161 rv = wm_init_phy_workarounds_pchlan(sc);
16162 if (rv != 0)
16163 return rv;
16164
16165 /* For i217 Intel Rapid Start Technology support when the system
16166  * is transitioning from Sx and no manageability engine is present,
16167 * configure SMBus to restore on reset, disable proxy, and enable
16168 * the reset on MTA (Multicast table array).
16169 */
16170 if (sc->sc_phytype == WMPHY_I217) {
16171 uint16_t phy_reg;
16172
16173 rv = sc->phy.acquire(sc);
16174 if (rv != 0)
16175 return rv;
16176
16177 /* Clear Auto Enable LPI after link up */
16178 sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
16179 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16180 sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
16181
16182 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16183 /* Restore clear on SMB if no manageability engine
16184 * is present
16185 */
16186 rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
16187 &phy_reg);
16188 if (rv != 0)
16189 goto release;
16190 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
16191 sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
16192
16193 /* Disable Proxy */
16194 sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
16195 }
16196 /* Enable reset on MTA */
16197 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
16198 if (rv != 0)
16199 goto release;
16200 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
16201 sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
16202
16203 release:
16204 sc->phy.release(sc);
16205 return rv;
16206 }
16207
16208 return 0;
16209 }
16210
16211 static void
16212 wm_enable_wakeup(struct wm_softc *sc)
16213 {
16214 uint32_t reg, pmreg;
16215 pcireg_t pmode;
16216 int rv = 0;
16217
16218 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16219 device_xname(sc->sc_dev), __func__));
16220
16221 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16222 &pmreg, NULL) == 0)
16223 return;
16224
16225 if ((sc->sc_flags & WM_F_WOL) == 0)
16226 goto pme;
16227
16228 /* Advertise the wakeup capability */
16229 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
16230 | CTRL_SWDPIN(3));
16231
16232 /* Keep the laser running on fiber adapters */
16233 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
16234 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
16235 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16236 reg |= CTRL_EXT_SWDPIN(3);
16237 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16238 }
16239
16240 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
16241 (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
16242 (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
16243 (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
16244 wm_suspend_workarounds_ich8lan(sc);
16245
16246 #if 0 /* For the multicast packet */
16247 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
16248 reg |= WUFC_MC;
16249 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
16250 #endif
16251
16252 if (sc->sc_type >= WM_T_PCH) {
16253 rv = wm_enable_phy_wakeup(sc);
16254 if (rv != 0)
16255 goto pme;
16256 } else {
16257 /* Enable wakeup by the MAC */
16258 CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
16259 CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
16260 }
16261
16262 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
16263 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
16264 || (sc->sc_type == WM_T_PCH2))
16265 && (sc->sc_phytype == WMPHY_IGP_3))
16266 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
16267
16268 pme:
16269 /* Request PME */
16270 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
16271 pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
16272 if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
16273 /* For WOL */
16274 pmode |= PCI_PMCSR_PME_EN;
16275 } else {
16276 /* Disable WOL */
16277 pmode &= ~PCI_PMCSR_PME_EN;
16278 }
16279 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
16280 }
16281
16282 /* Disable ASPM L0s and/or L1 for workaround */
16283 static void
16284 wm_disable_aspm(struct wm_softc *sc)
16285 {
16286 pcireg_t reg, mask = 0;
16287 unsigned const char *str = "";
16288
16289 /*
16290 * Only for PCIe device which has PCIe capability in the PCI config
16291 * space.
16292 */
16293 if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
16294 return;
16295
16296 switch (sc->sc_type) {
16297 case WM_T_82571:
16298 case WM_T_82572:
16299 /*
16300 * 8257[12] Errata 13: Device Does Not Support PCIe Active
16301 * State Power management L1 State (ASPM L1).
16302 */
16303 mask = PCIE_LCSR_ASPM_L1;
16304 str = "L1 is";
16305 break;
16306 case WM_T_82573:
16307 case WM_T_82574:
16308 case WM_T_82583:
16309 /*
16310 * The 82573 disappears when PCIe ASPM L0s is enabled.
16311 *
16312 	 * The 82574 and 82583 do not support PCIe ASPM L0s with
16313 	 * some chipsets. The 82574 and 82583 documents say that
16314 	 * disabling L0s on those specific chipsets is sufficient,
16315 	 * but we disable both, as the Intel em driver does.
16316 *
16317 * References:
16318 * Errata 8 of the Specification Update of i82573.
16319 * Errata 20 of the Specification Update of i82574.
16320 * Errata 9 of the Specification Update of i82583.
16321 */
16322 mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
16323 str = "L0s and L1 are";
16324 break;
16325 default:
16326 return;
16327 }
16328
16329 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16330 sc->sc_pcixe_capoff + PCIE_LCSR);
16331 reg &= ~mask;
16332 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16333 sc->sc_pcixe_capoff + PCIE_LCSR, reg);
16334
16335 /* Print only in wm_attach() */
16336 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
16337 aprint_verbose_dev(sc->sc_dev,
16338 "ASPM %s disabled to workaround the errata.\n", str);
16339 }
16340
16341 /* LPLU (Low Power Link Up) */
16342
16343 static void
16344 wm_lplu_d0_disable(struct wm_softc *sc)
16345 {
16346 struct mii_data *mii = &sc->sc_mii;
16347 uint32_t reg;
16348 uint16_t phyval;
16349
16350 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16351 device_xname(sc->sc_dev), __func__));
16352
16353 if (sc->sc_phytype == WMPHY_IFE)
16354 return;
16355
16356 switch (sc->sc_type) {
16357 case WM_T_82571:
16358 case WM_T_82572:
16359 case WM_T_82573:
16360 case WM_T_82575:
16361 case WM_T_82576:
16362 mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
16363 phyval &= ~PMR_D0_LPLU;
16364 mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
16365 break;
16366 case WM_T_82580:
16367 case WM_T_I350:
16368 case WM_T_I210:
16369 case WM_T_I211:
16370 reg = CSR_READ(sc, WMREG_PHPM);
16371 reg &= ~PHPM_D0A_LPLU;
16372 CSR_WRITE(sc, WMREG_PHPM, reg);
16373 break;
16374 case WM_T_82574:
16375 case WM_T_82583:
16376 case WM_T_ICH8:
16377 case WM_T_ICH9:
16378 case WM_T_ICH10:
16379 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16380 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
16381 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16382 CSR_WRITE_FLUSH(sc);
16383 break;
16384 case WM_T_PCH:
16385 case WM_T_PCH2:
16386 case WM_T_PCH_LPT:
16387 case WM_T_PCH_SPT:
16388 case WM_T_PCH_CNP:
16389 wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
16390 phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
16391 if (wm_phy_resetisblocked(sc) == false)
16392 phyval |= HV_OEM_BITS_ANEGNOW;
16393 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
16394 break;
16395 default:
16396 break;
16397 }
16398 }
16399
16400 /* EEE */
16401
16402 static int
16403 wm_set_eee_i350(struct wm_softc *sc)
16404 {
16405 struct ethercom *ec = &sc->sc_ethercom;
16406 uint32_t ipcnfg, eeer;
16407 uint32_t ipcnfg_mask
16408 = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
16409 uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
16410
16411 KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
16412
16413 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
16414 eeer = CSR_READ(sc, WMREG_EEER);
16415
16416 /* Enable or disable per user setting */
16417 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16418 ipcnfg |= ipcnfg_mask;
16419 eeer |= eeer_mask;
16420 } else {
16421 ipcnfg &= ~ipcnfg_mask;
16422 eeer &= ~eeer_mask;
16423 }
16424
16425 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
16426 CSR_WRITE(sc, WMREG_EEER, eeer);
16427 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
16428 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
16429
16430 return 0;
16431 }
16432
16433 static int
16434 wm_set_eee_pchlan(struct wm_softc *sc)
16435 {
16436 device_t dev = sc->sc_dev;
16437 struct ethercom *ec = &sc->sc_ethercom;
16438 uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
16439 int rv;
16440
16441 switch (sc->sc_phytype) {
16442 case WMPHY_82579:
16443 lpa = I82579_EEE_LP_ABILITY;
16444 pcs_status = I82579_EEE_PCS_STATUS;
16445 adv_addr = I82579_EEE_ADVERTISEMENT;
16446 break;
16447 case WMPHY_I217:
16448 lpa = I217_EEE_LP_ABILITY;
16449 pcs_status = I217_EEE_PCS_STATUS;
16450 adv_addr = I217_EEE_ADVERTISEMENT;
16451 break;
16452 default:
16453 return 0;
16454 }
16455
16456 rv = sc->phy.acquire(sc);
16457 if (rv != 0) {
16458 device_printf(dev, "%s: failed to get semaphore\n", __func__);
16459 return rv;
16460 }
16461
16462 rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
16463 if (rv != 0)
16464 goto release;
16465
16466 /* Clear bits that enable EEE in various speeds */
16467 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
16468
16469 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16470 /* Save off link partner's EEE ability */
16471 rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
16472 if (rv != 0)
16473 goto release;
16474
16475 /* Read EEE advertisement */
16476 if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
16477 goto release;
16478
16479 /*
16480 * Enable EEE only for speeds in which the link partner is
16481 * EEE capable and for which we advertise EEE.
16482 */
16483 if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
16484 lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
16485 if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
16486 sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
16487 if ((data & ANLPAR_TX_FD) != 0)
16488 lpi_ctrl |= I82579_LPI_CTRL_EN_100;
16489 else {
16490 /*
16491 				 * EEE is not supported at 100Half, so ignore
16492 				 * the partner's 100Mbps EEE ability if
16493 				 * full-duplex is not advertised.
16494 */
16495 sc->eee_lp_ability
16496 &= ~AN_EEEADVERT_100_TX;
16497 }
16498 }
16499 }
16500
16501 if (sc->sc_phytype == WMPHY_82579) {
16502 rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
16503 if (rv != 0)
16504 goto release;
16505
16506 data &= ~I82579_LPI_PLL_SHUT_100;
16507 rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
16508 }
16509
16510 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
16511 if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
16512 goto release;
16513
16514 rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
16515 release:
16516 sc->phy.release(sc);
16517
16518 return rv;
16519 }
16520
16521 static int
16522 wm_set_eee(struct wm_softc *sc)
16523 {
16524 struct ethercom *ec = &sc->sc_ethercom;
16525
16526 if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
16527 return 0;
16528
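	/* EEE handling differs by family; dispatch accordingly. */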
16529 if (sc->sc_type == WM_T_I354) {
16530 /* I354 uses an external PHY */
16531 return 0; /* not yet */
16532 } else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
16533 return wm_set_eee_i350(sc);
16534 else if (sc->sc_type >= WM_T_PCH2)
16535 return wm_set_eee_pchlan(sc);
16536
16537 return 0;
16538 }
16539
16540 /*
16541 * Workarounds (mainly PHY related).
16542  * Basically, PHY workarounds are implemented in the PHY drivers.
16543 */
16544
16545 /* Workaround for 82566 Kumeran PCS lock loss */
16546 static int
16547 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
16548 {
16549 struct mii_data *mii = &sc->sc_mii;
16550 uint32_t status = CSR_READ(sc, WMREG_STATUS);
16551 int i, reg, rv;
16552 uint16_t phyreg;
16553
16554 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16555 device_xname(sc->sc_dev), __func__));
16556
16557 /* If the link is not up, do nothing */
16558 if ((status & STATUS_LU) == 0)
16559 return 0;
16560
16561 /* Nothing to do if the link is other than 1Gbps */
16562 if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
16563 return 0;
16564
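	/*
	 * Check the PCS lock-loss diagnostic bit up to 10 times, resetting
	 * the PHY between attempts.
	 */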
16565 for (i = 0; i < 10; i++) {
16566 		/* Read twice: the first read clears the latched status. */
16567 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16568 if (rv != 0)
16569 return rv;
16570 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16571 if (rv != 0)
16572 return rv;
16573
16574 if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
16575 goto out; /* GOOD! */
16576
16577 /* Reset the PHY */
16578 wm_reset_phy(sc);
16579 delay(5*1000);
16580 }
16581
16582 /* Disable GigE link negotiation */
16583 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16584 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16585 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16586
16587 /*
16588 * Call gig speed drop workaround on Gig disable before accessing
16589 * any PHY registers.
16590 */
16591 wm_gig_downshift_workaround_ich8lan(sc);
16592
16593 out:
16594 return 0;
16595 }
16596
16597 /*
16598 * wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
16599 * @sc: pointer to the HW structure
16600 *
16601  * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
16602 * LPLU, Gig disable, MDIC PHY reset):
16603 * 1) Set Kumeran Near-end loopback
16604 * 2) Clear Kumeran Near-end loopback
16605 * Should only be called for ICH8[m] devices with any 1G Phy.
16606 */
16607 static void
16608 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
16609 {
16610 uint16_t kmreg;
16611
16612 /* Only for igp3 */
16613 if (sc->sc_phytype == WMPHY_IGP_3) {
16614 if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
16615 return;
16616 kmreg |= KUMCTRLSTA_DIAG_NELPBK;
16617 if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
16618 return;
16619 kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
16620 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
16621 }
16622 }
16623
16624 /*
16625 * Workaround for pch's PHYs
16626 * XXX should be moved to new PHY driver?
16627 */
16628 static int
16629 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
16630 {
16631 device_t dev = sc->sc_dev;
16632 struct mii_data *mii = &sc->sc_mii;
16633 struct mii_softc *child;
16634 uint16_t phy_data, phyrev = 0;
16635 int phytype = sc->sc_phytype;
16636 int rv;
16637
16638 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16639 device_xname(dev), __func__));
16640 KASSERT(sc->sc_type == WM_T_PCH);
16641
16642 /* Set MDIO slow mode before any other MDIO access */
16643 if (phytype == WMPHY_82577)
16644 if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
16645 return rv;
16646
16647 child = LIST_FIRST(&mii->mii_phys);
16648 if (child != NULL)
16649 phyrev = child->mii_mpd_rev;
16650
16651 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
16652 if ((child != NULL) &&
16653 (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
16654 ((phytype == WMPHY_82578) && (phyrev == 1)))) {
16655 /* Disable generation of early preamble (0x4431) */
16656 rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16657 &phy_data);
16658 if (rv != 0)
16659 return rv;
16660 phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
16661 BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
16662 rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16663 phy_data);
16664 if (rv != 0)
16665 return rv;
16666
16667 /* Preamble tuning for SSC */
16668 rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
16669 if (rv != 0)
16670 return rv;
16671 }
16672
16673 /* 82578 */
16674 if (phytype == WMPHY_82578) {
16675 /*
16676 * Return registers to default by doing a soft reset then
16677 * writing 0x3140 to the control register
16678 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
16679 */
16680 if ((child != NULL) && (phyrev < 2)) {
16681 PHY_RESET(child);
16682 rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
16683 if (rv != 0)
16684 return rv;
16685 }
16686 }
16687
16688 /* Select page 0 */
16689 if ((rv = sc->phy.acquire(sc)) != 0)
16690 return rv;
16691 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
16692 sc->phy.release(sc);
16693 if (rv != 0)
16694 return rv;
16695
16696 /*
16697 	 * Configure the K1 Si workaround during PHY reset, assuming there
16698 	 * is link, so that K1 is disabled if the link is at 1Gbps.
16699 */
16700 if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
16701 return rv;
16702
16703 /* Workaround for link disconnects on a busy hub in half duplex */
16704 rv = sc->phy.acquire(sc);
16705 if (rv)
16706 return rv;
16707 rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
16708 if (rv)
16709 goto release;
16710 rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
16711 phy_data & 0x00ff);
16712 if (rv)
16713 goto release;
16714
16715 /* Set MSE higher to enable link to stay up when noise is high */
16716 rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
16717 release:
16718 sc->phy.release(sc);
16719
16720 return rv;
16721 }
16722
16723 /*
16724 * wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
16725 * @sc: pointer to the HW structure
16726 */
16727 static void
16728 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
16729 {
16730
16731 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16732 device_xname(sc->sc_dev), __func__));
16733
16734 if (sc->phy.acquire(sc) != 0)
16735 return;
16736
16737 wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16738
16739 sc->phy.release(sc);
16740 }
16741
16742 static void
16743 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
16744 {
16745 device_t dev = sc->sc_dev;
16746 uint32_t mac_reg;
16747 uint16_t i, wuce;
16748 int count;
16749
16750 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16751 device_xname(dev), __func__));
16752
16753 if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
16754 return;
16755
16756 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
16757 count = wm_rar_count(sc);
16758 for (i = 0; i < count; i++) {
16759 uint16_t lo, hi;
16760 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
16761 lo = (uint16_t)(mac_reg & 0xffff);
16762 hi = (uint16_t)((mac_reg >> 16) & 0xffff);
16763 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
16764 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
16765
16766 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
16767 lo = (uint16_t)(mac_reg & 0xffff);
16768 hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
16769 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
16770 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
16771 }
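	/*
	 * Each 32-bit RAL/RAH pair is spread across four 16-bit PHY
	 * wakeup registers; judging by the BM_RAR_* names, the
	 * address-valid bit from RAH ends up in BM_RAR_CTRL.
	 */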
16772
16773 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
16774 }
16775
16776 /*
16777 * wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
16778 * with 82579 PHY
16779 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
16780 */
16781 static int
16782 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
16783 {
16784 device_t dev = sc->sc_dev;
16785 int rar_count;
16786 int rv;
16787 uint32_t mac_reg;
16788 uint16_t dft_ctrl, data;
16789 uint16_t i;
16790
16791 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16792 device_xname(dev), __func__));
16793
16794 if (sc->sc_type < WM_T_PCH2)
16795 return 0;
16796
16797 /* Acquire PHY semaphore */
16798 rv = sc->phy.acquire(sc);
16799 if (rv != 0)
16800 return rv;
16801
16802 /* Disable Rx path while enabling/disabling workaround */
16803 rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
16804 if (rv != 0)
16805 goto out;
16806 rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16807 dft_ctrl | (1 << 14));
16808 if (rv != 0)
16809 goto out;
16810
16811 if (enable) {
16812 /* Write Rx addresses (rar_entry_count for RAL/H, and
16813 * SHRAL/H) and initial CRC values to the MAC
16814 */
16815 rar_count = wm_rar_count(sc);
16816 for (i = 0; i < rar_count; i++) {
16817 uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
16818 uint32_t addr_high, addr_low;
16819
16820 addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
16821 if (!(addr_high & RAL_AV))
16822 continue;
16823 addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
16824 mac_addr[0] = (addr_low & 0xFF);
16825 mac_addr[1] = ((addr_low >> 8) & 0xFF);
16826 mac_addr[2] = ((addr_low >> 16) & 0xFF);
16827 mac_addr[3] = ((addr_low >> 24) & 0xFF);
16828 mac_addr[4] = (addr_high & 0xFF);
16829 mac_addr[5] = ((addr_high >> 8) & 0xFF);
16830
16831 CSR_WRITE(sc, WMREG_PCH_RAICC(i),
16832 ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
16833 }
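		/*
		 * Presumably the inverted CRC-32 seeded into PCH_RAICC
		 * above lets the hardware keep CRC handling consistent for
		 * frames matching our receive addresses once CRC stripping
		 * (RCTL_SECRC, enabled below) is in effect; this mirrors
		 * the equivalent FreeBSD/Linux workaround code.
		 */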
16834
16835 /* Write Rx addresses to the PHY */
16836 wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16837 }
16838
16839 /*
16840 * If enable ==
16841 * true: Enable jumbo frame workaround in the MAC.
16842 * false: Write MAC register values back to h/w defaults.
16843 */
16844 mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
16845 if (enable) {
16846 mac_reg &= ~(1 << 14);
16847 mac_reg |= (7 << 15);
16848 } else
16849 mac_reg &= ~(0xf << 14);
16850 CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
16851
16852 mac_reg = CSR_READ(sc, WMREG_RCTL);
16853 if (enable) {
16854 mac_reg |= RCTL_SECRC;
16855 sc->sc_rctl |= RCTL_SECRC;
16856 sc->sc_flags |= WM_F_CRC_STRIP;
16857 } else {
16858 mac_reg &= ~RCTL_SECRC;
16859 sc->sc_rctl &= ~RCTL_SECRC;
16860 sc->sc_flags &= ~WM_F_CRC_STRIP;
16861 }
16862 CSR_WRITE(sc, WMREG_RCTL, mac_reg);
16863
16864 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
16865 if (rv != 0)
16866 goto out;
16867 if (enable)
16868 data |= 1 << 0;
16869 else
16870 data &= ~(1 << 0);
16871 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
16872 if (rv != 0)
16873 goto out;
16874
16875 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
16876 if (rv != 0)
16877 goto out;
16878 /*
16879 	 * XXX FreeBSD and Linux do the same thing: they set the same value
16880 	 * in both the enable case and the disable case. Is that correct?
16881 */
16882 data &= ~(0xf << 8);
16883 data |= (0xb << 8);
16884 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
16885 if (rv != 0)
16886 goto out;
16887
16888 /*
16889 * If enable ==
16890 * true: Enable jumbo frame workaround in the PHY.
16891 * false: Write PHY register values back to h/w defaults.
16892 */
16893 rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
16894 if (rv != 0)
16895 goto out;
16896 data &= ~(0x7F << 5);
16897 if (enable)
16898 data |= (0x37 << 5);
16899 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
16900 if (rv != 0)
16901 goto out;
16902
16903 rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
16904 if (rv != 0)
16905 goto out;
16906 if (enable)
16907 data &= ~(1 << 13);
16908 else
16909 data |= (1 << 13);
16910 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
16911 if (rv != 0)
16912 goto out;
16913
16914 rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
16915 if (rv != 0)
16916 goto out;
16917 data &= ~(0x3FF << 2);
16918 if (enable)
16919 data |= (I82579_TX_PTR_GAP << 2);
16920 else
16921 data |= (0x8 << 2);
16922 rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
16923 if (rv != 0)
16924 goto out;
16925
16926 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
16927 enable ? 0xf100 : 0x7e00);
16928 if (rv != 0)
16929 goto out;
16930
16931 rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
16932 if (rv != 0)
16933 goto out;
16934 if (enable)
16935 data |= 1 << 10;
16936 else
16937 data &= ~(1 << 10);
16938 rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
16939 if (rv != 0)
16940 goto out;
16941
16942 /* Re-enable Rx path after enabling/disabling workaround */
16943 rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16944 dft_ctrl & ~(1 << 14));
16945
16946 out:
16947 sc->phy.release(sc);
16948
16949 return rv;
16950 }
16951
16952 /*
16953 * wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
16954 * done after every PHY reset.
16955 */
16956 static int
16957 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
16958 {
16959 device_t dev = sc->sc_dev;
16960 int rv;
16961
16962 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16963 device_xname(dev), __func__));
16964 KASSERT(sc->sc_type == WM_T_PCH2);
16965
16966 /* Set MDIO slow mode before any other MDIO access */
16967 rv = wm_set_mdio_slow_mode_hv(sc);
16968 if (rv != 0)
16969 return rv;
16970
16971 rv = sc->phy.acquire(sc);
16972 if (rv != 0)
16973 return rv;
16974 /* Set MSE higher to enable link to stay up when noise is high */
16975 rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
16976 if (rv != 0)
16977 goto release;
16978 /* Drop link after 5 times MSE threshold was reached */
16979 rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
16980 release:
16981 sc->phy.release(sc);
16982
16983 return rv;
16984 }
16985
16986 /**
16987 * wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
16988 * @link: link up bool flag
16989 *
16990  * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
16991  * indications, preventing further DMA write requests. Work around the issue
16992  * by disabling the de-assertion of the clock request when in 1Gbps mode.
16993 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
16994 * speeds in order to avoid Tx hangs.
16995 **/
16996 static int
16997 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
16998 {
16999 uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
17000 uint32_t status = CSR_READ(sc, WMREG_STATUS);
17001 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
17002 uint16_t phyreg;
17003
17004 if (link && (speed == STATUS_SPEED_1000)) {
17005 int rv;
17006
17007 rv = sc->phy.acquire(sc);
17008 if (rv != 0)
17009 return rv;
17010 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17011 &phyreg);
17012 if (rv != 0)
17013 goto release;
17014 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17015 phyreg & ~KUMCTRLSTA_K1_ENABLE);
17016 if (rv != 0)
17017 goto release;
17018 delay(20);
17019 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
17020
17021 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17022 &phyreg);
17023 release:
17024 sc->phy.release(sc);
17025 return rv;
17026 }
17027
17028 fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
17029
17030 struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
17031 if (((child != NULL) && (child->mii_mpd_rev > 5))
17032 || !link
17033 || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
17034 goto update_fextnvm6;
17035
17036 wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
17037
17038 /* Clear link status transmit timeout */
17039 phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
17040 if (speed == STATUS_SPEED_100) {
17041 /* Set inband Tx timeout to 5x10us for 100Half */
17042 phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
17043
17044 /* Do not extend the K1 entry latency for 100Half */
17045 fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
17046 } else {
17047 /* Set inband Tx timeout to 50x10us for 10Full/Half */
17048 phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
17049
17050 /* Extend the K1 entry latency for 10 Mbps */
17051 fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
17052 }
17053
17054 wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
17055
17056 update_fextnvm6:
17057 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
17058 return 0;
17059 }
17060
17061 /*
17062 * wm_k1_gig_workaround_hv - K1 Si workaround
17063 * @sc: pointer to the HW structure
17064 * @link: link up bool flag
17065 *
17066 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
17067  * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
17068  * If link is down, the function will restore the default K1 setting located
17069 * in the NVM.
17070 */
17071 static int
17072 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
17073 {
17074 int k1_enable = sc->sc_nvm_k1_enabled;
17075 int rv;
17076
17077 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17078 device_xname(sc->sc_dev), __func__));
17079
17080 rv = sc->phy.acquire(sc);
17081 if (rv != 0)
17082 return rv;
17083
17084 if (link) {
17085 k1_enable = 0;
17086
17087 /* Link stall fix for link up */
17088 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17089 0x0100);
17090 } else {
17091 /* Link stall fix for link down */
17092 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17093 0x4100);
17094 }
17095
17096 wm_configure_k1_ich8lan(sc, k1_enable);
17097 sc->phy.release(sc);
17098
17099 return 0;
17100 }
17101
17102 /*
17103 * wm_k1_workaround_lv - K1 Si workaround
17104 * @sc: pointer to the HW structure
17105 *
17106  * Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
17107  * Disable K1 for 1000 and 100 speeds.
17108 */
17109 static int
17110 wm_k1_workaround_lv(struct wm_softc *sc)
17111 {
17112 uint32_t reg;
17113 uint16_t phyreg;
17114 int rv;
17115
17116 if (sc->sc_type != WM_T_PCH2)
17117 return 0;
17118
17119 /* Set K1 beacon duration based on 10Mbps speed */
17120 rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
17121 if (rv != 0)
17122 return rv;
17123
17124 if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
17125 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
17126 if (phyreg &
17127 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
17128 			/* LV 1G/100 packet drop issue workaround */
17129 rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
17130 &phyreg);
17131 if (rv != 0)
17132 return rv;
17133 phyreg &= ~HV_PM_CTRL_K1_ENA;
17134 rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
17135 phyreg);
17136 if (rv != 0)
17137 return rv;
17138 } else {
17139 /* For 10Mbps */
17140 reg = CSR_READ(sc, WMREG_FEXTNVM4);
17141 reg &= ~FEXTNVM4_BEACON_DURATION;
17142 reg |= FEXTNVM4_BEACON_DURATION_16US;
17143 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
17144 }
17145 }
17146
17147 return 0;
17148 }
17149
17150 /*
17151 * wm_link_stall_workaround_hv - Si workaround
17152 * @sc: pointer to the HW structure
17153 *
17154 * This function works around a Si bug where the link partner can get
17155 * a link up indication before the PHY does. If small packets are sent
17156 * by the link partner they can be placed in the packet buffer without
17157 * being properly accounted for by the PHY and will stall preventing
17158 * further packets from being received. The workaround is to clear the
17159 * packet buffer after the PHY detects link up.
17160 */
17161 static int
17162 wm_link_stall_workaround_hv(struct wm_softc *sc)
17163 {
17164 uint16_t phyreg;
17165
17166 if (sc->sc_phytype != WMPHY_82578)
17167 return 0;
17168
17169 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
17170 wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
17171 if ((phyreg & BMCR_LOOP) != 0)
17172 return 0;
17173
17174 /* Check if link is up and at 1Gbps */
17175 wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
17176 phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17177 | BM_CS_STATUS_SPEED_MASK;
17178 if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17179 | BM_CS_STATUS_SPEED_1000))
17180 return 0;
17181
17182 delay(200 * 1000); /* XXX too big */
17183
17184 /* Flush the packets in the fifo buffer */
17185 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17186 HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
17187 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17188 HV_MUX_DATA_CTRL_GEN_TO_MAC);
17189
17190 return 0;
17191 }
17192
17193 static int
17194 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
17195 {
17196 int rv;
17197
17198 rv = sc->phy.acquire(sc);
17199 if (rv != 0) {
17200 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
17201 __func__);
17202 return rv;
17203 }
17204
17205 rv = wm_set_mdio_slow_mode_hv_locked(sc);
17206
17207 sc->phy.release(sc);
17208
17209 return rv;
17210 }
17211
17212 static int
17213 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
17214 {
17215 int rv;
17216 uint16_t reg;
17217
17218 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
17219 if (rv != 0)
17220 return rv;
17221
17222 return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
17223 reg | HV_KMRN_MDIO_SLOW);
17224 }
17225
17226 /*
17227 * wm_configure_k1_ich8lan - Configure K1 power state
17228 * @sc: pointer to the HW structure
17229 * @enable: K1 state to configure
17230 *
17231 * Configure the K1 power state based on the provided parameter.
17232 * Assumes semaphore already acquired.
17233 */
17234 static void
17235 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
17236 {
17237 uint32_t ctrl, ctrl_ext, tmp;
17238 uint16_t kmreg;
17239 int rv;
17240
17241 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17242
17243 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
17244 if (rv != 0)
17245 return;
17246
17247 if (k1_enable)
17248 kmreg |= KUMCTRLSTA_K1_ENABLE;
17249 else
17250 kmreg &= ~KUMCTRLSTA_K1_ENABLE;
17251
17252 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
17253 if (rv != 0)
17254 return;
17255
17256 delay(20);
17257
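	/*
	 * What follows appears to briefly force the MAC speed setting
	 * (with the speed-bypass bit set) and then restore the original
	 * CTRL/CTRL_EXT values, presumably so the new K1 setting is
	 * latched by the Kumeran interface. This reading is inferred from
	 * the register names, not from documentation.
	 */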
17258 ctrl = CSR_READ(sc, WMREG_CTRL);
17259 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
17260
17261 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
17262 tmp |= CTRL_FRCSPD;
17263
17264 CSR_WRITE(sc, WMREG_CTRL, tmp);
17265 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
17266 CSR_WRITE_FLUSH(sc);
17267 delay(20);
17268
17269 CSR_WRITE(sc, WMREG_CTRL, ctrl);
17270 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
17271 CSR_WRITE_FLUSH(sc);
17272 delay(20);
17273
17274 return;
17275 }
17276
17277 /* special case - for 82575 - need to do manual init ... */
17278 static void
17279 wm_reset_init_script_82575(struct wm_softc *sc)
17280 {
17281 /*
17282 	 * Remark: this is untested code - we have no board without EEPROM.
17283 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
17284 */
17285
17286 /* SerDes configuration via SERDESCTRL */
17287 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
17288 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
17289 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
17290 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
17291
17292 /* CCM configuration via CCMCTL register */
17293 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
17294 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
17295
17296 /* PCIe lanes configuration */
17297 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
17298 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
17299 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
17300 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
17301
17302 /* PCIe PLL Configuration */
17303 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
17304 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
17305 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
17306 }
17307
17308 static void
17309 wm_reset_mdicnfg_82580(struct wm_softc *sc)
17310 {
17311 uint32_t reg;
17312 uint16_t nvmword;
17313 int rv;
17314
17315 if (sc->sc_type != WM_T_82580)
17316 return;
17317 if ((sc->sc_flags & WM_F_SGMII) == 0)
17318 return;
17319
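	/*
	 * Presumably a device reset restores MDICNFG to its defaults, so
	 * re-derive the external/common MDIO settings from the per-port
	 * NVM word; this interpretation follows the NVM_CFG3_PORTA_* bit
	 * names.
	 */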
17320 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
17321 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
17322 if (rv != 0) {
17323 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
17324 __func__);
17325 return;
17326 }
17327
17328 reg = CSR_READ(sc, WMREG_MDICNFG);
17329 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
17330 reg |= MDICNFG_DEST;
17331 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
17332 reg |= MDICNFG_COM_MDIO;
17333 CSR_WRITE(sc, WMREG_MDICNFG, reg);
17334 }
17335
17336 #define MII_INVALIDID(x) (((x) == 0x0000) || ((x) == 0xffff))
17337
17338 static bool
17339 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
17340 {
17341 uint32_t reg;
17342 uint16_t id1, id2;
17343 int i, rv;
17344
17345 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17346 device_xname(sc->sc_dev), __func__));
17347 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17348
17349 id1 = id2 = 0xffff;
17350 for (i = 0; i < 2; i++) {
17351 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17352 &id1);
17353 if ((rv != 0) || MII_INVALIDID(id1))
17354 continue;
17355 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17356 &id2);
17357 if ((rv != 0) || MII_INVALIDID(id2))
17358 continue;
17359 break;
17360 }
17361 if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
17362 goto out;
17363
17364 /*
17365 * In case the PHY needs to be in mdio slow mode,
17366 * set slow mode and try to get the PHY id again.
17367 */
17368 rv = 0;
17369 if (sc->sc_type < WM_T_PCH_LPT) {
17370 wm_set_mdio_slow_mode_hv_locked(sc);
17371 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17372 &id1);
17373 rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17374 &id2);
17375 }
17376 if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
17377 device_printf(sc->sc_dev, "XXX return with false\n");
17378 return false;
17379 }
17380 out:
17381 if (sc->sc_type >= WM_T_PCH_LPT) {
17382 /* Only unforce SMBus if ME is not active */
17383 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
17384 uint16_t phyreg;
17385
17386 /* Unforce SMBus mode in PHY */
17387 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
17388 CV_SMB_CTRL, &phyreg);
17389 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
17390 wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
17391 CV_SMB_CTRL, phyreg);
17392
17393 /* Unforce SMBus mode in MAC */
17394 reg = CSR_READ(sc, WMREG_CTRL_EXT);
17395 reg &= ~CTRL_EXT_FORCE_SMBUS;
17396 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
17397 }
17398 }
17399 return true;
17400 }
17401
17402 static void
17403 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
17404 {
17405 uint32_t reg;
17406 int i;
17407
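	/*
	 * Driving the LANPHYPC value pin low via the override and then
	 * releasing the override power-cycles the PHY; the 50ms config
	 * counter programmed below should give the PHY time to reload its
	 * configuration afterwards. (Summary inferred from the register
	 * names used here.)
	 */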
17408 /* Set PHY Config Counter to 50msec */
17409 reg = CSR_READ(sc, WMREG_FEXTNVM3);
17410 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
17411 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
17412 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
17413
17414 /* Toggle LANPHYPC */
17415 reg = CSR_READ(sc, WMREG_CTRL);
17416 reg |= CTRL_LANPHYPC_OVERRIDE;
17417 reg &= ~CTRL_LANPHYPC_VALUE;
17418 CSR_WRITE(sc, WMREG_CTRL, reg);
17419 CSR_WRITE_FLUSH(sc);
17420 delay(1000);
17421 reg &= ~CTRL_LANPHYPC_OVERRIDE;
17422 CSR_WRITE(sc, WMREG_CTRL, reg);
17423 CSR_WRITE_FLUSH(sc);
17424
17425 if (sc->sc_type < WM_T_PCH_LPT)
17426 delay(50 * 1000);
17427 else {
17428 i = 20;
17429
17430 do {
17431 delay(5 * 1000);
17432 } while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
17433 && i--);
17434
17435 delay(30 * 1000);
17436 }
17437 }
17438
17439 static int
17440 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
17441 {
17442 uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
17443 | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
17444 uint32_t rxa;
17445 uint16_t scale = 0, lat_enc = 0;
17446 int32_t obff_hwm = 0;
17447 int64_t lat_ns, value;
17448
17449 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17450 device_xname(sc->sc_dev), __func__));
17451
17452 if (link) {
17453 uint16_t max_snoop, max_nosnoop, max_ltr_enc;
17454 uint32_t status;
17455 uint16_t speed;
17456 pcireg_t preg;
17457
17458 status = CSR_READ(sc, WMREG_STATUS);
17459 switch (__SHIFTOUT(status, STATUS_SPEED)) {
17460 case STATUS_SPEED_10:
17461 speed = 10;
17462 break;
17463 case STATUS_SPEED_100:
17464 speed = 100;
17465 break;
17466 case STATUS_SPEED_1000:
17467 speed = 1000;
17468 break;
17469 default:
17470 device_printf(sc->sc_dev, "Unknown speed "
17471 "(status = %08x)\n", status);
17472 return -1;
17473 }
17474
17475 /* Rx Packet Buffer Allocation size (KB) */
17476 rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
17477
17478 /*
17479 * Determine the maximum latency tolerated by the device.
17480 *
17481 * Per the PCIe spec, the tolerated latencies are encoded as
17482 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
17483 * a 10-bit value (0-1023) to provide a range from 1 ns to
17484 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
17485 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
17486 */
17487 lat_ns = ((int64_t)rxa * 1024 -
17488 (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
17489 + ETHER_HDR_LEN))) * 8 * 1000;
17490 if (lat_ns < 0)
17491 lat_ns = 0;
17492 else
17493 lat_ns /= speed;
17494 value = lat_ns;
17495
17496 while (value > LTRV_VALUE) {
17497 			scale++;
17498 value = howmany(value, __BIT(5));
17499 }
17500 if (scale > LTRV_SCALE_MAX) {
17501 device_printf(sc->sc_dev,
17502 "Invalid LTR latency scale %d\n", scale);
17503 return -1;
17504 }
17505 lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
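		/*
		 * lat_ns above is the time needed, at the current link
		 * speed, to fill the Rx packet buffer less two maximum
		 * sized frames: bytes * 8 * 1000 / speed(in Mb/s) = ns.
		 *
		 * Illustrative example of the encoding (values invented
		 * for this comment, and assuming LTRV_SCALE covers bits
		 * 12:10 as in the PCIe LTR format): for lat_ns = 100000,
		 * the loop above divides by 2^5 until the value fits the
		 * 10-bit field:
		 *	100000 -> 3125 (scale 1) -> 98 (scale 2)
		 * so lat_enc = (2 << 10) | 98, which decodes back to about
		 * 98 * 2^10 ns =~ 100us.
		 */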
17506
17507 /* Determine the maximum latency tolerated by the platform */
17508 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
17509 WM_PCI_LTR_CAP_LPT);
17510 max_snoop = preg & 0xffff;
17511 max_nosnoop = preg >> 16;
17512
17513 max_ltr_enc = MAX(max_snoop, max_nosnoop);
17514
17515 if (lat_enc > max_ltr_enc) {
17516 lat_enc = max_ltr_enc;
17517 lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
17518 * PCI_LTR_SCALETONS(
17519 __SHIFTOUT(lat_enc,
17520 PCI_LTR_MAXSNOOPLAT_SCALE));
17521 }
17522
17523 if (lat_ns) {
17524 lat_ns *= speed * 1000;
17525 lat_ns /= 8;
17526 lat_ns /= 1000000000;
17527 obff_hwm = (int32_t)(rxa - lat_ns);
17528 }
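		/*
		 * This converts the (possibly clamped) latency back into
		 * the amount of Rx buffer consumed at the current link
		 * speed while the platform wakes, in KB:
		 * ns * speed(Mb/s) * 1000 / 8 / 10^9, approximating 1KB as
		 * 1000 bytes. Whatever remains of the Rx packet buffer
		 * becomes the OBFF high water mark.
		 */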
17529 if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
17530 			device_printf(sc->sc_dev, "Invalid high water mark %d "
17531 			    "(rxa = %d, lat_ns = %d)\n",
17532 obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
17533 return -1;
17534 }
17535 }
17536 	/* Use the same latency for the Snoop and No-Snoop cases */
17537 reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
17538 CSR_WRITE(sc, WMREG_LTRV, reg);
17539
17540 /* Set OBFF high water mark */
17541 reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
17542 reg |= obff_hwm;
17543 CSR_WRITE(sc, WMREG_SVT, reg);
17544
17545 /* Enable OBFF */
17546 reg = CSR_READ(sc, WMREG_SVCR);
17547 reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
17548 CSR_WRITE(sc, WMREG_SVCR, reg);
17549
17550 return 0;
17551 }
17552
17553 /*
17554 * I210 Errata 25 and I211 Errata 10
17555 * Slow System Clock.
17556 *
17557  * Note that on NetBSD this function is called in both the FLASH and iNVM cases.
17558 */
17559 static int
17560 wm_pll_workaround_i210(struct wm_softc *sc)
17561 {
17562 uint32_t mdicnfg, wuc;
17563 uint32_t reg;
17564 pcireg_t pcireg;
17565 uint32_t pmreg;
17566 uint16_t nvmword, tmp_nvmword;
17567 uint16_t phyval;
17568 bool wa_done = false;
17569 int i, rv = 0;
17570
17571 /* Get Power Management cap offset */
17572 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
17573 &pmreg, NULL) == 0)
17574 return -1;
17575
17576 /* Save WUC and MDICNFG registers */
17577 wuc = CSR_READ(sc, WMREG_WUC);
17578 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
17579
17580 reg = mdicnfg & ~MDICNFG_DEST;
17581 CSR_WRITE(sc, WMREG_MDICNFG, reg);
17582
17583 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
17584 /*
17585 * The default value of the Initialization Control Word 1
17586 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
17587 */
17588 nvmword = INVM_DEFAULT_AL;
17589 }
17590 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
17591
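	/*
	 * Each attempt below checks whether the PHY's PLL came up
	 * properly configured. If not, the code resets the internal PHY,
	 * latches the PLL-workaround autoload word via EEARBC_I210, and
	 * bounces the function through D3hot and back to D0 so that the
	 * word is re-read. This summary is inferred from the code; see
	 * the errata cited above.
	 */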
17592 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
17593 wm_gmii_gs40g_readreg(sc->sc_dev, 1,
17594 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
17595
17596 if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
17597 rv = 0;
17598 break; /* OK */
17599 } else
17600 rv = -1;
17601
17602 wa_done = true;
17603 /* Directly reset the internal PHY */
17604 reg = CSR_READ(sc, WMREG_CTRL);
17605 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
17606
17607 reg = CSR_READ(sc, WMREG_CTRL_EXT);
17608 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
17609 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
17610
17611 CSR_WRITE(sc, WMREG_WUC, 0);
17612 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
17613 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
17614
17615 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
17616 pmreg + PCI_PMCSR);
17617 pcireg |= PCI_PMCSR_STATE_D3;
17618 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
17619 pmreg + PCI_PMCSR, pcireg);
17620 delay(1000);
17621 pcireg &= ~PCI_PMCSR_STATE_D3;
17622 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
17623 pmreg + PCI_PMCSR, pcireg);
17624
17625 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
17626 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
17627
17628 /* Restore WUC register */
17629 CSR_WRITE(sc, WMREG_WUC, wuc);
17630 }
17631
17632 /* Restore MDICNFG setting */
17633 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
17634 if (wa_done)
17635 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
17636 return rv;
17637 }
17638
17639 static void
17640 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
17641 {
17642 uint32_t reg;
17643
17644 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17645 device_xname(sc->sc_dev), __func__));
17646 KASSERT((sc->sc_type == WM_T_PCH_SPT)
17647 || (sc->sc_type == WM_T_PCH_CNP));
17648
17649 reg = CSR_READ(sc, WMREG_FEXTNVM7);
17650 reg |= FEXTNVM7_SIDE_CLK_UNGATE;
17651 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
17652
17653 reg = CSR_READ(sc, WMREG_FEXTNVM9);
17654 reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
17655 CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
17656 }
17657
17658 /* Sysctl functions */
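/*
 * The next two handlers export the Tx descriptor head (TDH) and tail
 * (TDT) registers as effectively read-only sysctl nodes; comparing the
 * two shows whether the hardware is making progress consuming Tx
 * descriptors.
 */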
17659 static int
17660 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
17661 {
17662 struct sysctlnode node = *rnode;
17663 struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
17664 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
17665 struct wm_softc *sc = txq->txq_sc;
17666 uint32_t reg;
17667
17668 reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
17669 	node.sysctl_data = &reg;
17670 return sysctl_lookup(SYSCTLFN_CALL(&node));
17671 }
17672
17673 static int
17674 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
17675 {
17676 struct sysctlnode node = *rnode;
17677 struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
17678 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
17679 struct wm_softc *sc = txq->txq_sc;
17680 uint32_t reg;
17681
17682 reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
17683 	node.sysctl_data = &reg;
17684 return sysctl_lookup(SYSCTLFN_CALL(&node));
17685 }
17686
17687 #ifdef WM_DEBUG
17688 static int
17689 wm_sysctl_debug(SYSCTLFN_ARGS)
17690 {
17691 struct sysctlnode node = *rnode;
17692 struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
17693 uint32_t dflags;
17694 int error;
17695
17696 dflags = sc->sc_debug;
17697 node.sysctl_data = &dflags;
17698 error = sysctl_lookup(SYSCTLFN_CALL(&node));
17699
17700 if (error || newp == NULL)
17701 return error;
17702
17703 sc->sc_debug = dflags;
17704 device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
17705 device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
17706
17707 return 0;
17708 }
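/*
 * Usage sketch, assuming the node is attached as hw.wmN.debug_flags
 * (a hypothetical name here; the actual attachment is done elsewhere
 * in this file):
 *	sysctl -w hw.wm0.debug_flags=<mask>
 * Writing a new mask also dumps TARC0 and TDT0 as a side effect.
 */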
17709 #endif
17710