/*	$NetBSD: if_wm.c,v 1.773 2023/05/11 07:07:08 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.773 2023/05/11 07:07:08 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define WM_DEBUG_LINK		__BIT(0)
#define WM_DEBUG_TX		__BIT(1)
#define WM_DEBUG_RX		__BIT(2)
#define WM_DEBUG_GMII		__BIT(3)
#define WM_DEBUG_MANAGE		__BIT(4)
#define WM_DEBUG_NVM		__BIT(5)
#define WM_DEBUG_INIT		__BIT(6)
#define WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | \
	WM_DEBUG_LOCK
#endif

#define DPRINTF(sc, x, y)						\
	do {								\
		if ((sc)->sc_debug & (x))				\
			printf y;					\
	} while (0)
#else
#define DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
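
/*
 * Illustrative use of DPRINTF() (example added for clarity, not from the
 * original source): the second argument selects which debug classes fire,
 * and the extra parentheses wrap the whole printf argument list, e.g.
 *
 *	DPRINTF(sc, WM_DEBUG_TX,
 *	    ("%s: TX: request to transmit\n", device_xname(sc->sc_dev)));
 *
 * With WM_DEBUG undefined the macro expands to __nothing, so such calls
 * cost nothing in non-debug kernels.
 */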

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size. Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet. Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If a mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define WM_NTXSEGS		64
#define WM_IFQUEUELEN		256
#define WM_TXQUEUELEN_MAX	64
#define WM_TXQUEUELEN_MAX_82547	16
#define WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define WM_NTXDESC_82542	256
#define WM_NTXDESC_82544	4096
#define WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
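
/*
 * Worked example (illustrative, not from the original source): because
 * WM_NTXDESC(txq) is required to be a power of two, masking with
 * WM_NTXDESC_MASK() makes the ring index wrap for free.  With 4096
 * descriptors, WM_NEXTTX(txq, 4095) evaluates to (4096 & 4095) == 0, so
 * the ring returns to slot 0 without a division or comparison.
 */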

#define WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET))	/* for TSO */

#define WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size. We have one Rx buffer for normal
 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
 * packet. We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC		256U
#define WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
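
/*
 * Arithmetic behind the comment above (illustrative, assuming a ~9k jumbo
 * frame): splitting 9018 bytes across 2048-byte (MCLBYTES) buffers needs
 * ceil(9018 / 2048) = 5 descriptors, so a 256-entry ring holds
 * floor(256 / 5) = 51 frames, i.e. roughly the 50 jumbo packets quoted.
 */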

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS	1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)			\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
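
/*
 * Expansion example (illustrative, not from the original source):
 * WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the ## operators inside the string literal are not expanded, so the
 * buffer is a fixed 18 bytes), and WM_Q_EVCNT_ATTACH(txq, txdw, q, 0,
 * xname, EVCNT_TYPE_INTR) snprintf()s "txq00txdw" into that buffer before
 * attaching the counter, so each queue's counters get distinct names in
 * the event counter list.
 */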

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define WM_TXQ_NO_SPACE		0x1
#define WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t txq_last_hw_cmd;
	uint8_t txq_last_hw_fields;
	uint16_t txq_last_hw_ipcs;
	uint16_t txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	bool wmq_wq_enqueued;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define WM_MEDIATYPE_UNKNOWN		0x00
#define WM_MEDIATYPE_FIBER		0x01
#define WM_MEDIATYPE_COPPER		0x02
#define WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_sec;		/* Sequence Error */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 Bytes) */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx Count */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	struct evcnt sc_ev_icrxdmtc;	/* Intr. Cause Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */

#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define WM_RXCHAIN_RESET(rxq)						\
	do {								\
		(rxq)->rxq_tailp = &(rxq)->rxq_head;			\
		*(rxq)->rxq_tailp = NULL;				\
		(rxq)->rxq_len = 0;					\
	} while (/*CONSTCOND*/0)

#define WM_RXCHAIN_LINK(rxq, m)						\
	do {								\
		*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);		\
		(rxq)->rxq_tailp = &(m)->m_next;			\
	} while (/*CONSTCOND*/0)
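
/*
 * Note (illustrative, not from the original source): these two macros
 * implement the classic tail-pointer queue idiom.  rxq_tailp always points
 * at the location holding the next link: &rxq_head while the chain is
 * empty, then &last_mbuf->m_next afterwards.  Appending is therefore O(1)
 * with no "empty chain" special case, since storing through *rxq_tailp
 * sets either rxq_head or the previous mbuf's m_next.
 */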

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)					\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)				\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define WM_EVCNT_INCR(ev)	/* nothing */
#define WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)	/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */
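
/*
 * Rationale (illustrative, not from the original source): where
 * __HAVE_ATOMIC64_LOADSTORE is available, the relaxed atomic load/store
 * pair above keeps readers sampling ev_count (e.g. via vmstat -e) from
 * seeing a torn 64-bit value mid-update.  It is not a full atomic
 * read-modify-write, which suffices as long as each counter is only
 * updated under its queue's serialization.
 */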

#define CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
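
/*
 * Note (illustrative, not from the original source): CSR_WRITE_FLUSH()
 * reads the STATUS register purely for its side effect: on PCI/PCIe a
 * read forces any posted writes ahead of it to actually reach the device
 * before the driver proceeds.
 */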

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	    (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	    (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
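
/*
 * Worked example (illustrative, not from the original source): descriptor
 * ring addresses are programmed into the chip as two 32-bit halves.  For
 * a descriptor DMA address of 0x123456780, WM_CDTXADDR_LO() yields
 * 0x23456780 and WM_CDTXADDR_HI() yields 0x1; on a 32-bit bus_addr_t the
 * high half is simply 0.
 */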

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int wm_match(device_t, cfdata_t, void *);
static void wm_attach(device_t, device_t, void *);
static int wm_detach(device_t, int);
static bool wm_suspend(device_t, const pmf_qual_t *);
static bool wm_resume(device_t, const pmf_qual_t *);
static bool wm_watchdog(struct ifnet *);
static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void wm_tick(void *);
static int wm_ifflags_cb(struct ethercom *);
static int wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
static int wm_rar_count(struct wm_softc *);
static void wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void wm_set_vlan(struct wm_softc *);
static void wm_set_pcie_completion_timeout(struct wm_softc *);
static void wm_get_auto_rd_done(struct wm_softc *);
static void wm_lan_init_done(struct wm_softc *);
static void wm_get_cfg_done(struct wm_softc *);
static int wm_phy_post_reset(struct wm_softc *);
static int wm_write_smbus_addr(struct wm_softc *);
static int wm_init_lcd_from_nvm(struct wm_softc *);
static int wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t wm_rxpbs_adjust_82580(uint32_t);
static int wm_reset_phy(struct wm_softc *);
static void wm_flush_desc_rings(struct wm_softc *);
static void wm_reset(struct wm_softc *);
static int wm_add_rxbuf(struct wm_rxqueue *, int);
static void wm_rxdrain(struct wm_rxqueue *);
static void wm_init_rss(struct wm_softc *);
static void wm_adjust_qnum(struct wm_softc *, int);
static inline bool wm_is_using_msix(struct wm_softc *);
static inline bool wm_is_using_multiqueue(struct wm_softc *);
static int wm_softint_establish_queue(struct wm_softc *, int, int);
static int wm_setup_legacy(struct wm_softc *);
static int wm_setup_msix(struct wm_softc *);
static int wm_init(struct ifnet *);
static int wm_init_locked(struct ifnet *);
static void wm_init_sysctls(struct wm_softc *);
static void wm_unset_stopping_flags(struct wm_softc *);
static void wm_set_stopping_flags(struct wm_softc *);
static void wm_stop(struct ifnet *, int);
static void wm_stop_locked(struct ifnet *, bool, bool);
static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void wm_82547_txfifo_stall(void *);
static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int wm_alloc_txrx_queues(struct wm_softc *);
static void wm_free_txrx_queues(struct wm_softc *);
static int wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int wm_select_txqueue(struct ifnet *, struct mbuf *);
static void wm_start(struct ifnet *);
static void wm_start_locked(struct ifnet *);
static int wm_transmit(struct ifnet *, struct mbuf *);
static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void wm_nq_start(struct ifnet *);
static void wm_nq_start_locked(struct ifnet *);
static int wm_nq_transmit(struct ifnet *, struct mbuf *);
static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void wm_deferred_start_locked(struct wm_txqueue *);
static void wm_handle_queue(void *);
static void wm_handle_queue_work(struct work *, void *);
static void wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool wm_txeof(struct wm_txqueue *, u_int);
static bool wm_rxeof(struct wm_rxqueue *, u_int);
static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void wm_linkintr(struct wm_softc *, uint32_t);
static int wm_intr_legacy(void *);
static inline void wm_txrxintr_disable(struct wm_queue *);
static inline void wm_txrxintr_enable(struct wm_queue *);
static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int wm_txrxintr_msix(void *);
static int wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void wm_gmii_reset(struct wm_softc *);
static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int wm_get_phy_id_82575(struct wm_softc *);
static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int wm_gmii_mediachange(struct ifnet *);
static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t wm_i82543_mii_recvbits(struct wm_softc *);
static int wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    bool);
static int wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool wm_sgmii_uses_mdio(struct wm_softc *);
static void wm_sgmii_sfp_preconfig(struct wm_softc *);
static int wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int wm_sgmii_writereg(device_t, int, int, uint16_t);
static int wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void wm_tbi_mediainit(struct wm_softc *);
static int wm_tbi_mediachange(struct ifnet *);
static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int wm_check_for_link(struct wm_softc *);
static void wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void wm_serdes_power_up_link_82575(struct wm_softc *);
static int wm_serdes_mediachange(struct ifnet *);
static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int wm_nvm_ready_spi(struct wm_softc *);
static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t wm_ich8_cycle_init(struct wm_softc *);
static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int wm_nvm_flash_presence_i210(struct wm_softc *);
static int wm_nvm_validate_checksum(struct wm_softc *);
static void wm_nvm_version_invm(struct wm_softc *);
static void wm_nvm_version(struct wm_softc *);
static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int wm_get_null(struct wm_softc *);
static void wm_put_null(struct wm_softc *);
static int wm_get_eecd(struct wm_softc *);
static void wm_put_eecd(struct wm_softc *);
static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void wm_put_swsm_semaphore(struct wm_softc *);
static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int wm_get_nvm_80003(struct wm_softc *);
static void wm_put_nvm_80003(struct wm_softc *);
static int wm_get_nvm_82571(struct wm_softc *);
static void wm_put_nvm_82571(struct wm_softc *);
static int wm_get_phy_82575(struct wm_softc *);
static void wm_put_phy_82575(struct wm_softc *);
static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void wm_put_swfwhw_semaphore(struct wm_softc *);
static int wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void wm_put_swflag_ich8lan(struct wm_softc *);
static int wm_get_nvm_ich8lan(struct wm_softc *);
static void wm_put_nvm_ich8lan(struct wm_softc *);
static int wm_get_hw_semaphore_82573(struct wm_softc *);
static void wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int wm_check_mng_mode(struct wm_softc *);
static int wm_check_mng_mode_ich8lan(struct wm_softc *);
static int wm_check_mng_mode_82574(struct wm_softc *);
static int wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int wm_enable_mng_pass_thru(struct wm_softc *);
static bool wm_phy_resetisblocked(struct wm_softc *);
static void wm_get_hw_control(struct wm_softc *);
static void wm_release_hw_control(struct wm_softc *);
static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void wm_init_manageability(struct wm_softc *);
static void wm_release_manageability(struct wm_softc *);
static void wm_get_wakeup(struct wm_softc *);
static int wm_ulp_disable(struct wm_softc *);
static int wm_enable_phy_wakeup(struct wm_softc *);
static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int wm_resume_workarounds_pchlan(struct wm_softc *);
static void wm_enable_wakeup(struct wm_softc *);
static void wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int wm_set_eee_i350(struct wm_softc *);
static int wm_set_eee_pchlan(struct wm_softc *);
static int wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int wm_k1_workaround_lv(struct wm_softc *);
static int wm_link_stall_workaround_hv(struct wm_softc *);
static int wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void wm_configure_k1_ich8lan(struct wm_softc *, int);
static void wm_reset_init_script_82575(struct wm_softc *);
static void wm_reset_mdicnfg_82580(struct wm_softc *);
static bool wm_phy_is_accessible_pchlan(struct wm_softc *);
static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int wm_pll_workaround_i210(struct wm_softc *);
static void wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool wm_phy_need_linkdown_discard(struct wm_softc *);
static void wm_set_linkdown_discard(struct wm_softc *);
static void wm_clear_linkdown_discard(struct wm_softc *);

static int wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	wm_chip_type wmp_type;
	uint32_t wmp_flags;
#define WMP_F_UNKNOWN	WM_MEDIATYPE_UNKNOWN
#define WMP_F_FIBER	WM_MEDIATYPE_FIBER
#define WMP_F_COPPER	WM_MEDIATYPE_COPPER
#define WMP_F_SERDES	WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572, WMP_F_FIBER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003, WMP_F_COPPER },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH, WMP_F_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2, WMP_F_COPPER },
1477 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
1478 "PCH2 LAN (82579V) Controller",
1479 WM_T_PCH2, WMP_F_COPPER },
1480 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
1481 "82575EB dual-1000baseT Ethernet",
1482 WM_T_82575, WMP_F_COPPER },
1483 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1484 "82575EB dual-1000baseX Ethernet (SERDES)",
1485 WM_T_82575, WMP_F_SERDES },
1486 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1487 "82575GB quad-1000baseT Ethernet",
1488 WM_T_82575, WMP_F_COPPER },
1489 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1490 "82575GB quad-1000baseT Ethernet (PM)",
1491 WM_T_82575, WMP_F_COPPER },
1492 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
1493 "82576 1000BaseT Ethernet",
1494 WM_T_82576, WMP_F_COPPER },
1495 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
1496 "82576 1000BaseX Ethernet",
1497 WM_T_82576, WMP_F_FIBER },
1498
1499 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
1500 "82576 gigabit Ethernet (SERDES)",
1501 WM_T_82576, WMP_F_SERDES },
1502
1503 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1504 "82576 quad-1000BaseT Ethernet",
1505 WM_T_82576, WMP_F_COPPER },
1506
1507 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1508 "82576 Gigabit ET2 Quad Port Server Adapter",
1509 WM_T_82576, WMP_F_COPPER },
1510
1511 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
1512 "82576 gigabit Ethernet",
1513 WM_T_82576, WMP_F_COPPER },
1514
1515 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
1516 "82576 gigabit Ethernet (SERDES)",
1517 WM_T_82576, WMP_F_SERDES },
1518 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1519 "82576 quad-gigabit Ethernet (SERDES)",
1520 WM_T_82576, WMP_F_SERDES },
1521
1522 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
1523 "82580 1000BaseT Ethernet",
1524 WM_T_82580, WMP_F_COPPER },
1525 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
1526 "82580 1000BaseX Ethernet",
1527 WM_T_82580, WMP_F_FIBER },
1528
1529 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
1530 "82580 1000BaseT Ethernet (SERDES)",
1531 WM_T_82580, WMP_F_SERDES },
1532
1533 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
1534 "82580 gigabit Ethernet (SGMII)",
1535 WM_T_82580, WMP_F_COPPER },
1536 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1537 "82580 dual-1000BaseT Ethernet",
1538 WM_T_82580, WMP_F_COPPER },
1539
1540 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1541 "82580 quad-1000BaseX Ethernet",
1542 WM_T_82580, WMP_F_FIBER },
1543
1544 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1545 "DH89XXCC Gigabit Ethernet (SGMII)",
1546 WM_T_82580, WMP_F_COPPER },
1547
1548 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1549 "DH89XXCC Gigabit Ethernet (SERDES)",
1550 WM_T_82580, WMP_F_SERDES },
1551
1552 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1553 "DH89XXCC 1000BASE-KX Ethernet",
1554 WM_T_82580, WMP_F_SERDES },
1555
1556 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1557 "DH89XXCC Gigabit Ethernet (SFP)",
1558 WM_T_82580, WMP_F_SERDES },
1559
1560 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1561 "I350 Gigabit Network Connection",
1562 WM_T_I350, WMP_F_COPPER },
1563
1564 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1565 "I350 Gigabit Fiber Network Connection",
1566 WM_T_I350, WMP_F_FIBER },
1567
1568 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1569 "I350 Gigabit Backplane Connection",
1570 WM_T_I350, WMP_F_SERDES },
1571
1572 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
1573 "I350 Quad Port Gigabit Ethernet",
1574 WM_T_I350, WMP_F_SERDES },
1575
1576 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1577 "I350 Gigabit Connection",
1578 WM_T_I350, WMP_F_COPPER },
1579
1580 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
1581 "I354 Gigabit Ethernet (KX)",
1582 WM_T_I354, WMP_F_SERDES },
1583
1584 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1585 "I354 Gigabit Ethernet (SGMII)",
1586 WM_T_I354, WMP_F_COPPER },
1587
1588 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
1589 "I354 Gigabit Ethernet (2.5G)",
1590 WM_T_I354, WMP_F_COPPER },
1591
1592 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1593 "I210-T1 Ethernet Server Adapter",
1594 WM_T_I210, WMP_F_COPPER },
1595
1596 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1597 "I210 Ethernet (Copper OEM)",
1598 WM_T_I210, WMP_F_COPPER },
1599
1600 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1601 "I210 Ethernet (Copper IT)",
1602 WM_T_I210, WMP_F_COPPER },
1603
1604 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1605 "I210 Ethernet (Copper, FLASH less)",
1606 WM_T_I210, WMP_F_COPPER },
1607
1608 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1609 "I210 Gigabit Ethernet (Fiber)",
1610 WM_T_I210, WMP_F_FIBER },
1611
1612 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1613 "I210 Gigabit Ethernet (SERDES)",
1614 WM_T_I210, WMP_F_SERDES },
1615
1616 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1617 "I210 Gigabit Ethernet (SERDES, FLASH less)",
1618 WM_T_I210, WMP_F_SERDES },
1619
1620 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1621 "I210 Gigabit Ethernet (SGMII)",
1622 WM_T_I210, WMP_F_COPPER },
1623
1624 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1625 "I210 Gigabit Ethernet (SGMII, FLASH less)",
1626 WM_T_I210, WMP_F_COPPER },
1627
1628 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1629 "I211 Ethernet (COPPER)",
1630 WM_T_I211, WMP_F_COPPER },
1631 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1632 "I217 V Ethernet Connection",
1633 WM_T_PCH_LPT, WMP_F_COPPER },
1634 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1635 "I217 LM Ethernet Connection",
1636 WM_T_PCH_LPT, WMP_F_COPPER },
1637 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1638 "I218 V Ethernet Connection",
1639 WM_T_PCH_LPT, WMP_F_COPPER },
1640 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
1641 "I218 V Ethernet Connection",
1642 WM_T_PCH_LPT, WMP_F_COPPER },
1643 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
1644 "I218 V Ethernet Connection",
1645 WM_T_PCH_LPT, WMP_F_COPPER },
1646 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1647 "I218 LM Ethernet Connection",
1648 WM_T_PCH_LPT, WMP_F_COPPER },
1649 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
1650 "I218 LM Ethernet Connection",
1651 WM_T_PCH_LPT, WMP_F_COPPER },
1652 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
1653 "I218 LM Ethernet Connection",
1654 WM_T_PCH_LPT, WMP_F_COPPER },
1655 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM,
1656 "I219 LM Ethernet Connection",
1657 WM_T_PCH_SPT, WMP_F_COPPER },
1658 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2,
1659 "I219 LM (2) Ethernet Connection",
1660 WM_T_PCH_SPT, WMP_F_COPPER },
1661 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3,
1662 "I219 LM (3) Ethernet Connection",
1663 WM_T_PCH_SPT, WMP_F_COPPER },
1664 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4,
1665 "I219 LM (4) Ethernet Connection",
1666 WM_T_PCH_SPT, WMP_F_COPPER },
1667 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5,
1668 "I219 LM (5) Ethernet Connection",
1669 WM_T_PCH_SPT, WMP_F_COPPER },
1670 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM6,
1671 "I219 LM (6) Ethernet Connection",
1672 WM_T_PCH_CNP, WMP_F_COPPER },
1673 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM7,
1674 "I219 LM (7) Ethernet Connection",
1675 WM_T_PCH_CNP, WMP_F_COPPER },
1676 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM8,
1677 "I219 LM (8) Ethernet Connection",
1678 WM_T_PCH_CNP, WMP_F_COPPER },
1679 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM9,
1680 "I219 LM (9) Ethernet Connection",
1681 WM_T_PCH_CNP, WMP_F_COPPER },
1682 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM10,
1683 "I219 LM (10) Ethernet Connection",
1684 WM_T_PCH_CNP, WMP_F_COPPER },
1685 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM11,
1686 "I219 LM (11) Ethernet Connection",
1687 WM_T_PCH_CNP, WMP_F_COPPER },
1688 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM12,
1689 "I219 LM (12) Ethernet Connection",
1690 WM_T_PCH_SPT, WMP_F_COPPER },
1691 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM13,
1692 "I219 LM (13) Ethernet Connection",
1693 WM_T_PCH_CNP, WMP_F_COPPER },
1694 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM14,
1695 "I219 LM (14) Ethernet Connection",
1696 WM_T_PCH_CNP, WMP_F_COPPER },
1697 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM15,
1698 "I219 LM (15) Ethernet Connection",
1699 WM_T_PCH_CNP, WMP_F_COPPER },
1700 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM16,
1701 "I219 LM (16) Ethernet Connection",
1702 WM_T_PCH_CNP, WMP_F_COPPER },
1703 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM17,
1704 "I219 LM (17) Ethernet Connection",
1705 WM_T_PCH_CNP, WMP_F_COPPER },
1706 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM18,
1707 "I219 LM (18) Ethernet Connection",
1708 WM_T_PCH_CNP, WMP_F_COPPER },
1709 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM19,
1710 "I219 LM (19) Ethernet Connection",
1711 WM_T_PCH_CNP, WMP_F_COPPER },
1712 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V,
1713 "I219 V Ethernet Connection",
1714 WM_T_PCH_SPT, WMP_F_COPPER },
1715 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2,
1716 "I219 V (2) Ethernet Connection",
1717 WM_T_PCH_SPT, WMP_F_COPPER },
1718 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4,
1719 "I219 V (4) Ethernet Connection",
1720 WM_T_PCH_SPT, WMP_F_COPPER },
1721 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5,
1722 "I219 V (5) Ethernet Connection",
1723 WM_T_PCH_SPT, WMP_F_COPPER },
1724 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6,
1725 "I219 V (6) Ethernet Connection",
1726 WM_T_PCH_CNP, WMP_F_COPPER },
1727 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7,
1728 "I219 V (7) Ethernet Connection",
1729 WM_T_PCH_CNP, WMP_F_COPPER },
1730 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V8,
1731 "I219 V (8) Ethernet Connection",
1732 WM_T_PCH_CNP, WMP_F_COPPER },
1733 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V9,
1734 "I219 V (9) Ethernet Connection",
1735 WM_T_PCH_CNP, WMP_F_COPPER },
1736 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V10,
1737 "I219 V (10) Ethernet Connection",
1738 WM_T_PCH_CNP, WMP_F_COPPER },
1739 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V11,
1740 "I219 V (11) Ethernet Connection",
1741 WM_T_PCH_CNP, WMP_F_COPPER },
1742 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V12,
1743 "I219 V (12) Ethernet Connection",
1744 WM_T_PCH_SPT, WMP_F_COPPER },
1745 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V13,
1746 "I219 V (13) Ethernet Connection",
1747 WM_T_PCH_CNP, WMP_F_COPPER },
1748 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V14,
1749 "I219 V (14) Ethernet Connection",
1750 WM_T_PCH_CNP, WMP_F_COPPER },
1751 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V15,
1752 "I219 V (15) Ethernet Connection",
1753 WM_T_PCH_CNP, WMP_F_COPPER },
1754 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V16,
1755 "I219 V (16) Ethernet Connection",
1756 WM_T_PCH_CNP, WMP_F_COPPER },
1757 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V17,
1758 "I219 V (17) Ethernet Connection",
1759 WM_T_PCH_CNP, WMP_F_COPPER },
1760 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V18,
1761 "I219 V (18) Ethernet Connection",
1762 WM_T_PCH_CNP, WMP_F_COPPER },
1763 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V19,
1764 "I219 V (19) Ethernet Connection",
1765 WM_T_PCH_CNP, WMP_F_COPPER },
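/* Sentinel entry: wm_lookup() stops at the NULL name. */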
1766 { 0, 0,
1767 NULL,
1768 0, 0 },
1769 };
1770
1771 /*
1772  * Register read/write functions,
1773  * other than CSR_{READ|WRITE}().
1774 */
1775
1776 #if 0 /* Not currently used */
1777 static inline uint32_t
1778 wm_io_read(struct wm_softc *sc, int reg)
1779 {
1780
1781 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1782 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1783 }
1784 #endif
1785
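/*
 * I/O-mapped indirect register access: offset 0 in the I/O BAR
 * selects the register and offset 4 holds its data, so every
 * access is an address write followed by a data access.
 */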
1786 static inline void
1787 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1788 {
1789
1790 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1791 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1792 }
1793
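/*
 * Write an 8-bit value to a register behind an 82575 indirection
 * register (SCTL-style): the data and target offset are packed
 * into a single CSR write, then we poll for the READY bit.
 */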
1794 static inline void
1795 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1796 uint32_t data)
1797 {
1798 uint32_t regval;
1799 int i;
1800
1801 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1802
1803 CSR_WRITE(sc, reg, regval);
1804
1805 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1806 delay(5);
1807 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1808 break;
1809 }
1810 if (i == SCTL_CTL_POLL_TIMEOUT) {
1811 aprint_error("%s: WARNING:"
1812 " i82575 reg 0x%08x setup did not indicate ready\n",
1813 device_xname(sc->sc_dev), reg);
1814 }
1815 }
1816
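/* Split a 64-bit DMA address into a descriptor's little-endian low/high words. */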
1817 static inline void
1818 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1819 {
1820 wa->wa_low = htole32(BUS_ADDR_LO32(v));
1821 wa->wa_high = htole32(BUS_ADDR_HI32(v));
1822 }
1823
1824 /*
1825 * Descriptor sync/init functions.
1826 */
1827 static inline void
1828 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1829 {
1830 struct wm_softc *sc = txq->txq_sc;
1831
1832 /* If it will wrap around, sync to the end of the ring. */
1833 if ((start + num) > WM_NTXDESC(txq)) {
1834 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1835 WM_CDTXOFF(txq, start), txq->txq_descsize *
1836 (WM_NTXDESC(txq) - start), ops);
1837 num -= (WM_NTXDESC(txq) - start);
1838 start = 0;
1839 }
1840
1841 /* Now sync whatever is left. */
1842 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1843 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1844 }
1845
1846 static inline void
1847 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1848 {
1849 struct wm_softc *sc = rxq->rxq_sc;
1850
1851 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1852 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1853 }
1854
1855 static inline void
1856 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1857 {
1858 struct wm_softc *sc = rxq->rxq_sc;
1859 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1860 struct mbuf *m = rxs->rxs_mbuf;
1861
1862 /*
1863 * Note: We scoot the packet forward 2 bytes in the buffer
1864 * so that the payload after the Ethernet header is aligned
1865 * to a 4-byte boundary.
1866  *
1867 * XXX BRAINDAMAGE ALERT!
1868 * The stupid chip uses the same size for every buffer, which
1869 * is set in the Receive Control register. We are using the 2K
1870 * size option, but what we REALLY want is (2K - 2)! For this
1871 * reason, we can't "scoot" packets longer than the standard
1872 * Ethernet MTU. On strict-alignment platforms, if the total
1873 * size exceeds (2K - 2) we set align_tweak to 0 and let
1874 * the upper layer copy the headers.
1875 */
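	/*
	 * Example: with align_tweak = 2, the 14-byte Ethernet header
	 * starts at buffer offset 2, so the payload begins at offset 16,
	 * a 4-byte boundary.
	 */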
1876 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1877
1878 if (sc->sc_type == WM_T_82574) {
1879 ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1880 rxd->erx_data.erxd_addr =
1881 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1882 rxd->erx_data.erxd_dd = 0;
1883 } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1884 nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1885
1886 rxd->nqrx_data.nrxd_paddr =
1887 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1888 /* Currently, split header is not supported. */
1889 rxd->nqrx_data.nrxd_haddr = 0;
1890 } else {
1891 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1892
1893 wm_set_dma_addr(&rxd->wrx_addr,
1894 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1895 rxd->wrx_len = 0;
1896 rxd->wrx_cksum = 0;
1897 rxd->wrx_status = 0;
1898 rxd->wrx_errors = 0;
1899 rxd->wrx_special = 0;
1900 }
1901 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1902
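	/* Tell the hardware this descriptor is available by updating the Rx tail. */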
1903 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1904 }
1905
1906 /*
1907 * Device driver interface functions and commonly used functions.
1908 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1909 */
1910
1911 /* Lookup supported device table */
1912 static const struct wm_product *
1913 wm_lookup(const struct pci_attach_args *pa)
1914 {
1915 const struct wm_product *wmp;
1916
1917 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1918 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1919 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1920 return wmp;
1921 }
1922 return NULL;
1923 }
1924
1925 /* The match function (ca_match) */
1926 static int
1927 wm_match(device_t parent, cfdata_t cf, void *aux)
1928 {
1929 struct pci_attach_args *pa = aux;
1930
1931 if (wm_lookup(pa) != NULL)
1932 return 1;
1933
1934 return 0;
1935 }
1936
1937 /* The attach function (ca_attach) */
1938 static void
1939 wm_attach(device_t parent, device_t self, void *aux)
1940 {
1941 struct wm_softc *sc = device_private(self);
1942 struct pci_attach_args *pa = aux;
1943 prop_dictionary_t dict;
1944 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1945 pci_chipset_tag_t pc = pa->pa_pc;
1946 int counts[PCI_INTR_TYPE_SIZE];
1947 pci_intr_type_t max_type;
1948 const char *eetype, *xname;
1949 bus_space_tag_t memt;
1950 bus_space_handle_t memh;
1951 bus_size_t memsize;
1952 int memh_valid;
1953 int i, error;
1954 const struct wm_product *wmp;
1955 prop_data_t ea;
1956 prop_number_t pn;
1957 uint8_t enaddr[ETHER_ADDR_LEN];
1958 char buf[256];
1959 char wqname[MAXCOMLEN];
1960 uint16_t cfg1, cfg2, swdpin, nvmword;
1961 pcireg_t preg, memtype;
1962 uint16_t eeprom_data, apme_mask;
1963 bool force_clear_smbi;
1964 uint32_t link_mode;
1965 uint32_t reg;
1966
1967 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
1968 sc->sc_debug = WM_DEBUG_DEFAULT;
1969 #endif
1970 sc->sc_dev = self;
1971 callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
1972 callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
1973 sc->sc_core_stopping = false;
1974
1975 wmp = wm_lookup(pa);
1976 #ifdef DIAGNOSTIC
1977 if (wmp == NULL) {
1978 printf("\n");
1979 panic("wm_attach: impossible");
1980 }
1981 #endif
1982 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1983
1984 sc->sc_pc = pa->pa_pc;
1985 sc->sc_pcitag = pa->pa_tag;
1986
1987 if (pci_dma64_available(pa)) {
1988 aprint_verbose(", 64-bit DMA");
1989 sc->sc_dmat = pa->pa_dmat64;
1990 } else {
1991 aprint_verbose(", 32-bit DMA");
1992 sc->sc_dmat = pa->pa_dmat;
1993 }
1994
1995 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1996 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1997 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1998
1999 sc->sc_type = wmp->wmp_type;
2000
2001 /* Set default function pointers */
2002 sc->phy.acquire = sc->nvm.acquire = wm_get_null;
2003 sc->phy.release = sc->nvm.release = wm_put_null;
2004 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
2005
2006 if (sc->sc_type < WM_T_82543) {
2007 if (sc->sc_rev < 2) {
2008 aprint_error_dev(sc->sc_dev,
2009 "i82542 must be at least rev. 2\n");
2010 return;
2011 }
2012 if (sc->sc_rev < 3)
2013 sc->sc_type = WM_T_82542_2_0;
2014 }
2015
2016 /*
2017 * Disable MSI for Errata:
2018 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
2019 *
2020 * 82544: Errata 25
2021 * 82540: Errata 6 (easy to reproduce device timeout)
2022 * 82545: Errata 4 (easy to reproduce device timeout)
2023 * 82546: Errata 26 (easy to reproduce device timeout)
2024 * 82541: Errata 7 (easy to reproduce device timeout)
2025 *
2026 * "Byte Enables 2 and 3 are not set on MSI writes"
2027 *
2028 * 82571 & 82572: Errata 63
2029 */
2030 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
2031 || (sc->sc_type == WM_T_82572))
2032 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
2033
2034 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2035 || (sc->sc_type == WM_T_82580)
2036 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2037 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2038 sc->sc_flags |= WM_F_NEWQUEUE;
2039
2040 /* Set device properties (mactype) */
2041 dict = device_properties(sc->sc_dev);
2042 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
2043
2044 /*
2045  * Map the device. All devices support memory-mapped access,
2046 * and it is really required for normal operation.
2047 */
2048 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
2049 switch (memtype) {
2050 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2051 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2052 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
2053 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
2054 break;
2055 default:
2056 memh_valid = 0;
2057 break;
2058 }
2059
2060 if (memh_valid) {
2061 sc->sc_st = memt;
2062 sc->sc_sh = memh;
2063 sc->sc_ss = memsize;
2064 } else {
2065 aprint_error_dev(sc->sc_dev,
2066 "unable to map device registers\n");
2067 return;
2068 }
2069
2070 /*
2071 * In addition, i82544 and later support I/O mapped indirect
2072 * register access. It is not desirable (nor supported in
2073 * this driver) to use it for normal operation, though it is
2074 * required to work around bugs in some chip versions.
2075 */
2076 switch (sc->sc_type) {
2077 case WM_T_82544:
2078 case WM_T_82541:
2079 case WM_T_82541_2:
2080 case WM_T_82547:
2081 case WM_T_82547_2:
2082 /* First we have to find the I/O BAR. */
2083 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
2084 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
2085 if (memtype == PCI_MAPREG_TYPE_IO)
2086 break;
2087 if (PCI_MAPREG_MEM_TYPE(memtype) ==
2088 PCI_MAPREG_MEM_TYPE_64BIT)
2089 i += 4; /* skip high bits, too */
2090 }
2091 if (i < PCI_MAPREG_END) {
2092 /*
2093  * We found PCI_MAPREG_TYPE_IO. Note that the 82580
2094  * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
2095  * That's no problem, because the newer chips don't
2096  * have this bug.
2097  *
2098  * The i8254x apparently doesn't respond when the
2099  * I/O BAR is 0, which looks somewhat like it hasn't
2100  * been configured.
2101 */
2102 preg = pci_conf_read(pc, pa->pa_tag, i);
2103 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
2104 aprint_error_dev(sc->sc_dev,
2105 "WARNING: I/O BAR at zero.\n");
2106 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
2107 0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
2108 == 0) {
2109 sc->sc_flags |= WM_F_IOH_VALID;
2110 } else
2111 aprint_error_dev(sc->sc_dev,
2112 "WARNING: unable to map I/O space\n");
2113 }
2114 break;
2115 default:
2116 break;
2117 }
2118
2119 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
2120 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2121 preg |= PCI_COMMAND_MASTER_ENABLE;
2122 if (sc->sc_type < WM_T_82542_2_1)
2123 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
2124 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
2125
2126 /* Power up chip */
2127 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
2128 && error != EOPNOTSUPP) {
2129 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
2130 return;
2131 }
2132
2133 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2134 /*
2135  * If we can use only one queue, don't use MSI-X, to save
2136  * interrupt resources.
2137 */
2138 if (sc->sc_nqueues > 1) {
2139 max_type = PCI_INTR_TYPE_MSIX;
2140 /*
2141  * The 82583 has an MSI-X capability in its PCI configuration
2142  * space but doesn't actually support it. At least, the
2143  * documentation says nothing about MSI-X.
2144 */
2145 counts[PCI_INTR_TYPE_MSIX]
2146 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2147 } else {
2148 max_type = PCI_INTR_TYPE_MSI;
2149 counts[PCI_INTR_TYPE_MSIX] = 0;
2150 }
2151
2152 /* Allocation settings */
2153 counts[PCI_INTR_TYPE_MSI] = 1;
2154 counts[PCI_INTR_TYPE_INTX] = 1;
2155 /* overridden by disable flags */
2156 if (wm_disable_msi != 0) {
2157 counts[PCI_INTR_TYPE_MSI] = 0;
2158 if (wm_disable_msix != 0) {
2159 max_type = PCI_INTR_TYPE_INTX;
2160 counts[PCI_INTR_TYPE_MSIX] = 0;
2161 }
2162 } else if (wm_disable_msix != 0) {
2163 max_type = PCI_INTR_TYPE_MSI;
2164 counts[PCI_INTR_TYPE_MSIX] = 0;
2165 }
2166
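	/*
	 * Interrupt allocation falls back in stages: if MSI-X setup fails,
	 * release the vectors and retry with MSI; if MSI setup fails,
	 * retry with INTx.
	 */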
2167 alloc_retry:
2168 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2169 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2170 return;
2171 }
2172
2173 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2174 error = wm_setup_msix(sc);
2175 if (error) {
2176 pci_intr_release(pc, sc->sc_intrs,
2177 counts[PCI_INTR_TYPE_MSIX]);
2178
2179 /* Setup for MSI: Disable MSI-X */
2180 max_type = PCI_INTR_TYPE_MSI;
2181 counts[PCI_INTR_TYPE_MSI] = 1;
2182 counts[PCI_INTR_TYPE_INTX] = 1;
2183 goto alloc_retry;
2184 }
2185 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2186 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2187 error = wm_setup_legacy(sc);
2188 if (error) {
2189 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2190 counts[PCI_INTR_TYPE_MSI]);
2191
2192 /* The next try is for INTx: Disable MSI */
2193 max_type = PCI_INTR_TYPE_INTX;
2194 counts[PCI_INTR_TYPE_INTX] = 1;
2195 goto alloc_retry;
2196 }
2197 } else {
2198 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2199 error = wm_setup_legacy(sc);
2200 if (error) {
2201 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2202 counts[PCI_INTR_TYPE_INTX]);
2203 return;
2204 }
2205 }
2206
2207 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2208 error = workqueue_create(&sc->sc_queue_wq, wqname,
2209 wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2210 WQ_PERCPU | WQ_MPSAFE);
2211 if (error) {
2212 aprint_error_dev(sc->sc_dev,
2213 "unable to create TxRx workqueue\n");
2214 goto out;
2215 }
2216
2217 snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
2218 error = workqueue_create(&sc->sc_reset_wq, wqname,
2219 wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
2220 WQ_MPSAFE);
2221 if (error) {
2222 workqueue_destroy(sc->sc_queue_wq);
2223 aprint_error_dev(sc->sc_dev,
2224 "unable to create reset workqueue\n");
2225 goto out;
2226 }
2227
2228 /*
2229 * Check the function ID (unit number of the chip).
2230 */
2231 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2232 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2233 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2234 || (sc->sc_type == WM_T_82580)
2235 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2236 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2237 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2238 else
2239 sc->sc_funcid = 0;
2240
2241 /*
2242 * Determine a few things about the bus we're connected to.
2243 */
2244 if (sc->sc_type < WM_T_82543) {
2245 /* We don't really know the bus characteristics here. */
2246 sc->sc_bus_speed = 33;
2247 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2248 /*
2249  * CSA (Communication Streaming Architecture) is about as fast
2250  * as a 32-bit 66MHz PCI bus.
2251 */
2252 sc->sc_flags |= WM_F_CSA;
2253 sc->sc_bus_speed = 66;
2254 aprint_verbose_dev(sc->sc_dev,
2255 "Communication Streaming Architecture\n");
2256 if (sc->sc_type == WM_T_82547) {
2257 callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
2258 callout_setfunc(&sc->sc_txfifo_ch,
2259 wm_82547_txfifo_stall, sc);
2260 aprint_verbose_dev(sc->sc_dev,
2261 "using 82547 Tx FIFO stall work-around\n");
2262 }
2263 } else if (sc->sc_type >= WM_T_82571) {
2264 sc->sc_flags |= WM_F_PCIE;
2265 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2266 && (sc->sc_type != WM_T_ICH10)
2267 && (sc->sc_type != WM_T_PCH)
2268 && (sc->sc_type != WM_T_PCH2)
2269 && (sc->sc_type != WM_T_PCH_LPT)
2270 && (sc->sc_type != WM_T_PCH_SPT)
2271 && (sc->sc_type != WM_T_PCH_CNP)) {
2272 /* ICH* and PCH* have no PCIe capability registers */
2273 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2274 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2275 NULL) == 0)
2276 aprint_error_dev(sc->sc_dev,
2277 "unable to find PCIe capability\n");
2278 }
2279 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2280 } else {
2281 reg = CSR_READ(sc, WMREG_STATUS);
2282 if (reg & STATUS_BUS64)
2283 sc->sc_flags |= WM_F_BUS64;
2284 if ((reg & STATUS_PCIX_MODE) != 0) {
2285 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2286
2287 sc->sc_flags |= WM_F_PCIX;
2288 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2289 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2290 aprint_error_dev(sc->sc_dev,
2291 "unable to find PCIX capability\n");
2292 else if (sc->sc_type != WM_T_82545_3 &&
2293 sc->sc_type != WM_T_82546_3) {
2294 /*
2295 * Work around a problem caused by the BIOS
2296 * setting the max memory read byte count
2297 * incorrectly.
2298 */
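				/*
				 * Both fields encode the byte count as
				 * 512 << n, so clamping the encoded value
				 * clamps the actual MMRBC.
				 */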
2299 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2300 sc->sc_pcixe_capoff + PCIX_CMD);
2301 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2302 sc->sc_pcixe_capoff + PCIX_STATUS);
2303
2304 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2305 PCIX_CMD_BYTECNT_SHIFT;
2306 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2307 PCIX_STATUS_MAXB_SHIFT;
2308 if (bytecnt > maxb) {
2309 aprint_verbose_dev(sc->sc_dev,
2310 "resetting PCI-X MMRBC: %d -> %d\n",
2311 512 << bytecnt, 512 << maxb);
2312 pcix_cmd = (pcix_cmd &
2313 ~PCIX_CMD_BYTECNT_MASK) |
2314 (maxb << PCIX_CMD_BYTECNT_SHIFT);
2315 pci_conf_write(pa->pa_pc, pa->pa_tag,
2316 sc->sc_pcixe_capoff + PCIX_CMD,
2317 pcix_cmd);
2318 }
2319 }
2320 }
2321 /*
2322 * The quad port adapter is special; it has a PCIX-PCIX
2323 * bridge on the board, and can run the secondary bus at
2324 * a higher speed.
2325 */
2326 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2327 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2328 : 66;
2329 } else if (sc->sc_flags & WM_F_PCIX) {
2330 switch (reg & STATUS_PCIXSPD_MASK) {
2331 case STATUS_PCIXSPD_50_66:
2332 sc->sc_bus_speed = 66;
2333 break;
2334 case STATUS_PCIXSPD_66_100:
2335 sc->sc_bus_speed = 100;
2336 break;
2337 case STATUS_PCIXSPD_100_133:
2338 sc->sc_bus_speed = 133;
2339 break;
2340 default:
2341 aprint_error_dev(sc->sc_dev,
2342 "unknown PCIXSPD %d; assuming 66MHz\n",
2343 reg & STATUS_PCIXSPD_MASK);
2344 sc->sc_bus_speed = 66;
2345 break;
2346 }
2347 } else
2348 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2349 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2350 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2351 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2352 }
2353
2354 /* clear interesting stat counters */
2355 CSR_READ(sc, WMREG_COLC);
2356 CSR_READ(sc, WMREG_RXERRC);
2357
2358 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2359 || (sc->sc_type >= WM_T_ICH8))
2360 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2361 if (sc->sc_type >= WM_T_ICH8)
2362 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2363
2364 /* Set PHY, NVM mutex related stuff */
2365 switch (sc->sc_type) {
2366 case WM_T_82542_2_0:
2367 case WM_T_82542_2_1:
2368 case WM_T_82543:
2369 case WM_T_82544:
2370 /* Microwire */
2371 sc->nvm.read = wm_nvm_read_uwire;
2372 sc->sc_nvm_wordsize = 64;
2373 sc->sc_nvm_addrbits = 6;
2374 break;
2375 case WM_T_82540:
2376 case WM_T_82545:
2377 case WM_T_82545_3:
2378 case WM_T_82546:
2379 case WM_T_82546_3:
2380 /* Microwire */
2381 sc->nvm.read = wm_nvm_read_uwire;
2382 reg = CSR_READ(sc, WMREG_EECD);
2383 if (reg & EECD_EE_SIZE) {
2384 sc->sc_nvm_wordsize = 256;
2385 sc->sc_nvm_addrbits = 8;
2386 } else {
2387 sc->sc_nvm_wordsize = 64;
2388 sc->sc_nvm_addrbits = 6;
2389 }
2390 sc->sc_flags |= WM_F_LOCK_EECD;
2391 sc->nvm.acquire = wm_get_eecd;
2392 sc->nvm.release = wm_put_eecd;
2393 break;
2394 case WM_T_82541:
2395 case WM_T_82541_2:
2396 case WM_T_82547:
2397 case WM_T_82547_2:
2398 reg = CSR_READ(sc, WMREG_EECD);
2399 /*
2400  * wm_nvm_set_addrbits_size_eecd() accesses the SPI EEPROM only
2401  * on the 8254[17], so set the flags and functions before calling it.
2402 */
2403 sc->sc_flags |= WM_F_LOCK_EECD;
2404 sc->nvm.acquire = wm_get_eecd;
2405 sc->nvm.release = wm_put_eecd;
2406 if (reg & EECD_EE_TYPE) {
2407 /* SPI */
2408 sc->nvm.read = wm_nvm_read_spi;
2409 sc->sc_flags |= WM_F_EEPROM_SPI;
2410 wm_nvm_set_addrbits_size_eecd(sc);
2411 } else {
2412 /* Microwire */
2413 sc->nvm.read = wm_nvm_read_uwire;
2414 if ((reg & EECD_EE_ABITS) != 0) {
2415 sc->sc_nvm_wordsize = 256;
2416 sc->sc_nvm_addrbits = 8;
2417 } else {
2418 sc->sc_nvm_wordsize = 64;
2419 sc->sc_nvm_addrbits = 6;
2420 }
2421 }
2422 break;
2423 case WM_T_82571:
2424 case WM_T_82572:
2425 /* SPI */
2426 sc->nvm.read = wm_nvm_read_eerd;
2427 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2428 sc->sc_flags |= WM_F_EEPROM_SPI;
2429 wm_nvm_set_addrbits_size_eecd(sc);
2430 sc->phy.acquire = wm_get_swsm_semaphore;
2431 sc->phy.release = wm_put_swsm_semaphore;
2432 sc->nvm.acquire = wm_get_nvm_82571;
2433 sc->nvm.release = wm_put_nvm_82571;
2434 break;
2435 case WM_T_82573:
2436 case WM_T_82574:
2437 case WM_T_82583:
2438 sc->nvm.read = wm_nvm_read_eerd;
2439 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2440 if (sc->sc_type == WM_T_82573) {
2441 sc->phy.acquire = wm_get_swsm_semaphore;
2442 sc->phy.release = wm_put_swsm_semaphore;
2443 sc->nvm.acquire = wm_get_nvm_82571;
2444 sc->nvm.release = wm_put_nvm_82571;
2445 } else {
2446 /* Both PHY and NVM use the same semaphore. */
2447 sc->phy.acquire = sc->nvm.acquire
2448 = wm_get_swfwhw_semaphore;
2449 sc->phy.release = sc->nvm.release
2450 = wm_put_swfwhw_semaphore;
2451 }
2452 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2453 sc->sc_flags |= WM_F_EEPROM_FLASH;
2454 sc->sc_nvm_wordsize = 2048;
2455 } else {
2456 /* SPI */
2457 sc->sc_flags |= WM_F_EEPROM_SPI;
2458 wm_nvm_set_addrbits_size_eecd(sc);
2459 }
2460 break;
2461 case WM_T_82575:
2462 case WM_T_82576:
2463 case WM_T_82580:
2464 case WM_T_I350:
2465 case WM_T_I354:
2466 case WM_T_80003:
2467 /* SPI */
2468 sc->sc_flags |= WM_F_EEPROM_SPI;
2469 wm_nvm_set_addrbits_size_eecd(sc);
2470 if ((sc->sc_type == WM_T_80003)
2471 || (sc->sc_nvm_wordsize < (1 << 15))) {
2472 sc->nvm.read = wm_nvm_read_eerd;
2473 /* Don't use WM_F_LOCK_EECD because we use EERD */
2474 } else {
2475 sc->nvm.read = wm_nvm_read_spi;
2476 sc->sc_flags |= WM_F_LOCK_EECD;
2477 }
2478 sc->phy.acquire = wm_get_phy_82575;
2479 sc->phy.release = wm_put_phy_82575;
2480 sc->nvm.acquire = wm_get_nvm_80003;
2481 sc->nvm.release = wm_put_nvm_80003;
2482 break;
2483 case WM_T_ICH8:
2484 case WM_T_ICH9:
2485 case WM_T_ICH10:
2486 case WM_T_PCH:
2487 case WM_T_PCH2:
2488 case WM_T_PCH_LPT:
2489 sc->nvm.read = wm_nvm_read_ich8;
2490 /* FLASH */
2491 sc->sc_flags |= WM_F_EEPROM_FLASH;
2492 sc->sc_nvm_wordsize = 2048;
2493 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2494 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2495 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2496 aprint_error_dev(sc->sc_dev,
2497 "can't map FLASH registers\n");
2498 goto out;
2499 }
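		/*
		 * GFPREG gives the NVM region in flash sectors: the base in
		 * the low bits and the limit in bits 16 and up. The
		 * computation below leaves the bank size in 16-bit words,
		 * assuming the region is split into two banks.
		 */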
2500 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2501 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2502 ICH_FLASH_SECTOR_SIZE;
2503 sc->sc_ich8_flash_bank_size =
2504 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2505 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2506 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2507 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2508 sc->sc_flashreg_offset = 0;
2509 sc->phy.acquire = wm_get_swflag_ich8lan;
2510 sc->phy.release = wm_put_swflag_ich8lan;
2511 sc->nvm.acquire = wm_get_nvm_ich8lan;
2512 sc->nvm.release = wm_put_nvm_ich8lan;
2513 break;
2514 case WM_T_PCH_SPT:
2515 case WM_T_PCH_CNP:
2516 sc->nvm.read = wm_nvm_read_spt;
2517 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2518 sc->sc_flags |= WM_F_EEPROM_FLASH;
2519 sc->sc_flasht = sc->sc_st;
2520 sc->sc_flashh = sc->sc_sh;
2521 sc->sc_ich8_flash_base = 0;
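		/*
		 * Bits 5:1 of STRAP encode the flash size as
		 * (field + 1) * NVM_SIZE_MULTIPLIER bytes; it's halved
		 * below to convert bytes to 16-bit words.
		 */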
2522 sc->sc_nvm_wordsize =
2523 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2524 * NVM_SIZE_MULTIPLIER;
2525 		/* The size is in bytes; we want words */
2526 sc->sc_nvm_wordsize /= 2;
2527 /* Assume 2 banks */
2528 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2529 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2530 sc->phy.acquire = wm_get_swflag_ich8lan;
2531 sc->phy.release = wm_put_swflag_ich8lan;
2532 sc->nvm.acquire = wm_get_nvm_ich8lan;
2533 sc->nvm.release = wm_put_nvm_ich8lan;
2534 break;
2535 case WM_T_I210:
2536 case WM_T_I211:
2537 		/* Allow a single clear of the SW semaphore on I210 and newer */
2538 sc->sc_flags |= WM_F_WA_I210_CLSEM;
2539 if (wm_nvm_flash_presence_i210(sc)) {
2540 sc->nvm.read = wm_nvm_read_eerd;
2541 /* Don't use WM_F_LOCK_EECD because we use EERD */
2542 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2543 wm_nvm_set_addrbits_size_eecd(sc);
2544 } else {
2545 sc->nvm.read = wm_nvm_read_invm;
2546 sc->sc_flags |= WM_F_EEPROM_INVM;
2547 sc->sc_nvm_wordsize = INVM_SIZE;
2548 }
2549 sc->phy.acquire = wm_get_phy_82575;
2550 sc->phy.release = wm_put_phy_82575;
2551 sc->nvm.acquire = wm_get_nvm_80003;
2552 sc->nvm.release = wm_put_nvm_80003;
2553 break;
2554 default:
2555 break;
2556 }
2557
2558 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2559 switch (sc->sc_type) {
2560 case WM_T_82571:
2561 case WM_T_82572:
2562 reg = CSR_READ(sc, WMREG_SWSM2);
2563 if ((reg & SWSM2_LOCK) == 0) {
2564 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2565 force_clear_smbi = true;
2566 } else
2567 force_clear_smbi = false;
2568 break;
2569 case WM_T_82573:
2570 case WM_T_82574:
2571 case WM_T_82583:
2572 force_clear_smbi = true;
2573 break;
2574 default:
2575 force_clear_smbi = false;
2576 break;
2577 }
2578 if (force_clear_smbi) {
2579 reg = CSR_READ(sc, WMREG_SWSM);
2580 if ((reg & SWSM_SMBI) != 0)
2581 aprint_error_dev(sc->sc_dev,
2582 "Please update the Bootagent\n");
2583 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2584 }
2585
2586 /*
2587  * Defer printing the EEPROM type until after verifying the checksum.
2588 * This allows the EEPROM type to be printed correctly in the case
2589 * that no EEPROM is attached.
2590 */
2591 /*
2592 * Validate the EEPROM checksum. If the checksum fails, flag
2593 * this for later, so we can fail future reads from the EEPROM.
2594 */
2595 if (wm_nvm_validate_checksum(sc)) {
2596 /*
2597  * Check again, because some PCI-e parts fail the
2598  * first check due to the link being in a sleep state.
2599 */
2600 if (wm_nvm_validate_checksum(sc))
2601 sc->sc_flags |= WM_F_EEPROM_INVALID;
2602 }
2603
2604 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2605 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2606 else {
2607 aprint_verbose_dev(sc->sc_dev, "%u words ",
2608 sc->sc_nvm_wordsize);
2609 if (sc->sc_flags & WM_F_EEPROM_INVM)
2610 aprint_verbose("iNVM");
2611 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2612 aprint_verbose("FLASH(HW)");
2613 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2614 aprint_verbose("FLASH");
2615 else {
2616 if (sc->sc_flags & WM_F_EEPROM_SPI)
2617 eetype = "SPI";
2618 else
2619 eetype = "MicroWire";
2620 aprint_verbose("(%d address bits) %s EEPROM",
2621 sc->sc_nvm_addrbits, eetype);
2622 }
2623 }
2624 wm_nvm_version(sc);
2625 aprint_verbose("\n");
2626
2627 /*
2628  * XXX The first call to wm_gmii_setup_phytype. The result might
2629  * be incorrect.
2630 */
2631 wm_gmii_setup_phytype(sc, 0, 0);
2632
2633 /* Check for WM_F_WOL on some chips before wm_reset() */
2634 switch (sc->sc_type) {
2635 case WM_T_ICH8:
2636 case WM_T_ICH9:
2637 case WM_T_ICH10:
2638 case WM_T_PCH:
2639 case WM_T_PCH2:
2640 case WM_T_PCH_LPT:
2641 case WM_T_PCH_SPT:
2642 case WM_T_PCH_CNP:
2643 apme_mask = WUC_APME;
2644 eeprom_data = CSR_READ(sc, WMREG_WUC);
2645 if ((eeprom_data & apme_mask) != 0)
2646 sc->sc_flags |= WM_F_WOL;
2647 break;
2648 default:
2649 break;
2650 }
2651
2652 /* Reset the chip to a known state. */
2653 wm_reset(sc);
2654
2655 /*
2656 * Check for I21[01] PLL workaround.
2657 *
2658 * Three cases:
2659 * a) Chip is I211.
2660 * b) Chip is I210 and it uses INVM (not FLASH).
2661 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2662 */
2663 if (sc->sc_type == WM_T_I211)
2664 sc->sc_flags |= WM_F_PLL_WA_I210;
2665 if (sc->sc_type == WM_T_I210) {
2666 if (!wm_nvm_flash_presence_i210(sc))
2667 sc->sc_flags |= WM_F_PLL_WA_I210;
2668 else if ((sc->sc_nvm_ver_major < 3)
2669 || ((sc->sc_nvm_ver_major == 3)
2670 && (sc->sc_nvm_ver_minor < 25))) {
2671 aprint_verbose_dev(sc->sc_dev,
2672 "ROM image version %d.%d is older than 3.25\n",
2673 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2674 sc->sc_flags |= WM_F_PLL_WA_I210;
2675 }
2676 }
2677 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2678 wm_pll_workaround_i210(sc);
2679
2680 wm_get_wakeup(sc);
2681
2682 /* Non-AMT based hardware can now take control from firmware */
2683 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2684 wm_get_hw_control(sc);
2685
2686 /*
2687  * Read the Ethernet address from the EEPROM if it wasn't first
2688  * found in the device properties.
2689 */
2690 ea = prop_dictionary_get(dict, "mac-address");
2691 if (ea != NULL) {
2692 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2693 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2694 memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
2695 } else {
2696 if (wm_read_mac_addr(sc, enaddr) != 0) {
2697 aprint_error_dev(sc->sc_dev,
2698 "unable to read Ethernet address\n");
2699 goto out;
2700 }
2701 }
2702
2703 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2704 ether_sprintf(enaddr));
2705
2706 /*
2707 * Read the config info from the EEPROM, and set up various
2708 * bits in the control registers based on their contents.
2709 */
2710 pn = prop_dictionary_get(dict, "i82543-cfg1");
2711 if (pn != NULL) {
2712 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2713 cfg1 = (uint16_t) prop_number_signed_value(pn);
2714 } else {
2715 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2716 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2717 goto out;
2718 }
2719 }
2720
2721 pn = prop_dictionary_get(dict, "i82543-cfg2");
2722 if (pn != NULL) {
2723 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2724 cfg2 = (uint16_t) prop_number_signed_value(pn);
2725 } else {
2726 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2727 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2728 goto out;
2729 }
2730 }
2731
2732 /* check for WM_F_WOL */
2733 switch (sc->sc_type) {
2734 case WM_T_82542_2_0:
2735 case WM_T_82542_2_1:
2736 case WM_T_82543:
2737 /* dummy? */
2738 eeprom_data = 0;
2739 apme_mask = NVM_CFG3_APME;
2740 break;
2741 case WM_T_82544:
2742 apme_mask = NVM_CFG2_82544_APM_EN;
2743 eeprom_data = cfg2;
2744 break;
2745 case WM_T_82546:
2746 case WM_T_82546_3:
2747 case WM_T_82571:
2748 case WM_T_82572:
2749 case WM_T_82573:
2750 case WM_T_82574:
2751 case WM_T_82583:
2752 case WM_T_80003:
2753 case WM_T_82575:
2754 case WM_T_82576:
2755 apme_mask = NVM_CFG3_APME;
2756 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2757 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2758 break;
2759 case WM_T_82580:
2760 case WM_T_I350:
2761 case WM_T_I354:
2762 case WM_T_I210:
2763 case WM_T_I211:
2764 apme_mask = NVM_CFG3_APME;
2765 wm_nvm_read(sc,
2766 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2767 1, &eeprom_data);
2768 break;
2769 case WM_T_ICH8:
2770 case WM_T_ICH9:
2771 case WM_T_ICH10:
2772 case WM_T_PCH:
2773 case WM_T_PCH2:
2774 case WM_T_PCH_LPT:
2775 case WM_T_PCH_SPT:
2776 case WM_T_PCH_CNP:
2777 		/* Already checked before wm_reset() */
2778 apme_mask = eeprom_data = 0;
2779 break;
2780 default: /* XXX 82540 */
2781 apme_mask = NVM_CFG3_APME;
2782 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2783 break;
2784 }
2785 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2786 if ((eeprom_data & apme_mask) != 0)
2787 sc->sc_flags |= WM_F_WOL;
2788
2789 /*
2790  * We have the EEPROM settings; now apply the special cases
2791  * where the EEPROM may be wrong or the board won't support
2792  * wake-on-LAN on a particular port.
2793 */
2794 switch (sc->sc_pcidevid) {
2795 case PCI_PRODUCT_INTEL_82546GB_PCIE:
2796 sc->sc_flags &= ~WM_F_WOL;
2797 break;
2798 case PCI_PRODUCT_INTEL_82546EB_FIBER:
2799 case PCI_PRODUCT_INTEL_82546GB_FIBER:
2800 		/* Wake events are only supported on port A for dual-fiber
2801 		 * adapters, regardless of the EEPROM setting */
2802 if (sc->sc_funcid == 1)
2803 sc->sc_flags &= ~WM_F_WOL;
2804 break;
2805 case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2806 /* If quad port adapter, disable WoL on all but port A */
2807 if (sc->sc_funcid != 0)
2808 sc->sc_flags &= ~WM_F_WOL;
2809 break;
2810 case PCI_PRODUCT_INTEL_82571EB_FIBER:
2811 		/* Wake events are only supported on port A for dual-fiber
2812 		 * adapters, regardless of the EEPROM setting */
2813 if (sc->sc_funcid == 1)
2814 sc->sc_flags &= ~WM_F_WOL;
2815 break;
2816 case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2817 case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2818 case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2819 /* If quad port adapter, disable WoL on all but port A */
2820 if (sc->sc_funcid != 0)
2821 sc->sc_flags &= ~WM_F_WOL;
2822 break;
2823 }
2824
2825 if (sc->sc_type >= WM_T_82575) {
2826 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2827 aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2828 nvmword);
2829 if ((sc->sc_type == WM_T_82575) ||
2830 (sc->sc_type == WM_T_82576)) {
2831 /* Check NVM for autonegotiation */
2832 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2833 != 0)
2834 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2835 }
2836 if ((sc->sc_type == WM_T_82575) ||
2837 (sc->sc_type == WM_T_I350)) {
2838 if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2839 sc->sc_flags |= WM_F_MAS;
2840 }
2841 }
2842 }
2843
2844 /*
2845  * XXX need special handling for some multiple-port cards
2846  * to disable a particular port.
2847 */
2848
2849 if (sc->sc_type >= WM_T_82544) {
2850 pn = prop_dictionary_get(dict, "i82543-swdpin");
2851 if (pn != NULL) {
2852 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2853 swdpin = (uint16_t) prop_number_signed_value(pn);
2854 } else {
2855 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2856 aprint_error_dev(sc->sc_dev,
2857 "unable to read SWDPIN\n");
2858 goto out;
2859 }
2860 }
2861 }
2862
2863 if (cfg1 & NVM_CFG1_ILOS)
2864 sc->sc_ctrl |= CTRL_ILOS;
2865
2866 /*
2867 * XXX
2868  * This code isn't correct, because pins 2 and 3 are located
2869  * at different positions on newer chips. Check all the datasheets.
2870  *
2871  * Until this problem is resolved, only apply it to the 82580 and older.
2872 */
2873 if (sc->sc_type <= WM_T_82580) {
2874 if (sc->sc_type >= WM_T_82544) {
2875 sc->sc_ctrl |=
2876 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2877 CTRL_SWDPIO_SHIFT;
2878 sc->sc_ctrl |=
2879 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2880 CTRL_SWDPINS_SHIFT;
2881 } else {
2882 sc->sc_ctrl |=
2883 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2884 CTRL_SWDPIO_SHIFT;
2885 }
2886 }
2887
2888 if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2889 wm_nvm_read(sc,
2890 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2891 1, &nvmword);
2892 if (nvmword & NVM_CFG3_ILOS)
2893 sc->sc_ctrl |= CTRL_ILOS;
2894 }
2895
2896 #if 0
2897 if (sc->sc_type >= WM_T_82544) {
2898 if (cfg1 & NVM_CFG1_IPS0)
2899 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2900 if (cfg1 & NVM_CFG1_IPS1)
2901 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2902 sc->sc_ctrl_ext |=
2903 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2904 CTRL_EXT_SWDPIO_SHIFT;
2905 sc->sc_ctrl_ext |=
2906 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2907 CTRL_EXT_SWDPINS_SHIFT;
2908 } else {
2909 sc->sc_ctrl_ext |=
2910 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2911 CTRL_EXT_SWDPIO_SHIFT;
2912 }
2913 #endif
2914
2915 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2916 #if 0
2917 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2918 #endif
2919
2920 if (sc->sc_type == WM_T_PCH) {
2921 uint16_t val;
2922
2923 /* Save the NVM K1 bit setting */
2924 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2925
2926 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2927 sc->sc_nvm_k1_enabled = 1;
2928 else
2929 sc->sc_nvm_k1_enabled = 0;
2930 }
2931
2932 /* Determine if we're GMII, TBI, SERDES or SGMII mode */
2933 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2934 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2935 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2936 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2937 || sc->sc_type == WM_T_82573
2938 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2939 /* Copper only */
2940 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2941 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2942 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2943 	    || (sc->sc_type == WM_T_I211)) {
2944 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2945 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2946 switch (link_mode) {
2947 case CTRL_EXT_LINK_MODE_1000KX:
2948 aprint_normal_dev(sc->sc_dev, "1000KX\n");
2949 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2950 break;
2951 case CTRL_EXT_LINK_MODE_SGMII:
2952 if (wm_sgmii_uses_mdio(sc)) {
2953 aprint_normal_dev(sc->sc_dev,
2954 "SGMII(MDIO)\n");
2955 sc->sc_flags |= WM_F_SGMII;
2956 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2957 break;
2958 }
2959 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2960 /*FALLTHROUGH*/
2961 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2962 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2963 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2964 if (link_mode
2965 == CTRL_EXT_LINK_MODE_SGMII) {
2966 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2967 sc->sc_flags |= WM_F_SGMII;
2968 aprint_verbose_dev(sc->sc_dev,
2969 "SGMII\n");
2970 } else {
2971 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2972 aprint_verbose_dev(sc->sc_dev,
2973 "SERDES\n");
2974 }
2975 break;
2976 }
2977 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2978 aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
2979 else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2980 aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
2981 sc->sc_flags |= WM_F_SGMII;
2982 }
2983 /* Do not change link mode for 100BaseFX */
2984 if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
2985 break;
2986
2987 /* Change current link mode setting */
2988 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2989 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2990 reg |= CTRL_EXT_LINK_MODE_SGMII;
2991 else
2992 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2993 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2994 break;
2995 case CTRL_EXT_LINK_MODE_GMII:
2996 default:
2997 aprint_normal_dev(sc->sc_dev, "Copper\n");
2998 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2999 break;
3000 }
3001
3002 		/* Enable the I2C interface only when SGMII is in use. */
3003 		reg &= ~CTRL_EXT_I2C_ENA;
3004 		if ((sc->sc_flags & WM_F_SGMII) != 0)
3005 			reg |= CTRL_EXT_I2C_ENA;
3007 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3008 if ((sc->sc_flags & WM_F_SGMII) != 0) {
3009 if (!wm_sgmii_uses_mdio(sc))
3010 wm_gmii_setup_phytype(sc, 0, 0);
3011 wm_reset_mdicnfg_82580(sc);
3012 }
3013 } else if (sc->sc_type < WM_T_82543 ||
3014 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
3015 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3016 aprint_error_dev(sc->sc_dev,
3017 "WARNING: TBIMODE set on 1000BASE-T product!\n");
3018 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
3019 }
3020 } else {
3021 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
3022 aprint_error_dev(sc->sc_dev,
3023 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
3024 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3025 }
3026 }
3027
3028 if (sc->sc_type >= WM_T_PCH2)
3029 sc->sc_flags |= WM_F_EEE;
3030 else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
3031 && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
3032 /* XXX: Need special handling for I354. (not yet) */
3033 if (sc->sc_type != WM_T_I354)
3034 sc->sc_flags |= WM_F_EEE;
3035 }
3036
3037 /*
3038  * The I350 has a bug where it always strips the CRC whether
3039  * asked to or not, so ask for the stripped CRC here and cope in rxeof.
3040 */
3041 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3042 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3043 sc->sc_flags |= WM_F_CRC_STRIP;
3044
3045 /* Set device properties (macflags) */
3046 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
3047
3048 if (sc->sc_flags != 0) {
3049 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
3050 aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
3051 }
3052
3053 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
3054
3055 /* Initialize the media structures accordingly. */
3056 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3057 wm_gmii_mediainit(sc, wmp->wmp_product);
3058 else
3059 wm_tbi_mediainit(sc); /* All others */
3060
3061 ifp = &sc->sc_ethercom.ec_if;
3062 xname = device_xname(sc->sc_dev);
3063 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
3064 ifp->if_softc = sc;
3065 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3066 ifp->if_extflags = IFEF_MPSAFE;
3067 ifp->if_ioctl = wm_ioctl;
3068 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3069 ifp->if_start = wm_nq_start;
3070 /*
3071 * When the number of CPUs is one and the controller can use
3072  * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
3073  * That is, wm(4) uses two interrupts: one is used for Tx/Rx
3074  * and the other for link status changes.
3075 * In this situation, wm_nq_transmit() is disadvantageous
3076 * because of wm_select_txqueue() and pcq(9) overhead.
3077 */
3078 if (wm_is_using_multiqueue(sc))
3079 ifp->if_transmit = wm_nq_transmit;
3080 } else {
3081 ifp->if_start = wm_start;
3082 /*
3083 * wm_transmit() has the same disadvantages as wm_nq_transmit()
3084 * described above.
3085 */
3086 if (wm_is_using_multiqueue(sc))
3087 ifp->if_transmit = wm_transmit;
3088 }
3089 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
3090 ifp->if_init = wm_init;
3091 ifp->if_stop = wm_stop;
3092 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
3093 IFQ_SET_READY(&ifp->if_snd);
3094
3095 /* Check for jumbo frame */
3096 switch (sc->sc_type) {
3097 case WM_T_82573:
3098 /* XXX limited to 9234 if ASPM is disabled */
3099 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
3100 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
3101 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3102 break;
3103 case WM_T_82571:
3104 case WM_T_82572:
3105 case WM_T_82574:
3106 case WM_T_82583:
3107 case WM_T_82575:
3108 case WM_T_82576:
3109 case WM_T_82580:
3110 case WM_T_I350:
3111 case WM_T_I354:
3112 case WM_T_I210:
3113 case WM_T_I211:
3114 case WM_T_80003:
3115 case WM_T_ICH9:
3116 case WM_T_ICH10:
3117 case WM_T_PCH2: /* PCH2 supports 9K frame size */
3118 case WM_T_PCH_LPT:
3119 case WM_T_PCH_SPT:
3120 case WM_T_PCH_CNP:
3121 /* XXX limited to 9234 */
3122 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3123 break;
3124 case WM_T_PCH:
3125 /* XXX limited to 4096 */
3126 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3127 break;
3128 case WM_T_82542_2_0:
3129 case WM_T_82542_2_1:
3130 case WM_T_ICH8:
3131 /* No support for jumbo frame */
3132 break;
3133 default:
3134 /* ETHER_MAX_LEN_JUMBO */
3135 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3136 break;
3137 }
3138
3139 /* If we're an i82543 or greater, we can support VLANs. */
3140 if (sc->sc_type >= WM_T_82543) {
3141 sc->sc_ethercom.ec_capabilities |=
3142 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3143 sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3144 }
3145
3146 if ((sc->sc_flags & WM_F_EEE) != 0)
3147 sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
3148
3149 /*
3150 * We can perform TCPv4 and UDPv4 checksum offload, plus TCPv6 and
3151 * UDPv6 Tx checksum offload, on i82543 and later.
3152 */
3153 if (sc->sc_type >= WM_T_82543) {
3154 ifp->if_capabilities |=
3155 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3156 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3157 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3158 IFCAP_CSUM_TCPv6_Tx |
3159 IFCAP_CSUM_UDPv6_Tx;
3160 }
3161
3162 /*
3163 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
3164 *
3165 * 82541GI (8086:1076) ... no
3166 * 82572EI (8086:10b9) ... yes
3167 */
3168 if (sc->sc_type >= WM_T_82571) {
3169 ifp->if_capabilities |=
3170 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3171 }
3172
3173 /*
3174 * If we're an i82544 or greater (except i82547), we can do
3175 * TCP segmentation offload.
3176 */
3177 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
3178 ifp->if_capabilities |= IFCAP_TSOv4;
3179
3180 if (sc->sc_type >= WM_T_82571)
3181 ifp->if_capabilities |= IFCAP_TSOv6;
3182
3183 sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3184 sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3185 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3186 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3187
3188 /* Attach the interface. */
3189 if_initialize(ifp);
3190 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3191 ether_ifattach(ifp, enaddr);
3192 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3193 if_register(ifp);
3194 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3195 RND_FLAG_DEFAULT);
3196
3197 #ifdef WM_EVENT_COUNTERS
3198 /* Attach event counters. */
3199 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3200 NULL, xname, "linkintr");
3201
3202 evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
3203 NULL, xname, "CRC Error");
3204 evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
3205 NULL, xname, "Symbol Error");
3206 evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
3207 NULL, xname, "Missed Packets");
3208 evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
3209 NULL, xname, "Collision");
3210 evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
3211 NULL, xname, "Sequence Error");
3212 evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
3213 NULL, xname, "Receive Length Error");
3214
3215 if (sc->sc_type >= WM_T_82543) {
3216 evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
3217 NULL, xname, "Alignment Error");
3218 evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
3219 NULL, xname, "Receive Error");
3220 evcnt_attach_dynamic(&sc->sc_ev_cexterr, EVCNT_TYPE_MISC,
3221 NULL, xname, "Carrier Extension Error");
3222
3223 evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
3224 NULL, xname, "Tx with No CRS");
3225 evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
3226 NULL, xname, "TCP Segmentation Context Tx");
3227 evcnt_attach_dynamic(&sc->sc_ev_tsctfc, EVCNT_TYPE_MISC,
3228 NULL, xname, "TCP Segmentation Context Tx Fail");
3229 }
3230
3231 if (sc->sc_type >= WM_T_82542_2_1) {
3232 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3233 NULL, xname, "tx_xoff");
3234 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3235 NULL, xname, "tx_xon");
3236 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3237 NULL, xname, "rx_xoff");
3238 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3239 NULL, xname, "rx_xon");
3240 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3241 NULL, xname, "rx_macctl");
3242 }
3243
3244 evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
3245 NULL, xname, "Single Collision");
3246 evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
3247 NULL, xname, "Excessive Collisions");
3248 evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
3249 NULL, xname, "Multiple Collision");
3250 evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
3251 NULL, xname, "Late Collisions");
3252 evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
3253 NULL, xname, "Defer");
3254 evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
3255 NULL, xname, "Packets Rx (64 bytes)");
3256 evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
3257 NULL, xname, "Packets Rx (65-127 bytes)");
3258 evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
3259 NULL, xname, "Packets Rx (128-255 bytes)");
3260 evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
3261 NULL, xname, "Packets Rx (256-511 bytes)");
3262 evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
3263 NULL, xname, "Packets Rx (512-1023 bytes)");
3264 evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
3265 NULL, xname, "Packets Rx (1024-1522 bytes)");
3266 evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
3267 NULL, xname, "Good Packets Rx");
3268 evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
3269 NULL, xname, "Broadcast Packets Rx");
3270 evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
3271 NULL, xname, "Multicast Packets Rx");
3272 evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
3273 NULL, xname, "Good Packets Tx");
3274 evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
3275 NULL, xname, "Good Octets Rx");
3276 evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
3277 NULL, xname, "Good Octets Tx");
3278 evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
3279 NULL, xname, "Rx No Buffers");
3280 evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
3281 NULL, xname, "Rx Undersize");
3282 evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
3283 NULL, xname, "Rx Fragment");
3284 evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
3285 NULL, xname, "Rx Oversize");
3286 evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
3287 NULL, xname, "Rx Jabber");
3288 if (sc->sc_type >= WM_T_82540) {
3289 evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
3290 NULL, xname, "Management Packets Rx");
3291 evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
3292 NULL, xname, "Management Packets Dropped");
3293 evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
3294 NULL, xname, "Management Packets Tx");
3295 }
3296 evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
3297 NULL, xname, "Total Octets Rx");
3298 evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
3299 NULL, xname, "Total Octets Tx");
3300 evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
3301 NULL, xname, "Total Packets Rx");
3302 evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
3303 NULL, xname, "Total Packets Tx");
3304 evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
3305 NULL, xname, "Packets Tx (64 bytes)");
3306 evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
3307 NULL, xname, "Packets Tx (65-127 bytes)");
3308 evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
3309 NULL, xname, "Packets Tx (128-255 bytes)");
3310 evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
3311 NULL, xname, "Packets Tx (256-511 bytes)");
3312 evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
3313 NULL, xname, "Packets Tx (512-1023 bytes)");
3314 evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
3315 NULL, xname, "Packets Tx (1024-1522 bytes)");
3316 evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
3317 NULL, xname, "Multicast Packets Tx");
3318 evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
3319 NULL, xname, "Broadcast Packets Tx");
3320 evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
3321 NULL, xname, "Interrupt Assertion");
3322 if (sc->sc_type < WM_T_82575) {
3323 evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
3324 NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
3325 evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
3326 NULL, xname, "Intr. Cause Rx Abs Timer Expire");
3327 evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
3328 NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
3329 evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
3330 NULL, xname, "Intr. Cause Tx Abs Timer Expire");
3331 evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
3332 NULL, xname, "Intr. Cause Tx Queue Empty");
3333 evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
3334 NULL, xname, "Intr. Cause Tx Queue Min Thresh");
3335 evcnt_attach_dynamic(&sc->sc_ev_icrxdmtc, EVCNT_TYPE_MISC,
3336 NULL, xname, "Intr. Cause Rx Desc Min Thresh");
3337 evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
3338 NULL, xname, "Intr. Cause Receiver Overrun");
3339 }
3340 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
3341 evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
3342 NULL, xname, "BMC2OS Packets received by host");
3343 evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
3344 NULL, xname, "OS2BMC Packets transmitted by host");
3345 evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
3346 NULL, xname, "BMC2OS Packets sent by BMC");
3347 evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
3348 NULL, xname, "OS2BMC Packets received by BMC");
3349 }
3350 #endif /* WM_EVENT_COUNTERS */
3351
3352 sc->sc_txrx_use_workqueue = false;
3353
3354 if (wm_phy_need_linkdown_discard(sc)) {
3355 DPRINTF(sc, WM_DEBUG_LINK,
3356 ("%s: %s: Set linkdown discard flag\n",
3357 device_xname(sc->sc_dev), __func__));
3358 wm_set_linkdown_discard(sc);
3359 }
3360
3361 wm_init_sysctls(sc);
3362
3363 if (pmf_device_register(self, wm_suspend, wm_resume))
3364 pmf_class_network_register(self, ifp);
3365 else
3366 aprint_error_dev(self, "couldn't establish power handler\n");
3367
3368 sc->sc_flags |= WM_F_ATTACHED;
3369 out:
3370 return;
3371 }
3372
3373 /* The detach function (ca_detach) */
3374 static int
3375 wm_detach(device_t self, int flags __unused)
3376 {
3377 struct wm_softc *sc = device_private(self);
3378 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3379 int i;
3380
3381 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3382 return 0;
3383
3384 /* Stop the interface. Callouts are stopped in it. */
3385 IFNET_LOCK(ifp);
3386 sc->sc_dying = true;
3387 wm_stop(ifp, 1);
3388 IFNET_UNLOCK(ifp);
3389
3390 pmf_device_deregister(self);
3391
3392 sysctl_teardown(&sc->sc_sysctllog);
3393
3394 #ifdef WM_EVENT_COUNTERS
3395 evcnt_detach(&sc->sc_ev_linkintr);
3396
3397 evcnt_detach(&sc->sc_ev_crcerrs);
3398 evcnt_detach(&sc->sc_ev_symerrc);
3399 evcnt_detach(&sc->sc_ev_mpc);
3400 evcnt_detach(&sc->sc_ev_colc);
3401 evcnt_detach(&sc->sc_ev_sec);
3402 evcnt_detach(&sc->sc_ev_rlec);
3403
3404 if (sc->sc_type >= WM_T_82543) {
3405 evcnt_detach(&sc->sc_ev_algnerrc);
3406 evcnt_detach(&sc->sc_ev_rxerrc);
3407 evcnt_detach(&sc->sc_ev_cexterr);
3408
3409 evcnt_detach(&sc->sc_ev_tncrs);
3410 evcnt_detach(&sc->sc_ev_tsctc);
3411 evcnt_detach(&sc->sc_ev_tsctfc);
3412 }
3413
3414 if (sc->sc_type >= WM_T_82542_2_1) {
3415 evcnt_detach(&sc->sc_ev_tx_xoff);
3416 evcnt_detach(&sc->sc_ev_tx_xon);
3417 evcnt_detach(&sc->sc_ev_rx_xoff);
3418 evcnt_detach(&sc->sc_ev_rx_xon);
3419 evcnt_detach(&sc->sc_ev_rx_macctl);
3420 }
3421
3422 evcnt_detach(&sc->sc_ev_scc);
3423 evcnt_detach(&sc->sc_ev_ecol);
3424 evcnt_detach(&sc->sc_ev_mcc);
3425 evcnt_detach(&sc->sc_ev_latecol);
3426 evcnt_detach(&sc->sc_ev_dc);
3427 evcnt_detach(&sc->sc_ev_prc64);
3428 evcnt_detach(&sc->sc_ev_prc127);
3429 evcnt_detach(&sc->sc_ev_prc255);
3430 evcnt_detach(&sc->sc_ev_prc511);
3431 evcnt_detach(&sc->sc_ev_prc1023);
3432 evcnt_detach(&sc->sc_ev_prc1522);
3433 evcnt_detach(&sc->sc_ev_gprc);
3434 evcnt_detach(&sc->sc_ev_bprc);
3435 evcnt_detach(&sc->sc_ev_mprc);
3436 evcnt_detach(&sc->sc_ev_gptc);
3437 evcnt_detach(&sc->sc_ev_gorc);
3438 evcnt_detach(&sc->sc_ev_gotc);
3439 evcnt_detach(&sc->sc_ev_rnbc);
3440 evcnt_detach(&sc->sc_ev_ruc);
3441 evcnt_detach(&sc->sc_ev_rfc);
3442 evcnt_detach(&sc->sc_ev_roc);
3443 evcnt_detach(&sc->sc_ev_rjc);
3444 if (sc->sc_type >= WM_T_82540) {
3445 evcnt_detach(&sc->sc_ev_mgtprc);
3446 evcnt_detach(&sc->sc_ev_mgtpdc);
3447 evcnt_detach(&sc->sc_ev_mgtptc);
3448 }
3449 evcnt_detach(&sc->sc_ev_tor);
3450 evcnt_detach(&sc->sc_ev_tot);
3451 evcnt_detach(&sc->sc_ev_tpr);
3452 evcnt_detach(&sc->sc_ev_tpt);
3453 evcnt_detach(&sc->sc_ev_ptc64);
3454 evcnt_detach(&sc->sc_ev_ptc127);
3455 evcnt_detach(&sc->sc_ev_ptc255);
3456 evcnt_detach(&sc->sc_ev_ptc511);
3457 evcnt_detach(&sc->sc_ev_ptc1023);
3458 evcnt_detach(&sc->sc_ev_ptc1522);
3459 evcnt_detach(&sc->sc_ev_mptc);
3460 evcnt_detach(&sc->sc_ev_bptc);
3461 evcnt_detach(&sc->sc_ev_iac);
3462 if (sc->sc_type < WM_T_82575) {
3463 evcnt_detach(&sc->sc_ev_icrxptc);
3464 evcnt_detach(&sc->sc_ev_icrxatc);
3465 evcnt_detach(&sc->sc_ev_ictxptc);
3466 evcnt_detach(&sc->sc_ev_ictxact);
3467 evcnt_detach(&sc->sc_ev_ictxqec);
3468 evcnt_detach(&sc->sc_ev_ictxqmtc);
3469 evcnt_detach(&sc->sc_ev_icrxdmtc);
3470 evcnt_detach(&sc->sc_ev_icrxoc);
3471 }
3472 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
3473 evcnt_detach(&sc->sc_ev_b2ogprc);
3474 evcnt_detach(&sc->sc_ev_o2bspc);
3475 evcnt_detach(&sc->sc_ev_b2ospc);
3476 evcnt_detach(&sc->sc_ev_o2bgptc);
3477 }
3478 #endif /* WM_EVENT_COUNTERS */
3479
3480 rnd_detach_source(&sc->rnd_source);
3481
3482 /* Tell the firmware about the release */
3483 mutex_enter(sc->sc_core_lock);
3484 wm_release_manageability(sc);
3485 wm_release_hw_control(sc);
3486 wm_enable_wakeup(sc);
3487 mutex_exit(sc->sc_core_lock);
3488
3489 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3490
3491 ether_ifdetach(ifp);
3492 if_detach(ifp);
3493 if_percpuq_destroy(sc->sc_ipq);
3494
3495 /* Delete all remaining media. */
3496 ifmedia_fini(&sc->sc_mii.mii_media);
3497
3498 /* Unload RX dmamaps and free mbufs */
3499 for (i = 0; i < sc->sc_nqueues; i++) {
3500 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3501 mutex_enter(rxq->rxq_lock);
3502 wm_rxdrain(rxq);
3503 mutex_exit(rxq->rxq_lock);
3504 }
3505 /* Must unlock here */
3506
3507 /* Disestablish the interrupt handler */
3508 for (i = 0; i < sc->sc_nintrs; i++) {
3509 if (sc->sc_ihs[i] != NULL) {
3510 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3511 sc->sc_ihs[i] = NULL;
3512 }
3513 }
3514 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3515
3516 /* wm_stop() ensured that the workqueues are stopped. */
3517 workqueue_destroy(sc->sc_queue_wq);
3518 workqueue_destroy(sc->sc_reset_wq);
3519
3520 for (i = 0; i < sc->sc_nqueues; i++)
3521 softint_disestablish(sc->sc_queue[i].wmq_si);
3522
3523 wm_free_txrx_queues(sc);
3524
3525 /* Unmap the registers */
3526 if (sc->sc_ss) {
3527 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3528 sc->sc_ss = 0;
3529 }
3530 if (sc->sc_ios) {
3531 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3532 sc->sc_ios = 0;
3533 }
3534 if (sc->sc_flashs) {
3535 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3536 sc->sc_flashs = 0;
3537 }
3538
3539 if (sc->sc_core_lock)
3540 mutex_obj_free(sc->sc_core_lock);
3541 if (sc->sc_ich_phymtx)
3542 mutex_obj_free(sc->sc_ich_phymtx);
3543 if (sc->sc_ich_nvmmtx)
3544 mutex_obj_free(sc->sc_ich_nvmmtx);
3545
3546 return 0;
3547 }
3548
3549 static bool
3550 wm_suspend(device_t self, const pmf_qual_t *qual)
3551 {
3552 struct wm_softc *sc = device_private(self);
3553
3554 wm_release_manageability(sc);
3555 wm_release_hw_control(sc);
3556 wm_enable_wakeup(sc);
3557
3558 return true;
3559 }
3560
3561 static bool
3562 wm_resume(device_t self, const pmf_qual_t *qual)
3563 {
3564 struct wm_softc *sc = device_private(self);
3565 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3566 pcireg_t reg;
3567 char buf[256];
3568
3569 reg = CSR_READ(sc, WMREG_WUS);
3570 if (reg != 0) {
3571 snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3572 device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3573 CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3574 }
3575
3576 if (sc->sc_type >= WM_T_PCH2)
3577 wm_resume_workarounds_pchlan(sc);
3578 IFNET_LOCK(ifp);
3579 if ((ifp->if_flags & IFF_UP) == 0) {
3580 /* >= PCH_SPT hardware workaround before reset. */
3581 if (sc->sc_type >= WM_T_PCH_SPT)
3582 wm_flush_desc_rings(sc);
3583
3584 wm_reset(sc);
3585 /* Non-AMT based hardware can now take control from firmware */
3586 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3587 wm_get_hw_control(sc);
3588 wm_init_manageability(sc);
3589 } else {
3590 /*
3591 * We called pmf_class_network_register(), so if_init() is
3592 * called automatically when IFF_UP is set. wm_reset(),
3593 * wm_get_hw_control() and wm_init_manageability() are then
3594 * called via wm_init().
3595 */
3596 }
3597 IFNET_UNLOCK(ifp);
3598
3599 return true;
3600 }
3601
3602 /*
3603 * wm_watchdog:
3604 *
3605 * Watchdog checker.
3606 */
3607 static bool
3608 wm_watchdog(struct ifnet *ifp)
3609 {
3610 int qid;
3611 struct wm_softc *sc = ifp->if_softc;
3612 uint16_t hang_queue = 0; /* One bit per queue; wm(4)'s max is the 82576's 16. */
3613
3614 for (qid = 0; qid < sc->sc_nqueues; qid++) {
3615 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3616
3617 wm_watchdog_txq(ifp, txq, &hang_queue);
3618 }
3619
3620 #ifdef WM_DEBUG
3621 if (sc->sc_trigger_reset) {
3622 /* debug operation, no need for atomicity or reliability */
3623 sc->sc_trigger_reset = 0;
3624 hang_queue++;
3625 }
3626 #endif
3627
3628 if (hang_queue == 0)
3629 return true;
3630
3631 if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
3632 workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
3633
3634 return false;
3635 }
3636
3637 /*
3638 * Perform an interface watchdog reset.
3639 */
3640 static void
3641 wm_handle_reset_work(struct work *work, void *arg)
3642 {
3643 struct wm_softc * const sc = arg;
3644 struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
3645
3646 /* Don't want ioctl operations to happen */
3647 IFNET_LOCK(ifp);
3648
3649 /* reset the interface. */
3650 wm_init(ifp);
3651
3652 IFNET_UNLOCK(ifp);
3653
3654 /*
3655 * Some upper-layer processing (e.g. ALTQ, or single-CPU
3656 * systems) still calls ifp->if_start() directly.
3657 */
3658 /* Try to get more packets going. */
3659 ifp->if_start(ifp);
3660
3661 atomic_store_relaxed(&sc->sc_reset_pending, 0);
3662 }
3663
3664
3665 static void
3666 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3667 {
3668
3669 mutex_enter(txq->txq_lock);
3670 if (txq->txq_sending &&
3671 time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3672 wm_watchdog_txq_locked(ifp, txq, hang);
3673
3674 mutex_exit(txq->txq_lock);
3675 }
3676
3677 static void
3678 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3679 uint16_t *hang)
3680 {
3681 struct wm_softc *sc = ifp->if_softc;
3682 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3683
3684 KASSERT(mutex_owned(txq->txq_lock));
3685
3686 /*
3687 * Since we're using delayed interrupts, sweep up
3688 * before we report an error.
3689 */
3690 wm_txeof(txq, UINT_MAX);
3691
3692 if (txq->txq_sending)
3693 *hang |= __BIT(wmq->wmq_id);
3694
3695 if (txq->txq_free == WM_NTXDESC(txq)) {
3696 log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3697 device_xname(sc->sc_dev));
3698 } else {
3699 #ifdef WM_DEBUG
3700 int i, j;
3701 struct wm_txsoft *txs;
3702 #endif
3703 log(LOG_ERR,
3704 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3705 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3706 txq->txq_next);
3707 if_statinc(ifp, if_oerrors);
3708 #ifdef WM_DEBUG
3709 for (i = txq->txq_sdirty; i != txq->txq_snext;
3710 i = WM_NEXTTXS(txq, i)) {
3711 txs = &txq->txq_soft[i];
3712 printf("txs %d tx %d -> %d\n",
3713 i, txs->txs_firstdesc, txs->txs_lastdesc);
3714 for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3715 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3716 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3717 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3718 printf("\t %#08x%08x\n",
3719 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3720 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3721 } else {
3722 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3723 (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3724 txq->txq_descs[j].wtx_addr.wa_low);
3725 printf("\t %#04x%02x%02x%08x\n",
3726 txq->txq_descs[j].wtx_fields.wtxu_vlan,
3727 txq->txq_descs[j].wtx_fields.wtxu_options,
3728 txq->txq_descs[j].wtx_fields.wtxu_status,
3729 txq->txq_descs[j].wtx_cmdlen);
3730 }
3731 if (j == txs->txs_lastdesc)
3732 break;
3733 }
3734 }
3735 #endif
3736 }
3737 }
3738
3739 /*
3740 * wm_tick:
3741 *
3742 * One second timer, used to check link status, sweep up
3743 * completed transmit jobs, etc.
3744 */
3745 static void
3746 wm_tick(void *arg)
3747 {
3748 struct wm_softc *sc = arg;
3749 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3750 uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
3751 cexterr;
3752
3753 mutex_enter(sc->sc_core_lock);
3754
3755 if (sc->sc_core_stopping) {
3756 mutex_exit(sc->sc_core_lock);
3757 return;
3758 }
3759
3760 crcerrs = CSR_READ(sc, WMREG_CRCERRS);
3761 symerrc = CSR_READ(sc, WMREG_SYMERRC);
3762 mpc = CSR_READ(sc, WMREG_MPC);
3763 colc = CSR_READ(sc, WMREG_COLC);
3764 sec = CSR_READ(sc, WMREG_SEC);
3765 rlec = CSR_READ(sc, WMREG_RLEC);
3766
3767 WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
3768 WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
3769 WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
3770 WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
3771 WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
3772 WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
3773
3774 if (sc->sc_type >= WM_T_82543) {
3775 algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
3776 rxerrc = CSR_READ(sc, WMREG_RXERRC);
3777 cexterr = CSR_READ(sc, WMREG_CEXTERR);
3778 WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
3779 WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
3780 WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
3781
3782 WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
3783 WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
3784 WM_EVCNT_ADD(&sc->sc_ev_tsctfc, CSR_READ(sc, WMREG_TSCTFC));
3785 } else
3786 algnerrc = rxerrc = cexterr = 0;
3787
3788 if (sc->sc_type >= WM_T_82542_2_1) {
3789 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3790 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3791 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3792 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3793 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3794 }
3795 WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
3796 WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
3797 WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
3798 WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
3799 WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
3800 WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
3801 WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
3802 WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
3803 WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
3804 WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
3805 WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
3806 WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
3807 WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
3808 WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
3809 WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
3810
3811 WM_EVCNT_ADD(&sc->sc_ev_gorc,
3812 CSR_READ(sc, WMREG_GORCL) +
3813 ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
3814 WM_EVCNT_ADD(&sc->sc_ev_gotc,
3815 CSR_READ(sc, WMREG_GOTCL) +
3816 ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
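/*
 * The good-octets counters are 64-bit values split across a pair of
 * 32-bit clear-on-read registers, combined as low + (high << 32).
 * For example, GORCH = 0x1 and GORCL = 0x10 yield 0x100000010
 * (4294967312) octets.
 */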
3817
3818 WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
3819 WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
3820 WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
3821 WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
3822 WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
3823
3824 if (sc->sc_type >= WM_T_82540) {
3825 WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
3826 WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
3827 WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
3828 }
3829
3830 /*
3831 * The TOR(L) register counts all received octets, including those of:
3832 * - Errored frames
3833 * - Flow control frames
3834 * - Rejected broadcasts (this item appears in 82574 and newer
3835 * datasheets; what "broadcast rejected" means exactly is unclear.)
3836 */
3837 WM_EVCNT_ADD(&sc->sc_ev_tor,
3838 CSR_READ(sc, WMREG_TORL) +
3839 ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
3840 WM_EVCNT_ADD(&sc->sc_ev_tot,
3841 CSR_READ(sc, WMREG_TOTL) +
3842 ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
3843
3844 WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
3845 WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
3846 WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
3847 WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
3848 WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
3849 WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
3850 WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
3851 WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
3852 WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
3853 WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
3854 WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
3855 if (sc->sc_type < WM_T_82575) {
3856 WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
3857 WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
3858 WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
3859 WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
3860 WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
3861 WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
3862 CSR_READ(sc, WMREG_ICTXQMTC));
3863 WM_EVCNT_ADD(&sc->sc_ev_icrxdmtc,
3864 CSR_READ(sc, WMREG_ICRXDMTC));
3865 WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
3866 }
3867
3868 if (((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003))
3869 && ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0)) {
3870 WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, CSR_READ(sc, WMREG_B2OGPRC));
3871 WM_EVCNT_ADD(&sc->sc_ev_o2bspc, CSR_READ(sc, WMREG_O2BSPC));
3872 WM_EVCNT_ADD(&sc->sc_ev_b2ospc, CSR_READ(sc, WMREG_B2OSPC));
3873 WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, CSR_READ(sc, WMREG_O2BGPTC));
3874 }
3875 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3876 if_statadd_ref(nsr, if_collisions, colc);
3877 if_statadd_ref(nsr, if_ierrors,
3878 crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
3879 /*
3880 * WMREG_RNBC is incremented when there is no available buffer in
3881 * host memory. It is not a count of dropped packets, because an
3882 * Ethernet controller can still receive packets in that case as
3883 * long as there is space in the PHY's FIFO.
3884 *
3885 * If you want to track WMREG_RNBC itself, use a dedicated evcnt
3886 * instead of if_iqdrops.
3887 */
3888 if_statadd_ref(nsr, if_iqdrops, mpc);
3889 IF_STAT_PUTREF(ifp);
3890
3891 if (sc->sc_flags & WM_F_HAS_MII)
3892 mii_tick(&sc->sc_mii);
3893 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3894 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3895 wm_serdes_tick(sc);
3896 else
3897 wm_tbi_tick(sc);
3898
3899 mutex_exit(sc->sc_core_lock);
3900
3901 if (wm_watchdog(ifp))
3902 callout_schedule(&sc->sc_tick_ch, hz);
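/*
 * If wm_watchdog() found a hung queue it returned false and the
 * tick is not rescheduled here; the reset work is then expected to
 * restart it via wm_init() (an assumption drawn from the code above).
 */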
3903 }
3904
3905 static int
3906 wm_ifflags_cb(struct ethercom *ec)
3907 {
3908 struct ifnet *ifp = &ec->ec_if;
3909 struct wm_softc *sc = ifp->if_softc;
3910 u_short iffchange;
3911 int ecchange;
3912 bool needreset = false;
3913 int rc = 0;
3914
3915 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3916 device_xname(sc->sc_dev), __func__));
3917
3918 KASSERT(IFNET_LOCKED(ifp));
3919
3920 mutex_enter(sc->sc_core_lock);
3921
3922 /*
3923 * Check if_flags. The main purpose is to avoid resetting the
3924 * interface (and dropping the link) when bpf is opened.
3925 */
3926 iffchange = ifp->if_flags ^ sc->sc_if_flags;
3927 sc->sc_if_flags = ifp->if_flags;
3928 if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3929 needreset = true;
3930 goto ec;
3931 }
3932
3933 /* iff related updates */
3934 if ((iffchange & IFF_PROMISC) != 0)
3935 wm_set_filter(sc);
3936
3937 wm_set_vlan(sc);
3938
3939 ec:
3940 /* Check for ec_capenable. */
3941 ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
3942 sc->sc_ec_capenable = ec->ec_capenable;
3943 if ((ecchange & ~ETHERCAP_EEE) != 0) {
3944 needreset = true;
3945 goto out;
3946 }
3947
3948 /* ec related updates */
3949 wm_set_eee(sc);
3950
3951 out:
3952 if (needreset)
3953 rc = ENETRESET;
3954 mutex_exit(sc->sc_core_lock);
3955
3956 return rc;
3957 }
3958
3959 static bool
3960 wm_phy_need_linkdown_discard(struct wm_softc *sc)
3961 {
3962
3963 switch (sc->sc_phytype) {
3964 case WMPHY_82577: /* ihphy */
3965 case WMPHY_82578: /* atphy */
3966 case WMPHY_82579: /* ihphy */
3967 case WMPHY_I217: /* ihphy */
3968 case WMPHY_82580: /* ihphy */
3969 case WMPHY_I350: /* ihphy */
3970 return true;
3971 default:
3972 return false;
3973 }
3974 }
3975
3976 static void
3977 wm_set_linkdown_discard(struct wm_softc *sc)
3978 {
3979
3980 for (int i = 0; i < sc->sc_nqueues; i++) {
3981 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3982
3983 mutex_enter(txq->txq_lock);
3984 txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
3985 mutex_exit(txq->txq_lock);
3986 }
3987 }
3988
3989 static void
3990 wm_clear_linkdown_discard(struct wm_softc *sc)
3991 {
3992
3993 for (int i = 0; i < sc->sc_nqueues; i++) {
3994 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3995
3996 mutex_enter(txq->txq_lock);
3997 txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
3998 mutex_exit(txq->txq_lock);
3999 }
4000 }
4001
4002 /*
4003 * wm_ioctl: [ifnet interface function]
4004 *
4005 * Handle control requests from the operator.
4006 */
4007 static int
4008 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
4009 {
4010 struct wm_softc *sc = ifp->if_softc;
4011 struct ifreq *ifr = (struct ifreq *)data;
4012 struct ifaddr *ifa = (struct ifaddr *)data;
4013 struct sockaddr_dl *sdl;
4014 int error;
4015
4016 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4017 device_xname(sc->sc_dev), __func__));
4018
4019 switch (cmd) {
4020 case SIOCADDMULTI:
4021 case SIOCDELMULTI:
4022 break;
4023 default:
4024 KASSERT(IFNET_LOCKED(ifp));
4025 }
4026
4027 switch (cmd) {
4028 case SIOCSIFMEDIA:
4029 mutex_enter(sc->sc_core_lock);
4030 /* Flow control requires full-duplex mode. */
4031 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4032 (ifr->ifr_media & IFM_FDX) == 0)
4033 ifr->ifr_media &= ~IFM_ETH_FMASK;
4034 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4035 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4036 /* We can do both TXPAUSE and RXPAUSE. */
4037 ifr->ifr_media |=
4038 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4039 }
4040 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4041 }
4042 mutex_exit(sc->sc_core_lock);
4043 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
4044 if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
4045 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
4046 DPRINTF(sc, WM_DEBUG_LINK,
4047 ("%s: %s: Set linkdown discard flag\n",
4048 device_xname(sc->sc_dev), __func__));
4049 wm_set_linkdown_discard(sc);
4050 }
4051 }
4052 break;
4053 case SIOCINITIFADDR:
4054 mutex_enter(sc->sc_core_lock);
4055 if (ifa->ifa_addr->sa_family == AF_LINK) {
4056 sdl = satosdl(ifp->if_dl->ifa_addr);
4057 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
4058 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
4059 /* Unicast address is the first multicast entry */
4060 wm_set_filter(sc);
4061 error = 0;
4062 mutex_exit(sc->sc_core_lock);
4063 break;
4064 }
4065 mutex_exit(sc->sc_core_lock);
4066 /*FALLTHROUGH*/
4067 default:
4068 if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
4069 if (((ifp->if_flags & IFF_UP) != 0) &&
4070 ((ifr->ifr_flags & IFF_UP) == 0)) {
4071 DPRINTF(sc, WM_DEBUG_LINK,
4072 ("%s: %s: Set linkdown discard flag\n",
4073 device_xname(sc->sc_dev), __func__));
4074 wm_set_linkdown_discard(sc);
4075 }
4076 }
4077 const int s = splnet();
4078 /* It may call wm_start, so unlock here */
4079 error = ether_ioctl(ifp, cmd, data);
4080 splx(s);
4081 if (error != ENETRESET)
4082 break;
4083
4084 error = 0;
4085
4086 if (cmd == SIOCSIFCAP)
4087 error = if_init(ifp);
4088 else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
4089 mutex_enter(sc->sc_core_lock);
4090 if (sc->sc_if_flags & IFF_RUNNING) {
4091 /*
4092 * Multicast list has changed; set the
4093 * hardware filter accordingly.
4094 */
4095 wm_set_filter(sc);
4096 }
4097 mutex_exit(sc->sc_core_lock);
4098 }
4099 break;
4100 }
4101
4102 return error;
4103 }
4104
4105 /* MAC address related */
4106
4107 /*
4108 * Get the offset of the MAC address and return it.
4109 * If an error occurs, return offset 0.
4110 */
4111 static uint16_t
4112 wm_check_alt_mac_addr(struct wm_softc *sc)
4113 {
4114 uint16_t myea[ETHER_ADDR_LEN / 2];
4115 uint16_t offset = NVM_OFF_MACADDR;
4116
4117 /* Try to read alternative MAC address pointer */
4118 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
4119 return 0;
4120
4121 /* Check whether the pointer is valid. */
4122 if ((offset == 0x0000) || (offset == 0xffff))
4123 return 0;
4124
4125 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
4126 /*
4127 * Check whether the alternative MAC address is valid. Some cards
4128 * have a non-0xffff pointer but don't actually use an alternative
4129 * MAC address.
4130 * Check the multicast (group) bit: a valid unicast address must
4131 * have it clear; this also rejects an erased (all-ones) NVM.
4132 */
4133 if (wm_nvm_read(sc, offset, 1, myea) == 0)
4134 if (((myea[0] & 0xff) & 0x01) == 0)
4135 return offset; /* Found */
4136
4137 /* Not found */
4138 return 0;
4139 }
4140
4141 static int
4142 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4143 {
4144 uint16_t myea[ETHER_ADDR_LEN / 2];
4145 uint16_t offset = NVM_OFF_MACADDR;
4146 int do_invert = 0;
4147
4148 switch (sc->sc_type) {
4149 case WM_T_82580:
4150 case WM_T_I350:
4151 case WM_T_I354:
4152 /* EEPROM Top Level Partitioning */
4153 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
4154 break;
4155 case WM_T_82571:
4156 case WM_T_82575:
4157 case WM_T_82576:
4158 case WM_T_80003:
4159 case WM_T_I210:
4160 case WM_T_I211:
4161 offset = wm_check_alt_mac_addr(sc);
4162 if (offset == 0)
4163 if ((sc->sc_funcid & 0x01) == 1)
4164 do_invert = 1;
4165 break;
4166 default:
4167 if ((sc->sc_funcid & 0x01) == 1)
4168 do_invert = 1;
4169 break;
4170 }
4171
4172 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
4173 goto bad;
4174
4175 enaddr[0] = myea[0] & 0xff;
4176 enaddr[1] = myea[0] >> 8;
4177 enaddr[2] = myea[1] & 0xff;
4178 enaddr[3] = myea[1] >> 8;
4179 enaddr[4] = myea[2] & 0xff;
4180 enaddr[5] = myea[2] >> 8;
4181
4182 /*
4183 * Toggle the LSB of the MAC address on the second port
4184 * of some dual port cards.
4185 */
4186 if (do_invert != 0)
4187 enaddr[5] ^= 1;
4188
4189 return 0;
4190
4191 bad:
4192 return -1;
4193 }
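/*
 * Worked example with illustrative values: the NVM stores the MAC
 * address as three little-endian 16-bit words, so myea[] =
 * { 0x1100, 0x3322, 0x5544 } unpacks to 00:11:22:33:44:55 via the
 * byte extraction above.
 */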
4194
4195 /*
4196 * wm_set_ral:
4197 *
4198 * Set an entry in the receive address list.
4199 */
4200 static void
4201 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4202 {
4203 uint32_t ral_lo, ral_hi, addrl, addrh;
4204 uint32_t wlock_mac;
4205 int rv;
4206
4207 if (enaddr != NULL) {
4208 ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
4209 ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
4210 ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
4211 ral_hi |= RAL_AV;
4212 } else {
4213 ral_lo = 0;
4214 ral_hi = 0;
4215 }
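/*
 * Packing example with an illustrative address: 00:11:22:33:44:55
 * yields ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV.
 */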
4216
4217 switch (sc->sc_type) {
4218 case WM_T_82542_2_0:
4219 case WM_T_82542_2_1:
4220 case WM_T_82543:
4221 CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
4222 CSR_WRITE_FLUSH(sc);
4223 CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
4224 CSR_WRITE_FLUSH(sc);
4225 break;
4226 case WM_T_PCH2:
4227 case WM_T_PCH_LPT:
4228 case WM_T_PCH_SPT:
4229 case WM_T_PCH_CNP:
4230 if (idx == 0) {
4231 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4232 CSR_WRITE_FLUSH(sc);
4233 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4234 CSR_WRITE_FLUSH(sc);
4235 return;
4236 }
4237 if (sc->sc_type != WM_T_PCH2) {
4238 wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
4239 FWSM_WLOCK_MAC);
4240 addrl = WMREG_SHRAL(idx - 1);
4241 addrh = WMREG_SHRAH(idx - 1);
4242 } else {
4243 wlock_mac = 0;
4244 addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
4245 addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
4246 }
4247
4248 if ((wlock_mac == 0) || (idx <= wlock_mac)) {
4249 rv = wm_get_swflag_ich8lan(sc);
4250 if (rv != 0)
4251 return;
4252 CSR_WRITE(sc, addrl, ral_lo);
4253 CSR_WRITE_FLUSH(sc);
4254 CSR_WRITE(sc, addrh, ral_hi);
4255 CSR_WRITE_FLUSH(sc);
4256 wm_put_swflag_ich8lan(sc);
4257 }
4258
4259 break;
4260 default:
4261 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4262 CSR_WRITE_FLUSH(sc);
4263 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4264 CSR_WRITE_FLUSH(sc);
4265 break;
4266 }
4267 }
4268
4269 /*
4270 * wm_mchash:
4271 *
4272 * Compute the hash of the multicast address for the 4096-bit
4273 * multicast filter.
4274 */
4275 static uint32_t
4276 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4277 {
4278 static const int lo_shift[4] = { 4, 3, 2, 0 };
4279 static const int hi_shift[4] = { 4, 5, 6, 8 };
4280 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4281 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4282 uint32_t hash;
4283
4284 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4285 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4286 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4287 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
4288 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4289 (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4290 return (hash & 0x3ff);
4291 }
4292 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4293 (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4294
4295 return (hash & 0xfff);
4296 }
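/*
 * Worked example (illustrative): for 01:00:5e:00:00:01 with
 * sc_mchash_type == 0 on a non-ICH/PCH chip, enaddr[4] == 0x00 and
 * enaddr[5] == 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010.
 * wm_set_filter() then sets bit 16 (hash & 0x1f) of MTA word 0
 * (hash >> 5).
 */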
4297
4298 /*
4299 * wm_rar_count:
4300 * Return the number of usable receive address (RAR) entries.
4301 */
4302 static int
4303 wm_rar_count(struct wm_softc *sc)
4304 {
4305 int size;
4306
4307 switch (sc->sc_type) {
4308 case WM_T_ICH8:
4309 size = WM_RAL_TABSIZE_ICH8 - 1;
4310 break;
4311 case WM_T_ICH9:
4312 case WM_T_ICH10:
4313 case WM_T_PCH:
4314 size = WM_RAL_TABSIZE_ICH8;
4315 break;
4316 case WM_T_PCH2:
4317 size = WM_RAL_TABSIZE_PCH2;
4318 break;
4319 case WM_T_PCH_LPT:
4320 case WM_T_PCH_SPT:
4321 case WM_T_PCH_CNP:
4322 size = WM_RAL_TABSIZE_PCH_LPT;
4323 break;
4324 case WM_T_82575:
4325 case WM_T_I210:
4326 case WM_T_I211:
4327 size = WM_RAL_TABSIZE_82575;
4328 break;
4329 case WM_T_82576:
4330 case WM_T_82580:
4331 size = WM_RAL_TABSIZE_82576;
4332 break;
4333 case WM_T_I350:
4334 case WM_T_I354:
4335 size = WM_RAL_TABSIZE_I350;
4336 break;
4337 default:
4338 size = WM_RAL_TABSIZE;
4339 }
4340
4341 return size;
4342 }
4343
4344 /*
4345 * wm_set_filter:
4346 *
4347 * Set up the receive filter.
4348 */
4349 static void
4350 wm_set_filter(struct wm_softc *sc)
4351 {
4352 struct ethercom *ec = &sc->sc_ethercom;
4353 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4354 struct ether_multi *enm;
4355 struct ether_multistep step;
4356 bus_addr_t mta_reg;
4357 uint32_t hash, reg, bit;
4358 int i, size, ralmax, rv;
4359
4360 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4361 device_xname(sc->sc_dev), __func__));
4362 KASSERT(mutex_owned(sc->sc_core_lock));
4363
4364 if (sc->sc_type >= WM_T_82544)
4365 mta_reg = WMREG_CORDOVA_MTA;
4366 else
4367 mta_reg = WMREG_MTA;
4368
4369 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4370
4371 if (sc->sc_if_flags & IFF_BROADCAST)
4372 sc->sc_rctl |= RCTL_BAM;
4373 if (sc->sc_if_flags & IFF_PROMISC) {
4374 sc->sc_rctl |= RCTL_UPE;
4375 ETHER_LOCK(ec);
4376 ec->ec_flags |= ETHER_F_ALLMULTI;
4377 ETHER_UNLOCK(ec);
4378 goto allmulti;
4379 }
4380
4381 /*
4382 * Set the station address in the first RAL slot, and
4383 * clear the remaining slots.
4384 */
4385 size = wm_rar_count(sc);
4386 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4387
4388 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
4389 || (sc->sc_type == WM_T_PCH_CNP)) {
4390 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
4391 switch (i) {
4392 case 0:
4393 /* We can use all entries */
4394 ralmax = size;
4395 break;
4396 case 1:
4397 /* Only RAR[0] */
4398 ralmax = 1;
4399 break;
4400 default:
4401 /* Available SHRA + RAR[0] */
4402 ralmax = i + 1;
4403 }
4404 } else
4405 ralmax = size;
4406 for (i = 1; i < size; i++) {
4407 if (i < ralmax)
4408 wm_set_ral(sc, NULL, i);
4409 }
4410
4411 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4412 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4413 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4414 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
4415 size = WM_ICH8_MC_TABSIZE;
4416 else
4417 size = WM_MC_TABSIZE;
4418 /* Clear out the multicast table. */
4419 for (i = 0; i < size; i++) {
4420 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4421 CSR_WRITE_FLUSH(sc);
4422 }
4423
4424 ETHER_LOCK(ec);
4425 ETHER_FIRST_MULTI(step, ec, enm);
4426 while (enm != NULL) {
4427 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4428 ec->ec_flags |= ETHER_F_ALLMULTI;
4429 ETHER_UNLOCK(ec);
4430 /*
4431 * We must listen to a range of multicast addresses.
4432 * For now, just accept all multicasts, rather than
4433 * trying to set only those filter bits needed to match
4434 * the range. (At this time, the only use of address
4435 * ranges is for IP multicast routing, for which the
4436 * range is big enough to require all bits set.)
4437 */
4438 goto allmulti;
4439 }
4440
4441 hash = wm_mchash(sc, enm->enm_addrlo);
4442
4443 reg = (hash >> 5);
4444 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4445 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4446 || (sc->sc_type == WM_T_PCH2)
4447 || (sc->sc_type == WM_T_PCH_LPT)
4448 || (sc->sc_type == WM_T_PCH_SPT)
4449 || (sc->sc_type == WM_T_PCH_CNP))
4450 reg &= 0x1f;
4451 else
4452 reg &= 0x7f;
4453 bit = hash & 0x1f;
4454
4455 hash = CSR_READ(sc, mta_reg + (reg << 2));
4456 hash |= 1U << bit;
4457
4458 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
4459 /*
4460 * 82544 Errata 9: Certain registers cannot be written
4461 * with particular alignments in PCI-X bus operation
4462 * (FCAH, MTA and VFTA).
4463 */
4464 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4465 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4466 CSR_WRITE_FLUSH(sc);
4467 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4468 CSR_WRITE_FLUSH(sc);
4469 } else {
4470 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4471 CSR_WRITE_FLUSH(sc);
4472 }
4473
4474 ETHER_NEXT_MULTI(step, enm);
4475 }
4476 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4477 ETHER_UNLOCK(ec);
4478
4479 goto setit;
4480
4481 allmulti:
4482 sc->sc_rctl |= RCTL_MPE;
4483
4484 setit:
4485 if (sc->sc_type >= WM_T_PCH2) {
4486 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4487 && (ifp->if_mtu > ETHERMTU))
4488 rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
4489 else
4490 rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
4491 if (rv != 0)
4492 device_printf(sc->sc_dev,
4493 "Failed to do workaround for jumbo frame.\n");
4494 }
4495
4496 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4497 }
4498
4499 /* Reset and init related */
4500
4501 static void
4502 wm_set_vlan(struct wm_softc *sc)
4503 {
4504
4505 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4506 device_xname(sc->sc_dev), __func__));
4507
4508 /* Deal with VLAN enables. */
4509 if (VLAN_ATTACHED(&sc->sc_ethercom))
4510 sc->sc_ctrl |= CTRL_VME;
4511 else
4512 sc->sc_ctrl &= ~CTRL_VME;
4513
4514 /* Write the control registers. */
4515 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4516 }
4517
4518 static void
4519 wm_set_pcie_completion_timeout(struct wm_softc *sc)
4520 {
4521 uint32_t gcr;
4522 pcireg_t ctrl2;
4523
4524 gcr = CSR_READ(sc, WMREG_GCR);
4525
4526 /* Only take action if timeout value is defaulted to 0 */
4527 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
4528 goto out;
4529
4530 if ((gcr & GCR_CAP_VER2) == 0) {
4531 gcr |= GCR_CMPL_TMOUT_10MS;
4532 goto out;
4533 }
4534
4535 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4536 sc->sc_pcixe_capoff + PCIE_DCSR2);
4537 ctrl2 |= WM_PCIE_DCSR2_16MS;
4538 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4539 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
4540
4541 out:
4542 /* Disable completion timeout resend */
4543 gcr &= ~GCR_CMPL_TMOUT_RESEND;
4544
4545 CSR_WRITE(sc, WMREG_GCR, gcr);
4546 }
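/*
 * Design note: parts without a version-2 PCIe capability get a 10ms
 * timeout via the device-specific GCR register, while version-2
 * parts use the standard Completion Timeout Value field in the PCIe
 * Device Control 2 register, set to 16ms above.
 */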
4547
4548 void
4549 wm_get_auto_rd_done(struct wm_softc *sc)
4550 {
4551 int i;
4552
4553 /* Wait for eeprom to reload */
4554 switch (sc->sc_type) {
4555 case WM_T_82571:
4556 case WM_T_82572:
4557 case WM_T_82573:
4558 case WM_T_82574:
4559 case WM_T_82583:
4560 case WM_T_82575:
4561 case WM_T_82576:
4562 case WM_T_82580:
4563 case WM_T_I350:
4564 case WM_T_I354:
4565 case WM_T_I210:
4566 case WM_T_I211:
4567 case WM_T_80003:
4568 case WM_T_ICH8:
4569 case WM_T_ICH9:
4570 for (i = 0; i < 10; i++) {
4571 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4572 break;
4573 delay(1000);
4574 }
4575 if (i == 10) {
4576 log(LOG_ERR, "%s: auto read from eeprom failed to "
4577 "complete\n", device_xname(sc->sc_dev));
4578 }
4579 break;
4580 default:
4581 break;
4582 }
4583 }
4584
4585 void
4586 wm_lan_init_done(struct wm_softc *sc)
4587 {
4588 uint32_t reg = 0;
4589 int i;
4590
4591 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4592 device_xname(sc->sc_dev), __func__));
4593
4594 /* Wait for eeprom to reload */
4595 switch (sc->sc_type) {
4596 case WM_T_ICH10:
4597 case WM_T_PCH:
4598 case WM_T_PCH2:
4599 case WM_T_PCH_LPT:
4600 case WM_T_PCH_SPT:
4601 case WM_T_PCH_CNP:
4602 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4603 reg = CSR_READ(sc, WMREG_STATUS);
4604 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4605 break;
4606 delay(100);
4607 }
4608 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4609 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4610 "complete\n", device_xname(sc->sc_dev), __func__);
4611 }
4612 break;
4613 default:
4614 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4615 __func__);
4616 break;
4617 }
4618
4619 reg &= ~STATUS_LAN_INIT_DONE;
4620 CSR_WRITE(sc, WMREG_STATUS, reg);
4621 }
4622
4623 void
4624 wm_get_cfg_done(struct wm_softc *sc)
4625 {
4626 int mask;
4627 uint32_t reg;
4628 int i;
4629
4630 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4631 device_xname(sc->sc_dev), __func__));
4632
4633 /* Wait for eeprom to reload */
4634 switch (sc->sc_type) {
4635 case WM_T_82542_2_0:
4636 case WM_T_82542_2_1:
4637 /* null */
4638 break;
4639 case WM_T_82543:
4640 case WM_T_82544:
4641 case WM_T_82540:
4642 case WM_T_82545:
4643 case WM_T_82545_3:
4644 case WM_T_82546:
4645 case WM_T_82546_3:
4646 case WM_T_82541:
4647 case WM_T_82541_2:
4648 case WM_T_82547:
4649 case WM_T_82547_2:
4650 case WM_T_82573:
4651 case WM_T_82574:
4652 case WM_T_82583:
4653 /* generic */
4654 delay(10*1000);
4655 break;
4656 case WM_T_80003:
4657 case WM_T_82571:
4658 case WM_T_82572:
4659 case WM_T_82575:
4660 case WM_T_82576:
4661 case WM_T_82580:
4662 case WM_T_I350:
4663 case WM_T_I354:
4664 case WM_T_I210:
4665 case WM_T_I211:
4666 if (sc->sc_type == WM_T_82571) {
4667 /* Only 82571 shares port 0 */
4668 mask = EEMNGCTL_CFGDONE_0;
4669 } else
4670 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4671 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4672 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4673 break;
4674 delay(1000);
4675 }
4676 if (i >= WM_PHY_CFG_TIMEOUT)
4677 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
4678 device_xname(sc->sc_dev), __func__));
4679 break;
4680 case WM_T_ICH8:
4681 case WM_T_ICH9:
4682 case WM_T_ICH10:
4683 case WM_T_PCH:
4684 case WM_T_PCH2:
4685 case WM_T_PCH_LPT:
4686 case WM_T_PCH_SPT:
4687 case WM_T_PCH_CNP:
4688 delay(10*1000);
4689 if (sc->sc_type >= WM_T_ICH10)
4690 wm_lan_init_done(sc);
4691 else
4692 wm_get_auto_rd_done(sc);
4693
4694 /* Clear PHY Reset Asserted bit */
4695 reg = CSR_READ(sc, WMREG_STATUS);
4696 if ((reg & STATUS_PHYRA) != 0)
4697 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4698 break;
4699 default:
4700 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4701 __func__);
4702 break;
4703 }
4704 }
4705
4706 int
4707 wm_phy_post_reset(struct wm_softc *sc)
4708 {
4709 device_t dev = sc->sc_dev;
4710 uint16_t reg;
4711 int rv = 0;
4712
4713 /* This function is only for ICH8 and newer. */
4714 if (sc->sc_type < WM_T_ICH8)
4715 return 0;
4716
4717 if (wm_phy_resetisblocked(sc)) {
4718 /* XXX */
4719 device_printf(dev, "PHY is blocked\n");
4720 return -1;
4721 }
4722
4723 /* Allow time for h/w to get to quiescent state after reset */
4724 delay(10*1000);
4725
4726 /* Perform any necessary post-reset workarounds */
4727 if (sc->sc_type == WM_T_PCH)
4728 rv = wm_hv_phy_workarounds_ich8lan(sc);
4729 else if (sc->sc_type == WM_T_PCH2)
4730 rv = wm_lv_phy_workarounds_ich8lan(sc);
4731 if (rv != 0)
4732 return rv;
4733
4734 /* Clear the host wakeup bit after lcd reset */
4735 if (sc->sc_type >= WM_T_PCH) {
4736 wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4737 reg &= ~BM_WUC_HOST_WU_BIT;
4738 wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4739 }
4740
4741 /* Configure the LCD with the extended configuration region in NVM */
4742 if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4743 return rv;
4744
4745 /* Configure the LCD with the OEM bits in NVM */
4746 rv = wm_oem_bits_config_ich8lan(sc, true);
4747
4748 if (sc->sc_type == WM_T_PCH2) {
4749 /* Ungate automatic PHY configuration on non-managed 82579 */
4750 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4751 delay(10 * 1000);
4752 wm_gate_hw_phy_config_ich8lan(sc, false);
4753 }
4754 /* Set EEE LPI Update Timer to 200usec */
4755 rv = sc->phy.acquire(sc);
4756 if (rv)
4757 return rv;
4758 rv = wm_write_emi_reg_locked(dev,
4759 I82579_LPI_UPDATE_TIMER, 0x1387);
4760 sc->phy.release(sc);
4761 }
4762
4763 return rv;
4764 }
4765
4766 /* Only for PCH and newer */
4767 static int
4768 wm_write_smbus_addr(struct wm_softc *sc)
4769 {
4770 uint32_t strap, freq;
4771 uint16_t phy_data;
4772 int rv;
4773
4774 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4775 device_xname(sc->sc_dev), __func__));
4776 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4777
4778 strap = CSR_READ(sc, WMREG_STRAP);
4779 freq = __SHIFTOUT(strap, STRAP_FREQ);
4780
4781 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4782 if (rv != 0)
4783 return rv;
4784
4785 phy_data &= ~HV_SMB_ADDR_ADDR;
4786 phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4787 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4788
4789 if (sc->sc_phytype == WMPHY_I217) {
4790 /* Restore SMBus frequency */
4791 if (freq--) {
4792 phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4793 | HV_SMB_ADDR_FREQ_HIGH);
4794 phy_data |= __SHIFTIN((freq & 0x01) != 0,
4795 HV_SMB_ADDR_FREQ_LOW);
4796 phy_data |= __SHIFTIN((freq & 0x02) != 0,
4797 HV_SMB_ADDR_FREQ_HIGH);
4798 } else
4799 DPRINTF(sc, WM_DEBUG_INIT,
4800 ("%s: %s Unsupported SMB frequency in PHY\n",
4801 device_xname(sc->sc_dev), __func__));
4802 }
4803
4804 return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4805 phy_data);
4806 }
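/*
 * Worked example (illustrative): a STRAP_FREQ field of 2 becomes
 * freq == 1 after the post-decrement, so HV_SMB_ADDR_FREQ_LOW is set
 * and HV_SMB_ADDR_FREQ_HIGH stays clear; a field of 0 takes the
 * "unsupported" branch and leaves the frequency bits untouched.
 */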
4807
4808 static int
4809 wm_init_lcd_from_nvm(struct wm_softc *sc)
4810 {
4811 uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4812 uint16_t phy_page = 0;
4813 int rv = 0;
4814
4815 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4816 device_xname(sc->sc_dev), __func__));
4817
4818 switch (sc->sc_type) {
4819 case WM_T_ICH8:
4820 if (sc->sc_phytype != WMPHY_IGP_3)
4821 return 0;
4823
4824 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4825 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4826 sw_cfg_mask = FEXTNVM_SW_CONFIG;
4827 break;
4828 }
4829 /* FALLTHROUGH */
4830 case WM_T_PCH:
4831 case WM_T_PCH2:
4832 case WM_T_PCH_LPT:
4833 case WM_T_PCH_SPT:
4834 case WM_T_PCH_CNP:
4835 sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4836 break;
4837 default:
4838 return 0;
4839 }
4840
4841 if ((rv = sc->phy.acquire(sc)) != 0)
4842 return rv;
4843
4844 reg = CSR_READ(sc, WMREG_FEXTNVM);
4845 if ((reg & sw_cfg_mask) == 0)
4846 goto release;
4847
4848 /*
4849 * Make sure HW does not configure LCD from PHY extended configuration
4850 * before SW configuration
4851 */
4852 extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4853 if ((sc->sc_type < WM_T_PCH2)
4854 && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4855 goto release;
4856
4857 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4858 device_xname(sc->sc_dev), __func__));
4859 /* The extended config pointer is in DWORDs; convert to a word offset. */
4860 word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4861
4862 reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4863 cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4864 if (cnf_size == 0)
4865 goto release;
4866
4867 if (((sc->sc_type == WM_T_PCH)
4868 && ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4869 || (sc->sc_type > WM_T_PCH)) {
4870 /*
4871 * HW configures the SMBus address and LEDs when the OEM and
4872 * LCD Write Enable bits are set in the NVM. When both NVM bits
4873 * are cleared, SW will configure them instead.
4874 */
4875 DPRINTF(sc, WM_DEBUG_INIT,
4876 ("%s: %s: Configure SMBus and LED\n",
4877 device_xname(sc->sc_dev), __func__));
4878 if ((rv = wm_write_smbus_addr(sc)) != 0)
4879 goto release;
4880
4881 reg = CSR_READ(sc, WMREG_LEDCTL);
4882 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4883 (uint16_t)reg);
4884 if (rv != 0)
4885 goto release;
4886 }
4887
4888 /* Configure LCD from the extended config region: (data, address) word pairs. */
4889 for (i = 0; i < cnf_size; i++) {
4890 uint16_t reg_data, reg_addr;
4891
4892 if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
4893 goto release;
4894
4895 if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
4896 goto release;
4897
4898 if (reg_addr == IGPHY_PAGE_SELECT)
4899 phy_page = reg_data;
4900
4901 reg_addr &= IGPHY_MAXREGADDR;
4902 reg_addr |= phy_page;
4903
4904 KASSERT(sc->phy.writereg_locked != NULL);
4905 rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4906 reg_data);
4907 }
4908
4909 release:
4910 sc->phy.release(sc);
4911 return rv;
4912 }
4913
4914 /*
4915 * wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4916 * @sc: pointer to the HW structure
4917 * @d0_state: true if entering D0, false if entering D3
4918 *
4919 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
4920 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
4921 * in the NVM determine whether HW should configure LPLU and Gbe Disable.
4922 */
4923 int
4924 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
4925 {
4926 uint32_t mac_reg;
4927 uint16_t oem_reg;
4928 int rv;
4929
4930 if (sc->sc_type < WM_T_PCH)
4931 return 0;
4932
4933 rv = sc->phy.acquire(sc);
4934 if (rv != 0)
4935 return rv;
4936
4937 if (sc->sc_type == WM_T_PCH) {
4938 mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4939 if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
4940 goto release;
4941 }
4942
4943 mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
4944 if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
4945 goto release;
4946
4947 mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
4948
4949 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
4950 if (rv != 0)
4951 goto release;
4952 oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
4953
4954 if (d0_state) {
4955 if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
4956 oem_reg |= HV_OEM_BITS_A1KDIS;
4957 if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
4958 oem_reg |= HV_OEM_BITS_LPLU;
4959 } else {
4960 if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
4961 != 0)
4962 oem_reg |= HV_OEM_BITS_A1KDIS;
4963 if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
4964 != 0)
4965 oem_reg |= HV_OEM_BITS_LPLU;
4966 }
4967
4968 /* Set Restart auto-neg to activate the bits */
4969 if ((d0_state || (sc->sc_type != WM_T_PCH))
4970 && (wm_phy_resetisblocked(sc) == false))
4971 oem_reg |= HV_OEM_BITS_ANEGNOW;
4972
4973 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
4974
4975 release:
4976 sc->phy.release(sc);
4977
4978 return rv;
4979 }
4980
4981 /* Init hardware bits */
4982 void
4983 wm_initialize_hardware_bits(struct wm_softc *sc)
4984 {
4985 uint32_t tarc0, tarc1, reg;
4986
4987 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4988 device_xname(sc->sc_dev), __func__));
4989
4990 /* For 82571 variant, 80003 and ICHs */
4991 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4992 || (sc->sc_type >= WM_T_80003)) {
4993
4994 /* Transmit Descriptor Control 0 */
4995 reg = CSR_READ(sc, WMREG_TXDCTL(0));
4996 reg |= TXDCTL_COUNT_DESC;
4997 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
4998
4999 /* Transmit Descriptor Control 1 */
5000 reg = CSR_READ(sc, WMREG_TXDCTL(1));
5001 reg |= TXDCTL_COUNT_DESC;
5002 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
5003
5004 /* TARC0 */
5005 tarc0 = CSR_READ(sc, WMREG_TARC0);
5006 switch (sc->sc_type) {
5007 case WM_T_82571:
5008 case WM_T_82572:
5009 case WM_T_82573:
5010 case WM_T_82574:
5011 case WM_T_82583:
5012 case WM_T_80003:
5013 /* Clear bits 30..27 */
5014 tarc0 &= ~__BITS(30, 27);
5015 break;
5016 default:
5017 break;
5018 }
5019
5020 switch (sc->sc_type) {
5021 case WM_T_82571:
5022 case WM_T_82572:
5023 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
5024
5025 tarc1 = CSR_READ(sc, WMREG_TARC1);
5026 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
5027 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
5028 /* 8257[12] Errata No.7 */
5029 			tarc1 |= __BIT(22);	  /* TARC1 bit 22 */
5030
5031 /* TARC1 bit 28 */
5032 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5033 tarc1 &= ~__BIT(28);
5034 else
5035 tarc1 |= __BIT(28);
5036 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5037
5038 /*
5039 * 8257[12] Errata No.13
5040 			 * Disable Dynamic Clock Gating.
5041 */
5042 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5043 reg &= ~CTRL_EXT_DMA_DYN_CLK;
5044 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5045 break;
5046 case WM_T_82573:
5047 case WM_T_82574:
5048 case WM_T_82583:
5049 if ((sc->sc_type == WM_T_82574)
5050 || (sc->sc_type == WM_T_82583))
5051 tarc0 |= __BIT(26); /* TARC0 bit 26 */
5052
5053 /* Extended Device Control */
5054 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5055 reg &= ~__BIT(23); /* Clear bit 23 */
5056 reg |= __BIT(22); /* Set bit 22 */
5057 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5058
5059 /* Device Control */
5060 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
5061 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5062
5063 /* PCIe Control Register */
5064 /*
5065 * 82573 Errata (unknown).
5066 *
5067 * 82574 Errata 25 and 82583 Errata 12
5068 * "Dropped Rx Packets":
5069 			 * NVM Image Version 2.1.4 and newer does not have this bug.
5070 */
5071 reg = CSR_READ(sc, WMREG_GCR);
5072 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
5073 CSR_WRITE(sc, WMREG_GCR, reg);
5074
5075 if ((sc->sc_type == WM_T_82574)
5076 || (sc->sc_type == WM_T_82583)) {
5077 /*
5078 * Document says this bit must be set for
5079 * proper operation.
5080 */
5081 reg = CSR_READ(sc, WMREG_GCR);
5082 reg |= __BIT(22);
5083 CSR_WRITE(sc, WMREG_GCR, reg);
5084
5085 /*
5086 				 * Apply a workaround for a hardware erratum
5087 				 * documented in the errata docs. It fixes an
5088 				 * issue where error-prone or unreliable PCIe
5089 				 * completions occur, particularly with ASPM
5090 				 * enabled. Without the fix, the issue can
5091 				 * cause Tx timeouts.
5092 */
5093 reg = CSR_READ(sc, WMREG_GCR2);
5094 reg |= __BIT(0);
5095 CSR_WRITE(sc, WMREG_GCR2, reg);
5096 }
5097 break;
5098 case WM_T_80003:
5099 /* TARC0 */
5100 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
5101 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
5102 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
5103
5104 /* TARC1 bit 28 */
5105 tarc1 = CSR_READ(sc, WMREG_TARC1);
5106 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5107 tarc1 &= ~__BIT(28);
5108 else
5109 tarc1 |= __BIT(28);
5110 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5111 break;
5112 case WM_T_ICH8:
5113 case WM_T_ICH9:
5114 case WM_T_ICH10:
5115 case WM_T_PCH:
5116 case WM_T_PCH2:
5117 case WM_T_PCH_LPT:
5118 case WM_T_PCH_SPT:
5119 case WM_T_PCH_CNP:
5120 /* TARC0 */
5121 if (sc->sc_type == WM_T_ICH8) {
5122 /* Set TARC0 bits 29 and 28 */
5123 tarc0 |= __BITS(29, 28);
5124 } else if (sc->sc_type == WM_T_PCH_SPT) {
5125 tarc0 |= __BIT(29);
5126 /*
5127 * Drop bit 28. From Linux.
5128 * See I218/I219 spec update
5129 * "5. Buffer Overrun While the I219 is
5130 * Processing DMA Transactions"
5131 */
5132 tarc0 &= ~__BIT(28);
5133 }
5134 /* Set TARC0 bits 23,24,26,27 */
5135 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
5136
5137 /* CTRL_EXT */
5138 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5139 reg |= __BIT(22); /* Set bit 22 */
5140 /*
5141 * Enable PHY low-power state when MAC is at D3
5142 * w/o WoL
5143 */
5144 if (sc->sc_type >= WM_T_PCH)
5145 reg |= CTRL_EXT_PHYPDEN;
5146 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5147
5148 /* TARC1 */
5149 tarc1 = CSR_READ(sc, WMREG_TARC1);
5150 /* bit 28 */
5151 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5152 tarc1 &= ~__BIT(28);
5153 else
5154 tarc1 |= __BIT(28);
5155 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
5156 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5157
5158 /* Device Status */
5159 if (sc->sc_type == WM_T_ICH8) {
5160 reg = CSR_READ(sc, WMREG_STATUS);
5161 reg &= ~__BIT(31);
5162 CSR_WRITE(sc, WMREG_STATUS, reg);
5163
5164 }
5165
5166 /* IOSFPC */
5167 if (sc->sc_type == WM_T_PCH_SPT) {
5168 reg = CSR_READ(sc, WMREG_IOSFPC);
5169 reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
5170 CSR_WRITE(sc, WMREG_IOSFPC, reg);
5171 }
5172 /*
5173 * Work-around descriptor data corruption issue during
5174 * NFS v2 UDP traffic, just disable the NFS filtering
5175 * capability.
5176 */
5177 reg = CSR_READ(sc, WMREG_RFCTL);
5178 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
5179 CSR_WRITE(sc, WMREG_RFCTL, reg);
5180 break;
5181 default:
5182 break;
5183 }
5184 CSR_WRITE(sc, WMREG_TARC0, tarc0);
5185
5186 switch (sc->sc_type) {
5187 case WM_T_82571:
5188 case WM_T_82572:
5189 case WM_T_82573:
5190 case WM_T_80003:
5191 case WM_T_ICH8:
5192 /*
5193 * 8257[12] Errata No.52, 82573 Errata No.43 and some
5194 * others to avoid RSS Hash Value bug.
5195 */
5196 reg = CSR_READ(sc, WMREG_RFCTL);
5197 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
5198 CSR_WRITE(sc, WMREG_RFCTL, reg);
5199 break;
5200 case WM_T_82574:
5201 			/* Use the extended Rx descriptor format. */
5202 reg = CSR_READ(sc, WMREG_RFCTL);
5203 reg |= WMREG_RFCTL_EXSTEN;
5204 CSR_WRITE(sc, WMREG_RFCTL, reg);
5205 break;
5206 default:
5207 break;
5208 }
5209 } else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
5210 /*
5211 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
5212 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
5213 * "Certain Malformed IPv6 Extension Headers are Not Processed
5214 * Correctly by the Device"
5215 *
5216 * I354(C2000) Errata AVR53:
5217 * "Malformed IPv6 Extension Headers May Result in LAN Device
5218 * Hang"
5219 */
5220 reg = CSR_READ(sc, WMREG_RFCTL);
5221 reg |= WMREG_RFCTL_IPV6EXDIS;
5222 CSR_WRITE(sc, WMREG_RFCTL, reg);
5223 }
5224 }
5225
5226 static uint32_t
5227 wm_rxpbs_adjust_82580(uint32_t val)
5228 {
5229 uint32_t rv = 0;
5230
5231 if (val < __arraycount(wm_82580_rxpbs_table))
5232 rv = wm_82580_rxpbs_table[val];
5233
5234 return rv;
5235 }
5236
5237 /*
5238 * wm_reset_phy:
5239 *
5240 * generic PHY reset function.
5241 * Same as e1000_phy_hw_reset_generic()
5242 */
5243 static int
5244 wm_reset_phy(struct wm_softc *sc)
5245 {
5246 uint32_t reg;
5247 int rv;
5248
5249 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5250 device_xname(sc->sc_dev), __func__));
5251 if (wm_phy_resetisblocked(sc))
5252 return -1;
5253
5254 rv = sc->phy.acquire(sc);
5255 if (rv) {
5256 device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
5257 __func__, rv);
5258 return rv;
5259 }
5260
5261 reg = CSR_READ(sc, WMREG_CTRL);
5262 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
5263 CSR_WRITE_FLUSH(sc);
5264
5265 delay(sc->phy.reset_delay_us);
5266
5267 CSR_WRITE(sc, WMREG_CTRL, reg);
5268 CSR_WRITE_FLUSH(sc);
5269
5270 delay(150);
5271
5272 sc->phy.release(sc);
5273
5274 wm_get_cfg_done(sc);
5275 wm_phy_post_reset(sc);
5276
5277 return 0;
5278 }
5279
5280 /*
5281 * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
5282 *
5283  * On the I219, the descriptor rings must be emptied before resetting the HW
5284  * or before changing the device state to D3 during runtime (runtime PM).
5285  *
5286  * Failure to do this will cause the HW to enter a unit hang state which can
5287  * only be released by a PCI reset of the device.
5288 *
5289 * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
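 *
 * The flush below queues one dummy TX descriptor and bumps TDT, then
 * briefly toggles RCTL_EN with adjusted RXDCTL thresholds to drain RX.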
5290 */
5291 static void
5292 wm_flush_desc_rings(struct wm_softc *sc)
5293 {
5294 pcireg_t preg;
5295 uint32_t reg;
5296 struct wm_txqueue *txq;
5297 wiseman_txdesc_t *txd;
5298 int nexttx;
5299 uint32_t rctl;
5300
5301 KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
5302
5303 /* First, disable MULR fix in FEXTNVM11 */
5304 reg = CSR_READ(sc, WMREG_FEXTNVM11);
5305 reg |= FEXTNVM11_DIS_MULRFIX;
5306 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5307
5308 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5309 reg = CSR_READ(sc, WMREG_TDLEN(0));
5310 if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
5311 return;
5312
5313 /*
5314 * Remove all descriptors from the tx_ring.
5315 *
5316 * We want to clear all pending descriptors from the TX ring. Zeroing
5317 * happens when the HW reads the regs. We assign the ring itself as
5318 	 * the data of the next descriptor. We don't care about the data since
5319 	 * we are about to reset the HW anyway.
5320 */
5321 #ifdef WM_DEBUG
5322 device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
5323 #endif
5324 reg = CSR_READ(sc, WMREG_TCTL);
5325 CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
5326
5327 txq = &sc->sc_queue[0].wmq_txq;
5328 nexttx = txq->txq_next;
5329 txd = &txq->txq_descs[nexttx];
5330 wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
5331 txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
5332 txd->wtx_fields.wtxu_status = 0;
5333 txd->wtx_fields.wtxu_options = 0;
5334 txd->wtx_fields.wtxu_vlan = 0;
5335
5336 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5337 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5338
5339 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
5340 CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
5341 CSR_WRITE_FLUSH(sc);
5342 delay(250);
5343
5344 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5345 if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
5346 return;
5347
5348 /*
5349 * Mark all descriptors in the RX ring as consumed and disable the
5350 * rx ring.
5351 */
5352 #ifdef WM_DEBUG
5353 device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
5354 #endif
5355 rctl = CSR_READ(sc, WMREG_RCTL);
5356 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5357 CSR_WRITE_FLUSH(sc);
5358 delay(150);
5359
5360 reg = CSR_READ(sc, WMREG_RXDCTL(0));
5361 /* Zero the lower 14 bits (prefetch and host thresholds) */
5362 reg &= 0xffffc000;
5363 /*
5364 * Update thresholds: prefetch threshold to 31, host threshold
5365 * to 1 and make sure the granularity is "descriptors" and not
5366 * "cache lines"
5367 */
5368 reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
5369 CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
5370
5371 /* Momentarily enable the RX ring for the changes to take effect */
5372 CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
5373 CSR_WRITE_FLUSH(sc);
5374 delay(150);
5375 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5376 }
5377
5378 /*
5379 * wm_reset:
5380 *
5381 * Reset the i82542 chip.
5382 */
5383 static void
5384 wm_reset(struct wm_softc *sc)
5385 {
5386 int phy_reset = 0;
5387 int i, error = 0;
5388 uint32_t reg;
5389 uint16_t kmreg;
5390 int rv;
5391
5392 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5393 device_xname(sc->sc_dev), __func__));
5394 KASSERT(sc->sc_type != 0);
5395
5396 /*
5397 * Allocate on-chip memory according to the MTU size.
5398 * The Packet Buffer Allocation register must be written
5399 * before the chip is reset.
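	 * sc_pba is the RX share in KB; e.g. on the 82547 below, the rest
	 * of the 40KB packet buffer (PBA_40K - sc_pba) becomes the TX FIFO.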
5400 */
5401 switch (sc->sc_type) {
5402 case WM_T_82547:
5403 case WM_T_82547_2:
5404 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5405 PBA_22K : PBA_30K;
5406 for (i = 0; i < sc->sc_nqueues; i++) {
5407 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5408 txq->txq_fifo_head = 0;
5409 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
5410 txq->txq_fifo_size =
5411 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
5412 txq->txq_fifo_stall = 0;
5413 }
5414 break;
5415 case WM_T_82571:
5416 case WM_T_82572:
5417 case WM_T_82575: /* XXX need special handing for jumbo frames */
5418 case WM_T_80003:
5419 sc->sc_pba = PBA_32K;
5420 break;
5421 case WM_T_82573:
5422 sc->sc_pba = PBA_12K;
5423 break;
5424 case WM_T_82574:
5425 case WM_T_82583:
5426 sc->sc_pba = PBA_20K;
5427 break;
5428 case WM_T_82576:
5429 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
5430 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
5431 break;
5432 case WM_T_82580:
5433 case WM_T_I350:
5434 case WM_T_I354:
5435 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
5436 break;
5437 case WM_T_I210:
5438 case WM_T_I211:
5439 sc->sc_pba = PBA_34K;
5440 break;
5441 case WM_T_ICH8:
5442 /* Workaround for a bit corruption issue in FIFO memory */
5443 sc->sc_pba = PBA_8K;
5444 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
5445 break;
5446 case WM_T_ICH9:
5447 case WM_T_ICH10:
5448 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
5449 PBA_14K : PBA_10K;
5450 break;
5451 case WM_T_PCH:
5452 case WM_T_PCH2: /* XXX 14K? */
5453 case WM_T_PCH_LPT:
5454 case WM_T_PCH_SPT:
5455 case WM_T_PCH_CNP:
5456 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
5457 PBA_12K : PBA_26K;
5458 break;
5459 default:
5460 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5461 PBA_40K : PBA_48K;
5462 break;
5463 }
5464 /*
5465 * Only old or non-multiqueue devices have the PBA register
5466 * XXX Need special handling for 82575.
5467 */
5468 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5469 || (sc->sc_type == WM_T_82575))
5470 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
5471
5472 /* Prevent the PCI-E bus from sticking */
5473 if (sc->sc_flags & WM_F_PCIE) {
5474 int timeout = 800;
5475
5476 sc->sc_ctrl |= CTRL_GIO_M_DIS;
5477 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5478
5479 while (timeout--) {
5480 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
5481 == 0)
5482 break;
5483 delay(100);
5484 }
5485 		if (timeout < 0)	/* while (timeout--) exits at -1 */
5486 device_printf(sc->sc_dev,
5487 "failed to disable bus mastering\n");
5488 }
5489
5490 /* Set the completion timeout for interface */
5491 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5492 || (sc->sc_type == WM_T_82580)
5493 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5494 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
5495 wm_set_pcie_completion_timeout(sc);
5496
5497 /* Clear interrupt */
5498 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5499 if (wm_is_using_msix(sc)) {
5500 if (sc->sc_type != WM_T_82574) {
5501 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5502 CSR_WRITE(sc, WMREG_EIAC, 0);
5503 } else
5504 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5505 }
5506
5507 /* Stop the transmit and receive processes. */
5508 CSR_WRITE(sc, WMREG_RCTL, 0);
5509 sc->sc_rctl &= ~RCTL_EN;
5510 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
5511 CSR_WRITE_FLUSH(sc);
5512
5513 /* XXX set_tbi_sbp_82543() */
5514
5515 delay(10*1000);
5516
5517 /* Must acquire the MDIO ownership before MAC reset */
5518 switch (sc->sc_type) {
5519 case WM_T_82573:
5520 case WM_T_82574:
5521 case WM_T_82583:
5522 error = wm_get_hw_semaphore_82573(sc);
5523 break;
5524 default:
5525 break;
5526 }
5527
5528 /*
5529 * 82541 Errata 29? & 82547 Errata 28?
5530 * See also the description about PHY_RST bit in CTRL register
5531 * in 8254x_GBe_SDM.pdf.
5532 */
5533 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
5534 CSR_WRITE(sc, WMREG_CTRL,
5535 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
5536 CSR_WRITE_FLUSH(sc);
5537 delay(5000);
5538 }
5539
5540 switch (sc->sc_type) {
5541 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
5542 case WM_T_82541:
5543 case WM_T_82541_2:
5544 case WM_T_82547:
5545 case WM_T_82547_2:
5546 /*
5547 * On some chipsets, a reset through a memory-mapped write
5548 * cycle can cause the chip to reset before completing the
5549 		 * write cycle. This causes major headaches that can be avoided
5550 * by issuing the reset via indirect register writes through
5551 * I/O space.
5552 *
5553 * So, if we successfully mapped the I/O BAR at attach time,
5554 * use that. Otherwise, try our luck with a memory-mapped
5555 * reset.
5556 */
5557 if (sc->sc_flags & WM_F_IOH_VALID)
5558 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
5559 else
5560 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
5561 break;
5562 case WM_T_82545_3:
5563 case WM_T_82546_3:
5564 /* Use the shadow control register on these chips. */
5565 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
5566 break;
5567 case WM_T_80003:
5568 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5569 if (sc->phy.acquire(sc) != 0)
5570 break;
5571 CSR_WRITE(sc, WMREG_CTRL, reg);
5572 sc->phy.release(sc);
5573 break;
5574 case WM_T_ICH8:
5575 case WM_T_ICH9:
5576 case WM_T_ICH10:
5577 case WM_T_PCH:
5578 case WM_T_PCH2:
5579 case WM_T_PCH_LPT:
5580 case WM_T_PCH_SPT:
5581 case WM_T_PCH_CNP:
5582 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5583 if (wm_phy_resetisblocked(sc) == false) {
5584 /*
5585 * Gate automatic PHY configuration by hardware on
5586 * non-managed 82579
5587 */
5588 if ((sc->sc_type == WM_T_PCH2)
5589 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5590 == 0))
5591 wm_gate_hw_phy_config_ich8lan(sc, true);
5592
5593 reg |= CTRL_PHY_RESET;
5594 phy_reset = 1;
5595 } else
5596 device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5597 if (sc->phy.acquire(sc) != 0)
5598 break;
5599 CSR_WRITE(sc, WMREG_CTRL, reg);
5600 		/* Don't insert a completion barrier after the reset write */
5601 delay(20*1000);
5602 /*
5603 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
5604 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
5605 * only. See also wm_get_swflag_ich8lan().
5606 */
5607 mutex_exit(sc->sc_ich_phymtx);
5608 break;
5609 case WM_T_82580:
5610 case WM_T_I350:
5611 case WM_T_I354:
5612 case WM_T_I210:
5613 case WM_T_I211:
5614 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5615 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5616 CSR_WRITE_FLUSH(sc);
5617 delay(5000);
5618 break;
5619 case WM_T_82542_2_0:
5620 case WM_T_82542_2_1:
5621 case WM_T_82543:
5622 case WM_T_82540:
5623 case WM_T_82545:
5624 case WM_T_82546:
5625 case WM_T_82571:
5626 case WM_T_82572:
5627 case WM_T_82573:
5628 case WM_T_82574:
5629 case WM_T_82575:
5630 case WM_T_82576:
5631 case WM_T_82583:
5632 default:
5633 /* Everything else can safely use the documented method. */
5634 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5635 break;
5636 }
5637
5638 /* Must release the MDIO ownership after MAC reset */
5639 switch (sc->sc_type) {
5640 case WM_T_82573:
5641 case WM_T_82574:
5642 case WM_T_82583:
5643 if (error == 0)
5644 wm_put_hw_semaphore_82573(sc);
5645 break;
5646 default:
5647 break;
5648 }
5649
5650 /* Set Phy Config Counter to 50msec */
5651 if (sc->sc_type == WM_T_PCH2) {
5652 reg = CSR_READ(sc, WMREG_FEXTNVM3);
5653 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5654 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5655 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5656 }
5657
5658 if (phy_reset != 0)
5659 wm_get_cfg_done(sc);
5660
5661 /* Reload EEPROM */
5662 switch (sc->sc_type) {
5663 case WM_T_82542_2_0:
5664 case WM_T_82542_2_1:
5665 case WM_T_82543:
5666 case WM_T_82544:
5667 delay(10);
5668 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5669 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5670 CSR_WRITE_FLUSH(sc);
5671 delay(2000);
5672 break;
5673 case WM_T_82540:
5674 case WM_T_82545:
5675 case WM_T_82545_3:
5676 case WM_T_82546:
5677 case WM_T_82546_3:
5678 delay(5*1000);
5679 /* XXX Disable HW ARPs on ASF enabled adapters */
5680 break;
5681 case WM_T_82541:
5682 case WM_T_82541_2:
5683 case WM_T_82547:
5684 case WM_T_82547_2:
5685 delay(20000);
5686 /* XXX Disable HW ARPs on ASF enabled adapters */
5687 break;
5688 case WM_T_82571:
5689 case WM_T_82572:
5690 case WM_T_82573:
5691 case WM_T_82574:
5692 case WM_T_82583:
5693 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5694 delay(10);
5695 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5696 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5697 CSR_WRITE_FLUSH(sc);
5698 }
5699 /* check EECD_EE_AUTORD */
5700 wm_get_auto_rd_done(sc);
5701 /*
5702 		 * PHY configuration from the NVM starts only after
5703 		 * EECD_AUTO_RD is set.
5704 */
5705 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5706 || (sc->sc_type == WM_T_82583))
5707 delay(25*1000);
5708 break;
5709 case WM_T_82575:
5710 case WM_T_82576:
5711 case WM_T_82580:
5712 case WM_T_I350:
5713 case WM_T_I354:
5714 case WM_T_I210:
5715 case WM_T_I211:
5716 case WM_T_80003:
5717 /* check EECD_EE_AUTORD */
5718 wm_get_auto_rd_done(sc);
5719 break;
5720 case WM_T_ICH8:
5721 case WM_T_ICH9:
5722 case WM_T_ICH10:
5723 case WM_T_PCH:
5724 case WM_T_PCH2:
5725 case WM_T_PCH_LPT:
5726 case WM_T_PCH_SPT:
5727 case WM_T_PCH_CNP:
5728 break;
5729 default:
5730 panic("%s: unknown type\n", __func__);
5731 }
5732
5733 /* Check whether EEPROM is present or not */
5734 switch (sc->sc_type) {
5735 case WM_T_82575:
5736 case WM_T_82576:
5737 case WM_T_82580:
5738 case WM_T_I350:
5739 case WM_T_I354:
5740 case WM_T_ICH8:
5741 case WM_T_ICH9:
5742 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5743 /* Not found */
5744 sc->sc_flags |= WM_F_EEPROM_INVALID;
5745 if (sc->sc_type == WM_T_82575)
5746 wm_reset_init_script_82575(sc);
5747 }
5748 break;
5749 default:
5750 break;
5751 }
5752
5753 if (phy_reset != 0)
5754 wm_phy_post_reset(sc);
5755
5756 if ((sc->sc_type == WM_T_82580)
5757 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5758 /* Clear global device reset status bit */
5759 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5760 }
5761
5762 /* Clear any pending interrupt events. */
5763 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5764 reg = CSR_READ(sc, WMREG_ICR);
5765 if (wm_is_using_msix(sc)) {
5766 if (sc->sc_type != WM_T_82574) {
5767 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5768 CSR_WRITE(sc, WMREG_EIAC, 0);
5769 } else
5770 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5771 }
5772
5773 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5774 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5775 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5776 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
5777 reg = CSR_READ(sc, WMREG_KABGTXD);
5778 reg |= KABGTXD_BGSQLBIAS;
5779 CSR_WRITE(sc, WMREG_KABGTXD, reg);
5780 }
5781
5782 /* Reload sc_ctrl */
5783 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5784
5785 wm_set_eee(sc);
5786
5787 /*
5788 	 * For PCH, this write makes sure that any noise is detected as a CRC
5789 	 * error and dropped rather than showing up as a bad packet to the
5790 	 * DMA engine.
5791 */
5792 if (sc->sc_type == WM_T_PCH)
5793 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5794
5795 if (sc->sc_type >= WM_T_82544)
5796 CSR_WRITE(sc, WMREG_WUC, 0);
5797
5798 if (sc->sc_type < WM_T_82575)
5799 wm_disable_aspm(sc); /* Workaround for some chips */
5800
5801 wm_reset_mdicnfg_82580(sc);
5802
5803 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5804 wm_pll_workaround_i210(sc);
5805
5806 if (sc->sc_type == WM_T_80003) {
5807 /* Default to TRUE to enable the MDIC W/A */
5808 sc->sc_flags |= WM_F_80003_MDIC_WA;
5809
5810 rv = wm_kmrn_readreg(sc,
5811 KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5812 if (rv == 0) {
5813 if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5814 == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5815 sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5816 else
5817 sc->sc_flags |= WM_F_80003_MDIC_WA;
5818 }
5819 }
5820 }
5821
5822 /*
5823 * wm_add_rxbuf:
5824 *
5825  *	Add a receive buffer to the indicated descriptor.
5826 */
5827 static int
5828 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5829 {
5830 struct wm_softc *sc = rxq->rxq_sc;
5831 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5832 struct mbuf *m;
5833 int error;
5834
5835 KASSERT(mutex_owned(rxq->rxq_lock));
5836
5837 MGETHDR(m, M_DONTWAIT, MT_DATA);
5838 if (m == NULL)
5839 return ENOBUFS;
5840
5841 MCLGET(m, M_DONTWAIT);
5842 if ((m->m_flags & M_EXT) == 0) {
5843 m_freem(m);
5844 return ENOBUFS;
5845 }
5846
5847 if (rxs->rxs_mbuf != NULL)
5848 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5849
5850 rxs->rxs_mbuf = m;
5851
5852 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5853 /*
5854 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5855 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5856 */
5857 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5858 m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5859 if (error) {
5860 /* XXX XXX XXX */
5861 aprint_error_dev(sc->sc_dev,
5862 "unable to load rx DMA map %d, error = %d\n", idx, error);
5863 panic("wm_add_rxbuf");
5864 }
5865
5866 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5867 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5868
5869 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5870 if ((sc->sc_rctl & RCTL_EN) != 0)
5871 wm_init_rxdesc(rxq, idx);
5872 } else
5873 wm_init_rxdesc(rxq, idx);
5874
5875 return 0;
5876 }
5877
5878 /*
5879 * wm_rxdrain:
5880 *
5881 * Drain the receive queue.
5882 */
5883 static void
5884 wm_rxdrain(struct wm_rxqueue *rxq)
5885 {
5886 struct wm_softc *sc = rxq->rxq_sc;
5887 struct wm_rxsoft *rxs;
5888 int i;
5889
5890 KASSERT(mutex_owned(rxq->rxq_lock));
5891
5892 for (i = 0; i < WM_NRXDESC; i++) {
5893 rxs = &rxq->rxq_soft[i];
5894 if (rxs->rxs_mbuf != NULL) {
5895 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5896 m_freem(rxs->rxs_mbuf);
5897 rxs->rxs_mbuf = NULL;
5898 }
5899 }
5900 }
5901
5902 /*
5903  * Set up registers for RSS.
5904  *
5905  * XXX no VMDq support yet
5906 */
5907 static void
5908 wm_init_rss(struct wm_softc *sc)
5909 {
5910 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5911 int i;
5912
5913 CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5914
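	/*
	 * Spread RETA entries round-robin over the queues: entry i maps to
	 * queue (i % sc_nqueues), e.g. 0,1,2,3,0,1,... with four queues.
	 */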
5915 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5916 unsigned int qid, reta_ent;
5917
5918 qid = i % sc->sc_nqueues;
5919 switch (sc->sc_type) {
5920 case WM_T_82574:
5921 reta_ent = __SHIFTIN(qid,
5922 RETA_ENT_QINDEX_MASK_82574);
5923 break;
5924 case WM_T_82575:
5925 reta_ent = __SHIFTIN(qid,
5926 RETA_ENT_QINDEX1_MASK_82575);
5927 break;
5928 default:
5929 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5930 break;
5931 }
5932
5933 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5934 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5935 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5936 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5937 }
5938
5939 rss_getkey((uint8_t *)rss_key);
5940 for (i = 0; i < RSSRK_NUM_REGS; i++)
5941 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5942
5943 if (sc->sc_type == WM_T_82574)
5944 mrqc = MRQC_ENABLE_RSS_MQ_82574;
5945 else
5946 mrqc = MRQC_ENABLE_RSS_MQ;
5947
5948 /*
5949 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
5950 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5951 */
5952 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5953 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5954 #if 0
5955 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5956 mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5957 #endif
5958 mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5959
5960 CSR_WRITE(sc, WMREG_MRQC, mrqc);
5961 }
5962
5963 /*
5964  * Adjust the TX and RX queue numbers which the system actually uses.
5965  *
5966  * The numbers are affected by the parameters below.
5967  * - The number of hardware queues
5968 * - The number of MSI-X vectors (= "nvectors" argument)
5969 * - ncpu
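 *
 * For example (hypothetical numbers): an 82576 (16 HW queues) attached
 * with 5 MSI-X vectors on an 8-CPU machine ends up with
 * sc_nqueues = min(16, 5 - 1, 8) = 4.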
5970 */
5971 static void
5972 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5973 {
5974 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5975
5976 if (nvectors < 2) {
5977 sc->sc_nqueues = 1;
5978 return;
5979 }
5980
5981 switch (sc->sc_type) {
5982 case WM_T_82572:
5983 hw_ntxqueues = 2;
5984 hw_nrxqueues = 2;
5985 break;
5986 case WM_T_82574:
5987 hw_ntxqueues = 2;
5988 hw_nrxqueues = 2;
5989 break;
5990 case WM_T_82575:
5991 hw_ntxqueues = 4;
5992 hw_nrxqueues = 4;
5993 break;
5994 case WM_T_82576:
5995 hw_ntxqueues = 16;
5996 hw_nrxqueues = 16;
5997 break;
5998 case WM_T_82580:
5999 case WM_T_I350:
6000 case WM_T_I354:
6001 hw_ntxqueues = 8;
6002 hw_nrxqueues = 8;
6003 break;
6004 case WM_T_I210:
6005 hw_ntxqueues = 4;
6006 hw_nrxqueues = 4;
6007 break;
6008 case WM_T_I211:
6009 hw_ntxqueues = 2;
6010 hw_nrxqueues = 2;
6011 break;
6012 /*
6013 * The below Ethernet controllers do not support MSI-X;
6014 * this driver doesn't let them use multiqueue.
6015 * - WM_T_80003
6016 * - WM_T_ICH8
6017 * - WM_T_ICH9
6018 * - WM_T_ICH10
6019 * - WM_T_PCH
6020 * - WM_T_PCH2
6021 * - WM_T_PCH_LPT
6022 */
6023 default:
6024 hw_ntxqueues = 1;
6025 hw_nrxqueues = 1;
6026 break;
6027 }
6028
6029 hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
6030
6031 /*
6032 	 * Since using more queues than MSI-X vectors cannot improve scaling,
6033 	 * we limit the number of queues actually used.
6034 */
6035 if (nvectors < hw_nqueues + 1)
6036 sc->sc_nqueues = nvectors - 1;
6037 else
6038 sc->sc_nqueues = hw_nqueues;
6039
6040 /*
6041 	 * Since using more queues than CPUs cannot improve scaling, we limit
6042 	 * the number of queues actually used.
6043 */
6044 if (ncpu < sc->sc_nqueues)
6045 sc->sc_nqueues = ncpu;
6046 }
6047
6048 static inline bool
6049 wm_is_using_msix(struct wm_softc *sc)
6050 {
6051
6052 return (sc->sc_nintrs > 1);
6053 }
6054
6055 static inline bool
6056 wm_is_using_multiqueue(struct wm_softc *sc)
6057 {
6058
6059 return (sc->sc_nqueues > 1);
6060 }
6061
6062 static int
6063 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
6064 {
6065 struct wm_queue *wmq = &sc->sc_queue[qidx];
6066
6067 wmq->wmq_id = qidx;
6068 wmq->wmq_intr_idx = intr_idx;
6069 wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
6070 wm_handle_queue, wmq);
6071 if (wmq->wmq_si != NULL)
6072 return 0;
6073
6074 aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
6075 wmq->wmq_id);
6076 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6077 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6078 return ENOMEM;
6079 }
6080
6081 /*
6082 * Both single interrupt MSI and INTx can use this function.
6083 */
6084 static int
6085 wm_setup_legacy(struct wm_softc *sc)
6086 {
6087 pci_chipset_tag_t pc = sc->sc_pc;
6088 const char *intrstr = NULL;
6089 char intrbuf[PCI_INTRSTR_LEN];
6090 int error;
6091
6092 error = wm_alloc_txrx_queues(sc);
6093 if (error) {
6094 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6095 error);
6096 return ENOMEM;
6097 }
6098 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
6099 sizeof(intrbuf));
6100 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
6101 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
6102 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
6103 if (sc->sc_ihs[0] == NULL) {
6104 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
6105 (pci_intr_type(pc, sc->sc_intrs[0])
6106 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6107 return ENOMEM;
6108 }
6109
6110 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
6111 sc->sc_nintrs = 1;
6112
6113 return wm_softint_establish_queue(sc, 0, 0);
6114 }
6115
6116 static int
6117 wm_setup_msix(struct wm_softc *sc)
6118 {
6119 void *vih;
6120 kcpuset_t *affinity;
6121 int qidx, error, intr_idx, txrx_established;
6122 pci_chipset_tag_t pc = sc->sc_pc;
6123 const char *intrstr = NULL;
6124 char intrbuf[PCI_INTRSTR_LEN];
6125 char intr_xname[INTRDEVNAMEBUF];
6126
6127 if (sc->sc_nqueues < ncpu) {
6128 /*
6129 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
6130 		 * interrupts starts from CPU#1.
6131 */
6132 sc->sc_affinity_offset = 1;
6133 } else {
6134 /*
6135 		 * In this case, this device uses all CPUs, so we align the
6136 		 * affinity cpu_index with the MSI-X vector number for
6137 		 * readability.
6137 */
6138 sc->sc_affinity_offset = 0;
6139 }
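	/*
	 * e.g. with 4 queues on an 8-CPU machine, TXRX0..TXRX3 end up bound
	 * to CPU1..CPU4 below, and the LINK interrupt keeps its default
	 * affinity.
	 */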
6140
6141 error = wm_alloc_txrx_queues(sc);
6142 if (error) {
6143 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6144 error);
6145 return ENOMEM;
6146 }
6147
6148 kcpuset_create(&affinity, false);
6149 intr_idx = 0;
6150
6151 /*
6152 * TX and RX
6153 */
6154 txrx_established = 0;
6155 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6156 struct wm_queue *wmq = &sc->sc_queue[qidx];
6157 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
6158
6159 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6160 sizeof(intrbuf));
6161 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
6162 PCI_INTR_MPSAFE, true);
6163 memset(intr_xname, 0, sizeof(intr_xname));
6164 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
6165 device_xname(sc->sc_dev), qidx);
6166 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6167 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
6168 if (vih == NULL) {
6169 aprint_error_dev(sc->sc_dev,
6170 "unable to establish MSI-X(for TX and RX)%s%s\n",
6171 intrstr ? " at " : "",
6172 intrstr ? intrstr : "");
6173
6174 goto fail;
6175 }
6176 kcpuset_zero(affinity);
6177 /* Round-robin affinity */
6178 kcpuset_set(affinity, affinity_to);
6179 error = interrupt_distribute(vih, affinity, NULL);
6180 if (error == 0) {
6181 aprint_normal_dev(sc->sc_dev,
6182 "for TX and RX interrupting at %s affinity to %u\n",
6183 intrstr, affinity_to);
6184 } else {
6185 aprint_normal_dev(sc->sc_dev,
6186 "for TX and RX interrupting at %s\n", intrstr);
6187 }
6188 sc->sc_ihs[intr_idx] = vih;
6189 if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
6190 goto fail;
6191 txrx_established++;
6192 intr_idx++;
6193 }
6194
6195 /* LINK */
6196 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6197 sizeof(intrbuf));
6198 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
6199 memset(intr_xname, 0, sizeof(intr_xname));
6200 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
6201 device_xname(sc->sc_dev));
6202 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6203 IPL_NET, wm_linkintr_msix, sc, intr_xname);
6204 if (vih == NULL) {
6205 aprint_error_dev(sc->sc_dev,
6206 "unable to establish MSI-X(for LINK)%s%s\n",
6207 intrstr ? " at " : "",
6208 intrstr ? intrstr : "");
6209
6210 goto fail;
6211 }
6212 /* Keep default affinity to LINK interrupt */
6213 aprint_normal_dev(sc->sc_dev,
6214 "for LINK interrupting at %s\n", intrstr);
6215 sc->sc_ihs[intr_idx] = vih;
6216 sc->sc_link_intr_idx = intr_idx;
6217
6218 sc->sc_nintrs = sc->sc_nqueues + 1;
6219 kcpuset_destroy(affinity);
6220 return 0;
6221
6222 fail:
6223 for (qidx = 0; qidx < txrx_established; qidx++) {
6224 struct wm_queue *wmq = &sc->sc_queue[qidx];
6225 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6226 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6227 }
6228
6229 kcpuset_destroy(affinity);
6230 return ENOMEM;
6231 }
6232
6233 static void
6234 wm_unset_stopping_flags(struct wm_softc *sc)
6235 {
6236 int i;
6237
6238 KASSERT(mutex_owned(sc->sc_core_lock));
6239
6240 /* Must unset stopping flags in ascending order. */
6241 for (i = 0; i < sc->sc_nqueues; i++) {
6242 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6243 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6244
6245 mutex_enter(txq->txq_lock);
6246 txq->txq_stopping = false;
6247 mutex_exit(txq->txq_lock);
6248
6249 mutex_enter(rxq->rxq_lock);
6250 rxq->rxq_stopping = false;
6251 mutex_exit(rxq->rxq_lock);
6252 }
6253
6254 sc->sc_core_stopping = false;
6255 }
6256
6257 static void
6258 wm_set_stopping_flags(struct wm_softc *sc)
6259 {
6260 int i;
6261
6262 KASSERT(mutex_owned(sc->sc_core_lock));
6263
6264 sc->sc_core_stopping = true;
6265
6266 /* Must set stopping flags in ascending order. */
6267 for (i = 0; i < sc->sc_nqueues; i++) {
6268 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6269 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6270
6271 mutex_enter(rxq->rxq_lock);
6272 rxq->rxq_stopping = true;
6273 mutex_exit(rxq->rxq_lock);
6274
6275 mutex_enter(txq->txq_lock);
6276 txq->txq_stopping = true;
6277 mutex_exit(txq->txq_lock);
6278 }
6279 }
6280
6281 /*
6282 * Write interrupt interval value to ITR or EITR
6283 */
6284 static void
6285 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
6286 {
6287
6288 if (!wmq->wmq_set_itr)
6289 return;
6290
6291 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6292 uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
6293
6294 /*
6295 		 * The 82575 doesn't have the CNT_INGR field,
6296 		 * so overwrite the counter field in software.
6297 */
6298 if (sc->sc_type == WM_T_82575)
6299 eitr |= __SHIFTIN(wmq->wmq_itr,
6300 EITR_COUNTER_MASK_82575);
6301 else
6302 eitr |= EITR_CNT_INGR;
6303
6304 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
6305 } else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
6306 /*
6307 		 * The 82574 has both ITR and EITR. Set EITR when we use
6308 		 * the multiqueue function with MSI-X.
6309 */
6310 CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
6311 wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
6312 } else {
6313 KASSERT(wmq->wmq_id == 0);
6314 CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
6315 }
6316
6317 wmq->wmq_set_itr = false;
6318 }
6319
6320 /*
6321 * TODO
6322  * The dynamic ITR calculation below is almost the same as Linux igb's,
6323  * but it does not fit wm(4) well, so AIM stays disabled until we find
6324  * an appropriate way to calculate the ITR.
6325 */
6326 /*
6327  * Calculate the interrupt interval value that wm_itrs_writereg() will
6328  * write to the register. This function itself does not write ITR/EITR.
6329 */
6330 static void
6331 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
6332 {
6333 #ifdef NOTYET
6334 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6335 struct wm_txqueue *txq = &wmq->wmq_txq;
6336 uint32_t avg_size = 0;
6337 uint32_t new_itr;
6338
6339 if (rxq->rxq_packets)
6340 avg_size = rxq->rxq_bytes / rxq->rxq_packets;
6341 if (txq->txq_packets)
6342 avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
6343
6344 if (avg_size == 0) {
6345 new_itr = 450; /* restore default value */
6346 goto out;
6347 }
6348
6349 /* Add 24 bytes to size to account for CRC, preamble, and gap */
6350 avg_size += 24;
6351
6352 /* Don't starve jumbo frames */
6353 avg_size = uimin(avg_size, 3000);
6354
6355 /* Give a little boost to mid-size frames */
6356 if ((avg_size > 300) && (avg_size < 1200))
6357 new_itr = avg_size / 3;
6358 else
6359 new_itr = avg_size / 2;
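	/*
	 * e.g. an average frame of 900 bytes (+24) gets the mid-size
	 * boost: new_itr = 924 / 3 = 308.
	 */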
6360
6361 out:
6362 /*
6363 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
6364 * controllers. See sc->sc_itr_init setting in wm_init_locked().
6365 */
6366 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
6367 new_itr *= 4;
6368
6369 if (new_itr != wmq->wmq_itr) {
6370 wmq->wmq_itr = new_itr;
6371 wmq->wmq_set_itr = true;
6372 } else
6373 wmq->wmq_set_itr = false;
6374
6375 rxq->rxq_packets = 0;
6376 rxq->rxq_bytes = 0;
6377 txq->txq_packets = 0;
6378 txq->txq_bytes = 0;
6379 #endif
6380 }
6381
6382 static void
6383 wm_init_sysctls(struct wm_softc *sc)
6384 {
6385 struct sysctllog **log;
6386 const struct sysctlnode *rnode, *qnode, *cnode;
6387 int i, rv;
6388 const char *dvname;
6389
6390 log = &sc->sc_sysctllog;
6391 dvname = device_xname(sc->sc_dev);
6392
6393 rv = sysctl_createv(log, 0, NULL, &rnode,
6394 0, CTLTYPE_NODE, dvname,
6395 SYSCTL_DESCR("wm information and settings"),
6396 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6397 if (rv != 0)
6398 goto err;
6399
6400 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6401 CTLTYPE_BOOL, "txrx_workqueue",
6402 SYSCTL_DESCR("Use workqueue for packet processing"),
6403 NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
6404 if (rv != 0)
6405 goto teardown;
6406
6407 for (i = 0; i < sc->sc_nqueues; i++) {
6408 struct wm_queue *wmq = &sc->sc_queue[i];
6409 struct wm_txqueue *txq = &wmq->wmq_txq;
6410 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6411
6412 snprintf(sc->sc_queue[i].sysctlname,
6413 sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
6414
6415 if (sysctl_createv(log, 0, &rnode, &qnode,
6416 0, CTLTYPE_NODE,
6417 sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
6418 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
6419 break;
6420
6421 if (sysctl_createv(log, 0, &qnode, &cnode,
6422 CTLFLAG_READONLY, CTLTYPE_INT,
6423 "txq_free", SYSCTL_DESCR("TX queue free"),
6424 NULL, 0, &txq->txq_free,
6425 0, CTL_CREATE, CTL_EOL) != 0)
6426 break;
6427 if (sysctl_createv(log, 0, &qnode, &cnode,
6428 CTLFLAG_READONLY, CTLTYPE_INT,
6429 "txd_head", SYSCTL_DESCR("TX descriptor head"),
6430 wm_sysctl_tdh_handler, 0, (void *)txq,
6431 0, CTL_CREATE, CTL_EOL) != 0)
6432 break;
6433 if (sysctl_createv(log, 0, &qnode, &cnode,
6434 CTLFLAG_READONLY, CTLTYPE_INT,
6435 "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
6436 wm_sysctl_tdt_handler, 0, (void *)txq,
6437 0, CTL_CREATE, CTL_EOL) != 0)
6438 break;
6439 if (sysctl_createv(log, 0, &qnode, &cnode,
6440 CTLFLAG_READONLY, CTLTYPE_INT,
6441 "txq_next", SYSCTL_DESCR("TX queue next"),
6442 NULL, 0, &txq->txq_next,
6443 0, CTL_CREATE, CTL_EOL) != 0)
6444 break;
6445 if (sysctl_createv(log, 0, &qnode, &cnode,
6446 CTLFLAG_READONLY, CTLTYPE_INT,
6447 "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
6448 NULL, 0, &txq->txq_sfree,
6449 0, CTL_CREATE, CTL_EOL) != 0)
6450 break;
6451 if (sysctl_createv(log, 0, &qnode, &cnode,
6452 CTLFLAG_READONLY, CTLTYPE_INT,
6453 "txq_snext", SYSCTL_DESCR("TX queue snext"),
6454 NULL, 0, &txq->txq_snext,
6455 0, CTL_CREATE, CTL_EOL) != 0)
6456 break;
6457 if (sysctl_createv(log, 0, &qnode, &cnode,
6458 CTLFLAG_READONLY, CTLTYPE_INT,
6459 "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
6460 NULL, 0, &txq->txq_sdirty,
6461 0, CTL_CREATE, CTL_EOL) != 0)
6462 break;
6463 if (sysctl_createv(log, 0, &qnode, &cnode,
6464 CTLFLAG_READONLY, CTLTYPE_INT,
6465 "txq_flags", SYSCTL_DESCR("TX queue flags"),
6466 NULL, 0, &txq->txq_flags,
6467 0, CTL_CREATE, CTL_EOL) != 0)
6468 break;
6469 if (sysctl_createv(log, 0, &qnode, &cnode,
6470 CTLFLAG_READONLY, CTLTYPE_BOOL,
6471 "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
6472 NULL, 0, &txq->txq_stopping,
6473 0, CTL_CREATE, CTL_EOL) != 0)
6474 break;
6475 if (sysctl_createv(log, 0, &qnode, &cnode,
6476 CTLFLAG_READONLY, CTLTYPE_BOOL,
6477 "txq_sending", SYSCTL_DESCR("TX queue sending"),
6478 NULL, 0, &txq->txq_sending,
6479 0, CTL_CREATE, CTL_EOL) != 0)
6480 break;
6481
6482 if (sysctl_createv(log, 0, &qnode, &cnode,
6483 CTLFLAG_READONLY, CTLTYPE_INT,
6484 "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
6485 NULL, 0, &rxq->rxq_ptr,
6486 0, CTL_CREATE, CTL_EOL) != 0)
6487 break;
6488 }
6489
6490 #ifdef WM_DEBUG
6491 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6492 CTLTYPE_INT, "debug_flags",
6493 SYSCTL_DESCR(
6494 "Debug flags:\n" \
6495 "\t0x01 LINK\n" \
6496 "\t0x02 TX\n" \
6497 "\t0x04 RX\n" \
6498 "\t0x08 GMII\n" \
6499 "\t0x10 MANAGE\n" \
6500 "\t0x20 NVM\n" \
6501 "\t0x40 INIT\n" \
6502 "\t0x80 LOCK"),
6503 wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
6504 if (rv != 0)
6505 goto teardown;
6506 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6507 CTLTYPE_BOOL, "trigger_reset",
6508 SYSCTL_DESCR("Trigger an interface reset"),
6509 NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
6510 if (rv != 0)
6511 goto teardown;
6512 #endif
6513
6514 return;
6515
6516 teardown:
6517 sysctl_teardown(log);
6518 err:
6519 sc->sc_sysctllog = NULL;
6520 device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
6521 __func__, rv);
6522 }
6523
6524 /*
6525 * wm_init: [ifnet interface function]
6526 *
6527 * Initialize the interface.
6528 */
6529 static int
6530 wm_init(struct ifnet *ifp)
6531 {
6532 struct wm_softc *sc = ifp->if_softc;
6533 int ret;
6534
6535 KASSERT(IFNET_LOCKED(ifp));
6536
6537 if (sc->sc_dying)
6538 return ENXIO;
6539
6540 mutex_enter(sc->sc_core_lock);
6541 ret = wm_init_locked(ifp);
6542 mutex_exit(sc->sc_core_lock);
6543
6544 return ret;
6545 }
6546
6547 static int
6548 wm_init_locked(struct ifnet *ifp)
6549 {
6550 struct wm_softc *sc = ifp->if_softc;
6551 struct ethercom *ec = &sc->sc_ethercom;
6552 int i, j, trynum, error = 0;
6553 uint32_t reg, sfp_mask = 0;
6554
6555 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6556 device_xname(sc->sc_dev), __func__));
6557 KASSERT(IFNET_LOCKED(ifp));
6558 KASSERT(mutex_owned(sc->sc_core_lock));
6559
6560 /*
6561 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
6562 	 * There is a small but measurable benefit to avoiding the adjustment
6563 * of the descriptor so that the headers are aligned, for normal mtu,
6564 * on such platforms. One possibility is that the DMA itself is
6565 * slightly more efficient if the front of the entire packet (instead
6566 * of the front of the headers) is aligned.
6567 *
6568 * Note we must always set align_tweak to 0 if we are using
6569 * jumbo frames.
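	 *
	 * (With align_tweak = 2, the 14-byte Ethernet header ends on a
	 * 4-byte boundary, so the IP header lands naturally aligned.)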
6570 */
6571 #ifdef __NO_STRICT_ALIGNMENT
6572 sc->sc_align_tweak = 0;
6573 #else
6574 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
6575 sc->sc_align_tweak = 0;
6576 else
6577 sc->sc_align_tweak = 2;
6578 #endif /* __NO_STRICT_ALIGNMENT */
6579
6580 /* Cancel any pending I/O. */
6581 wm_stop_locked(ifp, false, false);
6582
6583 /* Update statistics before reset */
6584 if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
6585 if_ierrors, CSR_READ(sc, WMREG_RXERRC));
6586
6587 /* >= PCH_SPT hardware workaround before reset. */
6588 if (sc->sc_type >= WM_T_PCH_SPT)
6589 wm_flush_desc_rings(sc);
6590
6591 /* Reset the chip to a known state. */
6592 wm_reset(sc);
6593
6594 /*
6595 * AMT based hardware can now take control from firmware
6596 * Do this after reset.
6597 */
6598 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
6599 wm_get_hw_control(sc);
6600
6601 if ((sc->sc_type >= WM_T_PCH_SPT) &&
6602 pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
6603 wm_legacy_irq_quirk_spt(sc);
6604
6605 /* Init hardware bits */
6606 wm_initialize_hardware_bits(sc);
6607
6608 /* Reset the PHY. */
6609 if (sc->sc_flags & WM_F_HAS_MII)
6610 wm_gmii_reset(sc);
6611
6612 if (sc->sc_type >= WM_T_ICH8) {
6613 reg = CSR_READ(sc, WMREG_GCR);
6614 /*
6615 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
6616 * default after reset.
6617 */
6618 if (sc->sc_type == WM_T_ICH8)
6619 reg |= GCR_NO_SNOOP_ALL;
6620 else
6621 reg &= ~GCR_NO_SNOOP_ALL;
6622 CSR_WRITE(sc, WMREG_GCR, reg);
6623 }
6624
6625 if ((sc->sc_type >= WM_T_ICH8)
6626 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
6627 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
6628
6629 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6630 reg |= CTRL_EXT_RO_DIS;
6631 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6632 }
6633
6634 /* Calculate (E)ITR value */
6635 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
6636 /*
6637 * For NEWQUEUE's EITR (except for 82575).
6638 		 * 82575's EITR should be set to the same throttling value as
6639 		 * other old controllers' ITR because the interrupt/sec
6640 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
6641 		 *
6642 		 * 82574's EITR should be set to the same throttling value as
6643 		 * ITR.
6643 *
6644 * For N interrupts/sec, set this value to:
6645 * 1,000,000 / N in contrast to ITR throttling value.
6646 */
6647 sc->sc_itr_init = 450;
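		/*
		 * e.g. an EITR value of 450 corresponds to roughly
		 * 1,000,000 / 450 ~= 2222 interrupts/sec.
		 */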
6648 } else if (sc->sc_type >= WM_T_82543) {
6649 /*
6650 * Set up the interrupt throttling register (units of 256ns)
6651 * Note that a footnote in Intel's documentation says this
6652 * ticker runs at 1/4 the rate when the chip is in 100Mbit
6653 * or 10Mbit mode. Empirically, it appears to be the case
6654 * that that is also true for the 1024ns units of the other
6655 * interrupt-related timer registers -- so, really, we ought
6656 * to divide this value by 4 when the link speed is low.
6657 *
6658 * XXX implement this division at link speed change!
6659 */
6660
6661 /*
6662 * For N interrupts/sec, set this value to:
6663 * 1,000,000,000 / (N * 256). Note that we set the
6664 * absolute and packet timer values to this value
6665 * divided by 4 to get "simple timer" behavior.
6666 */
6667 sc->sc_itr_init = 1500; /* 2604 ints/sec */
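		/*
		 * Check: 1,000,000,000 / (1500 * 256) ~= 2604, matching the
		 * "2604 ints/sec" note above.
		 */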
6668 }
6669
6670 error = wm_init_txrx_queues(sc);
6671 if (error)
6672 goto out;
6673
6674 if (((sc->sc_flags & WM_F_SGMII) == 0) &&
6675 (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
6676 (sc->sc_type >= WM_T_82575))
6677 wm_serdes_power_up_link_82575(sc);
6678
6679 /* Clear out the VLAN table -- we don't use it (yet). */
6680 CSR_WRITE(sc, WMREG_VET, 0);
6681 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
6682 trynum = 10; /* Due to hw errata */
6683 else
6684 trynum = 1;
6685 for (i = 0; i < WM_VLAN_TABSIZE; i++)
6686 for (j = 0; j < trynum; j++)
6687 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
6688
6689 /*
6690 * Set up flow-control parameters.
6691 *
6692 * XXX Values could probably stand some tuning.
6693 */
6694 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
6695 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
6696 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
6697 && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
6698 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
6699 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
6700 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
6701 }
6702
6703 sc->sc_fcrtl = FCRTL_DFLT;
6704 if (sc->sc_type < WM_T_82543) {
6705 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
6706 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
6707 } else {
6708 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
6709 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
6710 }
6711
6712 if (sc->sc_type == WM_T_80003)
6713 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
6714 else
6715 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
6716
6717 /* Writes the control register. */
6718 wm_set_vlan(sc);
6719
6720 if (sc->sc_flags & WM_F_HAS_MII) {
6721 uint16_t kmreg;
6722
6723 switch (sc->sc_type) {
6724 case WM_T_80003:
6725 case WM_T_ICH8:
6726 case WM_T_ICH9:
6727 case WM_T_ICH10:
6728 case WM_T_PCH:
6729 case WM_T_PCH2:
6730 case WM_T_PCH_LPT:
6731 case WM_T_PCH_SPT:
6732 case WM_T_PCH_CNP:
6733 /*
6734 * Set the mac to wait the maximum time between each
6735 * iteration and increase the max iterations when
6736 * polling the phy; this fixes erroneous timeouts at
6737 * 10Mbps.
6738 */
6739 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
6740 0xFFFF);
6741 wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6742 &kmreg);
6743 kmreg |= 0x3F;
6744 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6745 kmreg);
6746 break;
6747 default:
6748 break;
6749 }
6750
6751 if (sc->sc_type == WM_T_80003) {
6752 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6753 reg &= ~CTRL_EXT_LINK_MODE_MASK;
6754 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6755
6756 /* Bypass RX and TX FIFOs */
6757 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
6758 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
6759 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
6760 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
6761 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
6762 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
6763 }
6764 }
6765 #if 0
6766 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
6767 #endif
6768
6769 /* Set up checksum offload parameters. */
6770 reg = CSR_READ(sc, WMREG_RXCSUM);
6771 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
6772 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6773 reg |= RXCSUM_IPOFL;
6774 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
6775 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
6776 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
6777 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
6778 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6779
6780 /* Set registers about MSI-X */
6781 if (wm_is_using_msix(sc)) {
6782 uint32_t ivar, qintr_idx;
6783 struct wm_queue *wmq;
6784 unsigned int qid;
6785
6786 if (sc->sc_type == WM_T_82575) {
6787 /* Interrupt control */
6788 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6789 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
6790 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6791
6792 /* TX and RX */
6793 for (i = 0; i < sc->sc_nqueues; i++) {
6794 wmq = &sc->sc_queue[i];
6795 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
6796 EITR_TX_QUEUE(wmq->wmq_id)
6797 | EITR_RX_QUEUE(wmq->wmq_id));
6798 }
6799 /* Link status */
6800 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
6801 EITR_OTHER);
6802 } else if (sc->sc_type == WM_T_82574) {
6803 /* Interrupt control */
6804 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6805 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
6806 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6807
6808 /*
6809 * Work around issue with spurious interrupts
6810 * in MSI-X mode.
6811 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
6812 			 * been initialized yet, so re-initialize WMREG_RFCTL
6813 			 * here.
6813 */
6814 reg = CSR_READ(sc, WMREG_RFCTL);
6815 reg |= WMREG_RFCTL_ACKDIS;
6816 CSR_WRITE(sc, WMREG_RFCTL, reg);
6817
6818 ivar = 0;
6819 /* TX and RX */
6820 for (i = 0; i < sc->sc_nqueues; i++) {
6821 wmq = &sc->sc_queue[i];
6822 qid = wmq->wmq_id;
6823 qintr_idx = wmq->wmq_intr_idx;
6824
6825 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6826 IVAR_TX_MASK_Q_82574(qid));
6827 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6828 IVAR_RX_MASK_Q_82574(qid));
6829 }
6830 /* Link status */
6831 ivar |= __SHIFTIN((IVAR_VALID_82574
6832 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
6833 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
6834 } else {
6835 /* Interrupt control */
6836 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
6837 | GPIE_EIAME | GPIE_PBA);
6838
6839 switch (sc->sc_type) {
6840 case WM_T_82580:
6841 case WM_T_I350:
6842 case WM_T_I354:
6843 case WM_T_I210:
6844 case WM_T_I211:
6845 /* TX and RX */
6846 for (i = 0; i < sc->sc_nqueues; i++) {
6847 wmq = &sc->sc_queue[i];
6848 qid = wmq->wmq_id;
6849 qintr_idx = wmq->wmq_intr_idx;
6850
6851 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
6852 ivar &= ~IVAR_TX_MASK_Q(qid);
6853 ivar |= __SHIFTIN((qintr_idx
6854 | IVAR_VALID),
6855 IVAR_TX_MASK_Q(qid));
6856 ivar &= ~IVAR_RX_MASK_Q(qid);
6857 ivar |= __SHIFTIN((qintr_idx
6858 | IVAR_VALID),
6859 IVAR_RX_MASK_Q(qid));
6860 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
6861 }
6862 break;
6863 case WM_T_82576:
6864 /* TX and RX */
6865 for (i = 0; i < sc->sc_nqueues; i++) {
6866 wmq = &sc->sc_queue[i];
6867 qid = wmq->wmq_id;
6868 qintr_idx = wmq->wmq_intr_idx;
6869
6870 ivar = CSR_READ(sc,
6871 WMREG_IVAR_Q_82576(qid));
6872 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
6873 ivar |= __SHIFTIN((qintr_idx
6874 | IVAR_VALID),
6875 IVAR_TX_MASK_Q_82576(qid));
6876 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
6877 ivar |= __SHIFTIN((qintr_idx
6878 | IVAR_VALID),
6879 IVAR_RX_MASK_Q_82576(qid));
6880 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
6881 ivar);
6882 }
6883 break;
6884 default:
6885 break;
6886 }
6887
6888 /* Link status */
6889 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
6890 IVAR_MISC_OTHER);
6891 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
6892 }
6893
6894 if (wm_is_using_multiqueue(sc)) {
6895 wm_init_rss(sc);
6896
6897 /*
6898 			 * NOTE: Receive Full-Packet Checksum Offload
6899 			 * is mutually exclusive with Multiqueue. However,
6900 			 * this is not the same as TCP/IP checksums, which
6901 			 * still work.
6902 */
6903 reg = CSR_READ(sc, WMREG_RXCSUM);
6904 reg |= RXCSUM_PCSD;
6905 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6906 }
6907 }
6908
6909 /* Set up the interrupt registers. */
6910 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6911
6912 /* Enable SFP module insertion interrupt if it's required */
6913 if ((sc->sc_flags & WM_F_SFP) != 0) {
6914 sc->sc_ctrl |= CTRL_EXTLINK_EN;
6915 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6916 sfp_mask = ICR_GPI(0);
6917 }
6918
6919 if (wm_is_using_msix(sc)) {
6920 uint32_t mask;
6921 struct wm_queue *wmq;
6922
6923 switch (sc->sc_type) {
6924 case WM_T_82574:
6925 mask = 0;
6926 for (i = 0; i < sc->sc_nqueues; i++) {
6927 wmq = &sc->sc_queue[i];
6928 mask |= ICR_TXQ(wmq->wmq_id);
6929 mask |= ICR_RXQ(wmq->wmq_id);
6930 }
6931 mask |= ICR_OTHER;
6932 CSR_WRITE(sc, WMREG_EIAC_82574, mask);
6933 CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
6934 break;
6935 default:
6936 if (sc->sc_type == WM_T_82575) {
6937 mask = 0;
6938 for (i = 0; i < sc->sc_nqueues; i++) {
6939 wmq = &sc->sc_queue[i];
6940 mask |= EITR_TX_QUEUE(wmq->wmq_id);
6941 mask |= EITR_RX_QUEUE(wmq->wmq_id);
6942 }
6943 mask |= EITR_OTHER;
6944 } else {
6945 mask = 0;
6946 for (i = 0; i < sc->sc_nqueues; i++) {
6947 wmq = &sc->sc_queue[i];
6948 mask |= 1 << wmq->wmq_intr_idx;
6949 }
6950 mask |= 1 << sc->sc_link_intr_idx;
6951 }
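			/*
			 * EIAC selects the causes that are auto-cleared
			 * when the interrupt fires, EIAM the causes that
			 * are auto-masked, and EIMS enables them.
			 */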
6952 CSR_WRITE(sc, WMREG_EIAC, mask);
6953 CSR_WRITE(sc, WMREG_EIAM, mask);
6954 CSR_WRITE(sc, WMREG_EIMS, mask);
6955
6956 /* For other interrupts */
6957 CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
6958 break;
6959 }
6960 } else {
6961 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
6962 ICR_RXO | ICR_RXT0 | sfp_mask;
6963 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
6964 }
6965
6966 /* Set up the inter-packet gap. */
6967 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6968
6969 if (sc->sc_type >= WM_T_82543) {
6970 for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6971 struct wm_queue *wmq = &sc->sc_queue[qidx];
6972 wm_itrs_writereg(sc, wmq);
6973 }
		/*
		 * Link interrupts occur much less frequently than TX and
		 * RX interrupts, so we don't tune the
		 * EITR(WM_MSIX_LINKINTR_IDX) value like FreeBSD's if_igb
		 * does.
		 */
6980 }
6981
6982 /* Set the VLAN EtherType. */
6983 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
6984
6985 /*
6986 * Set up the transmit control register; we start out with
6987 * a collision distance suitable for FDX, but update it when
6988 * we resolve the media type.
6989 */
6990 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
6991 | TCTL_CT(TX_COLLISION_THRESHOLD)
6992 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6993 if (sc->sc_type >= WM_T_82571)
6994 sc->sc_tctl |= TCTL_MULR;
6995 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6996
6997 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6998 /* Write TDT after TCTL.EN is set. See the document. */
6999 CSR_WRITE(sc, WMREG_TDT(0), 0);
7000 }
7001
7002 if (sc->sc_type == WM_T_80003) {
7003 reg = CSR_READ(sc, WMREG_TCTL_EXT);
7004 reg &= ~TCTL_EXT_GCEX_MASK;
7005 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
7006 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
7007 }
7008
7009 /* Set the media. */
7010 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
7011 goto out;
7012
7013 /* Configure for OS presence */
7014 wm_init_manageability(sc);
7015
7016 /*
7017 * Set up the receive control register; we actually program the
7018 * register when we set the receive filter. Use multicast address
7019 * offset type 0.
7020 *
7021 * Only the i82544 has the ability to strip the incoming CRC, so we
7022 * don't enable that feature.
7023 */
7024 sc->sc_mchash_type = 0;
7025 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
7026 | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
7027
	/* The 82574 uses the one-buffer extended Rx descriptor. */
7029 if (sc->sc_type == WM_T_82574)
7030 sc->sc_rctl |= RCTL_DTYP_ONEBUF;
7031
7032 if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
7033 sc->sc_rctl |= RCTL_SECRC;
7034
7035 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
7036 && (ifp->if_mtu > ETHERMTU)) {
7037 sc->sc_rctl |= RCTL_LPE;
7038 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7039 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
7040 }
7041
7042 if (MCLBYTES == 2048)
7043 sc->sc_rctl |= RCTL_2k;
7044 else {
7045 if (sc->sc_type >= WM_T_82543) {
7046 switch (MCLBYTES) {
7047 case 4096:
7048 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
7049 break;
7050 case 8192:
7051 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
7052 break;
7053 case 16384:
7054 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
7055 break;
7056 default:
7057 panic("wm_init: MCLBYTES %d unsupported",
7058 MCLBYTES);
7059 break;
7060 }
7061 } else
7062 panic("wm_init: i82542 requires MCLBYTES = 2048");
7063 }
7064
7065 /* Enable ECC */
7066 switch (sc->sc_type) {
7067 case WM_T_82571:
7068 reg = CSR_READ(sc, WMREG_PBA_ECC);
7069 reg |= PBA_ECC_CORR_EN;
7070 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
7071 break;
7072 case WM_T_PCH_LPT:
7073 case WM_T_PCH_SPT:
7074 case WM_T_PCH_CNP:
7075 reg = CSR_READ(sc, WMREG_PBECCSTS);
7076 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
7077 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
7078
7079 sc->sc_ctrl |= CTRL_MEHE;
7080 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7081 break;
7082 default:
7083 break;
7084 }
7085
7086 /*
7087 * Set the receive filter.
7088 *
7089 * For 82575 and 82576, the RX descriptors must be initialized after
7090 * the setting of RCTL.EN in wm_set_filter()
7091 */
7092 wm_set_filter(sc);
7093
	/* On 82575 and later, set RDT only if RX is enabled. */
7095 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7096 int qidx;
7097 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7098 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
			for (i = 0; i < WM_NRXDESC; i++) {
				mutex_enter(rxq->rxq_lock);
				wm_init_rxdesc(rxq, i);
				mutex_exit(rxq->rxq_lock);
			}
7105 }
7106 }
7107
7108 wm_unset_stopping_flags(sc);
7109
7110 /* Start the one second link check clock. */
7111 callout_schedule(&sc->sc_tick_ch, hz);
7112
7113 /*
7114 * ...all done! (IFNET_LOCKED asserted above.)
7115 */
7116 ifp->if_flags |= IFF_RUNNING;
7117
7118 out:
7119 /* Save last flags for the callback */
7120 sc->sc_if_flags = ifp->if_flags;
7121 sc->sc_ec_capenable = ec->ec_capenable;
7122 if (error)
7123 log(LOG_ERR, "%s: interface not running\n",
7124 device_xname(sc->sc_dev));
7125 return error;
7126 }
7127
7128 /*
7129 * wm_stop: [ifnet interface function]
7130 *
7131 * Stop transmission on the interface.
7132 */
7133 static void
7134 wm_stop(struct ifnet *ifp, int disable)
7135 {
7136 struct wm_softc *sc = ifp->if_softc;
7137
7138 ASSERT_SLEEPABLE();
7139 KASSERT(IFNET_LOCKED(ifp));
7140
7141 mutex_enter(sc->sc_core_lock);
7142 wm_stop_locked(ifp, disable ? true : false, true);
7143 mutex_exit(sc->sc_core_lock);
7144
7145 /*
7146 * After wm_set_stopping_flags(), it is guaranteed that
7147 * wm_handle_queue_work() does not call workqueue_enqueue().
	 * However, workqueue_wait() cannot be called in wm_stop_locked()
	 * because it can sleep, so call it here instead.
7151 */
7152 for (int i = 0; i < sc->sc_nqueues; i++)
7153 workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
7154 workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
7155 }
7156
7157 static void
7158 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
7159 {
7160 struct wm_softc *sc = ifp->if_softc;
7161 struct wm_txsoft *txs;
7162 int i, qidx;
7163
7164 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7165 device_xname(sc->sc_dev), __func__));
7166 KASSERT(IFNET_LOCKED(ifp));
7167 KASSERT(mutex_owned(sc->sc_core_lock));
7168
7169 wm_set_stopping_flags(sc);
7170
7171 if (sc->sc_flags & WM_F_HAS_MII) {
7172 /* Down the MII. */
7173 mii_down(&sc->sc_mii);
7174 } else {
7175 #if 0
7176 /* Should we clear PHY's status properly? */
7177 wm_reset(sc);
7178 #endif
7179 }
7180
7181 /* Stop the transmit and receive processes. */
7182 CSR_WRITE(sc, WMREG_TCTL, 0);
7183 CSR_WRITE(sc, WMREG_RCTL, 0);
7184 sc->sc_rctl &= ~RCTL_EN;
7185
7186 /*
7187 * Clear the interrupt mask to ensure the device cannot assert its
7188 * interrupt line.
7189 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
7190 * service any currently pending or shared interrupt.
7191 */
7192 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7193 sc->sc_icr = 0;
7194 if (wm_is_using_msix(sc)) {
7195 if (sc->sc_type != WM_T_82574) {
7196 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
7197 CSR_WRITE(sc, WMREG_EIAC, 0);
7198 } else
7199 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
7200 }
7201
7202 /*
7203 * Stop callouts after interrupts are disabled; if we have
7204 * to wait for them, we will be releasing the CORE_LOCK
7205 * briefly, which will unblock interrupts on the current CPU.
7206 */
7207
7208 /* Stop the one second clock. */
7209 if (wait)
7210 callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
7211 else
7212 callout_stop(&sc->sc_tick_ch);
7213
7214 /* Stop the 82547 Tx FIFO stall check timer. */
7215 if (sc->sc_type == WM_T_82547) {
7216 if (wait)
7217 callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
7218 else
7219 callout_stop(&sc->sc_txfifo_ch);
7220 }
7221
7222 /* Release any queued transmit buffers. */
7223 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7224 struct wm_queue *wmq = &sc->sc_queue[qidx];
7225 struct wm_txqueue *txq = &wmq->wmq_txq;
7226 struct mbuf *m;
7227
7228 mutex_enter(txq->txq_lock);
7229 txq->txq_sending = false; /* Ensure watchdog disabled */
7230 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7231 txs = &txq->txq_soft[i];
7232 if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat,
				    txs->txs_dmamap);
7234 m_freem(txs->txs_mbuf);
7235 txs->txs_mbuf = NULL;
7236 }
7237 }
7238 /* Drain txq_interq */
7239 while ((m = pcq_get(txq->txq_interq)) != NULL)
7240 m_freem(m);
7241 mutex_exit(txq->txq_lock);
7242 }
7243
7244 /* Mark the interface as down and cancel the watchdog timer. */
7245 ifp->if_flags &= ~IFF_RUNNING;
7246 sc->sc_if_flags = ifp->if_flags;
7247
7248 if (disable) {
7249 for (i = 0; i < sc->sc_nqueues; i++) {
7250 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7251 mutex_enter(rxq->rxq_lock);
7252 wm_rxdrain(rxq);
7253 mutex_exit(rxq->rxq_lock);
7254 }
7255 }
7256
7257 #if 0 /* notyet */
7258 if (sc->sc_type >= WM_T_82544)
7259 CSR_WRITE(sc, WMREG_WUC, 0);
7260 #endif
7261 }
7262
7263 static void
7264 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
7265 {
7266 struct mbuf *m;
7267 int i;
7268
7269 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
7270 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
7271 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
7272 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
7273 m->m_data, m->m_len, m->m_flags);
7274 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
7275 i, i == 1 ? "" : "s");
7276 }
7277
7278 /*
7279 * wm_82547_txfifo_stall:
7280 *
7281 * Callout used to wait for the 82547 Tx FIFO to drain,
7282 * reset the FIFO pointers, and restart packet transmission.
7283 */
7284 static void
7285 wm_82547_txfifo_stall(void *arg)
7286 {
7287 struct wm_softc *sc = arg;
7288 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7289
7290 mutex_enter(txq->txq_lock);
7291
7292 if (txq->txq_stopping)
7293 goto out;
7294
7295 if (txq->txq_fifo_stall) {
7296 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
7297 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
7298 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
7299 /*
7300 * Packets have drained. Stop transmitter, reset
7301 * FIFO pointers, restart transmitter, and kick
7302 * the packet queue.
7303 */
7304 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
7305 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
7306 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
7307 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
7308 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
7309 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
7310 CSR_WRITE(sc, WMREG_TCTL, tctl);
7311 CSR_WRITE_FLUSH(sc);
7312
7313 txq->txq_fifo_head = 0;
7314 txq->txq_fifo_stall = 0;
7315 wm_start_locked(&sc->sc_ethercom.ec_if);
7316 } else {
7317 /*
7318 * Still waiting for packets to drain; try again in
7319 * another tick.
7320 */
7321 callout_schedule(&sc->sc_txfifo_ch, 1);
7322 }
7323 }
7324
7325 out:
7326 mutex_exit(txq->txq_lock);
7327 }
7328
7329 /*
7330 * wm_82547_txfifo_bugchk:
7331 *
7332 * Check for bug condition in the 82547 Tx FIFO. We need to
7333 * prevent enqueueing a packet that would wrap around the end
 * of the Tx FIFO ring buffer, otherwise the chip will croak.
7335 *
7336 * We do this by checking the amount of space before the end
7337 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
7338 * the Tx FIFO, wait for all remaining packets to drain, reset
7339 * the internal FIFO pointers to the beginning, and restart
7340 * transmission on the interface.
7341 */
7342 #define WM_FIFO_HDR 0x10
7343 #define WM_82547_PAD_LEN 0x3e0
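/*
 * For example: a 1514-byte frame occupies
 * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO space.
 */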
7344 static int
7345 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
7346 {
7347 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7348 int space = txq->txq_fifo_size - txq->txq_fifo_head;
7349 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
7350
7351 /* Just return if already stalled. */
7352 if (txq->txq_fifo_stall)
7353 return 1;
7354
7355 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7356 /* Stall only occurs in half-duplex mode. */
7357 goto send_packet;
7358 }
7359
7360 if (len >= WM_82547_PAD_LEN + space) {
7361 txq->txq_fifo_stall = 1;
7362 callout_schedule(&sc->sc_txfifo_ch, 1);
7363 return 1;
7364 }
7365
7366 send_packet:
7367 txq->txq_fifo_head += len;
7368 if (txq->txq_fifo_head >= txq->txq_fifo_size)
7369 txq->txq_fifo_head -= txq->txq_fifo_size;
7370
7371 return 0;
7372 }
7373
7374 static int
7375 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7376 {
7377 int error;
7378
7379 /*
7380 * Allocate the control data structures, and create and load the
7381 * DMA map for it.
7382 *
7383 * NOTE: All Tx descriptors must be in the same 4G segment of
7384 * memory. So must Rx descriptors. We simplify by allocating
7385 * both sets within the same 4G segment.
7386 */
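	/*
	 * The 4G constraint is enforced by the 0x100000000ULL boundary
	 * argument passed to bus_dmamem_alloc() below.
	 */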
7387 if (sc->sc_type < WM_T_82544)
7388 WM_NTXDESC(txq) = WM_NTXDESC_82542;
7389 else
7390 WM_NTXDESC(txq) = WM_NTXDESC_82544;
7391 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7392 txq->txq_descsize = sizeof(nq_txdesc_t);
7393 else
7394 txq->txq_descsize = sizeof(wiseman_txdesc_t);
7395
7396 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
7397 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
7398 1, &txq->txq_desc_rseg, 0)) != 0) {
7399 aprint_error_dev(sc->sc_dev,
7400 "unable to allocate TX control data, error = %d\n",
7401 error);
7402 goto fail_0;
7403 }
7404
7405 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
7406 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
7407 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
7408 aprint_error_dev(sc->sc_dev,
7409 "unable to map TX control data, error = %d\n", error);
7410 goto fail_1;
7411 }
7412
7413 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
7414 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
7415 aprint_error_dev(sc->sc_dev,
7416 "unable to create TX control data DMA map, error = %d\n",
7417 error);
7418 goto fail_2;
7419 }
7420
7421 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
7422 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
7423 aprint_error_dev(sc->sc_dev,
7424 "unable to load TX control data DMA map, error = %d\n",
7425 error);
7426 goto fail_3;
7427 }
7428
7429 return 0;
7430
7431 fail_3:
7432 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7433 fail_2:
7434 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7435 WM_TXDESCS_SIZE(txq));
7436 fail_1:
7437 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7438 fail_0:
7439 return error;
7440 }
7441
7442 static void
7443 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7444 {
7445
7446 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
7447 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7448 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7449 WM_TXDESCS_SIZE(txq));
7450 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7451 }
7452
7453 static int
7454 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7455 {
7456 int error;
7457 size_t rxq_descs_size;
7458
7459 /*
7460 * Allocate the control data structures, and create and load the
7461 * DMA map for it.
7462 *
7463 * NOTE: All Tx descriptors must be in the same 4G segment of
7464 * memory. So must Rx descriptors. We simplify by allocating
7465 * both sets within the same 4G segment.
7466 */
7467 rxq->rxq_ndesc = WM_NRXDESC;
7468 if (sc->sc_type == WM_T_82574)
7469 rxq->rxq_descsize = sizeof(ext_rxdesc_t);
7470 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7471 rxq->rxq_descsize = sizeof(nq_rxdesc_t);
7472 else
7473 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
7474 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
7475
7476 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
7477 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
7478 1, &rxq->rxq_desc_rseg, 0)) != 0) {
7479 aprint_error_dev(sc->sc_dev,
7480 "unable to allocate RX control data, error = %d\n",
7481 error);
7482 goto fail_0;
7483 }
7484
7485 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
7486 rxq->rxq_desc_rseg, rxq_descs_size,
7487 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
7488 aprint_error_dev(sc->sc_dev,
7489 "unable to map RX control data, error = %d\n", error);
7490 goto fail_1;
7491 }
7492
7493 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
7494 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
7495 aprint_error_dev(sc->sc_dev,
7496 "unable to create RX control data DMA map, error = %d\n",
7497 error);
7498 goto fail_2;
7499 }
7500
7501 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
7502 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
7503 aprint_error_dev(sc->sc_dev,
7504 "unable to load RX control data DMA map, error = %d\n",
7505 error);
7506 goto fail_3;
7507 }
7508
7509 return 0;
7510
7511 fail_3:
7512 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7513 fail_2:
7514 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7515 rxq_descs_size);
7516 fail_1:
7517 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7518 fail_0:
7519 return error;
7520 }
7521
7522 static void
7523 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7524 {
7525
7526 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
7527 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7528 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7529 rxq->rxq_descsize * rxq->rxq_ndesc);
7530 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7531 }
7532
7534 static int
7535 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7536 {
7537 int i, error;
7538
7539 /* Create the transmit buffer DMA maps. */
7540 WM_TXQUEUELEN(txq) =
7541 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
7542 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
7543 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7544 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
7545 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
7546 &txq->txq_soft[i].txs_dmamap)) != 0) {
7547 aprint_error_dev(sc->sc_dev,
7548 "unable to create Tx DMA map %d, error = %d\n",
7549 i, error);
7550 goto fail;
7551 }
7552 }
7553
7554 return 0;
7555
7556 fail:
7557 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7558 if (txq->txq_soft[i].txs_dmamap != NULL)
7559 bus_dmamap_destroy(sc->sc_dmat,
7560 txq->txq_soft[i].txs_dmamap);
7561 }
7562 return error;
7563 }
7564
7565 static void
7566 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7567 {
7568 int i;
7569
7570 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7571 if (txq->txq_soft[i].txs_dmamap != NULL)
7572 bus_dmamap_destroy(sc->sc_dmat,
7573 txq->txq_soft[i].txs_dmamap);
7574 }
7575 }
7576
7577 static int
7578 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7579 {
7580 int i, error;
7581
7582 /* Create the receive buffer DMA maps. */
7583 for (i = 0; i < rxq->rxq_ndesc; i++) {
7584 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
7585 MCLBYTES, 0, 0,
7586 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
7587 aprint_error_dev(sc->sc_dev,
7588 "unable to create Rx DMA map %d error = %d\n",
7589 i, error);
7590 goto fail;
7591 }
7592 rxq->rxq_soft[i].rxs_mbuf = NULL;
7593 }
7594
7595 return 0;
7596
7597 fail:
7598 for (i = 0; i < rxq->rxq_ndesc; i++) {
7599 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7600 bus_dmamap_destroy(sc->sc_dmat,
7601 rxq->rxq_soft[i].rxs_dmamap);
7602 }
7603 return error;
7604 }
7605
7606 static void
7607 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7608 {
7609 int i;
7610
7611 for (i = 0; i < rxq->rxq_ndesc; i++) {
7612 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7613 bus_dmamap_destroy(sc->sc_dmat,
7614 rxq->rxq_soft[i].rxs_dmamap);
7615 }
7616 }
7617
7618 /*
 * wm_alloc_txrx_queues:
 *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
7621 */
7622 static int
7623 wm_alloc_txrx_queues(struct wm_softc *sc)
7624 {
7625 int i, error, tx_done, rx_done;
7626
7627 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
7628 KM_SLEEP);
7629 if (sc->sc_queue == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
7631 error = ENOMEM;
7632 goto fail_0;
7633 }
7634
7635 /* For transmission */
7636 error = 0;
7637 tx_done = 0;
7638 for (i = 0; i < sc->sc_nqueues; i++) {
7639 #ifdef WM_EVENT_COUNTERS
7640 int j;
7641 const char *xname;
7642 #endif
7643 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7644 txq->txq_sc = sc;
7645 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7646
7647 error = wm_alloc_tx_descs(sc, txq);
7648 if (error)
7649 break;
7650 error = wm_alloc_tx_buffer(sc, txq);
7651 if (error) {
7652 wm_free_tx_descs(sc, txq);
7653 break;
7654 }
7655 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
7656 if (txq->txq_interq == NULL) {
7657 wm_free_tx_descs(sc, txq);
7658 wm_free_tx_buffer(sc, txq);
7659 error = ENOMEM;
7660 break;
7661 }
7662
7663 #ifdef WM_EVENT_COUNTERS
7664 xname = device_xname(sc->sc_dev);
7665
7666 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
7667 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
7668 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
7669 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
7670 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
7671 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
7672 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
7673 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
7674 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
7675 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
7676 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
7677
7678 for (j = 0; j < WM_NTXSEGS; j++) {
7679 snprintf(txq->txq_txseg_evcnt_names[j],
7680 sizeof(txq->txq_txseg_evcnt_names[j]),
7681 "txq%02dtxseg%d", i, j);
7682 evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
7683 EVCNT_TYPE_MISC,
7684 NULL, xname, txq->txq_txseg_evcnt_names[j]);
7685 }
7686
7687 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
7688 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
7689 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
7690 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
7691 /* Only for 82544 (and earlier?) */
7692 if (sc->sc_type <= WM_T_82544)
7693 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
7694 WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
7695 #endif /* WM_EVENT_COUNTERS */
7696
7697 tx_done++;
7698 }
7699 if (error)
7700 goto fail_1;
7701
7702 /* For receive */
7703 error = 0;
7704 rx_done = 0;
7705 for (i = 0; i < sc->sc_nqueues; i++) {
7706 #ifdef WM_EVENT_COUNTERS
7707 const char *xname;
7708 #endif
7709 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7710 rxq->rxq_sc = sc;
7711 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7712
7713 error = wm_alloc_rx_descs(sc, rxq);
7714 if (error)
7715 break;
7716
7717 error = wm_alloc_rx_buffer(sc, rxq);
7718 if (error) {
7719 wm_free_rx_descs(sc, rxq);
7720 break;
7721 }
7722
7723 #ifdef WM_EVENT_COUNTERS
7724 xname = device_xname(sc->sc_dev);
7725
7726 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
7727 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
7728
7729 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
7730 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
7731 #endif /* WM_EVENT_COUNTERS */
7732
7733 rx_done++;
7734 }
7735 if (error)
7736 goto fail_2;
7737
7738 return 0;
7739
7740 fail_2:
7741 for (i = 0; i < rx_done; i++) {
7742 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7743 wm_free_rx_buffer(sc, rxq);
7744 wm_free_rx_descs(sc, rxq);
7745 if (rxq->rxq_lock)
7746 mutex_obj_free(rxq->rxq_lock);
7747 }
7748 fail_1:
7749 for (i = 0; i < tx_done; i++) {
7750 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7751 pcq_destroy(txq->txq_interq);
7752 wm_free_tx_buffer(sc, txq);
7753 wm_free_tx_descs(sc, txq);
7754 if (txq->txq_lock)
7755 mutex_obj_free(txq->txq_lock);
7756 }
7757
7758 kmem_free(sc->sc_queue,
7759 sizeof(struct wm_queue) * sc->sc_nqueues);
7760 fail_0:
7761 return error;
7762 }
7763
7764 /*
 * wm_free_txrx_queues:
 *	Free {tx,rx} descriptors and {tx,rx} buffers.
7767 */
7768 static void
7769 wm_free_txrx_queues(struct wm_softc *sc)
7770 {
7771 int i;
7772
7773 for (i = 0; i < sc->sc_nqueues; i++) {
7774 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7775
7776 #ifdef WM_EVENT_COUNTERS
7777 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
7778 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
7779 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
7780 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
7781 #endif /* WM_EVENT_COUNTERS */
7782
7783 wm_free_rx_buffer(sc, rxq);
7784 wm_free_rx_descs(sc, rxq);
7785 if (rxq->rxq_lock)
7786 mutex_obj_free(rxq->rxq_lock);
7787 }
7788
7789 for (i = 0; i < sc->sc_nqueues; i++) {
7790 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7791 struct mbuf *m;
7792 #ifdef WM_EVENT_COUNTERS
7793 int j;
7794
7795 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
7796 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
7797 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
7798 WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
7799 WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
7800 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
7801 WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
7802 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
7803 WM_Q_EVCNT_DETACH(txq, tso, txq, i);
7804 WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
7805 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
7806
7807 for (j = 0; j < WM_NTXSEGS; j++)
7808 evcnt_detach(&txq->txq_ev_txseg[j]);
7809
7810 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
7811 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
7812 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
7813 WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
7814 if (sc->sc_type <= WM_T_82544)
7815 WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
7816 WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
7817 #endif /* WM_EVENT_COUNTERS */
7818
7819 /* Drain txq_interq */
7820 while ((m = pcq_get(txq->txq_interq)) != NULL)
7821 m_freem(m);
7822 pcq_destroy(txq->txq_interq);
7823
7824 wm_free_tx_buffer(sc, txq);
7825 wm_free_tx_descs(sc, txq);
7826 if (txq->txq_lock)
7827 mutex_obj_free(txq->txq_lock);
7828 }
7829
7830 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
7831 }
7832
7833 static void
7834 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7835 {
7836
7837 KASSERT(mutex_owned(txq->txq_lock));
7838
7839 /* Initialize the transmit descriptor ring. */
7840 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
7841 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
7842 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7843 txq->txq_free = WM_NTXDESC(txq);
7844 txq->txq_next = 0;
7845 }
7846
7847 static void
7848 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7849 struct wm_txqueue *txq)
7850 {
7851
7852 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7853 device_xname(sc->sc_dev), __func__));
7854 KASSERT(mutex_owned(txq->txq_lock));
7855
7856 if (sc->sc_type < WM_T_82543) {
7857 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
7858 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
7859 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
7860 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
7861 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
7862 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
7863 } else {
7864 int qid = wmq->wmq_id;
7865
7866 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
7867 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
7868 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
7869 CSR_WRITE(sc, WMREG_TDH(qid), 0);
7870
7871 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7872 /*
7873 * Don't write TDT before TCTL.EN is set.
7874 * See the document.
7875 */
7876 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
7877 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
7878 | TXDCTL_WTHRESH(0));
7879 else {
7880 /* XXX should update with AIM? */
7881 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
7882 if (sc->sc_type >= WM_T_82540) {
7883 /* Should be the same */
7884 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
7885 }
7886
7887 CSR_WRITE(sc, WMREG_TDT(qid), 0);
7888 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
7889 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
7890 }
7891 }
7892 }
7893
7894 static void
7895 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7896 {
7897 int i;
7898
7899 KASSERT(mutex_owned(txq->txq_lock));
7900
7901 /* Initialize the transmit job descriptors. */
7902 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
7903 txq->txq_soft[i].txs_mbuf = NULL;
7904 txq->txq_sfree = WM_TXQUEUELEN(txq);
7905 txq->txq_snext = 0;
7906 txq->txq_sdirty = 0;
7907 }
7908
7909 static void
7910 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7911 struct wm_txqueue *txq)
7912 {
7913
7914 KASSERT(mutex_owned(txq->txq_lock));
7915
7916 /*
7917 * Set up some register offsets that are different between
7918 * the i82542 and the i82543 and later chips.
7919 */
7920 if (sc->sc_type < WM_T_82543)
7921 txq->txq_tdt_reg = WMREG_OLD_TDT;
7922 else
7923 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
7924
7925 wm_init_tx_descs(sc, txq);
7926 wm_init_tx_regs(sc, wmq, txq);
7927 wm_init_tx_buffer(sc, txq);
7928
	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD. */
7930 txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
7931
7932 txq->txq_sending = false;
7933 }
7934
7935 static void
7936 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7937 struct wm_rxqueue *rxq)
7938 {
7939
7940 KASSERT(mutex_owned(rxq->rxq_lock));
7941
7942 /*
7943 * Initialize the receive descriptor and receive job
7944 * descriptor rings.
7945 */
7946 if (sc->sc_type < WM_T_82543) {
7947 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
7948 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
7949 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
7950 rxq->rxq_descsize * rxq->rxq_ndesc);
7951 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
7952 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
7953 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
7954
7955 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
7956 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
7957 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
7958 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
7959 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
7960 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
7961 } else {
7962 int qid = wmq->wmq_id;
7963
7964 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
7965 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
7966 CSR_WRITE(sc, WMREG_RDLEN(qid),
7967 rxq->rxq_descsize * rxq->rxq_ndesc);
7968
7969 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7970 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
7971 panic("%s: MCLBYTES %d unsupported for 82575 "
7972 "or higher\n", __func__, MCLBYTES);
7973
			/*
			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
			 * supported.
			 */
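			/*
			 * The BSIZEPKT field below is
			 * MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT; e.g. with
			 * MCLBYTES = 2048 and a shift of 10, it is set
			 * to 2 (2 Kbyte receive buffers).
			 */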
7978 CSR_WRITE(sc, WMREG_SRRCTL(qid),
7979 SRRCTL_DESCTYPE_ADV_ONEBUF
7980 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
7981 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
7982 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
7983 | RXDCTL_WTHRESH(1));
7984 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7985 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7986 } else {
7987 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7988 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7989 /* XXX should update with AIM? */
7990 CSR_WRITE(sc, WMREG_RDTR,
7991 (wmq->wmq_itr / 4) | RDTR_FPD);
7992 /* MUST be same */
7993 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
7994 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
7995 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
7996 }
7997 }
7998 }
7999
8000 static int
8001 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8002 {
8003 struct wm_rxsoft *rxs;
8004 int error, i;
8005
8006 KASSERT(mutex_owned(rxq->rxq_lock));
8007
8008 for (i = 0; i < rxq->rxq_ndesc; i++) {
8009 rxs = &rxq->rxq_soft[i];
8010 if (rxs->rxs_mbuf == NULL) {
8011 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
8012 log(LOG_ERR, "%s: unable to allocate or map "
8013 "rx buffer %d, error = %d\n",
8014 device_xname(sc->sc_dev), i, error);
8015 /*
8016 * XXX Should attempt to run with fewer receive
8017 * XXX buffers instead of just failing.
8018 */
8019 wm_rxdrain(rxq);
8020 return ENOMEM;
8021 }
8022 } else {
8023 /*
8024 * For 82575 and 82576, the RX descriptors must be
8025 * initialized after the setting of RCTL.EN in
8026 * wm_set_filter()
8027 */
8028 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
8029 wm_init_rxdesc(rxq, i);
8030 }
8031 }
8032 rxq->rxq_ptr = 0;
8033 rxq->rxq_discard = 0;
8034 WM_RXCHAIN_RESET(rxq);
8035
8036 return 0;
8037 }
8038
8039 static int
8040 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8041 struct wm_rxqueue *rxq)
8042 {
8043
8044 KASSERT(mutex_owned(rxq->rxq_lock));
8045
8046 /*
8047 * Set up some register offsets that are different between
8048 * the i82542 and the i82543 and later chips.
8049 */
8050 if (sc->sc_type < WM_T_82543)
8051 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
8052 else
8053 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
8054
8055 wm_init_rx_regs(sc, wmq, rxq);
8056 return wm_init_rx_buffer(sc, rxq);
8057 }
8058
8059 /*
 * wm_init_txrx_queues:
 *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
8062 */
8063 static int
8064 wm_init_txrx_queues(struct wm_softc *sc)
8065 {
8066 int i, error = 0;
8067
8068 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8069 device_xname(sc->sc_dev), __func__));
8070
8071 for (i = 0; i < sc->sc_nqueues; i++) {
8072 struct wm_queue *wmq = &sc->sc_queue[i];
8073 struct wm_txqueue *txq = &wmq->wmq_txq;
8074 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8075
		/*
		 * TODO
		 * Currently, use a constant value instead of AIM (Adaptive
		 * Interrupt Moderation). Furthermore, the interrupt
		 * interval of a multiqueue configuration (which uses
		 * polling mode) is lower than the default value. More
		 * tuning and AIM support are required.
		 */
8083 if (wm_is_using_multiqueue(sc))
8084 wmq->wmq_itr = 50;
8085 else
8086 wmq->wmq_itr = sc->sc_itr_init;
8087 wmq->wmq_set_itr = true;
8088
8089 mutex_enter(txq->txq_lock);
8090 wm_init_tx_queue(sc, wmq, txq);
8091 mutex_exit(txq->txq_lock);
8092
8093 mutex_enter(rxq->rxq_lock);
8094 error = wm_init_rx_queue(sc, wmq, rxq);
8095 mutex_exit(rxq->rxq_lock);
8096 if (error)
8097 break;
8098 }
8099
8100 return error;
8101 }
8102
8103 /*
8104 * wm_tx_offload:
8105 *
8106 * Set up TCP/IP checksumming parameters for the
8107 * specified packet.
8108 */
8109 static void
8110 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8111 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
8112 {
8113 struct mbuf *m0 = txs->txs_mbuf;
8114 struct livengood_tcpip_ctxdesc *t;
8115 uint32_t ipcs, tucs, cmd, cmdlen, seg;
8116 uint32_t ipcse;
8117 struct ether_header *eh;
8118 int offset, iphl;
8119 uint8_t fields;
8120
8121 /*
8122 * XXX It would be nice if the mbuf pkthdr had offset
8123 * fields for the protocol headers.
8124 */
8125
8126 eh = mtod(m0, struct ether_header *);
8127 switch (htons(eh->ether_type)) {
8128 case ETHERTYPE_IP:
8129 case ETHERTYPE_IPV6:
8130 offset = ETHER_HDR_LEN;
8131 break;
8132
8133 case ETHERTYPE_VLAN:
8134 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8135 break;
8136
8137 default:
8138 /* Don't support this protocol or encapsulation. */
8139 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8140 txq->txq_last_hw_ipcs = 0;
8141 txq->txq_last_hw_tucs = 0;
8142 *fieldsp = 0;
8143 *cmdp = 0;
8144 return;
8145 }
8146
8147 if ((m0->m_pkthdr.csum_flags &
8148 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8149 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8150 } else
8151 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8152
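	/* IPCSE is the inclusive offset of the last byte of the IP header. */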
8153 ipcse = offset + iphl - 1;
8154
8155 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
8156 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
8157 seg = 0;
8158 fields = 0;
8159
8160 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8161 int hlen = offset + iphl;
8162 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8163
8164 if (__predict_false(m0->m_len <
8165 (hlen + sizeof(struct tcphdr)))) {
8166 /*
8167 * TCP/IP headers are not in the first mbuf; we need
8168 * to do this the slow and painful way. Let's just
8169 * hope this doesn't happen very often.
8170 */
8171 struct tcphdr th;
8172
8173 WM_Q_EVCNT_INCR(txq, tsopain);
8174
8175 m_copydata(m0, hlen, sizeof(th), &th);
8176 if (v4) {
8177 struct ip ip;
8178
8179 m_copydata(m0, offset, sizeof(ip), &ip);
8180 ip.ip_len = 0;
8181 m_copyback(m0,
8182 offset + offsetof(struct ip, ip_len),
8183 sizeof(ip.ip_len), &ip.ip_len);
8184 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8185 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8186 } else {
8187 struct ip6_hdr ip6;
8188
8189 m_copydata(m0, offset, sizeof(ip6), &ip6);
8190 ip6.ip6_plen = 0;
8191 m_copyback(m0,
8192 offset + offsetof(struct ip6_hdr, ip6_plen),
8193 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8194 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8195 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8196 }
8197 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8198 sizeof(th.th_sum), &th.th_sum);
8199
8200 hlen += th.th_off << 2;
8201 } else {
8202 /*
8203 * TCP/IP headers are in the first mbuf; we can do
8204 * this the easy way.
8205 */
8206 struct tcphdr *th;
8207
8208 if (v4) {
8209 struct ip *ip =
8210 (void *)(mtod(m0, char *) + offset);
8211 th = (void *)(mtod(m0, char *) + hlen);
8212
8213 ip->ip_len = 0;
8214 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8215 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8216 } else {
8217 struct ip6_hdr *ip6 =
8218 (void *)(mtod(m0, char *) + offset);
8219 th = (void *)(mtod(m0, char *) + hlen);
8220
8221 ip6->ip6_plen = 0;
8222 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8223 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8224 }
8225 hlen += th->th_off << 2;
8226 }
8227
8228 if (v4) {
8229 WM_Q_EVCNT_INCR(txq, tso);
8230 cmdlen |= WTX_TCPIP_CMD_IP;
8231 } else {
8232 WM_Q_EVCNT_INCR(txq, tso6);
8233 ipcse = 0;
8234 }
8235 cmd |= WTX_TCPIP_CMD_TSE;
8236 cmdlen |= WTX_TCPIP_CMD_TSE |
8237 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
8238 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
8239 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
8240 }
8241
8242 /*
8243 * NOTE: Even if we're not using the IP or TCP/UDP checksum
8244 * offload feature, if we load the context descriptor, we
8245 * MUST provide valid values for IPCSS and TUCSS fields.
8246 */
8247
8248 ipcs = WTX_TCPIP_IPCSS(offset) |
8249 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
8250 WTX_TCPIP_IPCSE(ipcse);
8251 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
8252 WM_Q_EVCNT_INCR(txq, ipsum);
8253 fields |= WTX_IXSM;
8254 }
8255
8256 offset += iphl;
8257
8258 if (m0->m_pkthdr.csum_flags &
8259 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
8260 WM_Q_EVCNT_INCR(txq, tusum);
8261 fields |= WTX_TXSM;
8262 tucs = WTX_TCPIP_TUCSS(offset) |
8263 WTX_TCPIP_TUCSO(offset +
8264 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
8265 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8266 } else if ((m0->m_pkthdr.csum_flags &
8267 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
8268 WM_Q_EVCNT_INCR(txq, tusum6);
8269 fields |= WTX_TXSM;
8270 tucs = WTX_TCPIP_TUCSS(offset) |
8271 WTX_TCPIP_TUCSO(offset +
8272 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
8273 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8274 } else {
8275 /* Just initialize it to a valid TCP context. */
8276 tucs = WTX_TCPIP_TUCSS(offset) |
8277 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
8278 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8279 }
8280
8281 *cmdp = cmd;
8282 *fieldsp = fields;
8283
	/*
	 * We don't have to write a context descriptor for every packet,
	 * except on the 82574. For the 82574, we must write a context
	 * descriptor for every packet when we use two descriptor queues.
	 *
	 * The 82574L can only remember the *last* context used,
	 * regardless of the queue it was used for. We cannot reuse
	 * contexts on this hardware platform and must generate a new
	 * context every time. See the 82574L hardware spec, section
	 * 7.2.6, second note.
	 */
8295 if (sc->sc_nqueues < 2) {
		/*
		 * Setting up a new checksum offload context for every
		 * frame takes a lot of processing time in hardware. This
		 * also hurts performance a lot for small frames, so avoid
		 * it if the driver can reuse a previously configured
		 * checksum offload context.
		 * For TSO, in theory we could reuse the same TSO context
		 * if the frame is of the same type (IP/TCP) and has the
		 * same MSS. However, checking whether a frame has the
		 * same IP/TCP structure is hard, so just ignore that and
		 * always establish a new TSO context.
		 */
8308 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
8309 == 0) {
8310 if (txq->txq_last_hw_cmd == cmd &&
8311 txq->txq_last_hw_fields == fields &&
8312 txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
8313 txq->txq_last_hw_tucs == (tucs & 0xffff)) {
8314 WM_Q_EVCNT_INCR(txq, skipcontext);
8315 return;
8316 }
8317 }
8318
8319 txq->txq_last_hw_cmd = cmd;
8320 txq->txq_last_hw_fields = fields;
8321 txq->txq_last_hw_ipcs = (ipcs & 0xffff);
8322 txq->txq_last_hw_tucs = (tucs & 0xffff);
8323 }
8324
8325 /* Fill in the context descriptor. */
8326 t = (struct livengood_tcpip_ctxdesc *)
8327 &txq->txq_descs[txq->txq_next];
8328 t->tcpip_ipcs = htole32(ipcs);
8329 t->tcpip_tucs = htole32(tucs);
8330 t->tcpip_cmdlen = htole32(cmdlen);
8331 t->tcpip_seg = htole32(seg);
8332 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8333
8334 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8335 txs->txs_ndesc++;
8336 }
8337
8338 static inline int
8339 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
8340 {
8341 struct wm_softc *sc = ifp->if_softc;
8342 u_int cpuid = cpu_index(curcpu());
8343
	/*
	 * Currently, a simple distribution strategy.
	 * TODO:
	 * Distribute by flowid (RSS hash value).
	 */
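	/*
	 * For example: with ncpu = 8, an affinity offset of 2, four
	 * queues and cpuid 5, this selects ((5 + 8 - 2) % 8) % 4 =
	 * queue 3.
	 */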
	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu)
	    % sc->sc_nqueues;
8350 }
8351
8352 static inline bool
8353 wm_linkdown_discard(struct wm_txqueue *txq)
8354 {
8355
8356 if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
8357 return true;
8358
8359 return false;
8360 }
8361
8362 /*
8363 * wm_start: [ifnet interface function]
8364 *
8365 * Start packet transmission on the interface.
8366 */
8367 static void
8368 wm_start(struct ifnet *ifp)
8369 {
8370 struct wm_softc *sc = ifp->if_softc;
8371 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8372
8373 KASSERT(if_is_mpsafe(ifp));
8374 /*
8375 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8376 */
8377
8378 mutex_enter(txq->txq_lock);
8379 if (!txq->txq_stopping)
8380 wm_start_locked(ifp);
8381 mutex_exit(txq->txq_lock);
8382 }
8383
8384 static void
8385 wm_start_locked(struct ifnet *ifp)
8386 {
8387 struct wm_softc *sc = ifp->if_softc;
8388 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8389
8390 wm_send_common_locked(ifp, txq, false);
8391 }
8392
8393 static int
8394 wm_transmit(struct ifnet *ifp, struct mbuf *m)
8395 {
8396 int qid;
8397 struct wm_softc *sc = ifp->if_softc;
8398 struct wm_txqueue *txq;
8399
8400 qid = wm_select_txqueue(ifp, m);
8401 txq = &sc->sc_queue[qid].wmq_txq;
8402
8403 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8404 m_freem(m);
8405 WM_Q_EVCNT_INCR(txq, pcqdrop);
8406 return ENOBUFS;
8407 }
8408
8409 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8410 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8411 if (m->m_flags & M_MCAST)
8412 if_statinc_ref(nsr, if_omcasts);
8413 IF_STAT_PUTREF(ifp);
8414
8415 if (mutex_tryenter(txq->txq_lock)) {
8416 if (!txq->txq_stopping)
8417 wm_transmit_locked(ifp, txq);
8418 mutex_exit(txq->txq_lock);
8419 }
8420
8421 return 0;
8422 }
8423
8424 static void
8425 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8426 {
8427
8428 wm_send_common_locked(ifp, txq, true);
8429 }
8430
8431 static void
8432 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8433 bool is_transmit)
8434 {
8435 struct wm_softc *sc = ifp->if_softc;
8436 struct mbuf *m0;
8437 struct wm_txsoft *txs;
8438 bus_dmamap_t dmamap;
8439 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
8440 bus_addr_t curaddr;
8441 bus_size_t seglen, curlen;
8442 uint32_t cksumcmd;
8443 uint8_t cksumfields;
8444 bool remap = true;
8445
8446 KASSERT(mutex_owned(txq->txq_lock));
8447 KASSERT(!txq->txq_stopping);
8448
8449 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8450 return;
8451
8452 if (__predict_false(wm_linkdown_discard(txq))) {
8453 do {
8454 if (is_transmit)
8455 m0 = pcq_get(txq->txq_interq);
8456 else
8457 IFQ_DEQUEUE(&ifp->if_snd, m0);
			/*
			 * Increment the success counter as if the packet
			 * had been sent and then discarded by the
			 * link-down PHY.
			 */
8462 if (m0 != NULL) {
8463 if_statinc(ifp, if_opackets);
8464 m_freem(m0);
8465 }
8466 } while (m0 != NULL);
8467 return;
8468 }
8469
8470 /* Remember the previous number of free descriptors. */
8471 ofree = txq->txq_free;
8472
8473 /*
8474 * Loop through the send queue, setting up transmit descriptors
8475 * until we drain the queue, or use up all available transmit
8476 * descriptors.
8477 */
8478 for (;;) {
8479 m0 = NULL;
8480
8481 /* Get a work queue entry. */
8482 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8483 wm_txeof(txq, UINT_MAX);
8484 if (txq->txq_sfree == 0) {
8485 DPRINTF(sc, WM_DEBUG_TX,
8486 ("%s: TX: no free job descriptors\n",
8487 device_xname(sc->sc_dev)));
8488 WM_Q_EVCNT_INCR(txq, txsstall);
8489 break;
8490 }
8491 }
8492
8493 /* Grab a packet off the queue. */
8494 if (is_transmit)
8495 m0 = pcq_get(txq->txq_interq);
8496 else
8497 IFQ_DEQUEUE(&ifp->if_snd, m0);
8498 if (m0 == NULL)
8499 break;
8500
8501 DPRINTF(sc, WM_DEBUG_TX,
8502 ("%s: TX: have packet to transmit: %p\n",
8503 device_xname(sc->sc_dev), m0));
8504
8505 txs = &txq->txq_soft[txq->txq_snext];
8506 dmamap = txs->txs_dmamap;
8507
8508 use_tso = (m0->m_pkthdr.csum_flags &
8509 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
8510
8511 /*
8512 * So says the Linux driver:
8513 * The controller does a simple calculation to make sure
8514 * there is enough room in the FIFO before initiating the
8515 * DMA for each buffer. The calc is:
8516 * 4 = ceil(buffer len / MSS)
8517 * To make sure we don't overrun the FIFO, adjust the max
8518 * buffer len if the MSS drops.
8519 */
8520 dmamap->dm_maxsegsz =
8521 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
8522 ? m0->m_pkthdr.segsz << 2
8523 : WTX_MAX_LEN;
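		/*
		 * For example, with an MSS of 1448 the per-segment limit
		 * becomes 1448 << 2 = 5792 bytes, below WTX_MAX_LEN.
		 */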
8524
8525 /*
8526 * Load the DMA map. If this fails, the packet either
8527 * didn't fit in the allotted number of segments, or we
8528 * were short on resources. For the too-many-segments
8529 * case, we simply report an error and drop the packet,
8530 * since we can't sanely copy a jumbo packet to a single
8531 * buffer.
8532 */
8533 retry:
8534 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8535 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8536 if (__predict_false(error)) {
8537 if (error == EFBIG) {
8538 if (remap == true) {
8539 struct mbuf *m;
8540
8541 remap = false;
8542 m = m_defrag(m0, M_NOWAIT);
8543 if (m != NULL) {
8544 WM_Q_EVCNT_INCR(txq, defrag);
8545 m0 = m;
8546 goto retry;
8547 }
8548 }
8549 WM_Q_EVCNT_INCR(txq, toomanyseg);
8550 log(LOG_ERR, "%s: Tx packet consumes too many "
8551 "DMA segments, dropping...\n",
8552 device_xname(sc->sc_dev));
8553 wm_dump_mbuf_chain(sc, m0);
8554 m_freem(m0);
8555 continue;
8556 }
8557 /* Short on resources, just stop for now. */
8558 DPRINTF(sc, WM_DEBUG_TX,
8559 ("%s: TX: dmamap load failed: %d\n",
8560 device_xname(sc->sc_dev), error));
8561 break;
8562 }
8563
8564 segs_needed = dmamap->dm_nsegs;
8565 if (use_tso) {
8566 /* For sentinel descriptor; see below. */
8567 segs_needed++;
8568 }
8569
8570 /*
8571 * Ensure we have enough descriptors free to describe
8572 * the packet. Note, we always reserve one descriptor
8573 * at the end of the ring due to the semantics of the
8574 * TDT register, plus one more in the event we need
8575 * to load offload context.
8576 */
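		/*
		 * For example: a packet loaded into 4 DMA segments with
		 * TSO has segs_needed = 5 (one extra for the sentinel),
		 * so at least 7 free descriptors are required here.
		 */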
8577 if (segs_needed > txq->txq_free - 2) {
8578 /*
8579 * Not enough free descriptors to transmit this
8580 * packet. We haven't committed anything yet,
8581 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
8583 * layer that there are no more slots left.
8584 */
8585 DPRINTF(sc, WM_DEBUG_TX,
8586 ("%s: TX: need %d (%d) descriptors, have %d\n",
8587 device_xname(sc->sc_dev), dmamap->dm_nsegs,
8588 segs_needed, txq->txq_free - 1));
8589 txq->txq_flags |= WM_TXQ_NO_SPACE;
8590 bus_dmamap_unload(sc->sc_dmat, dmamap);
8591 WM_Q_EVCNT_INCR(txq, txdstall);
8592 break;
8593 }
8594
8595 /*
8596 * Check for 82547 Tx FIFO bug. We need to do this
8597 * once we know we can transmit the packet, since we
8598 * do some internal FIFO space accounting here.
8599 */
8600 if (sc->sc_type == WM_T_82547 &&
8601 wm_82547_txfifo_bugchk(sc, m0)) {
8602 DPRINTF(sc, WM_DEBUG_TX,
8603 ("%s: TX: 82547 Tx FIFO bug detected\n",
8604 device_xname(sc->sc_dev)));
8605 txq->txq_flags |= WM_TXQ_NO_SPACE;
8606 bus_dmamap_unload(sc->sc_dmat, dmamap);
8607 WM_Q_EVCNT_INCR(txq, fifo_stall);
8608 break;
8609 }
8610
8611 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8612
8613 DPRINTF(sc, WM_DEBUG_TX,
8614 ("%s: TX: packet has %d (%d) DMA segments\n",
8615 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8616
8617 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8618
8619 /*
8620 * Store a pointer to the packet so that we can free it
8621 * later.
8622 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments. This may be
8625 * incremented by 1 if we do checksum offload (a descriptor
8626 * is used to set the checksum context).
8627 */
8628 txs->txs_mbuf = m0;
8629 txs->txs_firstdesc = txq->txq_next;
8630 txs->txs_ndesc = segs_needed;
8631
8632 /* Set up offload parameters for this packet. */
8633 if (m0->m_pkthdr.csum_flags &
8634 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8635 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8636 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8637 wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
8638 } else {
8639 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8640 txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
8641 cksumcmd = 0;
8642 cksumfields = 0;
8643 }
8644
8645 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
8646
8647 /* Sync the DMA map. */
8648 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8649 BUS_DMASYNC_PREWRITE);
8650
8651 /* Initialize the transmit descriptor. */
8652 for (nexttx = txq->txq_next, seg = 0;
8653 seg < dmamap->dm_nsegs; seg++) {
8654 for (seglen = dmamap->dm_segs[seg].ds_len,
8655 curaddr = dmamap->dm_segs[seg].ds_addr;
8656 seglen != 0;
8657 curaddr += curlen, seglen -= curlen,
8658 nexttx = WM_NEXTTX(txq, nexttx)) {
8659 curlen = seglen;
8660
8661 /*
8662 * So says the Linux driver:
8663 * Work around for premature descriptor
8664 * write-backs in TSO mode. Append a
8665 * 4-byte sentinel descriptor.
8666 */
8667 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
8668 curlen > 8)
8669 curlen -= 4;
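				/*
				 * The 4 bytes trimmed here become a
				 * separate final descriptor on the next
				 * loop iteration: the sentinel.
				 */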
8670
8671 wm_set_dma_addr(
8672 &txq->txq_descs[nexttx].wtx_addr, curaddr);
8673 txq->txq_descs[nexttx].wtx_cmdlen
8674 = htole32(cksumcmd | curlen);
8675 txq->txq_descs[nexttx].wtx_fields.wtxu_status
8676 = 0;
8677 txq->txq_descs[nexttx].wtx_fields.wtxu_options
8678 = cksumfields;
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
8680 lasttx = nexttx;
8681
8682 DPRINTF(sc, WM_DEBUG_TX,
8683 ("%s: TX: desc %d: low %#" PRIx64 ", "
8684 "len %#04zx\n",
8685 device_xname(sc->sc_dev), nexttx,
8686 (uint64_t)curaddr, curlen));
8687 }
8688 }
8689
8690 KASSERT(lasttx != -1);
8691
8692 /*
8693 * Set up the command byte on the last descriptor of
8694 * the packet. If we're in the interrupt delay window,
8695 * delay the interrupt.
8696 */
8697 txq->txq_descs[lasttx].wtx_cmdlen |=
8698 htole32(WTX_CMD_EOP | WTX_CMD_RS);
8699
8700 /*
8701 * If VLANs are enabled and the packet has a VLAN tag, set
8702 * up the descriptor to encapsulate the packet for us.
8703 *
8704 * This is only valid on the last descriptor of the packet.
8705 */
8706 if (vlan_has_tag(m0)) {
8707 txq->txq_descs[lasttx].wtx_cmdlen |=
8708 htole32(WTX_CMD_VLE);
8709 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
8710 = htole16(vlan_get_tag(m0));
8711 }
8712
8713 txs->txs_lastdesc = lasttx;
8714
8715 DPRINTF(sc, WM_DEBUG_TX,
8716 ("%s: TX: desc %d: cmdlen 0x%08x\n",
8717 device_xname(sc->sc_dev),
8718 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8719
8720 /* Sync the descriptors we're using. */
8721 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8722 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8723
8724 /* Give the packet to the chip. */
8725 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8726
8727 DPRINTF(sc, WM_DEBUG_TX,
8728 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8729
8730 DPRINTF(sc, WM_DEBUG_TX,
8731 ("%s: TX: finished transmitting packet, job %d\n",
8732 device_xname(sc->sc_dev), txq->txq_snext));
8733
8734 /* Advance the tx pointer. */
8735 txq->txq_free -= txs->txs_ndesc;
8736 txq->txq_next = nexttx;
8737
8738 txq->txq_sfree--;
8739 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8740
8741 /* Pass the packet to any BPF listeners. */
8742 bpf_mtap(ifp, m0, BPF_D_OUT);
8743 }
8744
8745 if (m0 != NULL) {
8746 txq->txq_flags |= WM_TXQ_NO_SPACE;
8747 WM_Q_EVCNT_INCR(txq, descdrop);
8748 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8749 __func__));
8750 m_freem(m0);
8751 }
8752
8753 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8754 /* No more slots; notify upper layer. */
8755 txq->txq_flags |= WM_TXQ_NO_SPACE;
8756 }
8757
8758 if (txq->txq_free != ofree) {
8759 /* Set a watchdog timer in case the chip flakes out. */
8760 txq->txq_lastsent = time_uptime;
8761 txq->txq_sending = true;
8762 }
8763 }
8764
8765 /*
8766 * wm_nq_tx_offload:
8767 *
8768 * Set up TCP/IP checksumming parameters for the
 *	specified packet, for NEWQUEUE devices.
8770 */
8771 static void
8772 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8773 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
8774 {
8775 struct mbuf *m0 = txs->txs_mbuf;
8776 uint32_t vl_len, mssidx, cmdc;
8777 struct ether_header *eh;
8778 int offset, iphl;
8779
8780 /*
8781 * XXX It would be nice if the mbuf pkthdr had offset
8782 * fields for the protocol headers.
8783 */
8784 *cmdlenp = 0;
8785 *fieldsp = 0;
8786
8787 eh = mtod(m0, struct ether_header *);
8788 switch (htons(eh->ether_type)) {
8789 case ETHERTYPE_IP:
8790 case ETHERTYPE_IPV6:
8791 offset = ETHER_HDR_LEN;
8792 break;
8793
8794 case ETHERTYPE_VLAN:
8795 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8796 break;
8797
8798 default:
8799 /* Don't support this protocol or encapsulation. */
8800 *do_csum = false;
8801 return;
8802 }
8803 *do_csum = true;
8804 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
8805 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
8806
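	/*
	 * vl_len packs the VLAN tag, MAC header length and IP header
	 * length into the first word of the context descriptor.
	 */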
8807 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
8808 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
8809
8810 if ((m0->m_pkthdr.csum_flags &
8811 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8812 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8813 } else {
8814 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8815 }
8816 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
8817 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
8818
8819 if (vlan_has_tag(m0)) {
8820 vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
8821 << NQTXC_VLLEN_VLAN_SHIFT);
8822 *cmdlenp |= NQTX_CMD_VLE;
8823 }
8824
8825 mssidx = 0;
8826
8827 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8828 int hlen = offset + iphl;
8829 int tcp_hlen;
8830 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8831
8832 if (__predict_false(m0->m_len <
8833 (hlen + sizeof(struct tcphdr)))) {
8834 /*
8835 * TCP/IP headers are not in the first mbuf; we need
8836 * to do this the slow and painful way. Let's just
8837 * hope this doesn't happen very often.
8838 */
8839 struct tcphdr th;
8840
8841 WM_Q_EVCNT_INCR(txq, tsopain);
8842
8843 m_copydata(m0, hlen, sizeof(th), &th);
8844 if (v4) {
8845 struct ip ip;
8846
8847 m_copydata(m0, offset, sizeof(ip), &ip);
8848 ip.ip_len = 0;
8849 m_copyback(m0,
8850 offset + offsetof(struct ip, ip_len),
8851 sizeof(ip.ip_len), &ip.ip_len);
8852 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8853 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8854 } else {
8855 struct ip6_hdr ip6;
8856
8857 m_copydata(m0, offset, sizeof(ip6), &ip6);
8858 ip6.ip6_plen = 0;
8859 m_copyback(m0,
8860 offset + offsetof(struct ip6_hdr, ip6_plen),
8861 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8862 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8863 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8864 }
8865 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8866 sizeof(th.th_sum), &th.th_sum);
8867
8868 tcp_hlen = th.th_off << 2;
8869 } else {
8870 /*
8871 * TCP/IP headers are in the first mbuf; we can do
8872 * this the easy way.
8873 */
8874 struct tcphdr *th;
8875
8876 if (v4) {
8877 struct ip *ip =
8878 (void *)(mtod(m0, char *) + offset);
8879 th = (void *)(mtod(m0, char *) + hlen);
8880
8881 ip->ip_len = 0;
8882 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8883 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8884 } else {
8885 struct ip6_hdr *ip6 =
8886 (void *)(mtod(m0, char *) + offset);
8887 th = (void *)(mtod(m0, char *) + hlen);
8888
8889 ip6->ip6_plen = 0;
8890 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8891 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8892 }
8893 tcp_hlen = th->th_off << 2;
8894 }
8895 hlen += tcp_hlen;
8896 *cmdlenp |= NQTX_CMD_TSE;
8897
8898 if (v4) {
8899 WM_Q_EVCNT_INCR(txq, tso);
8900 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
8901 } else {
8902 WM_Q_EVCNT_INCR(txq, tso6);
8903 *fieldsp |= NQTXD_FIELDS_TUXSM;
8904 }
8905 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
8906 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8907 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
8908 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
8909 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
8910 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
8911 } else {
8912 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
8913 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8914 }
8915
8916 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
8917 *fieldsp |= NQTXD_FIELDS_IXSM;
8918 cmdc |= NQTXC_CMD_IP4;
8919 }
8920
8921 if (m0->m_pkthdr.csum_flags &
8922 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
8923 WM_Q_EVCNT_INCR(txq, tusum);
8924 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
8925 cmdc |= NQTXC_CMD_TCP;
8926 else
8927 cmdc |= NQTXC_CMD_UDP;
8928
8929 cmdc |= NQTXC_CMD_IP4;
8930 *fieldsp |= NQTXD_FIELDS_TUXSM;
8931 }
8932 if (m0->m_pkthdr.csum_flags &
8933 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
8934 WM_Q_EVCNT_INCR(txq, tusum6);
8935 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
8936 cmdc |= NQTXC_CMD_TCP;
8937 else
8938 cmdc |= NQTXC_CMD_UDP;
8939
8940 cmdc |= NQTXC_CMD_IP6;
8941 *fieldsp |= NQTXD_FIELDS_TUXSM;
8942 }
8943
	/*
	 * We don't have to write a context descriptor for every packet on
	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
	 * I354, I210 and I211; writing one once per Tx queue is enough
	 * for these controllers. Writing a context descriptor for every
	 * packet adds overhead, but it does not cause problems.
	 */
8952 /* Fill in the context descriptor. */
8953 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
8954 htole32(vl_len);
8955 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
8956 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
8957 htole32(cmdc);
8958 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
8959 htole32(mssidx);
8960 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8961 DPRINTF(sc, WM_DEBUG_TX,
8962 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
8963 txq->txq_next, 0, vl_len));
8964 DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
8965 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8966 txs->txs_ndesc++;
8967 }
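
/*
 * A rough sketch (not driver code) of the descriptor sequence built for
 * an offloaded packet, assuming wm_nq_tx_offload() wrote the context
 * descriptor and wm_nq_send_common_locked() wrote the data descriptors:
 *
 *	[ context desc:	vl_len | cmdc | mssidx ]
 *	[ data desc:	seg 0 addr | cmdlen | fields ]
 *	[ data desc:	seg N addr | dcmdlen ]	(EOP | RS set on the last)
 */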
8968
8969 /*
8970 * wm_nq_start: [ifnet interface function]
8971 *
8972 * Start packet transmission on the interface for NEWQUEUE devices
8973 */
8974 static void
8975 wm_nq_start(struct ifnet *ifp)
8976 {
8977 struct wm_softc *sc = ifp->if_softc;
8978 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8979
8980 KASSERT(if_is_mpsafe(ifp));
8981 /*
8982 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8983 */
8984
8985 mutex_enter(txq->txq_lock);
8986 if (!txq->txq_stopping)
8987 wm_nq_start_locked(ifp);
8988 mutex_exit(txq->txq_lock);
8989 }
8990
8991 static void
8992 wm_nq_start_locked(struct ifnet *ifp)
8993 {
8994 struct wm_softc *sc = ifp->if_softc;
8995 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8996
8997 wm_nq_send_common_locked(ifp, txq, false);
8998 }
8999
9000 static int
9001 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
9002 {
9003 int qid;
9004 struct wm_softc *sc = ifp->if_softc;
9005 struct wm_txqueue *txq;
9006
9007 qid = wm_select_txqueue(ifp, m);
9008 txq = &sc->sc_queue[qid].wmq_txq;
9009
9010 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
9011 m_freem(m);
9012 WM_Q_EVCNT_INCR(txq, pcqdrop);
9013 return ENOBUFS;
9014 }
9015
9016 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
9017 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
9018 if (m->m_flags & M_MCAST)
9019 if_statinc_ref(nsr, if_omcasts);
9020 IF_STAT_PUTREF(ifp);
9021
	/*
	 * There are two situations in which this mutex_tryenter() can
	 * fail at run time:
	 * (1) contention with the interrupt handler (wm_txrxintr_msix())
	 * (2) contention with the deferred if_start softint
	 *     (wm_handle_queue())
	 * In case (1), the last packet enqueued to txq->txq_interq is
	 * dequeued by wm_deferred_start_locked(), so it does not get
	 * stuck. In case (2), the last packet enqueued to txq->txq_interq
	 * is also dequeued by wm_deferred_start_locked(), so it does not
	 * get stuck, either.
	 */
9033 if (mutex_tryenter(txq->txq_lock)) {
9034 if (!txq->txq_stopping)
9035 wm_nq_transmit_locked(ifp, txq);
9036 mutex_exit(txq->txq_lock);
9037 }
9038
9039 return 0;
9040 }
9041
9042 static void
9043 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
9044 {
9045
9046 wm_nq_send_common_locked(ifp, txq, true);
9047 }
9048
9049 static void
9050 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
9051 bool is_transmit)
9052 {
9053 struct wm_softc *sc = ifp->if_softc;
9054 struct mbuf *m0;
9055 struct wm_txsoft *txs;
9056 bus_dmamap_t dmamap;
9057 int error, nexttx, lasttx = -1, seg, segs_needed;
9058 bool do_csum, sent;
9059 bool remap = true;
9060
9061 KASSERT(mutex_owned(txq->txq_lock));
9062 KASSERT(!txq->txq_stopping);
9063
9064 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
9065 return;
9066
9067 if (__predict_false(wm_linkdown_discard(txq))) {
9068 do {
9069 if (is_transmit)
9070 m0 = pcq_get(txq->txq_interq);
9071 else
9072 IFQ_DEQUEUE(&ifp->if_snd, m0);
			/*
			 * Increment the sent-packet counter, as in the
			 * case where the packet is discarded by the PHY
			 * because the link is down.
			 */
9077 if (m0 != NULL) {
9078 if_statinc(ifp, if_opackets);
9079 m_freem(m0);
9080 }
9081 } while (m0 != NULL);
9082 return;
9083 }
9084
9085 sent = false;
9086
9087 /*
9088 * Loop through the send queue, setting up transmit descriptors
9089 * until we drain the queue, or use up all available transmit
9090 * descriptors.
9091 */
9092 for (;;) {
9093 m0 = NULL;
9094
9095 /* Get a work queue entry. */
9096 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
9097 wm_txeof(txq, UINT_MAX);
9098 if (txq->txq_sfree == 0) {
9099 DPRINTF(sc, WM_DEBUG_TX,
9100 ("%s: TX: no free job descriptors\n",
9101 device_xname(sc->sc_dev)));
9102 WM_Q_EVCNT_INCR(txq, txsstall);
9103 break;
9104 }
9105 }
9106
9107 /* Grab a packet off the queue. */
9108 if (is_transmit)
9109 m0 = pcq_get(txq->txq_interq);
9110 else
9111 IFQ_DEQUEUE(&ifp->if_snd, m0);
9112 if (m0 == NULL)
9113 break;
9114
9115 DPRINTF(sc, WM_DEBUG_TX,
9116 ("%s: TX: have packet to transmit: %p\n",
9117 device_xname(sc->sc_dev), m0));
9118
9119 txs = &txq->txq_soft[txq->txq_snext];
9120 dmamap = txs->txs_dmamap;
9121
9122 /*
9123 * Load the DMA map. If this fails, the packet either
9124 * didn't fit in the allotted number of segments, or we
9125 * were short on resources. For the too-many-segments
9126 * case, we simply report an error and drop the packet,
9127 * since we can't sanely copy a jumbo packet to a single
9128 * buffer.
9129 */
9130 retry:
9131 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
9132 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
9133 if (__predict_false(error)) {
9134 if (error == EFBIG) {
9135 if (remap == true) {
9136 struct mbuf *m;
9137
9138 remap = false;
9139 m = m_defrag(m0, M_NOWAIT);
9140 if (m != NULL) {
9141 WM_Q_EVCNT_INCR(txq, defrag);
9142 m0 = m;
9143 goto retry;
9144 }
9145 }
9146 WM_Q_EVCNT_INCR(txq, toomanyseg);
9147 log(LOG_ERR, "%s: Tx packet consumes too many "
9148 "DMA segments, dropping...\n",
9149 device_xname(sc->sc_dev));
9150 wm_dump_mbuf_chain(sc, m0);
9151 m_freem(m0);
9152 continue;
9153 }
9154 /* Short on resources, just stop for now. */
9155 DPRINTF(sc, WM_DEBUG_TX,
9156 ("%s: TX: dmamap load failed: %d\n",
9157 device_xname(sc->sc_dev), error));
9158 break;
9159 }
9160
9161 segs_needed = dmamap->dm_nsegs;
9162
9163 /*
9164 * Ensure we have enough descriptors free to describe
9165 * the packet. Note, we always reserve one descriptor
9166 * at the end of the ring due to the semantics of the
9167 * TDT register, plus one more in the event we need
9168 * to load offload context.
9169 */
9170 if (segs_needed > txq->txq_free - 2) {
9171 /*
9172 * Not enough free descriptors to transmit this
9173 * packet. We haven't committed anything yet,
9174 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
9176 * layer that there are no more slots left.
9177 */
9178 DPRINTF(sc, WM_DEBUG_TX,
9179 ("%s: TX: need %d (%d) descriptors, have %d\n",
9180 device_xname(sc->sc_dev), dmamap->dm_nsegs,
9181 segs_needed, txq->txq_free - 1));
9182 txq->txq_flags |= WM_TXQ_NO_SPACE;
9183 bus_dmamap_unload(sc->sc_dmat, dmamap);
9184 WM_Q_EVCNT_INCR(txq, txdstall);
9185 break;
9186 }
9187
9188 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
9189
9190 DPRINTF(sc, WM_DEBUG_TX,
9191 ("%s: TX: packet has %d (%d) DMA segments\n",
9192 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
9193
9194 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
9195
9196 /*
9197 * Store a pointer to the packet so that we can free it
9198 * later.
9199 *
9200 * Initially, we consider the number of descriptors the
9201 * packet uses the number of DMA segments. This may be
9202 * incremented by 1 if we do checksum offload (a descriptor
9203 * is used to set the checksum context).
9204 */
9205 txs->txs_mbuf = m0;
9206 txs->txs_firstdesc = txq->txq_next;
9207 txs->txs_ndesc = segs_needed;
9208
9209 /* Set up offload parameters for this packet. */
9210 uint32_t cmdlen, fields, dcmdlen;
9211 if (m0->m_pkthdr.csum_flags &
9212 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
9213 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9214 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
9215 wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
9216 &do_csum);
9217 } else {
9218 do_csum = false;
9219 cmdlen = 0;
9220 fields = 0;
9221 }
9222
9223 /* Sync the DMA map. */
9224 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
9225 BUS_DMASYNC_PREWRITE);
9226
9227 /* Initialize the first transmit descriptor. */
9228 nexttx = txq->txq_next;
9229 if (!do_csum) {
9230 /* Set up a legacy descriptor */
9231 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
9232 dmamap->dm_segs[0].ds_addr);
9233 txq->txq_descs[nexttx].wtx_cmdlen =
9234 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
9235 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
9236 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
9237 if (vlan_has_tag(m0)) {
9238 txq->txq_descs[nexttx].wtx_cmdlen |=
9239 htole32(WTX_CMD_VLE);
9240 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
9241 htole16(vlan_get_tag(m0));
9242 } else
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
9244
9245 dcmdlen = 0;
9246 } else {
9247 /* Set up an advanced data descriptor */
9248 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9249 htole64(dmamap->dm_segs[0].ds_addr);
9250 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
9251 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9252 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
9253 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
9254 htole32(fields);
9255 DPRINTF(sc, WM_DEBUG_TX,
9256 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
9257 device_xname(sc->sc_dev), nexttx,
9258 (uint64_t)dmamap->dm_segs[0].ds_addr));
9259 DPRINTF(sc, WM_DEBUG_TX,
9260 ("\t 0x%08x%08x\n", fields,
9261 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
9262 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
9263 }
9264
9265 lasttx = nexttx;
9266 nexttx = WM_NEXTTX(txq, nexttx);
9267 /*
9268 * Fill in the next descriptors. Legacy or advanced format
9269 * is the same here.
9270 */
9271 for (seg = 1; seg < dmamap->dm_nsegs;
9272 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
9273 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9274 htole64(dmamap->dm_segs[seg].ds_addr);
9275 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9276 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
9277 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
9278 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
9279 lasttx = nexttx;
9280
9281 DPRINTF(sc, WM_DEBUG_TX,
9282 ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
9283 device_xname(sc->sc_dev), nexttx,
9284 (uint64_t)dmamap->dm_segs[seg].ds_addr,
9285 dmamap->dm_segs[seg].ds_len));
9286 }
9287
9288 KASSERT(lasttx != -1);
9289
9290 /*
9291 * Set up the command byte on the last descriptor of
9292 * the packet. If we're in the interrupt delay window,
9293 * delay the interrupt.
9294 */
9295 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
9296 (NQTX_CMD_EOP | NQTX_CMD_RS));
9297 txq->txq_descs[lasttx].wtx_cmdlen |=
9298 htole32(WTX_CMD_EOP | WTX_CMD_RS);
9299
9300 txs->txs_lastdesc = lasttx;
9301
9302 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
9303 device_xname(sc->sc_dev),
9304 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9305
9306 /* Sync the descriptors we're using. */
9307 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9308 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
9309
9310 /* Give the packet to the chip. */
9311 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9312 sent = true;
9313
9314 DPRINTF(sc, WM_DEBUG_TX,
9315 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
9316
9317 DPRINTF(sc, WM_DEBUG_TX,
9318 ("%s: TX: finished transmitting packet, job %d\n",
9319 device_xname(sc->sc_dev), txq->txq_snext));
9320
9321 /* Advance the tx pointer. */
9322 txq->txq_free -= txs->txs_ndesc;
9323 txq->txq_next = nexttx;
9324
9325 txq->txq_sfree--;
9326 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9327
9328 /* Pass the packet to any BPF listeners. */
9329 bpf_mtap(ifp, m0, BPF_D_OUT);
9330 }
9331
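	/*
	 * If m0 is still non-NULL here, a packet was dequeued but could
	 * not be handed to the hardware (no free descriptors, or the DMA
	 * map load failed); drop it.
	 */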
9332 if (m0 != NULL) {
9333 txq->txq_flags |= WM_TXQ_NO_SPACE;
9334 WM_Q_EVCNT_INCR(txq, descdrop);
9335 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
9336 __func__));
9337 m_freem(m0);
9338 }
9339
9340 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9341 /* No more slots; notify upper layer. */
9342 txq->txq_flags |= WM_TXQ_NO_SPACE;
9343 }
9344
9345 if (sent) {
9346 /* Set a watchdog timer in case the chip flakes out. */
9347 txq->txq_lastsent = time_uptime;
9348 txq->txq_sending = true;
9349 }
9350 }
9351
9352 static void
9353 wm_deferred_start_locked(struct wm_txqueue *txq)
9354 {
9355 struct wm_softc *sc = txq->txq_sc;
9356 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9357 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
9358 int qid = wmq->wmq_id;
9359
9360 KASSERT(mutex_owned(txq->txq_lock));
9361 KASSERT(!txq->txq_stopping);
9362
9363 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX needed for ALTQ or single-CPU systems */
9365 if (qid == 0)
9366 wm_nq_start_locked(ifp);
9367 wm_nq_transmit_locked(ifp, txq);
9368 } else {
		/* XXX needed for ALTQ or single-CPU systems */
9370 if (qid == 0)
9371 wm_start_locked(ifp);
9372 wm_transmit_locked(ifp, txq);
9373 }
9374 }
9375
9376 /* Interrupt */
9377
9378 /*
9379 * wm_txeof:
9380 *
9381 * Helper; handle transmit interrupts.
9382 */
9383 static bool
9384 wm_txeof(struct wm_txqueue *txq, u_int limit)
9385 {
9386 struct wm_softc *sc = txq->txq_sc;
9387 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9388 struct wm_txsoft *txs;
9389 int count = 0;
9390 int i;
9391 uint8_t status;
9392 bool more = false;
9393
9394 KASSERT(mutex_owned(txq->txq_lock));
9395
9396 if (txq->txq_stopping)
9397 return false;
9398
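	/*
	 * Processing completions below frees descriptors, so clear the
	 * no-space flag and let the upper layer try to send again.
	 */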
9399 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
9400
9401 /*
9402 * Go through the Tx list and free mbufs for those
9403 * frames which have been transmitted.
9404 */
9405 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
9406 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
9407 txs = &txq->txq_soft[i];
9408
9409 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
9410 device_xname(sc->sc_dev), i));
9411
9412 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
9413 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9414
9415 status =
9416 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
9417 if ((status & WTX_ST_DD) == 0) {
9418 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
9419 BUS_DMASYNC_PREREAD);
9420 break;
9421 }
9422
9423 if (limit-- == 0) {
9424 more = true;
9425 DPRINTF(sc, WM_DEBUG_TX,
9426 ("%s: TX: loop limited, job %d is not processed\n",
9427 device_xname(sc->sc_dev), i));
9428 break;
9429 }
9430
9431 count++;
9432 DPRINTF(sc, WM_DEBUG_TX,
9433 ("%s: TX: job %d done: descs %d..%d\n",
9434 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
9435 txs->txs_lastdesc));
9436
9437 #ifdef WM_EVENT_COUNTERS
9438 if ((status & WTX_ST_TU) && (sc->sc_type <= WM_T_82544))
9439 WM_Q_EVCNT_INCR(txq, underrun);
9440 #endif /* WM_EVENT_COUNTERS */
9441
		/*
		 * The documents for the 82574 and newer say that the
		 * status field has neither the EC (Excessive Collision)
		 * bit nor the LC (Late Collision) bit (both reserved).
		 * Refer to the "PCIe GbE Controller Open Source Software
		 * Developer's Manual", the 82574 datasheet, and newer
		 * ones.
		 *
		 * XXX The LC bit has been seen set on an I218 even though
		 * the media was full duplex, so the bit might have some
		 * other meaning (no documentation found).
		 */
9452
9453 if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
9454 && ((sc->sc_type < WM_T_82574)
9455 || (sc->sc_type == WM_T_80003))) {
9456 if_statinc(ifp, if_oerrors);
9457 if (status & WTX_ST_LC)
9458 log(LOG_WARNING, "%s: late collision\n",
9459 device_xname(sc->sc_dev));
9460 else if (status & WTX_ST_EC) {
9461 if_statadd(ifp, if_collisions,
9462 TX_COLLISION_THRESHOLD + 1);
9463 log(LOG_WARNING, "%s: excessive collisions\n",
9464 device_xname(sc->sc_dev));
9465 }
9466 } else
9467 if_statinc(ifp, if_opackets);
9468
9469 txq->txq_packets++;
9470 txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
9471
9472 txq->txq_free += txs->txs_ndesc;
9473 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
9474 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
9475 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
9476 m_freem(txs->txs_mbuf);
9477 txs->txs_mbuf = NULL;
9478 }
9479
9480 /* Update the dirty transmit buffer pointer. */
9481 txq->txq_sdirty = i;
9482 DPRINTF(sc, WM_DEBUG_TX,
9483 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
9484
9485 if (count != 0)
9486 rnd_add_uint32(&sc->rnd_source, count);
9487
9488 /*
9489 * If there are no more pending transmissions, cancel the watchdog
9490 * timer.
9491 */
9492 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
9493 txq->txq_sending = false;
9494
9495 return more;
9496 }
9497
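/*
 * The following inline helpers hide the three Rx descriptor formats from
 * wm_rxeof(): the legacy format, the extended format used only by the
 * 82574, and the advanced format used by NEWQUEUE (82575 and newer)
 * devices.
 */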
9498 static inline uint32_t
9499 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
9500 {
9501 struct wm_softc *sc = rxq->rxq_sc;
9502
9503 if (sc->sc_type == WM_T_82574)
9504 return EXTRXC_STATUS(
9505 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9506 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9507 return NQRXC_STATUS(
9508 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9509 else
9510 return rxq->rxq_descs[idx].wrx_status;
9511 }
9512
9513 static inline uint32_t
9514 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
9515 {
9516 struct wm_softc *sc = rxq->rxq_sc;
9517
9518 if (sc->sc_type == WM_T_82574)
9519 return EXTRXC_ERROR(
9520 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9521 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9522 return NQRXC_ERROR(
9523 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9524 else
9525 return rxq->rxq_descs[idx].wrx_errors;
9526 }
9527
9528 static inline uint16_t
9529 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
9530 {
9531 struct wm_softc *sc = rxq->rxq_sc;
9532
9533 if (sc->sc_type == WM_T_82574)
9534 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
9535 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9536 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
9537 else
9538 return rxq->rxq_descs[idx].wrx_special;
9539 }
9540
9541 static inline int
9542 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
9543 {
9544 struct wm_softc *sc = rxq->rxq_sc;
9545
9546 if (sc->sc_type == WM_T_82574)
9547 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
9548 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9549 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
9550 else
9551 return rxq->rxq_descs[idx].wrx_len;
9552 }
9553
9554 #ifdef WM_DEBUG
9555 static inline uint32_t
9556 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
9557 {
9558 struct wm_softc *sc = rxq->rxq_sc;
9559
9560 if (sc->sc_type == WM_T_82574)
9561 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
9562 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9563 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
9564 else
9565 return 0;
9566 }
9567
9568 static inline uint8_t
9569 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
9570 {
9571 struct wm_softc *sc = rxq->rxq_sc;
9572
9573 if (sc->sc_type == WM_T_82574)
9574 return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
9575 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9576 return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
9577 else
9578 return 0;
9579 }
9580 #endif /* WM_DEBUG */
9581
9582 static inline bool
9583 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
9584 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9585 {
9586
9587 if (sc->sc_type == WM_T_82574)
9588 return (status & ext_bit) != 0;
9589 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9590 return (status & nq_bit) != 0;
9591 else
9592 return (status & legacy_bit) != 0;
9593 }
9594
9595 static inline bool
9596 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
9597 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9598 {
9599
9600 if (sc->sc_type == WM_T_82574)
9601 return (error & ext_bit) != 0;
9602 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9603 return (error & nq_bit) != 0;
9604 else
9605 return (error & legacy_bit) != 0;
9606 }
9607
9608 static inline bool
9609 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
9610 {
9611
9612 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9613 WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
9614 return true;
9615 else
9616 return false;
9617 }
9618
9619 static inline bool
9620 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
9621 {
9622 struct wm_softc *sc = rxq->rxq_sc;
9623
9624 /* XXX missing error bit for newqueue? */
9625 if (wm_rxdesc_is_set_error(sc, errors,
9626 WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
9627 EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
9628 | EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
9629 NQRXC_ERROR_RXE)) {
9630 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
9631 EXTRXC_ERROR_SE, 0))
9632 log(LOG_WARNING, "%s: symbol error\n",
9633 device_xname(sc->sc_dev));
9634 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
9635 EXTRXC_ERROR_SEQ, 0))
9636 log(LOG_WARNING, "%s: receive sequence error\n",
9637 device_xname(sc->sc_dev));
9638 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
9639 EXTRXC_ERROR_CE, 0))
9640 log(LOG_WARNING, "%s: CRC error\n",
9641 device_xname(sc->sc_dev));
9642 return true;
9643 }
9644
9645 return false;
9646 }
9647
9648 static inline bool
9649 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
9650 {
9651 struct wm_softc *sc = rxq->rxq_sc;
9652
9653 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
9654 NQRXC_STATUS_DD)) {
9655 /* We have processed all of the receive descriptors. */
9656 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
9657 return false;
9658 }
9659
9660 return true;
9661 }
9662
9663 static inline bool
9664 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
9665 uint16_t vlantag, struct mbuf *m)
9666 {
9667
9668 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9669 WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
9670 vlan_set_tag(m, le16toh(vlantag));
9671 }
9672
9673 return true;
9674 }
9675
9676 static inline void
9677 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
9678 uint32_t errors, struct mbuf *m)
9679 {
9680 struct wm_softc *sc = rxq->rxq_sc;
9681
9682 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
9683 if (wm_rxdesc_is_set_status(sc, status,
9684 WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
9685 WM_Q_EVCNT_INCR(rxq, ipsum);
9686 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
9687 if (wm_rxdesc_is_set_error(sc, errors,
9688 WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
9689 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
9690 }
9691 if (wm_rxdesc_is_set_status(sc, status,
9692 WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
9693 /*
9694 * Note: we don't know if this was TCP or UDP,
9695 * so we just set both bits, and expect the
9696 * upper layers to deal.
9697 */
9698 WM_Q_EVCNT_INCR(rxq, tusum);
9699 m->m_pkthdr.csum_flags |=
9700 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9701 M_CSUM_TCPv6 | M_CSUM_UDPv6;
9702 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
9703 EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
9704 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
9705 }
9706 }
9707 }
9708
9709 /*
9710 * wm_rxeof:
9711 *
9712 * Helper; handle receive interrupts.
9713 */
9714 static bool
9715 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
9716 {
9717 struct wm_softc *sc = rxq->rxq_sc;
9718 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9719 struct wm_rxsoft *rxs;
9720 struct mbuf *m;
9721 int i, len;
9722 int count = 0;
9723 uint32_t status, errors;
9724 uint16_t vlantag;
9725 bool more = false;
9726
9727 KASSERT(mutex_owned(rxq->rxq_lock));
9728
9729 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
9730 rxs = &rxq->rxq_soft[i];
9731
9732 DPRINTF(sc, WM_DEBUG_RX,
9733 ("%s: RX: checking descriptor %d\n",
9734 device_xname(sc->sc_dev), i));
9735 wm_cdrxsync(rxq, i,
9736 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9737
9738 status = wm_rxdesc_get_status(rxq, i);
9739 errors = wm_rxdesc_get_errors(rxq, i);
9740 len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
9741 vlantag = wm_rxdesc_get_vlantag(rxq, i);
9742 #ifdef WM_DEBUG
9743 uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
9744 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
9745 #endif
9746
9747 if (!wm_rxdesc_dd(rxq, i, status))
9748 break;
9749
9750 if (limit-- == 0) {
9751 more = true;
9752 DPRINTF(sc, WM_DEBUG_RX,
9753 ("%s: RX: loop limited, descriptor %d is not processed\n",
9754 device_xname(sc->sc_dev), i));
9755 break;
9756 }
9757
9758 count++;
9759 if (__predict_false(rxq->rxq_discard)) {
9760 DPRINTF(sc, WM_DEBUG_RX,
9761 ("%s: RX: discarding contents of descriptor %d\n",
9762 device_xname(sc->sc_dev), i));
9763 wm_init_rxdesc(rxq, i);
9764 if (wm_rxdesc_is_eop(rxq, status)) {
9765 /* Reset our state. */
9766 DPRINTF(sc, WM_DEBUG_RX,
9767 ("%s: RX: resetting rxdiscard -> 0\n",
9768 device_xname(sc->sc_dev)));
9769 rxq->rxq_discard = 0;
9770 }
9771 continue;
9772 }
9773
9774 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9775 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
9776
9777 m = rxs->rxs_mbuf;
9778
9779 /*
9780 * Add a new receive buffer to the ring, unless of
9781 * course the length is zero. Treat the latter as a
9782 * failed mapping.
9783 */
9784 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
9785 /*
9786 * Failed, throw away what we've done so
9787 * far, and discard the rest of the packet.
9788 */
9789 if_statinc(ifp, if_ierrors);
9790 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9791 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
9792 wm_init_rxdesc(rxq, i);
9793 if (!wm_rxdesc_is_eop(rxq, status))
9794 rxq->rxq_discard = 1;
9795 if (rxq->rxq_head != NULL)
9796 m_freem(rxq->rxq_head);
9797 WM_RXCHAIN_RESET(rxq);
9798 DPRINTF(sc, WM_DEBUG_RX,
9799 ("%s: RX: Rx buffer allocation failed, "
9800 "dropping packet%s\n", device_xname(sc->sc_dev),
9801 rxq->rxq_discard ? " (discard)" : ""));
9802 continue;
9803 }
9804
9805 m->m_len = len;
9806 rxq->rxq_len += len;
9807 DPRINTF(sc, WM_DEBUG_RX,
9808 ("%s: RX: buffer at %p len %d\n",
9809 device_xname(sc->sc_dev), m->m_data, len));
9810
9811 /* If this is not the end of the packet, keep looking. */
9812 if (!wm_rxdesc_is_eop(rxq, status)) {
9813 WM_RXCHAIN_LINK(rxq, m);
9814 DPRINTF(sc, WM_DEBUG_RX,
9815 ("%s: RX: not yet EOP, rxlen -> %d\n",
9816 device_xname(sc->sc_dev), rxq->rxq_len));
9817 continue;
9818 }
9819
		/*
		 * Okay, we have the entire packet now. The chip is
		 * configured to include the FCS except on the I35[04] and
		 * I21[01] (not all chips can be configured to strip it),
		 * so we need to trim it. Those chips have an erratum in
		 * which the RCTL_SECRC bit in the RCTL register is always
		 * set, so we don't trim the FCS on them. PCH2 and newer
		 * chips also do not include the FCS when jumbo frames are
		 * used, to work around an errata. We may need to adjust
		 * the length of the previous mbuf in the chain if the
		 * current mbuf is too short.
		 */
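		/*
		 * (For example, with a 4-byte FCS, if the final mbuf holds
		 * only 2 bytes, the previous mbuf is shortened by the
		 * remaining 2 bytes and the final mbuf's length becomes
		 * zero.)
		 */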
9831 if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
9832 if (m->m_len < ETHER_CRC_LEN) {
9833 rxq->rxq_tail->m_len
9834 -= (ETHER_CRC_LEN - m->m_len);
9835 m->m_len = 0;
9836 } else
9837 m->m_len -= ETHER_CRC_LEN;
9838 len = rxq->rxq_len - ETHER_CRC_LEN;
9839 } else
9840 len = rxq->rxq_len;
9841
9842 WM_RXCHAIN_LINK(rxq, m);
9843
9844 *rxq->rxq_tailp = NULL;
9845 m = rxq->rxq_head;
9846
9847 WM_RXCHAIN_RESET(rxq);
9848
9849 DPRINTF(sc, WM_DEBUG_RX,
9850 ("%s: RX: have entire packet, len -> %d\n",
9851 device_xname(sc->sc_dev), len));
9852
9853 /* If an error occurred, update stats and drop the packet. */
9854 if (wm_rxdesc_has_errors(rxq, errors)) {
9855 m_freem(m);
9856 continue;
9857 }
9858
9859 /* No errors. Receive the packet. */
9860 m_set_rcvif(m, ifp);
9861 m->m_pkthdr.len = len;
		/*
		 * TODO: we should save the RSS hash and the RSS type in
		 * this mbuf.
		 */
9866 DPRINTF(sc, WM_DEBUG_RX,
9867 ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
9868 device_xname(sc->sc_dev), rsstype, rsshash));
9869
9870 /*
9871 * If VLANs are enabled, VLAN packets have been unwrapped
9872 * for us. Associate the tag with the packet.
9873 */
9874 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
9875 continue;
9876
9877 /* Set up checksum info for this packet. */
9878 wm_rxdesc_ensure_checksum(rxq, status, errors, m);
9879
9880 rxq->rxq_packets++;
9881 rxq->rxq_bytes += len;
9882 /* Pass it on. */
9883 if_percpuq_enqueue(sc->sc_ipq, m);
9884
9885 if (rxq->rxq_stopping)
9886 break;
9887 }
9888 rxq->rxq_ptr = i;
9889
9890 if (count != 0)
9891 rnd_add_uint32(&sc->rnd_source, count);
9892
9893 DPRINTF(sc, WM_DEBUG_RX,
9894 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
9895
9896 return more;
9897 }
9898
9899 /*
9900 * wm_linkintr_gmii:
9901 *
9902 * Helper; handle link interrupts for GMII.
9903 */
9904 static void
9905 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
9906 {
9907 device_t dev = sc->sc_dev;
9908 uint32_t status, reg;
9909 bool link;
9910 int rv;
9911
9912 KASSERT(mutex_owned(sc->sc_core_lock));
9913
9914 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
9915 __func__));
9916
9917 if ((icr & ICR_LSC) == 0) {
9918 if (icr & ICR_RXSEQ)
9919 DPRINTF(sc, WM_DEBUG_LINK,
9920 ("%s: LINK Receive sequence error\n",
9921 device_xname(dev)));
9922 return;
9923 }
9924
9925 /* Link status changed */
9926 status = CSR_READ(sc, WMREG_STATUS);
9927 link = status & STATUS_LU;
9928 if (link) {
9929 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9930 device_xname(dev),
9931 (status & STATUS_FD) ? "FDX" : "HDX"));
9932 if (wm_phy_need_linkdown_discard(sc)) {
9933 DPRINTF(sc, WM_DEBUG_LINK,
9934 ("%s: linkintr: Clear linkdown discard flag\n",
9935 device_xname(dev)));
9936 wm_clear_linkdown_discard(sc);
9937 }
9938 } else {
9939 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9940 device_xname(dev)));
9941 if (wm_phy_need_linkdown_discard(sc)) {
9942 DPRINTF(sc, WM_DEBUG_LINK,
9943 ("%s: linkintr: Set linkdown discard flag\n",
9944 device_xname(dev)));
9945 wm_set_linkdown_discard(sc);
9946 }
9947 }
9948 if ((sc->sc_type == WM_T_ICH8) && (link == false))
9949 wm_gig_downshift_workaround_ich8lan(sc);
9950
9951 if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
9952 wm_kmrn_lock_loss_workaround_ich8lan(sc);
9953
9954 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
9955 device_xname(dev)));
9956 mii_pollstat(&sc->sc_mii);
9957 if (sc->sc_type == WM_T_82543) {
9958 int miistatus, active;
9959
9960 /*
9961 * With 82543, we need to force speed and
9962 * duplex on the MAC equal to what the PHY
9963 * speed and duplex configuration is.
9964 */
9965 miistatus = sc->sc_mii.mii_media_status;
9966
9967 if (miistatus & IFM_ACTIVE) {
9968 active = sc->sc_mii.mii_media_active;
9969 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9970 switch (IFM_SUBTYPE(active)) {
9971 case IFM_10_T:
9972 sc->sc_ctrl |= CTRL_SPEED_10;
9973 break;
9974 case IFM_100_TX:
9975 sc->sc_ctrl |= CTRL_SPEED_100;
9976 break;
9977 case IFM_1000_T:
9978 sc->sc_ctrl |= CTRL_SPEED_1000;
9979 break;
9980 default:
9981 /*
9982 * Fiber?
				 * Should not enter here.
9984 */
9985 device_printf(dev, "unknown media (%x)\n",
9986 active);
9987 break;
9988 }
9989 if (active & IFM_FDX)
9990 sc->sc_ctrl |= CTRL_FD;
9991 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9992 }
9993 } else if (sc->sc_type == WM_T_PCH) {
9994 wm_k1_gig_workaround_hv(sc,
9995 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9996 }
9997
9998 /*
9999 * When connected at 10Mbps half-duplex, some parts are excessively
10000 * aggressive resulting in many collisions. To avoid this, increase
10001 * the IPG and reduce Rx latency in the PHY.
10002 */
10003 if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
10004 && link) {
10005 uint32_t tipg_reg;
10006 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
10007 bool fdx;
10008 uint16_t emi_addr, emi_val;
10009
10010 tipg_reg = CSR_READ(sc, WMREG_TIPG);
10011 tipg_reg &= ~TIPG_IPGT_MASK;
10012 fdx = status & STATUS_FD;
10013
10014 if (!fdx && (speed == STATUS_SPEED_10)) {
10015 tipg_reg |= 0xff;
10016 /* Reduce Rx latency in analog PHY */
10017 emi_val = 0;
10018 } else if ((sc->sc_type >= WM_T_PCH_SPT) &&
10019 fdx && speed != STATUS_SPEED_1000) {
10020 tipg_reg |= 0xc;
10021 emi_val = 1;
10022 } else {
10023 /* Roll back the default values */
10024 tipg_reg |= 0x08;
10025 emi_val = 1;
10026 }
10027
10028 CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
10029
10030 rv = sc->phy.acquire(sc);
10031 if (rv)
10032 return;
10033
10034 if (sc->sc_type == WM_T_PCH2)
10035 emi_addr = I82579_RX_CONFIG;
10036 else
10037 emi_addr = I217_RX_CONFIG;
10038 rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
10039
10040 if (sc->sc_type >= WM_T_PCH_LPT) {
10041 uint16_t phy_reg;
10042
10043 sc->phy.readreg_locked(dev, 2,
10044 I217_PLL_CLOCK_GATE_REG, &phy_reg);
10045 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
10046 if (speed == STATUS_SPEED_100
10047 || speed == STATUS_SPEED_10)
10048 phy_reg |= 0x3e8;
10049 else
10050 phy_reg |= 0xfa;
10051 sc->phy.writereg_locked(dev, 2,
10052 I217_PLL_CLOCK_GATE_REG, phy_reg);
10053
10054 if (speed == STATUS_SPEED_1000) {
10055 sc->phy.readreg_locked(dev, 2,
10056 HV_PM_CTRL, &phy_reg);
10057
10058 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
10059
10060 sc->phy.writereg_locked(dev, 2,
10061 HV_PM_CTRL, phy_reg);
10062 }
10063 }
10064 sc->phy.release(sc);
10065
10066 if (rv)
10067 return;
10068
10069 if (sc->sc_type >= WM_T_PCH_SPT) {
10070 uint16_t data, ptr_gap;
10071
10072 if (speed == STATUS_SPEED_1000) {
10073 rv = sc->phy.acquire(sc);
10074 if (rv)
10075 return;
10076
10077 rv = sc->phy.readreg_locked(dev, 2,
10078 I82579_UNKNOWN1, &data);
10079 if (rv) {
10080 sc->phy.release(sc);
10081 return;
10082 }
10083
10084 ptr_gap = (data & (0x3ff << 2)) >> 2;
10085 if (ptr_gap < 0x18) {
10086 data &= ~(0x3ff << 2);
10087 data |= (0x18 << 2);
10088 rv = sc->phy.writereg_locked(dev,
10089 2, I82579_UNKNOWN1, data);
10090 }
10091 sc->phy.release(sc);
10092 if (rv)
10093 return;
10094 } else {
10095 rv = sc->phy.acquire(sc);
10096 if (rv)
10097 return;
10098
10099 rv = sc->phy.writereg_locked(dev, 2,
10100 I82579_UNKNOWN1, 0xc023);
10101 sc->phy.release(sc);
10102 if (rv)
10103 return;
10104
10105 }
10106 }
10107 }
10108
	/*
	 * I217 packet loss issue:
	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly on
	 * power up. Set the Beacon Duration for I217 to 8 usec.
	 */
10115 if (sc->sc_type >= WM_T_PCH_LPT) {
10116 reg = CSR_READ(sc, WMREG_FEXTNVM4);
10117 reg &= ~FEXTNVM4_BEACON_DURATION;
10118 reg |= FEXTNVM4_BEACON_DURATION_8US;
10119 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
10120 }
10121
10122 /* Work-around I218 hang issue */
10123 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
10124 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
10125 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
10126 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
10127 wm_k1_workaround_lpt_lp(sc, link);
10128
10129 if (sc->sc_type >= WM_T_PCH_LPT) {
10130 /*
10131 * Set platform power management values for Latency
10132 * Tolerance Reporting (LTR)
10133 */
10134 wm_platform_pm_pch_lpt(sc,
10135 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
10136 }
10137
10138 /* Clear link partner's EEE ability */
10139 sc->eee_lp_ability = 0;
10140
10141 /* FEXTNVM6 K1-off workaround */
10142 if (sc->sc_type == WM_T_PCH_SPT) {
10143 reg = CSR_READ(sc, WMREG_FEXTNVM6);
10144 if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
10145 reg |= FEXTNVM6_K1_OFF_ENABLE;
10146 else
10147 reg &= ~FEXTNVM6_K1_OFF_ENABLE;
10148 CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
10149 }
10150
10151 if (!link)
10152 return;
10153
10154 switch (sc->sc_type) {
10155 case WM_T_PCH2:
10156 wm_k1_workaround_lv(sc);
10157 /* FALLTHROUGH */
10158 case WM_T_PCH:
10159 if (sc->sc_phytype == WMPHY_82578)
10160 wm_link_stall_workaround_hv(sc);
10161 break;
10162 default:
10163 break;
10164 }
10165
10166 /* Enable/Disable EEE after link up */
10167 if (sc->sc_phytype > WMPHY_82579)
10168 wm_set_eee_pchlan(sc);
10169 }
10170
10171 /*
10172 * wm_linkintr_tbi:
10173 *
10174 * Helper; handle link interrupts for TBI mode.
10175 */
10176 static void
10177 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
10178 {
10179 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10180 uint32_t status;
10181
10182 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10183 __func__));
10184
10185 status = CSR_READ(sc, WMREG_STATUS);
10186 if (icr & ICR_LSC) {
10187 wm_check_for_link(sc);
10188 if (status & STATUS_LU) {
10189 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
10190 device_xname(sc->sc_dev),
10191 (status & STATUS_FD) ? "FDX" : "HDX"));
10192 /*
10193 * NOTE: CTRL will update TFCE and RFCE automatically,
10194 * so we should update sc->sc_ctrl
10195 */
10196
10197 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
10198 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10199 sc->sc_fcrtl &= ~FCRTL_XONE;
10200 if (status & STATUS_FD)
10201 sc->sc_tctl |=
10202 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10203 else
10204 sc->sc_tctl |=
10205 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10206 if (sc->sc_ctrl & CTRL_TFCE)
10207 sc->sc_fcrtl |= FCRTL_XONE;
10208 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10209 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
10210 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
10211 sc->sc_tbi_linkup = 1;
10212 if_link_state_change(ifp, LINK_STATE_UP);
10213 } else {
10214 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10215 device_xname(sc->sc_dev)));
10216 sc->sc_tbi_linkup = 0;
10217 if_link_state_change(ifp, LINK_STATE_DOWN);
10218 }
10219 /* Update LED */
10220 wm_tbi_serdes_set_linkled(sc);
10221 } else if (icr & ICR_RXSEQ)
10222 DPRINTF(sc, WM_DEBUG_LINK,
10223 ("%s: LINK: Receive sequence error\n",
10224 device_xname(sc->sc_dev)));
10225 }
10226
10227 /*
10228 * wm_linkintr_serdes:
10229 *
 *	Helper; handle link interrupts for SERDES mode.
10231 */
10232 static void
10233 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
10234 {
10235 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10236 struct mii_data *mii = &sc->sc_mii;
10237 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10238 uint32_t pcs_adv, pcs_lpab, reg;
10239
10240 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10241 __func__));
10242
10243 if (icr & ICR_LSC) {
10244 /* Check PCS */
10245 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10246 if ((reg & PCS_LSTS_LINKOK) != 0) {
10247 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
10248 device_xname(sc->sc_dev)));
10249 mii->mii_media_status |= IFM_ACTIVE;
10250 sc->sc_tbi_linkup = 1;
10251 if_link_state_change(ifp, LINK_STATE_UP);
10252 } else {
10253 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10254 device_xname(sc->sc_dev)));
10255 mii->mii_media_status |= IFM_NONE;
10256 sc->sc_tbi_linkup = 0;
10257 if_link_state_change(ifp, LINK_STATE_DOWN);
10258 wm_tbi_serdes_set_linkled(sc);
10259 return;
10260 }
10261 mii->mii_media_active |= IFM_1000_SX;
10262 if ((reg & PCS_LSTS_FDX) != 0)
10263 mii->mii_media_active |= IFM_FDX;
10264 else
10265 mii->mii_media_active |= IFM_HDX;
10266 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
10267 /* Check flow */
10268 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10269 if ((reg & PCS_LSTS_AN_COMP) == 0) {
10270 DPRINTF(sc, WM_DEBUG_LINK,
10271 ("XXX LINKOK but not ACOMP\n"));
10272 return;
10273 }
10274 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
10275 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
10276 DPRINTF(sc, WM_DEBUG_LINK,
10277 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
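			/*
			 * Resolve flow control as in IEEE 802.3 Annex 28B:
			 * symmetric pause on both link partners enables
			 * pause in both directions; otherwise the
			 * asymmetric pause bits select Tx-only or Rx-only
			 * pause.
			 */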
10278 if ((pcs_adv & TXCW_SYM_PAUSE)
10279 && (pcs_lpab & TXCW_SYM_PAUSE)) {
10280 mii->mii_media_active |= IFM_FLOW
10281 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
10282 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
10283 && (pcs_adv & TXCW_ASYM_PAUSE)
10284 && (pcs_lpab & TXCW_SYM_PAUSE)
10285 && (pcs_lpab & TXCW_ASYM_PAUSE))
10286 mii->mii_media_active |= IFM_FLOW
10287 | IFM_ETH_TXPAUSE;
10288 else if ((pcs_adv & TXCW_SYM_PAUSE)
10289 && (pcs_adv & TXCW_ASYM_PAUSE)
10290 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
10291 && (pcs_lpab & TXCW_ASYM_PAUSE))
10292 mii->mii_media_active |= IFM_FLOW
10293 | IFM_ETH_RXPAUSE;
10294 }
10295 /* Update LED */
10296 wm_tbi_serdes_set_linkled(sc);
10297 } else
10298 DPRINTF(sc, WM_DEBUG_LINK,
10299 ("%s: LINK: Receive sequence error\n",
10300 device_xname(sc->sc_dev)));
10301 }
10302
10303 /*
10304 * wm_linkintr:
10305 *
10306 * Helper; handle link interrupts.
10307 */
10308 static void
10309 wm_linkintr(struct wm_softc *sc, uint32_t icr)
10310 {
10311
10312 KASSERT(mutex_owned(sc->sc_core_lock));
10313
10314 if (sc->sc_flags & WM_F_HAS_MII)
10315 wm_linkintr_gmii(sc, icr);
10316 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
10317 && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
10318 wm_linkintr_serdes(sc, icr);
10319 else
10320 wm_linkintr_tbi(sc, icr);
10321 }
10322
10323
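/*
 * wm_sched_handle_queue:
 *
 *	Defer Tx/Rx processing of a queue to either the per-queue
 *	workqueue or the softint, depending on the current setting.
 */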
10324 static inline void
10325 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
10326 {
10327
10328 if (wmq->wmq_txrx_use_workqueue) {
10329 if (!wmq->wmq_wq_enqueued) {
10330 wmq->wmq_wq_enqueued = true;
10331 workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
10332 curcpu());
10333 }
10334 } else
10335 softint_schedule(wmq->wmq_si);
10336 }
10337
10338 static inline void
10339 wm_legacy_intr_disable(struct wm_softc *sc)
10340 {
10341
10342 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
10343 }
10344
10345 static inline void
10346 wm_legacy_intr_enable(struct wm_softc *sc)
10347 {
10348
10349 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
10350 }
10351
10352 /*
10353 * wm_intr_legacy:
10354 *
10355 * Interrupt service routine for INTx and MSI.
10356 */
10357 static int
10358 wm_intr_legacy(void *arg)
10359 {
10360 struct wm_softc *sc = arg;
10361 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10362 struct wm_queue *wmq = &sc->sc_queue[0];
10363 struct wm_txqueue *txq = &wmq->wmq_txq;
10364 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10365 u_int txlimit = sc->sc_tx_intr_process_limit;
10366 u_int rxlimit = sc->sc_rx_intr_process_limit;
10367 uint32_t icr, rndval = 0;
10368 bool more = false;
10369
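	/*
	 * Reading ICR acknowledges and clears the asserted interrupt
	 * causes on these devices.
	 */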
10370 icr = CSR_READ(sc, WMREG_ICR);
10371 if ((icr & sc->sc_icr) == 0)
10372 return 0;
10373
10374 DPRINTF(sc, WM_DEBUG_TX,
	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
10376 if (rndval == 0)
10377 rndval = icr;
10378
10379 mutex_enter(txq->txq_lock);
10380
10381 if (txq->txq_stopping) {
10382 mutex_exit(txq->txq_lock);
10383 return 1;
10384 }
10385
10386 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10387 if (icr & ICR_TXDW) {
10388 DPRINTF(sc, WM_DEBUG_TX,
10389 ("%s: TX: got TXDW interrupt\n",
10390 device_xname(sc->sc_dev)));
10391 WM_Q_EVCNT_INCR(txq, txdw);
10392 }
10393 #endif
10394 if (txlimit > 0) {
10395 more |= wm_txeof(txq, txlimit);
10396 if (!IF_IS_EMPTY(&ifp->if_snd))
10397 more = true;
10398 } else
10399 more = true;
10400 mutex_exit(txq->txq_lock);
10401
10402 mutex_enter(rxq->rxq_lock);
10403
10404 if (rxq->rxq_stopping) {
10405 mutex_exit(rxq->rxq_lock);
10406 return 1;
10407 }
10408
10409 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10410 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
10411 DPRINTF(sc, WM_DEBUG_RX,
10412 ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
10413 device_xname(sc->sc_dev),
10414 icr & (ICR_RXDMT0 | ICR_RXT0)));
10415 WM_Q_EVCNT_INCR(rxq, intr);
10416 }
10417 #endif
	if (rxlimit > 0) {
		/*
		 * wm_rxeof() does *not* call upper layer functions
		 * directly, as if_percpuq_enqueue() just calls
		 * softint_schedule(). So we can call wm_rxeof() in
		 * interrupt context.
		 */
		more |= wm_rxeof(rxq, rxlimit);
	} else
		more = true;
10427
10428 mutex_exit(rxq->rxq_lock);
10429
10430 mutex_enter(sc->sc_core_lock);
10431
10432 if (sc->sc_core_stopping) {
10433 mutex_exit(sc->sc_core_lock);
10434 return 1;
10435 }
10436
10437 if (icr & (ICR_LSC | ICR_RXSEQ)) {
10438 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10439 wm_linkintr(sc, icr);
10440 }
10441 if ((icr & ICR_GPI(0)) != 0)
10442 device_printf(sc->sc_dev, "got module interrupt\n");
10443
10444 mutex_exit(sc->sc_core_lock);
10445
10446 if (icr & ICR_RXO) {
10447 #if defined(WM_DEBUG)
10448 log(LOG_WARNING, "%s: Receive overrun\n",
10449 device_xname(sc->sc_dev));
10450 #endif /* defined(WM_DEBUG) */
10451 }
10452
10453 rnd_add_uint32(&sc->rnd_source, rndval);
10454
10455 if (more) {
10456 /* Try to get more packets going. */
10457 wm_legacy_intr_disable(sc);
10458 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10459 wm_sched_handle_queue(sc, wmq);
10460 }
10461
10462 return 1;
10463 }
10464
10465 static inline void
10466 wm_txrxintr_disable(struct wm_queue *wmq)
10467 {
10468 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10469
10470 if (__predict_false(!wm_is_using_msix(sc))) {
10471 wm_legacy_intr_disable(sc);
10472 return;
10473 }
10474
10475 if (sc->sc_type == WM_T_82574)
10476 CSR_WRITE(sc, WMREG_IMC,
10477 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
10478 else if (sc->sc_type == WM_T_82575)
10479 CSR_WRITE(sc, WMREG_EIMC,
10480 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10481 else
10482 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
10483 }
10484
10485 static inline void
10486 wm_txrxintr_enable(struct wm_queue *wmq)
10487 {
10488 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10489
10490 wm_itrs_calculate(sc, wmq);
10491
10492 if (__predict_false(!wm_is_using_msix(sc))) {
10493 wm_legacy_intr_enable(sc);
10494 return;
10495 }
10496
	/*
	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
	 * here. There is no need to care whether RXQ(0) or RXQ(1) enables
	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
	 * while its wm_handle_queue(wmq) is running.
	 */
10503 if (sc->sc_type == WM_T_82574)
10504 CSR_WRITE(sc, WMREG_IMS,
10505 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
10506 else if (sc->sc_type == WM_T_82575)
10507 CSR_WRITE(sc, WMREG_EIMS,
10508 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10509 else
10510 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
10511 }
10512
10513 static int
10514 wm_txrxintr_msix(void *arg)
10515 {
10516 struct wm_queue *wmq = arg;
10517 struct wm_txqueue *txq = &wmq->wmq_txq;
10518 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10519 struct wm_softc *sc = txq->txq_sc;
10520 u_int txlimit = sc->sc_tx_intr_process_limit;
10521 u_int rxlimit = sc->sc_rx_intr_process_limit;
10522 bool txmore;
10523 bool rxmore;
10524
10525 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
10526
10527 DPRINTF(sc, WM_DEBUG_TX,
10528 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
10529
10530 wm_txrxintr_disable(wmq);
10531
10532 mutex_enter(txq->txq_lock);
10533
10534 if (txq->txq_stopping) {
10535 mutex_exit(txq->txq_lock);
10536 return 1;
10537 }
10538
10539 WM_Q_EVCNT_INCR(txq, txdw);
10540 if (txlimit > 0) {
10541 txmore = wm_txeof(txq, txlimit);
10542 /* wm_deferred start() is done in wm_handle_queue(). */
10543 } else
10544 txmore = true;
10545 mutex_exit(txq->txq_lock);
10546
10547 DPRINTF(sc, WM_DEBUG_RX,
10548 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
10549 mutex_enter(rxq->rxq_lock);
10550
10551 if (rxq->rxq_stopping) {
10552 mutex_exit(rxq->rxq_lock);
10553 return 1;
10554 }
10555
10556 WM_Q_EVCNT_INCR(rxq, intr);
10557 if (rxlimit > 0) {
10558 rxmore = wm_rxeof(rxq, rxlimit);
10559 } else
10560 rxmore = true;
10561 mutex_exit(rxq->rxq_lock);
10562
10563 wm_itrs_writereg(sc, wmq);
10564
10565 if (txmore || rxmore) {
10566 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10567 wm_sched_handle_queue(sc, wmq);
10568 } else
10569 wm_txrxintr_enable(wmq);
10570
10571 return 1;
10572 }
10573
10574 static void
10575 wm_handle_queue(void *arg)
10576 {
10577 struct wm_queue *wmq = arg;
10578 struct wm_txqueue *txq = &wmq->wmq_txq;
10579 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10580 struct wm_softc *sc = txq->txq_sc;
10581 u_int txlimit = sc->sc_tx_process_limit;
10582 u_int rxlimit = sc->sc_rx_process_limit;
10583 bool txmore;
10584 bool rxmore;
10585
10586 mutex_enter(txq->txq_lock);
10587 if (txq->txq_stopping) {
10588 mutex_exit(txq->txq_lock);
10589 return;
10590 }
10591 txmore = wm_txeof(txq, txlimit);
10592 wm_deferred_start_locked(txq);
10593 mutex_exit(txq->txq_lock);
10594
10595 mutex_enter(rxq->rxq_lock);
10596 if (rxq->rxq_stopping) {
10597 mutex_exit(rxq->rxq_lock);
10598 return;
10599 }
10600 WM_Q_EVCNT_INCR(rxq, defer);
10601 rxmore = wm_rxeof(rxq, rxlimit);
10602 mutex_exit(rxq->rxq_lock);
10603
10604 if (txmore || rxmore) {
10605 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10606 wm_sched_handle_queue(sc, wmq);
10607 } else
10608 wm_txrxintr_enable(wmq);
10609 }
10610
10611 static void
10612 wm_handle_queue_work(struct work *wk, void *context)
10613 {
10614 struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
10615
	/*
	 * Workaround for some qemu environments, which do not stop
	 * the interrupt immediately.
	 */
10620 wmq->wmq_wq_enqueued = false;
10621 wm_handle_queue(wmq);
10622 }
10623
10624 /*
10625 * wm_linkintr_msix:
10626 *
10627 * Interrupt service routine for link status change for MSI-X.
10628 */
10629 static int
10630 wm_linkintr_msix(void *arg)
10631 {
10632 struct wm_softc *sc = arg;
10633 uint32_t reg;
	bool has_rxo = false;	/* Initialize; only set in the RXO path. */
10635
10636 reg = CSR_READ(sc, WMREG_ICR);
10637 mutex_enter(sc->sc_core_lock);
10638 DPRINTF(sc, WM_DEBUG_LINK,
10639 ("%s: LINK: got link intr. ICR = %08x\n",
10640 device_xname(sc->sc_dev), reg));
10641
10642 if (sc->sc_core_stopping)
10643 goto out;
10644
10645 if ((reg & ICR_LSC) != 0) {
10646 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10647 wm_linkintr(sc, ICR_LSC);
10648 }
10649 if ((reg & ICR_GPI(0)) != 0)
10650 device_printf(sc->sc_dev, "got module interrupt\n");
10651
	/*
	 * XXX 82574 MSI-X mode workaround
	 *
	 * In MSI-X mode, the 82574 raises a receive overrun (RXO)
	 * interrupt on the ICR_OTHER MSI-X vector; furthermore, it raises
	 * neither the ICR_RXQ(0) nor the ICR_RXQ(1) vector. So we
	 * generate ICR_RXQ(0) and ICR_RXQ(1) interrupts by writing
	 * WMREG_ICS to process the received packets.
	 */
10660 if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
10661 #if defined(WM_DEBUG)
10662 log(LOG_WARNING, "%s: Receive overrun\n",
10663 device_xname(sc->sc_dev));
10664 #endif /* defined(WM_DEBUG) */
10665
10666 has_rxo = true;
		/*
		 * The RXO interrupt rate is very high when the receive
		 * traffic rate is high. We use polling mode for ICR_OTHER,
		 * as for the Tx/Rx interrupts. ICR_OTHER will be enabled
		 * at the end of wm_txrxintr_msix(), which is kicked by
		 * both the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
		 */
10674 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
10675
10676 CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
10677 }
10678
10681 out:
10682 mutex_exit(sc->sc_core_lock);
10683
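	/*
	 * Re-enable this vector's interrupt sources. On the 82574, leave
	 * ICR_OTHER masked if an RXO was handled above; it will be
	 * re-enabled at the end of wm_txrxintr_msix().
	 */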
10684 if (sc->sc_type == WM_T_82574) {
10685 if (!has_rxo)
10686 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
10687 else
10688 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
10689 } else if (sc->sc_type == WM_T_82575)
10690 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
10691 else
10692 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
10693
10694 return 1;
10695 }
10696
10697 /*
10698 * Media related.
10699 * GMII, SGMII, TBI (and SERDES)
10700 */
10701
10702 /* Common */
10703
10704 /*
10705 * wm_tbi_serdes_set_linkled:
10706 *
10707 * Update the link LED on TBI and SERDES devices.
10708 */
10709 static void
10710 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
10711 {
10712
10713 if (sc->sc_tbi_linkup)
10714 sc->sc_ctrl |= CTRL_SWDPIN(0);
10715 else
10716 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
10717
10718 /* 82540 or newer devices are active low */
10719 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
10720
10721 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10722 }
10723
10724 /* GMII related */
10725
10726 /*
10727 * wm_gmii_reset:
10728 *
10729 * Reset the PHY.
10730 */
10731 static void
10732 wm_gmii_reset(struct wm_softc *sc)
10733 {
10734 uint32_t reg;
10735 int rv;
10736
10737 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10738 device_xname(sc->sc_dev), __func__));
10739
10740 rv = sc->phy.acquire(sc);
10741 if (rv != 0) {
10742 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10743 __func__);
10744 return;
10745 }
10746
10747 switch (sc->sc_type) {
10748 case WM_T_82542_2_0:
10749 case WM_T_82542_2_1:
10750 /* null */
10751 break;
10752 case WM_T_82543:
10753 /*
10754 * With 82543, we need to force speed and duplex on the MAC
10755 * equal to what the PHY speed and duplex configuration is.
10756 * In addition, we need to perform a hardware reset on the PHY
10757 * to take it out of reset.
10758 */
10759 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10760 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10761
10762 /* The PHY reset pin is active-low. */
10763 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10764 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
10765 CTRL_EXT_SWDPIN(4));
10766 reg |= CTRL_EXT_SWDPIO(4);
10767
10768 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10769 CSR_WRITE_FLUSH(sc);
10770 delay(10*1000);
10771
10772 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
10773 CSR_WRITE_FLUSH(sc);
10774 delay(150);
10775 #if 0
10776 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
10777 #endif
10778 delay(20*1000); /* XXX extra delay to get PHY ID? */
10779 break;
10780 case WM_T_82544: /* Reset 10000us */
10781 case WM_T_82540:
10782 case WM_T_82545:
10783 case WM_T_82545_3:
10784 case WM_T_82546:
10785 case WM_T_82546_3:
10786 case WM_T_82541:
10787 case WM_T_82541_2:
10788 case WM_T_82547:
10789 case WM_T_82547_2:
10790 case WM_T_82571: /* Reset 100us */
10791 case WM_T_82572:
10792 case WM_T_82573:
10793 case WM_T_82574:
10794 case WM_T_82575:
10795 case WM_T_82576:
10796 case WM_T_82580:
10797 case WM_T_I350:
10798 case WM_T_I354:
10799 case WM_T_I210:
10800 case WM_T_I211:
10801 case WM_T_82583:
10802 case WM_T_80003:
10803 /* Generic reset */
10804 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10805 CSR_WRITE_FLUSH(sc);
10806 delay(20000);
10807 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10808 CSR_WRITE_FLUSH(sc);
10809 delay(20000);
10810
10811 if ((sc->sc_type == WM_T_82541)
10812 || (sc->sc_type == WM_T_82541_2)
10813 || (sc->sc_type == WM_T_82547)
10814 || (sc->sc_type == WM_T_82547_2)) {
10815 /* Workaround for igp are done in igp_reset() */
10816 /* XXX add code to set LED after phy reset */
10817 }
10818 break;
10819 case WM_T_ICH8:
10820 case WM_T_ICH9:
10821 case WM_T_ICH10:
10822 case WM_T_PCH:
10823 case WM_T_PCH2:
10824 case WM_T_PCH_LPT:
10825 case WM_T_PCH_SPT:
10826 case WM_T_PCH_CNP:
10827 /* Generic reset */
10828 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10829 CSR_WRITE_FLUSH(sc);
10830 delay(100);
10831 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10832 CSR_WRITE_FLUSH(sc);
10833 delay(150);
10834 break;
10835 default:
10836 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
10837 __func__);
10838 break;
10839 }
10840
10841 sc->phy.release(sc);
10842
10843 /* get_cfg_done */
10844 wm_get_cfg_done(sc);
10845
10846 /* Extra setup */
10847 switch (sc->sc_type) {
10848 case WM_T_82542_2_0:
10849 case WM_T_82542_2_1:
10850 case WM_T_82543:
10851 case WM_T_82544:
10852 case WM_T_82540:
10853 case WM_T_82545:
10854 case WM_T_82545_3:
10855 case WM_T_82546:
10856 case WM_T_82546_3:
10857 case WM_T_82541_2:
10858 case WM_T_82547_2:
10859 case WM_T_82571:
10860 case WM_T_82572:
10861 case WM_T_82573:
10862 case WM_T_82574:
10863 case WM_T_82583:
10864 case WM_T_82575:
10865 case WM_T_82576:
10866 case WM_T_82580:
10867 case WM_T_I350:
10868 case WM_T_I354:
10869 case WM_T_I210:
10870 case WM_T_I211:
10871 case WM_T_80003:
10872 /* Null */
10873 break;
10874 case WM_T_82541:
10875 case WM_T_82547:
10876 /* XXX Actively configure the LED after PHY reset */
10877 break;
10878 case WM_T_ICH8:
10879 case WM_T_ICH9:
10880 case WM_T_ICH10:
10881 case WM_T_PCH:
10882 case WM_T_PCH2:
10883 case WM_T_PCH_LPT:
10884 case WM_T_PCH_SPT:
10885 case WM_T_PCH_CNP:
10886 wm_phy_post_reset(sc);
10887 break;
10888 default:
10889 panic("%s: unknown type\n", __func__);
10890 break;
10891 }
10892 }
10893
10894 /*
10895 * Set up sc_phytype and mii_{read|write}reg.
10896 *
10897 * Identifying the PHY type requires selecting the correct read/write
10898 * functions, and selecting those functions requires the PCI ID or MAC
10899 * type, since the PHY registers cannot be accessed yet.
10900 *
10901 * On the first call of this function, the PHY ID is not yet known, so
10902 * check the PCI ID or MAC type. The list of PCI IDs may be incomplete,
10903 * so the result might be incorrect.
10904 *
10905 * On the second call, the PHY OUI and model are used to identify the
10906 * PHY type. The comparison table may still lack entries, but the result
10907 * should be better than the first call's.
10908 *
10909 * If the newly detected result differs from the previous assumption,
10910 * a diagnostic message is printed.
10911 */
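/*
 * Illustrative call flow (editor's sketch, not driver code; the second
 * call is visible in wm_gmii_mediainit() below):
 *
 *	wm_gmii_setup_phytype(sc, 0, 0);	// 1st: guess from PCI ID / MAC type
 *	mii_attach(...);			// probe the PHY, learn OUI and model
 *	wm_gmii_setup_phytype(sc, oui, model);	// 2nd: refine using the PHY ID
 */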
10912 static void
10913 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
10914 uint16_t phy_model)
10915 {
10916 device_t dev = sc->sc_dev;
10917 struct mii_data *mii = &sc->sc_mii;
10918 uint16_t new_phytype = WMPHY_UNKNOWN;
10919 uint16_t doubt_phytype = WMPHY_UNKNOWN;
10920 mii_readreg_t new_readreg;
10921 mii_writereg_t new_writereg;
10922 bool dodiag = true;
10923
10924 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10925 device_xname(sc->sc_dev), __func__));
10926
10927 /*
10928 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
10929 * incorrect, so don't print diagnostic output on the second call.
10930 */
10931 if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
10932 dodiag = false;
10933
10934 if (mii->mii_readreg == NULL) {
10935 /*
10936 * This is the first call of this function. For ICH and PCH
10937 * variants, it's difficult to determine the PHY access method
10938 * by sc_type, so use the PCI product ID for some devices.
10939 */
10940
10941 switch (sc->sc_pcidevid) {
10942 case PCI_PRODUCT_INTEL_PCH_M_LM:
10943 case PCI_PRODUCT_INTEL_PCH_M_LC:
10944 /* 82577 */
10945 new_phytype = WMPHY_82577;
10946 break;
10947 case PCI_PRODUCT_INTEL_PCH_D_DM:
10948 case PCI_PRODUCT_INTEL_PCH_D_DC:
10949 /* 82578 */
10950 new_phytype = WMPHY_82578;
10951 break;
10952 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
10953 case PCI_PRODUCT_INTEL_PCH2_LV_V:
10954 /* 82579 */
10955 new_phytype = WMPHY_82579;
10956 break;
10957 case PCI_PRODUCT_INTEL_82801H_82567V_3:
10958 case PCI_PRODUCT_INTEL_82801I_BM:
10959 case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
10960 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
10961 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
10962 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
10963 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
10964 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
10965 /* ICH8, 9, 10 with 82567 */
10966 new_phytype = WMPHY_BM;
10967 break;
10968 default:
10969 break;
10970 }
10971 } else {
10972 /* It's not the first call. Use PHY OUI and model */
10973 switch (phy_oui) {
10974 case MII_OUI_ATTANSIC: /* atphy(4) */
10975 switch (phy_model) {
10976 case MII_MODEL_ATTANSIC_AR8021:
10977 new_phytype = WMPHY_82578;
10978 break;
10979 default:
10980 break;
10981 }
10982 break;
10983 case MII_OUI_xxMARVELL:
10984 switch (phy_model) {
10985 case MII_MODEL_xxMARVELL_I210:
10986 new_phytype = WMPHY_I210;
10987 break;
10988 case MII_MODEL_xxMARVELL_E1011:
10989 case MII_MODEL_xxMARVELL_E1000_3:
10990 case MII_MODEL_xxMARVELL_E1000_5:
10991 case MII_MODEL_xxMARVELL_E1112:
10992 new_phytype = WMPHY_M88;
10993 break;
10994 case MII_MODEL_xxMARVELL_E1149:
10995 new_phytype = WMPHY_BM;
10996 break;
10997 case MII_MODEL_xxMARVELL_E1111:
10998 case MII_MODEL_xxMARVELL_I347:
10999 case MII_MODEL_xxMARVELL_E1512:
11000 case MII_MODEL_xxMARVELL_E1340M:
11001 case MII_MODEL_xxMARVELL_E1543:
11002 new_phytype = WMPHY_M88;
11003 break;
11004 case MII_MODEL_xxMARVELL_I82563:
11005 new_phytype = WMPHY_GG82563;
11006 break;
11007 default:
11008 break;
11009 }
11010 break;
11011 case MII_OUI_INTEL:
11012 switch (phy_model) {
11013 case MII_MODEL_INTEL_I82577:
11014 new_phytype = WMPHY_82577;
11015 break;
11016 case MII_MODEL_INTEL_I82579:
11017 new_phytype = WMPHY_82579;
11018 break;
11019 case MII_MODEL_INTEL_I217:
11020 new_phytype = WMPHY_I217;
11021 break;
11022 case MII_MODEL_INTEL_I82580:
11023 new_phytype = WMPHY_82580;
11024 break;
11025 case MII_MODEL_INTEL_I350:
11026 new_phytype = WMPHY_I350;
11027 break;
11028 default:
11029 break;
11030 }
11031 break;
11032 case MII_OUI_yyINTEL:
11033 switch (phy_model) {
11034 case MII_MODEL_yyINTEL_I82562G:
11035 case MII_MODEL_yyINTEL_I82562EM:
11036 case MII_MODEL_yyINTEL_I82562ET:
11037 new_phytype = WMPHY_IFE;
11038 break;
11039 case MII_MODEL_yyINTEL_IGP01E1000:
11040 new_phytype = WMPHY_IGP;
11041 break;
11042 case MII_MODEL_yyINTEL_I82566:
11043 new_phytype = WMPHY_IGP_3;
11044 break;
11045 default:
11046 break;
11047 }
11048 break;
11049 default:
11050 break;
11051 }
11052
11053 if (dodiag) {
11054 if (new_phytype == WMPHY_UNKNOWN)
11055 aprint_verbose_dev(dev,
11056 "%s: Unknown PHY model. OUI=%06x, "
11057 "model=%04x\n", __func__, phy_oui,
11058 phy_model);
11059
11060 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11061 && (sc->sc_phytype != new_phytype)) {
11062 aprint_error_dev(dev, "Previously assumed PHY "
11063 "type(%u) was incorrect. PHY type from PHY "
11064 "ID = %u\n", sc->sc_phytype, new_phytype);
11065 }
11066 }
11067 }
11068
11069 /* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
11070 if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
11071 /* SGMII */
11072 new_readreg = wm_sgmii_readreg;
11073 new_writereg = wm_sgmii_writereg;
11074 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
11075 /* BM2 (phyaddr == 1) */
11076 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11077 && (new_phytype != WMPHY_BM)
11078 && (new_phytype != WMPHY_UNKNOWN))
11079 doubt_phytype = new_phytype;
11080 new_phytype = WMPHY_BM;
11081 new_readreg = wm_gmii_bm_readreg;
11082 new_writereg = wm_gmii_bm_writereg;
11083 } else if (sc->sc_type >= WM_T_PCH) {
11084 /* All PCH* use _hv_ */
11085 new_readreg = wm_gmii_hv_readreg;
11086 new_writereg = wm_gmii_hv_writereg;
11087 } else if (sc->sc_type >= WM_T_ICH8) {
11088 /* non-82567 ICH8, 9 and 10 */
11089 new_readreg = wm_gmii_i82544_readreg;
11090 new_writereg = wm_gmii_i82544_writereg;
11091 } else if (sc->sc_type >= WM_T_80003) {
11092 /* 80003 */
11093 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11094 && (new_phytype != WMPHY_GG82563)
11095 && (new_phytype != WMPHY_UNKNOWN))
11096 doubt_phytype = new_phytype;
11097 new_phytype = WMPHY_GG82563;
11098 new_readreg = wm_gmii_i80003_readreg;
11099 new_writereg = wm_gmii_i80003_writereg;
11100 } else if (sc->sc_type >= WM_T_I210) {
11101 /* I210 and I211 */
11102 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11103 && (new_phytype != WMPHY_I210)
11104 && (new_phytype != WMPHY_UNKNOWN))
11105 doubt_phytype = new_phytype;
11106 new_phytype = WMPHY_I210;
11107 new_readreg = wm_gmii_gs40g_readreg;
11108 new_writereg = wm_gmii_gs40g_writereg;
11109 } else if (sc->sc_type >= WM_T_82580) {
11110 /* 82580, I350 and I354 */
11111 new_readreg = wm_gmii_82580_readreg;
11112 new_writereg = wm_gmii_82580_writereg;
11113 } else if (sc->sc_type >= WM_T_82544) {
11114 /* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
11115 new_readreg = wm_gmii_i82544_readreg;
11116 new_writereg = wm_gmii_i82544_writereg;
11117 } else {
11118 new_readreg = wm_gmii_i82543_readreg;
11119 new_writereg = wm_gmii_i82543_writereg;
11120 }
11121
11122 if (new_phytype == WMPHY_BM) {
11123 /* All BM use _bm_ */
11124 new_readreg = wm_gmii_bm_readreg;
11125 new_writereg = wm_gmii_bm_writereg;
11126 }
11127 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
11128 /* All PCH* use _hv_ */
11129 new_readreg = wm_gmii_hv_readreg;
11130 new_writereg = wm_gmii_hv_writereg;
11131 }
11132
11133 /* Diag output */
11134 if (dodiag) {
11135 if (doubt_phytype != WMPHY_UNKNOWN)
11136 aprint_error_dev(dev, "Assumed new PHY type was "
11137 "incorrect. old = %u, new = %u\n", sc->sc_phytype,
11138 new_phytype);
11139 else if ((sc->sc_phytype != WMPHY_UNKNOWN)
11140 && (sc->sc_phytype != new_phytype))
11141 aprint_error_dev(dev, "Previously assumed PHY type(%u)"
11142 " was incorrect. New PHY type = %u\n",
11143 sc->sc_phytype, new_phytype);
11144
11145 if ((mii->mii_readreg != NULL) &&
11146 (new_phytype == WMPHY_UNKNOWN))
11147 aprint_error_dev(dev, "PHY type is still unknown.\n");
11148
11149 if ((mii->mii_readreg != NULL) &&
11150 (mii->mii_readreg != new_readreg))
11151 aprint_error_dev(dev, "Previously assumed PHY "
11152 "read/write function was incorrect.\n");
11153 }
11154
11155 /* Update now */
11156 sc->sc_phytype = new_phytype;
11157 mii->mii_readreg = new_readreg;
11158 mii->mii_writereg = new_writereg;
11159 if (new_readreg == wm_gmii_hv_readreg) {
11160 sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
11161 sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
11162 } else if (new_readreg == wm_sgmii_readreg) {
11163 sc->phy.readreg_locked = wm_sgmii_readreg_locked;
11164 sc->phy.writereg_locked = wm_sgmii_writereg_locked;
11165 } else if (new_readreg == wm_gmii_i82544_readreg) {
11166 sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
11167 sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
11168 }
11169 }
11170
11171 /*
11172 * wm_get_phy_id_82575:
11173 *
11174 * Return PHY ID. Return -1 if it failed.
11175 */
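/*
 * When SGMII runs over external MDIO, the PHY address is taken from the
 * PHY-address field of MDIC (82575/82576) or MDICNFG (82580 and newer).
 */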
11176 static int
11177 wm_get_phy_id_82575(struct wm_softc *sc)
11178 {
11179 uint32_t reg;
11180 int phyid = -1;
11181
11182 /* XXX */
11183 if ((sc->sc_flags & WM_F_SGMII) == 0)
11184 return -1;
11185
11186 if (wm_sgmii_uses_mdio(sc)) {
11187 switch (sc->sc_type) {
11188 case WM_T_82575:
11189 case WM_T_82576:
11190 reg = CSR_READ(sc, WMREG_MDIC);
11191 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
11192 break;
11193 case WM_T_82580:
11194 case WM_T_I350:
11195 case WM_T_I354:
11196 case WM_T_I210:
11197 case WM_T_I211:
11198 reg = CSR_READ(sc, WMREG_MDICNFG);
11199 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
11200 break;
11201 default:
11202 return -1;
11203 }
11204 }
11205
11206 return phyid;
11207 }
11208
11209 /*
11210 * wm_gmii_mediainit:
11211 *
11212 * Initialize media for use on 1000BASE-T devices.
11213 */
11214 static void
11215 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
11216 {
11217 device_t dev = sc->sc_dev;
11218 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11219 struct mii_data *mii = &sc->sc_mii;
11220
11221 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11222 device_xname(sc->sc_dev), __func__));
11223
11224 /* We have GMII. */
11225 sc->sc_flags |= WM_F_HAS_MII;
11226
11227 if (sc->sc_type == WM_T_80003)
11228 sc->sc_tipg = TIPG_1000T_80003_DFLT;
11229 else
11230 sc->sc_tipg = TIPG_1000T_DFLT;
11231
11232 /*
11233 * Let the chip set speed/duplex on its own based on
11234 * signals from the PHY.
11235 * XXXbouyer - I'm not sure this is right for the 80003,
11236 * the em driver only sets CTRL_SLU here - but it seems to work.
11237 */
11238 sc->sc_ctrl |= CTRL_SLU;
11239 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11240
11241 /* Initialize our media structures and probe the GMII. */
11242 mii->mii_ifp = ifp;
11243
11244 mii->mii_statchg = wm_gmii_statchg;
11245
11246 /* Get PHY control from SMBus to PCIe */
11247 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
11248 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
11249 || (sc->sc_type == WM_T_PCH_CNP))
11250 wm_init_phy_workarounds_pchlan(sc);
11251
11252 wm_gmii_reset(sc);
11253
11254 sc->sc_ethercom.ec_mii = &sc->sc_mii;
11255 ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
11256 wm_gmii_mediastatus, sc->sc_core_lock);
11257
11258 /* Setup internal SGMII PHY for SFP */
11259 wm_sgmii_sfp_preconfig(sc);
11260
11261 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
11262 || (sc->sc_type == WM_T_82580)
11263 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
11264 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
11265 if ((sc->sc_flags & WM_F_SGMII) == 0) {
11266 /* Attach only one port */
11267 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
11268 MII_OFFSET_ANY, MIIF_DOPAUSE);
11269 } else {
11270 int i, id;
11271 uint32_t ctrl_ext;
11272
11273 id = wm_get_phy_id_82575(sc);
11274 if (id != -1) {
11275 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
11276 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
11277 }
11278 if ((id == -1)
11279 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
11280 /* Power on sgmii phy if it is disabled */
11281 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11282 CSR_WRITE(sc, WMREG_CTRL_EXT,
11283 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
11284 CSR_WRITE_FLUSH(sc);
11285 delay(300*1000); /* XXX too long */
11286
11287 /*
11288 * Scan PHY addresses 1 through 7.
11289 *
11290 * I2C access can fail with the I2CCMD register's
11291 * ERROR bit set, so suppress error messages
11292 * while scanning.
11293 */
11294 sc->phy.no_errprint = true;
11295 for (i = 1; i < 8; i++)
11296 mii_attach(sc->sc_dev, &sc->sc_mii,
11297 0xffffffff, i, MII_OFFSET_ANY,
11298 MIIF_DOPAUSE);
11299 sc->phy.no_errprint = false;
11300
11301 /* Restore previous sfp cage power state */
11302 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11303 }
11304 }
11305 } else
11306 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11307 MII_OFFSET_ANY, MIIF_DOPAUSE);
11308
11309 /*
11310 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
11311 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
11312 */
11313 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
11314 || (sc->sc_type == WM_T_PCH_SPT)
11315 || (sc->sc_type == WM_T_PCH_CNP))
11316 && (LIST_FIRST(&mii->mii_phys) == NULL)) {
11317 wm_set_mdio_slow_mode_hv(sc);
11318 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11319 MII_OFFSET_ANY, MIIF_DOPAUSE);
11320 }
11321
11322 /*
11323 * (For ICH8 variants)
11324 * If PHY detection failed, use BM's r/w function and retry.
11325 */
11326 if (LIST_FIRST(&mii->mii_phys) == NULL) {
11327 /* if failed, retry with *_bm_* */
11328 aprint_verbose_dev(dev, "Assumed PHY access function "
11329 "(type = %d) might be incorrect. Use BM and retry.\n",
11330 sc->sc_phytype);
11331 sc->sc_phytype = WMPHY_BM;
11332 mii->mii_readreg = wm_gmii_bm_readreg;
11333 mii->mii_writereg = wm_gmii_bm_writereg;
11334
11335 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11336 MII_OFFSET_ANY, MIIF_DOPAUSE);
11337 }
11338
11339 if (LIST_FIRST(&mii->mii_phys) == NULL) {
11340 /* No PHY was found */
11341 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
11342 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
11343 sc->sc_phytype = WMPHY_NONE;
11344 } else {
11345 struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
11346
11347 /*
11348 * PHY found! Check PHY type again by the second call of
11349 * wm_gmii_setup_phytype.
11350 */
11351 wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
11352 child->mii_mpd_model);
11353
11354 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
11355 }
11356 }
11357
11358 /*
11359 * wm_gmii_mediachange: [ifmedia interface function]
11360 *
11361 * Set hardware to newly-selected media on a 1000BASE-T device.
11362 */
11363 static int
11364 wm_gmii_mediachange(struct ifnet *ifp)
11365 {
11366 struct wm_softc *sc = ifp->if_softc;
11367 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11368 uint32_t reg;
11369 int rc;
11370
11371 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11372 device_xname(sc->sc_dev), __func__));
11373
11374 KASSERT(mutex_owned(sc->sc_core_lock));
11375
11376 if ((sc->sc_if_flags & IFF_UP) == 0)
11377 return 0;
11378
11379 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
11380 if ((sc->sc_type == WM_T_82580)
11381 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
11382 || (sc->sc_type == WM_T_I211)) {
11383 reg = CSR_READ(sc, WMREG_PHPM);
11384 reg &= ~PHPM_GO_LINK_D;
11385 CSR_WRITE(sc, WMREG_PHPM, reg);
11386 }
11387
11388 /* Disable D0 LPLU. */
11389 wm_lplu_d0_disable(sc);
11390
11391 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
11392 sc->sc_ctrl |= CTRL_SLU;
11393 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11394 || (sc->sc_type > WM_T_82543)) {
11395 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
11396 } else {
11397 sc->sc_ctrl &= ~CTRL_ASDE;
11398 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11399 if (ife->ifm_media & IFM_FDX)
11400 sc->sc_ctrl |= CTRL_FD;
11401 switch (IFM_SUBTYPE(ife->ifm_media)) {
11402 case IFM_10_T:
11403 sc->sc_ctrl |= CTRL_SPEED_10;
11404 break;
11405 case IFM_100_TX:
11406 sc->sc_ctrl |= CTRL_SPEED_100;
11407 break;
11408 case IFM_1000_T:
11409 sc->sc_ctrl |= CTRL_SPEED_1000;
11410 break;
11411 case IFM_NONE:
11412 /* There is no specific setting for IFM_NONE */
11413 break;
11414 default:
11415 panic("wm_gmii_mediachange: bad media 0x%x",
11416 ife->ifm_media);
11417 }
11418 }
11419 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11420 CSR_WRITE_FLUSH(sc);
11421
11422 if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
11423 wm_serdes_mediachange(ifp);
11424
11425 if (sc->sc_type <= WM_T_82543)
11426 wm_gmii_reset(sc);
11427 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
11428 && ((sc->sc_flags & WM_F_SGMII) != 0)) {
11429 /* Allow time for the SFP cage to power up the PHY */
11430 delay(300 * 1000);
11431 wm_gmii_reset(sc);
11432 }
11433
11434 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
11435 return 0;
11436 return rc;
11437 }
11438
11439 /*
11440 * wm_gmii_mediastatus: [ifmedia interface function]
11441 *
11442 * Get the current interface media status on a 1000BASE-T device.
11443 */
11444 static void
11445 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11446 {
11447 struct wm_softc *sc = ifp->if_softc;
11448
11449 KASSERT(mutex_owned(sc->sc_core_lock));
11450
11451 ether_mediastatus(ifp, ifmr);
11452 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
11453 | sc->sc_flowflags;
11454 }
11455
11456 #define MDI_IO CTRL_SWDPIN(2)
11457 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
11458 #define MDI_CLK CTRL_SWDPIN(3)
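/*
 * The 82543 has no MDIC register; MII management frames are bit-banged
 * through the software-definable pins above. A read sends a 32-bit
 * preamble of ones, then 14 bits of start/opcode/PHY/register, and
 * clocks 16 data bits back in on MDI_CLK (see the helpers below).
 */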
11459
11460 static void
11461 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
11462 {
11463 uint32_t i, v;
11464
11465 v = CSR_READ(sc, WMREG_CTRL);
11466 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11467 v |= MDI_DIR | CTRL_SWDPIO(3);
11468
11469 for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
11470 if (data & i)
11471 v |= MDI_IO;
11472 else
11473 v &= ~MDI_IO;
11474 CSR_WRITE(sc, WMREG_CTRL, v);
11475 CSR_WRITE_FLUSH(sc);
11476 delay(10);
11477 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11478 CSR_WRITE_FLUSH(sc);
11479 delay(10);
11480 CSR_WRITE(sc, WMREG_CTRL, v);
11481 CSR_WRITE_FLUSH(sc);
11482 delay(10);
11483 }
11484 }
11485
11486 static uint16_t
11487 wm_i82543_mii_recvbits(struct wm_softc *sc)
11488 {
11489 uint32_t v, i;
11490 uint16_t data = 0;
11491
11492 v = CSR_READ(sc, WMREG_CTRL);
11493 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11494 v |= CTRL_SWDPIO(3);
11495
11496 CSR_WRITE(sc, WMREG_CTRL, v);
11497 CSR_WRITE_FLUSH(sc);
11498 delay(10);
11499 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11500 CSR_WRITE_FLUSH(sc);
11501 delay(10);
11502 CSR_WRITE(sc, WMREG_CTRL, v);
11503 CSR_WRITE_FLUSH(sc);
11504 delay(10);
11505
11506 for (i = 0; i < 16; i++) {
11507 data <<= 1;
11508 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11509 CSR_WRITE_FLUSH(sc);
11510 delay(10);
11511 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
11512 data |= 1;
11513 CSR_WRITE(sc, WMREG_CTRL, v);
11514 CSR_WRITE_FLUSH(sc);
11515 delay(10);
11516 }
11517
11518 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11519 CSR_WRITE_FLUSH(sc);
11520 delay(10);
11521 CSR_WRITE(sc, WMREG_CTRL, v);
11522 CSR_WRITE_FLUSH(sc);
11523 delay(10);
11524
11525 return data;
11526 }
11527
11528 #undef MDI_IO
11529 #undef MDI_DIR
11530 #undef MDI_CLK
11531
11532 /*
11533 * wm_gmii_i82543_readreg: [mii interface function]
11534 *
11535 * Read a PHY register on the GMII (i82543 version).
11536 */
11537 static int
11538 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
11539 {
11540 struct wm_softc *sc = device_private(dev);
11541
11542 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11543 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
11544 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
11545 *val = wm_i82543_mii_recvbits(sc) & 0xffff;
11546
11547 DPRINTF(sc, WM_DEBUG_GMII,
11548 ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
11549 device_xname(dev), phy, reg, *val));
11550
11551 return 0;
11552 }
11553
11554 /*
11555 * wm_gmii_i82543_writereg: [mii interface function]
11556 *
11557 * Write a PHY register on the GMII (i82543 version).
11558 */
11559 static int
11560 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
11561 {
11562 struct wm_softc *sc = device_private(dev);
11563
11564 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11565 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
11566 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
11567 (MII_COMMAND_START << 30), 32);
11568
11569 return 0;
11570 }
11571
11572 /*
11573 * wm_gmii_mdic_readreg: [mii interface function]
11574 *
11575 * Read a PHY register on the GMII.
11576 */
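/*
 * A single MDIC write encodes the opcode, PHY address and register
 * number; the hardware sets MDIC_READY on completion and MDIC_E on
 * error, which the poll loops below check every 50us.
 */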
11577 static int
11578 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
11579 {
11580 struct wm_softc *sc = device_private(dev);
11581 uint32_t mdic = 0;
11582 int i;
11583
11584 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11585 && (reg > MII_ADDRMASK)) {
11586 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11587 __func__, sc->sc_phytype, reg);
11588 reg &= MII_ADDRMASK;
11589 }
11590
11591 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
11592 MDIC_REGADD(reg));
11593
11594 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11595 delay(50);
11596 mdic = CSR_READ(sc, WMREG_MDIC);
11597 if (mdic & MDIC_READY)
11598 break;
11599 }
11600
11601 if ((mdic & MDIC_READY) == 0) {
11602 DPRINTF(sc, WM_DEBUG_GMII,
11603 ("%s: MDIC read timed out: phy %d reg %d\n",
11604 device_xname(dev), phy, reg));
11605 return ETIMEDOUT;
11606 } else if (mdic & MDIC_E) {
11607 /* This is normal if no PHY is present. */
11608 DPRINTF(sc, WM_DEBUG_GMII,
11609 ("%s: MDIC read error: phy %d reg %d\n",
11610 device_xname(sc->sc_dev), phy, reg));
11611 return -1;
11612 } else
11613 *val = MDIC_DATA(mdic);
11614
11615 /*
11616 * Allow some time after each MDIC transaction to avoid
11617 * reading duplicate data in the next MDIC transaction.
11618 */
11619 if (sc->sc_type == WM_T_PCH2)
11620 delay(100);
11621
11622 return 0;
11623 }
11624
11625 /*
11626 * wm_gmii_mdic_writereg: [mii interface function]
11627 *
11628 * Write a PHY register on the GMII.
11629 */
11630 static int
11631 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
11632 {
11633 struct wm_softc *sc = device_private(dev);
11634 uint32_t mdic = 0;
11635 int i;
11636
11637 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11638 && (reg > MII_ADDRMASK)) {
11639 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11640 __func__, sc->sc_phytype, reg);
11641 reg &= MII_ADDRMASK;
11642 }
11643
11644 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
11645 MDIC_REGADD(reg) | MDIC_DATA(val));
11646
11647 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11648 delay(50);
11649 mdic = CSR_READ(sc, WMREG_MDIC);
11650 if (mdic & MDIC_READY)
11651 break;
11652 }
11653
11654 if ((mdic & MDIC_READY) == 0) {
11655 DPRINTF(sc, WM_DEBUG_GMII,
11656 ("%s: MDIC write timed out: phy %d reg %d\n",
11657 device_xname(dev), phy, reg));
11658 return ETIMEDOUT;
11659 } else if (mdic & MDIC_E) {
11660 DPRINTF(sc, WM_DEBUG_GMII,
11661 ("%s: MDIC write error: phy %d reg %d\n",
11662 device_xname(dev), phy, reg));
11663 return -1;
11664 }
11665
11666 /*
11667 * Allow some time after each MDIC transaction to avoid
11668 * reading duplicate data in the next MDIC transaction.
11669 */
11670 if (sc->sc_type == WM_T_PCH2)
11671 delay(100);
11672
11673 return 0;
11674 }
11675
11676 /*
11677 * wm_gmii_i82544_readreg: [mii interface function]
11678 *
11679 * Read a PHY register on the GMII.
11680 */
11681 static int
11682 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
11683 {
11684 struct wm_softc *sc = device_private(dev);
11685 int rv;
11686
11687 rv = sc->phy.acquire(sc);
11688 if (rv != 0) {
11689 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11690 return rv;
11691 }
11692
11693 rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
11694
11695 sc->phy.release(sc);
11696
11697 return rv;
11698 }
11699
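/*
 * For IGP PHYs, registers above MII_ADDRMASK are reached by first
 * writing the full register number to IGPHY_PAGE_SELECT and then
 * accessing (reg & MII_ADDRMASK) within the selected page.
 */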
11700 static int
11701 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11702 {
11703 struct wm_softc *sc = device_private(dev);
11704 int rv;
11705
11706 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11707 switch (sc->sc_phytype) {
11708 case WMPHY_IGP:
11709 case WMPHY_IGP_2:
11710 case WMPHY_IGP_3:
11711 rv = wm_gmii_mdic_writereg(dev, phy,
11712 IGPHY_PAGE_SELECT, reg);
11713 if (rv != 0)
11714 return rv;
11715 break;
11716 default:
11717 #ifdef WM_DEBUG
11718 device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
11719 __func__, sc->sc_phytype, reg);
11720 #endif
11721 break;
11722 }
11723 }
11724
11725 return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11726 }
11727
11728 /*
11729 * wm_gmii_i82544_writereg: [mii interface function]
11730 *
11731 * Write a PHY register on the GMII.
11732 */
11733 static int
11734 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
11735 {
11736 struct wm_softc *sc = device_private(dev);
11737 int rv;
11738
11739 rv = sc->phy.acquire(sc);
11740 if (rv != 0) {
11741 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11742 return rv;
11743 }
11744
11745 rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
11746 sc->phy.release(sc);
11747
11748 return rv;
11749 }
11750
11751 static int
11752 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11753 {
11754 struct wm_softc *sc = device_private(dev);
11755 int rv;
11756
11757 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11758 switch (sc->sc_phytype) {
11759 case WMPHY_IGP:
11760 case WMPHY_IGP_2:
11761 case WMPHY_IGP_3:
11762 rv = wm_gmii_mdic_writereg(dev, phy,
11763 IGPHY_PAGE_SELECT, reg);
11764 if (rv != 0)
11765 return rv;
11766 break;
11767 default:
11768 #ifdef WM_DEBUG
11769 device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
11770 __func__, sc->sc_phytype, reg);
11771 #endif
11772 break;
11773 }
11774 }
11775
11776 return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11777 }
11778
11779 /*
11780 * wm_gmii_i80003_readreg: [mii interface function]
11781 *
11782 * Read a PHY register on the Kumeran interface (80003).
11783 * This could be handled by the PHY layer if we didn't have to lock the
11784 * resource ...
11785 */
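/*
 * GG82563 registers are paged: the page number (reg >> GG82563_PAGE_SHIFT)
 * is written to GG82563_PHY_PAGE_SELECT, or to the alternate page-select
 * register for registers 30 and 31, before the in-page access.
 */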
11786 static int
11787 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
11788 {
11789 struct wm_softc *sc = device_private(dev);
11790 int page_select;
11791 uint16_t temp, temp2;
11792 int rv;
11793
11794 if (phy != 1) /* Only one PHY on kumeran bus */
11795 return -1;
11796
11797 rv = sc->phy.acquire(sc);
11798 if (rv != 0) {
11799 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11800 return rv;
11801 }
11802
11803 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11804 page_select = GG82563_PHY_PAGE_SELECT;
11805 else {
11806 /*
11807 * Use Alternative Page Select register to access registers
11808 * 30 and 31.
11809 */
11810 page_select = GG82563_PHY_PAGE_SELECT_ALT;
11811 }
11812 temp = reg >> GG82563_PAGE_SHIFT;
11813 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11814 goto out;
11815
11816 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11817 /*
11818 * Wait an extra 200us to work around a bug in the MDIC
11819 * register's ready bit.
11820 */
11821 delay(200);
11822 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11823 if ((rv != 0) || (temp2 != temp)) {
11824 device_printf(dev, "%s failed\n", __func__);
11825 rv = -1;
11826 goto out;
11827 }
11828 delay(200);
11829 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11830 delay(200);
11831 } else
11832 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11833
11834 out:
11835 sc->phy.release(sc);
11836 return rv;
11837 }
11838
11839 /*
11840 * wm_gmii_i80003_writereg: [mii interface function]
11841 *
11842 * Write a PHY register on the Kumeran interface (80003).
11843 * This could be handled by the PHY layer if we didn't have to lock the
11844 * resource ...
11845 */
11846 static int
11847 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
11848 {
11849 struct wm_softc *sc = device_private(dev);
11850 int page_select, rv;
11851 uint16_t temp, temp2;
11852
11853 if (phy != 1) /* Only one PHY on kumeran bus */
11854 return -1;
11855
11856 rv = sc->phy.acquire(sc);
11857 if (rv != 0) {
11858 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11859 return rv;
11860 }
11861
11862 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11863 page_select = GG82563_PHY_PAGE_SELECT;
11864 else {
11865 /*
11866 * Use Alternative Page Select register to access registers
11867 * 30 and 31.
11868 */
11869 page_select = GG82563_PHY_PAGE_SELECT_ALT;
11870 }
11871 temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
11872 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11873 goto out;
11874
11875 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11876 /*
11877 * Wait an extra 200us to work around a bug in the MDIC
11878 * register's ready bit.
11879 */
11880 delay(200);
11881 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11882 if ((rv != 0) || (temp2 != temp)) {
11883 device_printf(dev, "%s failed\n", __func__);
11884 rv = -1;
11885 goto out;
11886 }
11887 delay(200);
11888 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11889 delay(200);
11890 } else
11891 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11892
11893 out:
11894 sc->phy.release(sc);
11895 return rv;
11896 }
11897
11898 /*
11899 * wm_gmii_bm_readreg: [mii interface function]
11900 *
11901 * Read a PHY register on the BM PHY.
11902 * This could be handled by the PHY layer if we didn't have to lock the
11903 * resource ...
11904 */
11905 static int
11906 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
11907 {
11908 struct wm_softc *sc = device_private(dev);
11909 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11910 int rv;
11911
11912 rv = sc->phy.acquire(sc);
11913 if (rv != 0) {
11914 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11915 return rv;
11916 }
11917
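/*
 * Redirect to PHY address 1 the registers that only exist there:
 * pages >= 768, the page-select register (31) and page-0 register 25.
 */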
11918 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11919 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11920 || (reg == 31)) ? 1 : phy;
11921 /* Page 800 works differently than the rest so it has its own func */
11922 if (page == BM_WUC_PAGE) {
11923 rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11924 goto release;
11925 }
11926
11927 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11928 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11929 && (sc->sc_type != WM_T_82583))
11930 rv = wm_gmii_mdic_writereg(dev, phy,
11931 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11932 else
11933 rv = wm_gmii_mdic_writereg(dev, phy,
11934 BME1000_PHY_PAGE_SELECT, page);
11935 if (rv != 0)
11936 goto release;
11937 }
11938
11939 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11940
11941 release:
11942 sc->phy.release(sc);
11943 return rv;
11944 }
11945
11946 /*
11947 * wm_gmii_bm_writereg: [mii interface function]
11948 *
11949 * Write a PHY register on the BM PHY.
11950 * This could be handled by the PHY layer if we didn't have to lock the
11951 * resource ...
11952 */
11953 static int
11954 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
11955 {
11956 struct wm_softc *sc = device_private(dev);
11957 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11958 int rv;
11959
11960 rv = sc->phy.acquire(sc);
11961 if (rv != 0) {
11962 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11963 return rv;
11964 }
11965
11966 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11967 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11968 || (reg == 31)) ? 1 : phy;
11969 /* Page 800 works differently than the rest so it has its own func */
11970 if (page == BM_WUC_PAGE) {
11971 rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
11972 goto release;
11973 }
11974
11975 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11976 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11977 && (sc->sc_type != WM_T_82583))
11978 rv = wm_gmii_mdic_writereg(dev, phy,
11979 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11980 else
11981 rv = wm_gmii_mdic_writereg(dev, phy,
11982 BME1000_PHY_PAGE_SELECT, page);
11983 if (rv != 0)
11984 goto release;
11985 }
11986
11987 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11988
11989 release:
11990 sc->phy.release(sc);
11991 return rv;
11992 }
11993
11994 /*
11995 * wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
11996 * @dev: pointer to the HW structure
11997 * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
11998 *
11999 * Assumes semaphore already acquired and phy_reg points to a valid memory
12000 * address to store contents of the BM_WUC_ENABLE_REG register.
12001 */
12002 static int
12003 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12004 {
12005 #ifdef WM_DEBUG
12006 struct wm_softc *sc = device_private(dev);
12007 #endif
12008 uint16_t temp;
12009 int rv;
12010
12011 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12012 device_xname(dev), __func__));
12013
12014 if (!phy_regp)
12015 return -1;
12016
12017 /* All page select, port ctrl and wakeup registers use phy address 1 */
12018
12019 /* Select Port Control Registers page */
12020 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12021 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12022 if (rv != 0)
12023 return rv;
12024
12025 /* Read WUCE and save it */
12026 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
12027 if (rv != 0)
12028 return rv;
12029
12030 /* Enable both PHY wakeup mode and Wakeup register page writes.
12031 * Prevent a power state change by disabling ME and Host PHY wakeup.
12032 */
12033 temp = *phy_regp;
12034 temp |= BM_WUC_ENABLE_BIT;
12035 temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
12036
12037 if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
12038 return rv;
12039
12040 /* Select Host Wakeup Registers page - caller now able to write
12041 * registers on the Wakeup registers page
12042 */
12043 return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12044 BM_WUC_PAGE << IGP3_PAGE_SHIFT);
12045 }
12046
12047 /*
12048 * wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
12049 * @dev: pointer to the HW structure
12050 * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
12051 *
12052 * Restore BM_WUC_ENABLE_REG to its original value.
12053 *
12054 * Assumes semaphore already acquired and *phy_reg is the contents of the
12055 * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
12056 * caller.
12057 */
12058 static int
12059 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12060 {
12061 #ifdef WM_DEBUG
12062 struct wm_softc *sc = device_private(dev);
12063 #endif
12064
12065 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12066 device_xname(dev), __func__));
12067
12068 if (!phy_regp)
12069 return -1;
12070
12071 /* Select Port Control Registers page */
12072 wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12073 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12074
12075 /* Restore 769.17 to its original value */
12076 wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
12077
12078 return 0;
12079 }
12080
12081 /*
12082 * wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
12083 * @sc: pointer to the HW structure
12084 * @offset: register offset to be read or written
12085 * @val: pointer to the data to read or write
12086 * @rd: determines if operation is read or write
12087 * @page_set: BM_WUC_PAGE already set and access enabled
12088 *
12089 * Read the PHY register at offset and store the retrieved information in
12090 * data, or write data to PHY register at offset. Note the procedure to
12091 * access the PHY wakeup registers is different than reading the other PHY
12092 * registers. It works as such:
12093 * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
12094 * 2) Set page to 800 for host (801 if we were manageability)
12095 * 3) Write the address using the address opcode (0x11)
12096 * 4) Read or write the data using the data opcode (0x12)
12097 * 5) Restore 769.17.2 to its original value
12098 *
12099 * Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
12100 * step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
12101 *
12102 * Assumes semaphore is already acquired. When page_set==TRUE, assumes
12103 * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
12104 * is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
12105 */
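/*
 * Editor's sketch of the resulting MDIC sequence for a read (all at
 * PHY address 1; see the enable/disable helpers above for the real code):
 *
 *	IGPHY_PAGE_SELECT     <- BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT
 *	BM_WUC_ENABLE_REG     <- wuce | BM_WUC_ENABLE_BIT	// 769.17.2 = 1
 *	IGPHY_PAGE_SELECT     <- BM_WUC_PAGE << IGP3_PAGE_SHIFT	// page 800
 *	BM_WUC_ADDRESS_OPCODE <- regnum				// opcode 0x11
 *	BM_WUC_DATA_OPCODE    -> data				// opcode 0x12
 *	IGPHY_PAGE_SELECT     <- BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT
 *	BM_WUC_ENABLE_REG     <- wuce				// restore 769.17
 */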
12106 static int
12107 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
12108 bool page_set)
12109 {
12110 struct wm_softc *sc = device_private(dev);
12111 uint16_t regnum = BM_PHY_REG_NUM(offset);
12112 uint16_t page = BM_PHY_REG_PAGE(offset);
12113 uint16_t wuce;
12114 int rv = 0;
12115
12116 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12117 device_xname(dev), __func__));
12118 /* XXX Gig must be disabled for MDIO accesses to page 800 */
12119 if ((sc->sc_type == WM_T_PCH)
12120 && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
12121 device_printf(dev,
12122 "Attempting to access page %d while gig enabled.\n", page);
12123 }
12124
12125 if (!page_set) {
12126 /* Enable access to PHY wakeup registers */
12127 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
12128 if (rv != 0) {
12129 device_printf(dev,
12130 "%s: Could not enable PHY wakeup reg access\n",
12131 __func__);
12132 return rv;
12133 }
12134 }
12135 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
12136 device_xname(sc->sc_dev), __func__, page, regnum));
12137
12138 /*
12139 * Steps 3 and 4: write the register address (opcode 0x11), then
12140 * read or write the data (opcode 0x12).
12141 */
12142
12143 /* Write the Wakeup register page offset value using opcode 0x11 */
12144 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
12145 if (rv != 0)
12146 return rv;
12147
12148 if (rd) {
12149 /* Read the Wakeup register page value using opcode 0x12 */
12150 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
12151 } else {
12152 /* Write the Wakeup register page value using opcode 0x12 */
12153 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
12154 }
12155 if (rv != 0)
12156 return rv;
12157
12158 if (!page_set)
12159 rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
12160
12161 return rv;
12162 }
12163
12164 /*
12165 * wm_gmii_hv_readreg: [mii interface function]
12166 *
12167 * Read a PHY register on the HV PHY (PCH and newer).
12168 * This could be handled by the PHY layer if we didn't have to lock the
12169 * resource ...
12170 */
12171 static int
12172 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
12173 {
12174 struct wm_softc *sc = device_private(dev);
12175 int rv;
12176
12177 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12178 device_xname(dev), __func__));
12179
12180 rv = sc->phy.acquire(sc);
12181 if (rv != 0) {
12182 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12183 return rv;
12184 }
12185
12186 rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
12187 sc->phy.release(sc);
12188 return rv;
12189 }
12190
12191 static int
12192 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12193 {
12194 uint16_t page = BM_PHY_REG_PAGE(reg);
12195 uint16_t regnum = BM_PHY_REG_NUM(reg);
12196 int rv;
12197
12198 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12199
12200 /* Page 800 works differently than the rest so it has its own func */
12201 if (page == BM_WUC_PAGE)
12202 return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
12203
12204 /*
12205 * Pages below 768 (other than page 0) work differently than the
12206 * rest and are not handled here.
12207 */
12208 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12209 device_printf(dev, "gmii_hv_readreg!!!\n");
12210 return -1;
12211 }
12212
12213 /*
12214 * XXX I21[789] documents say that the SMBus Address register is at
12215 * PHY address 01, Page 0 (not 768), Register 26.
12216 */
12217 if (page == HV_INTC_FC_PAGE_START)
12218 page = 0;
12219
12220 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12221 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12222 page << BME1000_PAGE_SHIFT);
12223 if (rv != 0)
12224 return rv;
12225 }
12226
12227 return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
12228 }
12229
12230 /*
12231 * wm_gmii_hv_writereg: [mii interface function]
12232 *
12233 * Write a PHY register on the HV PHY (PCH and newer).
12234 * This could be handled by the PHY layer if we didn't have to lock the
12235 * resource ...
12236 */
12237 static int
12238 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
12239 {
12240 struct wm_softc *sc = device_private(dev);
12241 int rv;
12242
12243 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12244 device_xname(dev), __func__));
12245
12246 rv = sc->phy.acquire(sc);
12247 if (rv != 0) {
12248 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12249 return rv;
12250 }
12251
12252 rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
12253 sc->phy.release(sc);
12254
12255 return rv;
12256 }
12257
12258 static int
12259 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12260 {
12261 struct wm_softc *sc = device_private(dev);
12262 uint16_t page = BM_PHY_REG_PAGE(reg);
12263 uint16_t regnum = BM_PHY_REG_NUM(reg);
12264 int rv;
12265
12266 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12267
12268 /* Page 800 works differently than the rest so it has its own func */
12269 if (page == BM_WUC_PAGE)
12270 return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
12271 false);
12272
12273 /*
12274 * Pages below 768 (other than page 0) work differently than the
12275 * rest and are not handled here.
12276 */
12277 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12278 device_printf(dev, "gmii_hv_writereg!!!\n");
12279 return -1;
12280 }
12281
12282 {
12283 /*
12284 * XXX I21[789] documents say that the SMBus Address register
12285 * is at PHY address 01, Page 0 (not 768), Register 26.
12286 */
12287 if (page == HV_INTC_FC_PAGE_START)
12288 page = 0;
12289
12290 /*
12291 * XXX Workaround MDIO accesses being disabled after entering
12292 * IEEE Power Down (whenever bit 11 of the PHY control
12293 * register is set)
12294 */
12295 if (sc->sc_phytype == WMPHY_82578) {
12296 struct mii_softc *child;
12297
12298 child = LIST_FIRST(&sc->sc_mii.mii_phys);
12299 if ((child != NULL) && (child->mii_mpd_rev >= 1)
12300 && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
12301 && ((val & (1 << 11)) != 0)) {
12302 device_printf(dev, "XXX need workaround\n");
12303 }
12304 }
12305
12306 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12307 rv = wm_gmii_mdic_writereg(dev, 1,
12308 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12309 if (rv != 0)
12310 return rv;
12311 }
12312 }
12313
12314 return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
12315 }
12316
12317 /*
12318 * wm_gmii_82580_readreg: [mii interface function]
12319 *
12320 * Read a PHY register on the 82580 and I350.
12321 * This could be handled by the PHY layer if we didn't have to lock the
12322 * resource ...
12323 */
12324 static int
12325 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
12326 {
12327 struct wm_softc *sc = device_private(dev);
12328 int rv;
12329
12330 rv = sc->phy.acquire(sc);
12331 if (rv != 0) {
12332 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12333 return rv;
12334 }
12335
12336 #ifdef DIAGNOSTIC
12337 if (reg > MII_ADDRMASK) {
12338 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12339 __func__, sc->sc_phytype, reg);
12340 reg &= MII_ADDRMASK;
12341 }
12342 #endif
12343 rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
12344
12345 sc->phy.release(sc);
12346 return rv;
12347 }
12348
12349 /*
12350 * wm_gmii_82580_writereg: [mii interface function]
12351 *
12352 * Write a PHY register on the 82580 and I350.
12353 * This could be handled by the PHY layer if we didn't have to lock the
12354 * resource ...
12355 */
12356 static int
12357 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
12358 {
12359 struct wm_softc *sc = device_private(dev);
12360 int rv;
12361
12362 rv = sc->phy.acquire(sc);
12363 if (rv != 0) {
12364 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12365 return rv;
12366 }
12367
12368 #ifdef DIAGNOSTIC
12369 if (reg > MII_ADDRMASK) {
12370 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12371 __func__, sc->sc_phytype, reg);
12372 reg &= MII_ADDRMASK;
12373 }
12374 #endif
12375 rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
12376
12377 sc->phy.release(sc);
12378 return rv;
12379 }
12380
12381 /*
12382 * wm_gmii_gs40g_readreg: [mii interface function]
12383 *
12384 * Read a PHY register on the I210 and I211.
12385 * This could be handled by the PHY layer if we didn't have to lock the
12386 * resource ...
12387 */
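/*
 * GS40G registers are paged: the page (reg >> GS40G_PAGE_SHIFT) is
 * selected through GS40G_PAGE_SELECT before the in-page offset
 * (reg & GS40G_OFFSET_MASK) is accessed.
 */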
12388 static int
12389 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
12390 {
12391 struct wm_softc *sc = device_private(dev);
12392 int page, offset;
12393 int rv;
12394
12395 /* Acquire semaphore */
12396 rv = sc->phy.acquire(sc);
12397 if (rv != 0) {
12398 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12399 return rv;
12400 }
12401
12402 /* Page select */
12403 page = reg >> GS40G_PAGE_SHIFT;
12404 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12405 if (rv != 0)
12406 goto release;
12407
12408 /* Read reg */
12409 offset = reg & GS40G_OFFSET_MASK;
12410 rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
12411
12412 release:
12413 sc->phy.release(sc);
12414 return rv;
12415 }
12416
12417 /*
12418 * wm_gmii_gs40g_writereg: [mii interface function]
12419 *
12420 * Write a PHY register on the I210 and I211.
12421 * This could be handled by the PHY layer if we didn't have to lock the
12422 * resource ...
12423 */
12424 static int
12425 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
12426 {
12427 struct wm_softc *sc = device_private(dev);
12428 uint16_t page;
12429 int offset, rv;
12430
12431 /* Acquire semaphore */
12432 rv = sc->phy.acquire(sc);
12433 if (rv != 0) {
12434 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12435 return rv;
12436 }
12437
12438 /* Page select */
12439 page = reg >> GS40G_PAGE_SHIFT;
12440 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12441 if (rv != 0)
12442 goto release;
12443
12444 /* Write reg */
12445 offset = reg & GS40G_OFFSET_MASK;
12446 rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
12447
12448 release:
12449 /* Release semaphore */
12450 sc->phy.release(sc);
12451 return rv;
12452 }
12453
12454 /*
12455 * wm_gmii_statchg: [mii interface function]
12456 *
12457 * Callback from MII layer when media changes.
12458 */
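/*
 * The negotiated IFM_ETH_TXPAUSE/IFM_ETH_RXPAUSE flags are mapped onto
 * the MAC's CTRL_TFCE/CTRL_RFCE bits and the XON enable in FCRTL; the
 * collision distance in TCTL follows the duplex setting.
 */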
12459 static void
12460 wm_gmii_statchg(struct ifnet *ifp)
12461 {
12462 struct wm_softc *sc = ifp->if_softc;
12463 struct mii_data *mii = &sc->sc_mii;
12464
12465 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
12466 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12467 sc->sc_fcrtl &= ~FCRTL_XONE;
12468
12469 /* Get flow control negotiation result. */
12470 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
12471 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
12472 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
12473 mii->mii_media_active &= ~IFM_ETH_FMASK;
12474 }
12475
12476 if (sc->sc_flowflags & IFM_FLOW) {
12477 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
12478 sc->sc_ctrl |= CTRL_TFCE;
12479 sc->sc_fcrtl |= FCRTL_XONE;
12480 }
12481 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
12482 sc->sc_ctrl |= CTRL_RFCE;
12483 }
12484
12485 if (mii->mii_media_active & IFM_FDX) {
12486 DPRINTF(sc, WM_DEBUG_LINK,
12487 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
12488 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12489 } else {
12490 DPRINTF(sc, WM_DEBUG_LINK,
12491 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
12492 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12493 }
12494
12495 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12496 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12497 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
12498 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
12499 if (sc->sc_type == WM_T_80003) {
12500 switch (IFM_SUBTYPE(mii->mii_media_active)) {
12501 case IFM_1000_T:
12502 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12503 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
12504 sc->sc_tipg = TIPG_1000T_80003_DFLT;
12505 break;
12506 default:
12507 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12508 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
12509 sc->sc_tipg = TIPG_10_100_80003_DFLT;
12510 break;
12511 }
12512 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
12513 }
12514 }
12515
12516 /* kumeran related (80003, ICH* and PCH*) */
12517
12518 /*
12519 * wm_kmrn_readreg:
12520 *
12521 * Read a kumeran register
12522 */
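/*
 * Kumeran registers are reached through the single KUMCTRLSTA window:
 * the offset goes into the KUMCTRLSTA_OFFSET field, KUMCTRLSTA_REN
 * requests a read, and the 16-bit payload occupies KUMCTRLSTA_MASK.
 */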
12523 static int
12524 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
12525 {
12526 int rv;
12527
12528 if (sc->sc_type == WM_T_80003)
12529 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12530 else
12531 rv = sc->phy.acquire(sc);
12532 if (rv != 0) {
12533 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12534 __func__);
12535 return rv;
12536 }
12537
12538 rv = wm_kmrn_readreg_locked(sc, reg, val);
12539
12540 if (sc->sc_type == WM_T_80003)
12541 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12542 else
12543 sc->phy.release(sc);
12544
12545 return rv;
12546 }
12547
12548 static int
12549 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
12550 {
12551
12552 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12553 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
12554 KUMCTRLSTA_REN);
12555 CSR_WRITE_FLUSH(sc);
12556 delay(2);
12557
12558 *val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
12559
12560 return 0;
12561 }
12562
12563 /*
12564 * wm_kmrn_writereg:
12565 *
12566 * Write a kumeran register
12567 */
12568 static int
12569 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
12570 {
12571 int rv;
12572
12573 if (sc->sc_type == WM_T_80003)
12574 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12575 else
12576 rv = sc->phy.acquire(sc);
12577 if (rv != 0) {
12578 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12579 __func__);
12580 return rv;
12581 }
12582
12583 rv = wm_kmrn_writereg_locked(sc, reg, val);
12584
12585 if (sc->sc_type == WM_T_80003)
12586 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12587 else
12588 sc->phy.release(sc);
12589
12590 return rv;
12591 }
12592
12593 static int
12594 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
12595 {
12596
12597 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12598 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
12599
12600 return 0;
12601 }
12602
12603 /*
12604 * EMI register related (82579, WMPHY_I217 (PCH2 and newer)).
12605 * This access method is different from IEEE MMD.
12606 */
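/*
 * EMI access is a two-step indirection at PHY address 2: write the EMI
 * register number to I82579_EMI_ADDR, then move the payload through
 * I82579_EMI_DATA (see wm_access_emi_reg_locked() below).
 */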
12607 static int
12608 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
12609 {
12610 struct wm_softc *sc = device_private(dev);
12611 int rv;
12612
12613 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
12614 if (rv != 0)
12615 return rv;
12616
12617 if (rd)
12618 rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
12619 else
12620 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
12621 return rv;
12622 }
12623
12624 static int
12625 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
12626 {
12627
12628 return wm_access_emi_reg_locked(dev, reg, val, true);
12629 }
12630
12631 static int
12632 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
12633 {
12634
12635 return wm_access_emi_reg_locked(dev, reg, &val, false);
12636 }
12637
12638 /* SGMII related */
12639
12640 /*
12641 * wm_sgmii_uses_mdio
12642 *
12643 * Check whether the transaction is to the internal PHY or the external
12644 * MDIO interface. Return true if it's MDIO.
12645 */
12646 static bool
12647 wm_sgmii_uses_mdio(struct wm_softc *sc)
12648 {
12649 uint32_t reg;
12650 bool ismdio = false;
12651
12652 switch (sc->sc_type) {
12653 case WM_T_82575:
12654 case WM_T_82576:
12655 reg = CSR_READ(sc, WMREG_MDIC);
12656 ismdio = ((reg & MDIC_DEST) != 0);
12657 break;
12658 case WM_T_82580:
12659 case WM_T_I350:
12660 case WM_T_I354:
12661 case WM_T_I210:
12662 case WM_T_I211:
12663 reg = CSR_READ(sc, WMREG_MDICNFG);
12664 ismdio = ((reg & MDICNFG_DEST) != 0);
12665 break;
12666 default:
12667 break;
12668 }
12669
12670 return ismdio;
12671 }
12672
12673 /* Setup internal SGMII PHY for SFP */
12674 static void
12675 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
12676 {
12677 uint16_t id1, id2, phyreg;
12678 int i, rv;
12679
12680 if (((sc->sc_flags & WM_F_SGMII) == 0)
12681 || ((sc->sc_flags & WM_F_SFP) == 0))
12682 return;
12683
12684 for (i = 0; i < MII_NPHY; i++) {
12685 sc->phy.no_errprint = true;
12686 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
12687 if (rv != 0)
12688 continue;
12689 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
12690 if (rv != 0)
12691 continue;
12692 if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
12693 continue;
12694 sc->phy.no_errprint = false;
12695
12696 sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
12697 phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
12698 phyreg |= ESSR_SGMII_WOC_COPPER;
12699 sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
12700 break;
12701 }
12702
12703 }
12704
12705 /*
12706 * wm_sgmii_readreg: [mii interface function]
12707 *
12708 * Read a PHY register on the SGMII
12709 * This could be handled by the PHY layer if we didn't have to lock the
12710 * resource ...
12711 */
12712 static int
12713 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
12714 {
12715 struct wm_softc *sc = device_private(dev);
12716 int rv;
12717
12718 rv = sc->phy.acquire(sc);
12719 if (rv != 0) {
12720 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12721 return rv;
12722 }
12723
12724 rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
12725
12726 sc->phy.release(sc);
12727 return rv;
12728 }
12729
12730 static int
12731 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12732 {
12733 struct wm_softc *sc = device_private(dev);
12734 uint32_t i2ccmd;
12735 int i, rv = 0;
12736
12737 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12738 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12739 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12740
12741 /* Poll the ready bit */
12742 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12743 delay(50);
12744 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12745 if (i2ccmd & I2CCMD_READY)
12746 break;
12747 }
12748 if ((i2ccmd & I2CCMD_READY) == 0) {
12749 device_printf(dev, "I2CCMD Read did not complete\n");
12750 rv = ETIMEDOUT;
12751 }
12752 if ((i2ccmd & I2CCMD_ERROR) != 0) {
12753 if (!sc->phy.no_errprint)
12754 device_printf(dev, "I2CCMD Error bit set\n");
12755 rv = EIO;
12756 }
12757
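	/* The I2C interface returns the data byte-swapped; swap it back. */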
12758 *val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
12759
12760 return rv;
12761 }
12762
12763 /*
12764 * wm_sgmii_writereg: [mii interface function]
12765 *
12766 * Write a PHY register on the SGMII.
12767 * This could be handled by the PHY layer if we didn't have to lock the
12768 * resource ...
12769 */
12770 static int
12771 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
12772 {
12773 struct wm_softc *sc = device_private(dev);
12774 int rv;
12775
12776 rv = sc->phy.acquire(sc);
12777 if (rv != 0) {
12778 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12779 return rv;
12780 }
12781
12782 rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
12783
12784 sc->phy.release(sc);
12785
12786 return rv;
12787 }
12788
12789 static int
12790 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12791 {
12792 struct wm_softc *sc = device_private(dev);
12793 uint32_t i2ccmd;
12794 uint16_t swapdata;
12795 int rv = 0;
12796 int i;
12797
12798 /* Swap the data bytes for the I2C interface */
12799 swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
12800 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12801 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
12802 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12803
12804 /* Poll the ready bit */
12805 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12806 delay(50);
12807 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12808 if (i2ccmd & I2CCMD_READY)
12809 break;
12810 }
12811 if ((i2ccmd & I2CCMD_READY) == 0) {
12812 device_printf(dev, "I2CCMD Write did not complete\n");
12813 rv = ETIMEDOUT;
12814 }
12815 if ((i2ccmd & I2CCMD_ERROR) != 0) {
12816 device_printf(dev, "I2CCMD Error bit set\n");
12817 rv = EIO;
12818 }
12819
12820 return rv;
12821 }
12822
12823 /* TBI related */
12824
12825 static bool
12826 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
12827 {
12828 bool sig;
12829
12830 sig = ctrl & CTRL_SWDPIN(1);
12831
12832 /*
12833 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
12834 * detect a signal, 1 if they don't.
12835 */
12836 if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
12837 sig = !sig;
12838
12839 return sig;
12840 }
12841
12842 /*
12843 * wm_tbi_mediainit:
12844 *
12845 * Initialize media for use on 1000BASE-X devices.
12846 */
12847 static void
12848 wm_tbi_mediainit(struct wm_softc *sc)
12849 {
12850 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12851 const char *sep = "";
12852
12853 if (sc->sc_type < WM_T_82543)
12854 sc->sc_tipg = TIPG_WM_DFLT;
12855 else
12856 sc->sc_tipg = TIPG_LG_DFLT;
12857
12858 sc->sc_tbi_serdes_anegticks = 5;
12859
12860 /* Initialize our media structures */
12861 sc->sc_mii.mii_ifp = ifp;
12862 sc->sc_ethercom.ec_mii = &sc->sc_mii;
12863
12864 ifp->if_baudrate = IF_Gbps(1);
12865 if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
12866 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12867 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12868 wm_serdes_mediachange, wm_serdes_mediastatus,
12869 sc->sc_core_lock);
12870 } else {
12871 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12872 wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
12873 }
12874
12875 /*
12876 * SWD Pins:
12877 *
12878 * 0 = Link LED (output)
12879 * 1 = Loss Of Signal (input)
12880 */
12881 sc->sc_ctrl |= CTRL_SWDPIO(0);
12882
12883 /* XXX Perhaps this is only for TBI */
12884 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12885 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
12886
12887 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
12888 sc->sc_ctrl &= ~CTRL_LRST;
12889
12890 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12891
12892 #define ADD(ss, mm, dd) \
12893 do { \
12894 aprint_normal("%s%s", sep, ss); \
12895 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
12896 sep = ", "; \
12897 } while (/*CONSTCOND*/0)
12898
12899 aprint_normal_dev(sc->sc_dev, "");
12900
12901 if (sc->sc_type == WM_T_I354) {
12902 uint32_t status;
12903
12904 status = CSR_READ(sc, WMREG_STATUS);
12905 if (((status & STATUS_2P5_SKU) != 0)
12906 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,
			    ANAR_X_FD);
		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,
			    ANAR_X_FD);
12910 } else if (sc->sc_type == WM_T_82545) {
12911 /* Only 82545 is LX (XXX except SFP) */
12912 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12913 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12914 } else if (sc->sc_sfptype != 0) {
12915 /* XXX wm(4) fiber/serdes don't use ifm_data */
12916 switch (sc->sc_sfptype) {
12917 default:
12918 case SFF_SFP_ETH_FLAGS_1000SX:
12919 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12920 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12921 break;
12922 case SFF_SFP_ETH_FLAGS_1000LX:
12923 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12924 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12925 break;
12926 case SFF_SFP_ETH_FLAGS_1000CX:
12927 ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
12928 ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
12929 break;
12930 case SFF_SFP_ETH_FLAGS_1000T:
12931 ADD("1000baseT", IFM_1000_T, 0);
12932 ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
12933 break;
12934 case SFF_SFP_ETH_FLAGS_100FX:
12935 ADD("100baseFX", IFM_100_FX, ANAR_TX);
12936 ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
12937 break;
12938 }
12939 } else {
12940 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12941 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12942 }
12943 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
12944 aprint_normal("\n");
12945
12946 #undef ADD
12947
12948 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
12949 }
12950
12951 /*
12952 * wm_tbi_mediachange: [ifmedia interface function]
12953 *
12954 * Set hardware to newly-selected media on a 1000BASE-X device.
12955 */
12956 static int
12957 wm_tbi_mediachange(struct ifnet *ifp)
12958 {
12959 struct wm_softc *sc = ifp->if_softc;
12960 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12961 uint32_t status, ctrl;
12962 bool signal;
12963 int i;
12964
12965 KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
12966 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12967 /* XXX need some work for >= 82571 and < 82575 */
12968 if (sc->sc_type < WM_T_82575)
12969 return 0;
12970 }
12971
12972 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12973 || (sc->sc_type >= WM_T_82575))
12974 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12975
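	/*
	 * Take the link out of reset and build the Transmit Config Word
	 * (TXCW) that 1000BASE-X autonegotiation will advertise.
	 */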
12976 sc->sc_ctrl &= ~CTRL_LRST;
12977 sc->sc_txcw = TXCW_ANE;
12978 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12979 sc->sc_txcw |= TXCW_FD | TXCW_HD;
12980 else if (ife->ifm_media & IFM_FDX)
12981 sc->sc_txcw |= TXCW_FD;
12982 else
12983 sc->sc_txcw |= TXCW_HD;
12984
12985 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
12986 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
12987
12988 DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
12989 device_xname(sc->sc_dev), sc->sc_txcw));
12990 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12991 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12992 CSR_WRITE_FLUSH(sc);
12993 delay(1000);
12994
12995 ctrl = CSR_READ(sc, WMREG_CTRL);
12996 signal = wm_tbi_havesignal(sc, ctrl);
12997
12998 DPRINTF(sc, WM_DEBUG_LINK,
12999 ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
13000
13001 if (signal) {
13002 /* Have signal; wait for the link to come up. */
13003 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
13004 delay(10000);
13005 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
13006 break;
13007 }
13008
13009 DPRINTF(sc, WM_DEBUG_LINK,
13010 ("%s: i = %d after waiting for link\n",
13011 device_xname(sc->sc_dev), i));
13012
13013 status = CSR_READ(sc, WMREG_STATUS);
13014 DPRINTF(sc, WM_DEBUG_LINK,
13015 ("%s: status after final read = 0x%x, STATUS_LU = %#"
13016 __PRIxBIT "\n",
13017 device_xname(sc->sc_dev), status, STATUS_LU));
13018 if (status & STATUS_LU) {
13019 /* Link is up. */
13020 DPRINTF(sc, WM_DEBUG_LINK,
13021 ("%s: LINK: set media -> link up %s\n",
13022 device_xname(sc->sc_dev),
13023 (status & STATUS_FD) ? "FDX" : "HDX"));
13024
			/*
			 * NOTE: the hardware updates TFCE and RFCE in CTRL
			 * automatically, so re-read CTRL to keep sc_ctrl
			 * in sync.
			 */
13029 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
13030 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
13031 sc->sc_fcrtl &= ~FCRTL_XONE;
13032 if (status & STATUS_FD)
13033 sc->sc_tctl |=
13034 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
13035 else
13036 sc->sc_tctl |=
13037 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
13038 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
13039 sc->sc_fcrtl |= FCRTL_XONE;
13040 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
13041 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
13042 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
13043 sc->sc_tbi_linkup = 1;
13044 } else {
13045 if (i == WM_LINKUP_TIMEOUT)
13046 wm_check_for_link(sc);
13047 /* Link is down. */
13048 DPRINTF(sc, WM_DEBUG_LINK,
13049 ("%s: LINK: set media -> link down\n",
13050 device_xname(sc->sc_dev)));
13051 sc->sc_tbi_linkup = 0;
13052 }
13053 } else {
13054 DPRINTF(sc, WM_DEBUG_LINK,
13055 ("%s: LINK: set media -> no signal\n",
13056 device_xname(sc->sc_dev)));
13057 sc->sc_tbi_linkup = 0;
13058 }
13059
13060 wm_tbi_serdes_set_linkled(sc);
13061
13062 return 0;
13063 }
13064
13065 /*
13066 * wm_tbi_mediastatus: [ifmedia interface function]
13067 *
13068 * Get the current interface media status on a 1000BASE-X device.
13069 */
13070 static void
13071 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13072 {
13073 struct wm_softc *sc = ifp->if_softc;
13074 uint32_t ctrl, status;
13075
13076 ifmr->ifm_status = IFM_AVALID;
13077 ifmr->ifm_active = IFM_ETHER;
13078
13079 status = CSR_READ(sc, WMREG_STATUS);
13080 if ((status & STATUS_LU) == 0) {
13081 ifmr->ifm_active |= IFM_NONE;
13082 return;
13083 }
13084
13085 ifmr->ifm_status |= IFM_ACTIVE;
13086 /* Only 82545 is LX */
13087 if (sc->sc_type == WM_T_82545)
13088 ifmr->ifm_active |= IFM_1000_LX;
13089 else
13090 ifmr->ifm_active |= IFM_1000_SX;
	if (status & STATUS_FD)
13092 ifmr->ifm_active |= IFM_FDX;
13093 else
13094 ifmr->ifm_active |= IFM_HDX;
13095 ctrl = CSR_READ(sc, WMREG_CTRL);
13096 if (ctrl & CTRL_RFCE)
13097 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
13098 if (ctrl & CTRL_TFCE)
13099 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
13100 }
13101
13102 /* XXX TBI only */
13103 static int
13104 wm_check_for_link(struct wm_softc *sc)
13105 {
13106 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
13107 uint32_t rxcw;
13108 uint32_t ctrl;
13109 uint32_t status;
13110 bool signal;
13111
13112 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
13113 device_xname(sc->sc_dev), __func__));
13114
13115 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
13116 /* XXX need some work for >= 82571 */
13117 if (sc->sc_type >= WM_T_82571) {
13118 sc->sc_tbi_linkup = 1;
13119 return 0;
13120 }
13121 }
13122
13123 rxcw = CSR_READ(sc, WMREG_RXCW);
13124 ctrl = CSR_READ(sc, WMREG_CTRL);
13125 status = CSR_READ(sc, WMREG_STATUS);
13126 signal = wm_tbi_havesignal(sc, ctrl);
13127
13128 DPRINTF(sc, WM_DEBUG_LINK,
13129 ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
13130 device_xname(sc->sc_dev), __func__, signal,
13131 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
13132
13133 /*
13134 * SWDPIN LU RXCW
13135 * 0 0 0
13136 * 0 0 1 (should not happen)
13137 * 0 1 0 (should not happen)
13138 * 0 1 1 (should not happen)
13139 * 1 0 0 Disable autonego and force linkup
13140 * 1 0 1 got /C/ but not linkup yet
13141 * 1 1 0 (linkup)
13142 * 1 1 1 If IFM_AUTO, back to autonego
13143 *
13144 */
13145 if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
13146 DPRINTF(sc, WM_DEBUG_LINK,
13147 ("%s: %s: force linkup and fullduplex\n",
13148 device_xname(sc->sc_dev), __func__));
13149 sc->sc_tbi_linkup = 0;
13150 /* Disable auto-negotiation in the TXCW register */
13151 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
13152
13153 /*
13154 * Force link-up and also force full-duplex.
13155 *
		 * NOTE: the hardware updated TFCE and RFCE in CTRL
		 * automatically, so base sc->sc_ctrl on the value just read.
		 */
13159 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
13160 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13161 } else if (((status & STATUS_LU) != 0)
13162 && ((rxcw & RXCW_C) != 0)
13163 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
13164 sc->sc_tbi_linkup = 1;
13165 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
13166 device_xname(sc->sc_dev), __func__));
13167 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13168 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
13169 } else if (signal && ((rxcw & RXCW_C) != 0)) {
13170 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
13171 device_xname(sc->sc_dev), __func__));
13172 } else {
13173 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
13174 device_xname(sc->sc_dev), __func__, rxcw, ctrl,
13175 status));
13176 }
13177
13178 return 0;
13179 }
13180
13181 /*
13182 * wm_tbi_tick:
13183 *
13184 * Check the link on TBI devices.
13185 * This function acts as mii_tick().
13186 */
13187 static void
13188 wm_tbi_tick(struct wm_softc *sc)
13189 {
13190 struct mii_data *mii = &sc->sc_mii;
13191 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13192 uint32_t status;
13193
13194 KASSERT(mutex_owned(sc->sc_core_lock));
13195
13196 status = CSR_READ(sc, WMREG_STATUS);
13197
13198 /* XXX is this needed? */
13199 (void)CSR_READ(sc, WMREG_RXCW);
13200 (void)CSR_READ(sc, WMREG_CTRL);
13201
13202 /* set link status */
13203 if ((status & STATUS_LU) == 0) {
13204 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
13205 device_xname(sc->sc_dev)));
13206 sc->sc_tbi_linkup = 0;
13207 } else if (sc->sc_tbi_linkup == 0) {
13208 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
13209 device_xname(sc->sc_dev),
13210 (status & STATUS_FD) ? "FDX" : "HDX"));
13211 sc->sc_tbi_linkup = 1;
13212 sc->sc_tbi_serdes_ticks = 0;
13213 }
13214
13215 if ((sc->sc_if_flags & IFF_UP) == 0)
13216 goto setled;
13217
13218 if ((status & STATUS_LU) == 0) {
13219 sc->sc_tbi_linkup = 0;
13220 /* If the timer expired, retry autonegotiation */
13221 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13222 && (++sc->sc_tbi_serdes_ticks
13223 >= sc->sc_tbi_serdes_anegticks)) {
13224 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13225 device_xname(sc->sc_dev), __func__));
13226 sc->sc_tbi_serdes_ticks = 0;
13227 /*
13228 * Reset the link, and let autonegotiation do
13229 * its thing
13230 */
13231 sc->sc_ctrl |= CTRL_LRST;
13232 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13233 CSR_WRITE_FLUSH(sc);
13234 delay(1000);
13235 sc->sc_ctrl &= ~CTRL_LRST;
13236 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13237 CSR_WRITE_FLUSH(sc);
13238 delay(1000);
13239 CSR_WRITE(sc, WMREG_TXCW,
13240 sc->sc_txcw & ~TXCW_ANE);
13241 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13242 }
13243 }
13244
13245 setled:
13246 wm_tbi_serdes_set_linkled(sc);
13247 }
13248
13249 /* SERDES related */
13250 static void
13251 wm_serdes_power_up_link_82575(struct wm_softc *sc)
13252 {
13253 uint32_t reg;
13254
13255 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13256 && ((sc->sc_flags & WM_F_SGMII) == 0))
13257 return;
13258
13259 /* Enable PCS to turn on link */
13260 reg = CSR_READ(sc, WMREG_PCS_CFG);
13261 reg |= PCS_CFG_PCS_EN;
13262 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
13263
13264 /* Power up the laser */
13265 reg = CSR_READ(sc, WMREG_CTRL_EXT);
13266 reg &= ~CTRL_EXT_SWDPIN(3);
13267 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13268
13269 /* Flush the write to verify completion */
13270 CSR_WRITE_FLUSH(sc);
13271 delay(1000);
13272 }
13273
13274 static int
13275 wm_serdes_mediachange(struct ifnet *ifp)
13276 {
13277 struct wm_softc *sc = ifp->if_softc;
13278 bool pcs_autoneg = true; /* XXX */
13279 uint32_t ctrl_ext, pcs_lctl, reg;
13280
13281 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13282 && ((sc->sc_flags & WM_F_SGMII) == 0))
13283 return 0;
13284
13285 /* XXX Currently, this function is not called on 8257[12] */
13286 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
13287 || (sc->sc_type >= WM_T_82575))
13288 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
13289
	/* Power on the SFP cage if present */
13291 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13292 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13293 ctrl_ext |= CTRL_EXT_I2C_ENA;
13294 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13295
13296 sc->sc_ctrl |= CTRL_SLU;
13297
13298 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
13299 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
13300
13301 reg = CSR_READ(sc, WMREG_CONNSW);
13302 reg |= CONNSW_ENRGSRC;
13303 CSR_WRITE(sc, WMREG_CONNSW, reg);
13304 }
13305
13306 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
13307 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
13308 case CTRL_EXT_LINK_MODE_SGMII:
		/* SGMII mode lets the PHY handle forcing speed/duplex */
		pcs_autoneg = true;
		/* The autoneg timeout should be disabled for SGMII mode */
13312 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
13313 break;
13314 case CTRL_EXT_LINK_MODE_1000KX:
13315 pcs_autoneg = false;
13316 /* FALLTHROUGH */
13317 default:
13318 if ((sc->sc_type == WM_T_82575)
13319 || (sc->sc_type == WM_T_82576)) {
13320 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
13321 pcs_autoneg = false;
13322 }
13323 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
13324 | CTRL_FRCFDX;
13325
13326 /* Set speed of 1000/Full if speed/duplex is forced */
13327 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
13328 }
13329 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13330
13331 pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
13332 PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
13333
13334 if (pcs_autoneg) {
13335 /* Set PCS register for autoneg */
13336 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
13337
13338 /* Disable force flow control for autoneg */
13339 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
13340
13341 /* Configure flow control advertisement for autoneg */
13342 reg = CSR_READ(sc, WMREG_PCS_ANADV);
13343 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
13344 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
13345 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
13346 } else
13347 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
13348
13349 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
13350
13351 return 0;
13352 }
13353
13354 static void
13355 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13356 {
13357 struct wm_softc *sc = ifp->if_softc;
13358 struct mii_data *mii = &sc->sc_mii;
13359 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13360 uint32_t pcs_adv, pcs_lpab, reg;
13361
13362 ifmr->ifm_status = IFM_AVALID;
13363 ifmr->ifm_active = IFM_ETHER;
13364
13365 /* Check PCS */
13366 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13367 if ((reg & PCS_LSTS_LINKOK) == 0) {
13368 ifmr->ifm_active |= IFM_NONE;
13369 sc->sc_tbi_linkup = 0;
13370 goto setled;
13371 }
13372
13373 sc->sc_tbi_linkup = 1;
13374 ifmr->ifm_status |= IFM_ACTIVE;
13375 if (sc->sc_type == WM_T_I354) {
13376 uint32_t status;
13377
13378 status = CSR_READ(sc, WMREG_STATUS);
13379 if (((status & STATUS_2P5_SKU) != 0)
13380 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
13381 ifmr->ifm_active |= IFM_2500_KX;
13382 } else
13383 ifmr->ifm_active |= IFM_1000_KX;
13384 } else {
13385 switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
13386 case PCS_LSTS_SPEED_10:
13387 ifmr->ifm_active |= IFM_10_T; /* XXX */
13388 break;
13389 case PCS_LSTS_SPEED_100:
13390 ifmr->ifm_active |= IFM_100_FX; /* XXX */
13391 break;
13392 case PCS_LSTS_SPEED_1000:
13393 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13394 break;
13395 default:
13396 device_printf(sc->sc_dev, "Unknown speed\n");
13397 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13398 break;
13399 }
13400 }
13401 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
13402 if ((reg & PCS_LSTS_FDX) != 0)
13403 ifmr->ifm_active |= IFM_FDX;
13404 else
13405 ifmr->ifm_active |= IFM_HDX;
13406 mii->mii_media_active &= ~IFM_ETH_FMASK;
13407 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
13408 /* Check flow */
13409 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13410 if ((reg & PCS_LSTS_AN_COMP) == 0) {
13411 DPRINTF(sc, WM_DEBUG_LINK,
13412 ("XXX LINKOK but not ACOMP\n"));
13413 goto setled;
13414 }
13415 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
13416 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
13417 DPRINTF(sc, WM_DEBUG_LINK,
13418 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
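		/*
		 * Resolve flow control as in IEEE 802.3 autonegotiation:
		 * symmetric pause when both sides advertise SYM, otherwise
		 * TX-only or RX-only pause when the ASYM bits pair up.
		 */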
13419 if ((pcs_adv & TXCW_SYM_PAUSE)
13420 && (pcs_lpab & TXCW_SYM_PAUSE)) {
13421 mii->mii_media_active |= IFM_FLOW
13422 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
13423 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
13424 && (pcs_adv & TXCW_ASYM_PAUSE)
13425 && (pcs_lpab & TXCW_SYM_PAUSE)
13426 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13427 mii->mii_media_active |= IFM_FLOW
13428 | IFM_ETH_TXPAUSE;
13429 } else if ((pcs_adv & TXCW_SYM_PAUSE)
13430 && (pcs_adv & TXCW_ASYM_PAUSE)
13431 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
13432 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13433 mii->mii_media_active |= IFM_FLOW
13434 | IFM_ETH_RXPAUSE;
13435 }
13436 }
13437 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
13438 | (mii->mii_media_active & IFM_ETH_FMASK);
13439 setled:
13440 wm_tbi_serdes_set_linkled(sc);
13441 }
13442
13443 /*
13444 * wm_serdes_tick:
13445 *
13446 * Check the link on serdes devices.
13447 */
13448 static void
13449 wm_serdes_tick(struct wm_softc *sc)
13450 {
13451 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
13452 struct mii_data *mii = &sc->sc_mii;
13453 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13454 uint32_t reg;
13455
13456 KASSERT(mutex_owned(sc->sc_core_lock));
13457
13458 mii->mii_media_status = IFM_AVALID;
13459 mii->mii_media_active = IFM_ETHER;
13460
13461 /* Check PCS */
13462 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13463 if ((reg & PCS_LSTS_LINKOK) != 0) {
13464 mii->mii_media_status |= IFM_ACTIVE;
13465 sc->sc_tbi_linkup = 1;
13466 sc->sc_tbi_serdes_ticks = 0;
13467 mii->mii_media_active |= IFM_1000_SX; /* XXX */
13468 if ((reg & PCS_LSTS_FDX) != 0)
13469 mii->mii_media_active |= IFM_FDX;
13470 else
13471 mii->mii_media_active |= IFM_HDX;
13472 } else {
13473 mii->mii_media_status |= IFM_NONE;
13474 sc->sc_tbi_linkup = 0;
13475 /* If the timer expired, retry autonegotiation */
13476 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13477 && (++sc->sc_tbi_serdes_ticks
13478 >= sc->sc_tbi_serdes_anegticks)) {
13479 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13480 device_xname(sc->sc_dev), __func__));
13481 sc->sc_tbi_serdes_ticks = 0;
13482 /* XXX */
13483 wm_serdes_mediachange(ifp);
13484 }
13485 }
13486
13487 wm_tbi_serdes_set_linkled(sc);
13488 }
13489
13490 /* SFP related */
13491
13492 static int
13493 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
13494 {
13495 uint32_t i2ccmd;
13496 int i;
13497
13498 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
13499 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13500
13501 /* Poll the ready bit */
13502 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13503 delay(50);
13504 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13505 if (i2ccmd & I2CCMD_READY)
13506 break;
13507 }
13508 if ((i2ccmd & I2CCMD_READY) == 0)
13509 return -1;
13510 if ((i2ccmd & I2CCMD_ERROR) != 0)
13511 return -1;
13512
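	/* The received byte is in the low 8 bits of I2CCMD. */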
13513 *data = i2ccmd & 0x00ff;
13514
13515 return 0;
13516 }
13517
13518 static uint32_t
13519 wm_sfp_get_media_type(struct wm_softc *sc)
13520 {
13521 uint32_t ctrl_ext;
13522 uint8_t val = 0;
13523 int timeout = 3;
13524 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
13525 int rv = -1;
13526
13527 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13528 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13529 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
13530 CSR_WRITE_FLUSH(sc);
13531
13532 /* Read SFP module data */
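	/*
	 * Byte 0 of the module EEPROM (SFF_SFP_ID_OFF) identifies the
	 * module type, per the SFF MSA layout.
	 */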
13533 while (timeout) {
13534 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
13535 if (rv == 0)
13536 break;
13537 delay(100*1000); /* XXX too big */
13538 timeout--;
13539 }
13540 if (rv != 0)
13541 goto out;
13542
13543 switch (val) {
13544 case SFF_SFP_ID_SFF:
13545 aprint_normal_dev(sc->sc_dev,
13546 "Module/Connector soldered to board\n");
13547 break;
13548 case SFF_SFP_ID_SFP:
13549 sc->sc_flags |= WM_F_SFP;
13550 break;
13551 case SFF_SFP_ID_UNKNOWN:
13552 goto out;
13553 default:
13554 break;
13555 }
13556
13557 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
13558 if (rv != 0)
13559 goto out;
13560
13561 sc->sc_sfptype = val;
13562 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
13563 mediatype = WM_MEDIATYPE_SERDES;
13564 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
13565 sc->sc_flags |= WM_F_SGMII;
13566 mediatype = WM_MEDIATYPE_COPPER;
13567 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
13568 sc->sc_flags |= WM_F_SGMII;
13569 mediatype = WM_MEDIATYPE_SERDES;
13570 } else {
13571 device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
13572 __func__, sc->sc_sfptype);
13573 sc->sc_sfptype = 0; /* XXX unknown */
13574 }
13575
13576 out:
13577 /* Restore I2C interface setting */
13578 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13579
13580 return mediatype;
13581 }
13582
13583 /*
13584 * NVM related.
13585 * Microwire, SPI (w/wo EERD) and Flash.
13586 */
13587
13588 /* Both spi and uwire */
13589
13590 /*
13591 * wm_eeprom_sendbits:
13592 *
13593 * Send a series of bits to the EEPROM.
13594 */
13595 static void
13596 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
13597 {
13598 uint32_t reg;
13599 int x;
13600
13601 reg = CSR_READ(sc, WMREG_EECD);
13602
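	/*
	 * Shift the bits out MSB first: present each bit on DI, then
	 * pulse SK high and low to clock it into the EEPROM.
	 */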
13603 for (x = nbits; x > 0; x--) {
13604 if (bits & (1U << (x - 1)))
13605 reg |= EECD_DI;
13606 else
13607 reg &= ~EECD_DI;
13608 CSR_WRITE(sc, WMREG_EECD, reg);
13609 CSR_WRITE_FLUSH(sc);
13610 delay(2);
13611 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13612 CSR_WRITE_FLUSH(sc);
13613 delay(2);
13614 CSR_WRITE(sc, WMREG_EECD, reg);
13615 CSR_WRITE_FLUSH(sc);
13616 delay(2);
13617 }
13618 }
13619
13620 /*
13621 * wm_eeprom_recvbits:
13622 *
13623 * Receive a series of bits from the EEPROM.
13624 */
13625 static void
13626 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
13627 {
13628 uint32_t reg, val;
13629 int x;
13630
13631 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
13632
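	/* Shift the bits in MSB first: raise SK, sample DO, drop SK. */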
13633 val = 0;
13634 for (x = nbits; x > 0; x--) {
13635 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13636 CSR_WRITE_FLUSH(sc);
13637 delay(2);
13638 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
13639 val |= (1U << (x - 1));
13640 CSR_WRITE(sc, WMREG_EECD, reg);
13641 CSR_WRITE_FLUSH(sc);
13642 delay(2);
13643 }
13644 *valp = val;
13645 }
13646
13647 /* Microwire */
13648
13649 /*
13650 * wm_nvm_read_uwire:
13651 *
13652 * Read a word from the EEPROM using the MicroWire protocol.
13653 */
13654 static int
13655 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13656 {
13657 uint32_t reg, val;
13658 int i, rv;
13659
13660 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13661 device_xname(sc->sc_dev), __func__));
13662
13663 rv = sc->nvm.acquire(sc);
13664 if (rv != 0)
13665 return rv;
13666
13667 for (i = 0; i < wordcnt; i++) {
13668 /* Clear SK and DI. */
13669 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
13670 CSR_WRITE(sc, WMREG_EECD, reg);
13671
13672 /*
13673 * XXX: workaround for a bug in qemu-0.12.x and prior
13674 * and Xen.
13675 *
13676 * We use this workaround only for 82540 because qemu's
13677 * e1000 act as 82540.
13678 */
13679 if (sc->sc_type == WM_T_82540) {
13680 reg |= EECD_SK;
13681 CSR_WRITE(sc, WMREG_EECD, reg);
13682 reg &= ~EECD_SK;
13683 CSR_WRITE(sc, WMREG_EECD, reg);
13684 CSR_WRITE_FLUSH(sc);
13685 delay(2);
13686 }
13687 /* XXX: end of workaround */
13688
13689 /* Set CHIP SELECT. */
13690 reg |= EECD_CS;
13691 CSR_WRITE(sc, WMREG_EECD, reg);
13692 CSR_WRITE_FLUSH(sc);
13693 delay(2);
13694
13695 /* Shift in the READ command. */
13696 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
13697
13698 /* Shift in address. */
13699 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
13700
13701 /* Shift out the data. */
13702 wm_eeprom_recvbits(sc, &val, 16);
13703 data[i] = val & 0xffff;
13704
13705 /* Clear CHIP SELECT. */
13706 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
13707 CSR_WRITE(sc, WMREG_EECD, reg);
13708 CSR_WRITE_FLUSH(sc);
13709 delay(2);
13710 }
13711
13712 sc->nvm.release(sc);
13713 return 0;
13714 }
13715
13716 /* SPI */
13717
13718 /*
13719 * Set SPI and FLASH related information from the EECD register.
13720 * For 82541 and 82547, the word size is taken from EEPROM.
13721 */
13722 static int
13723 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
13724 {
13725 int size;
13726 uint32_t reg;
13727 uint16_t data;
13728
13729 reg = CSR_READ(sc, WMREG_EECD);
13730 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
13731
13732 /* Read the size of NVM from EECD by default */
13733 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13734 switch (sc->sc_type) {
13735 case WM_T_82541:
13736 case WM_T_82541_2:
13737 case WM_T_82547:
13738 case WM_T_82547_2:
13739 /* Set dummy value to access EEPROM */
13740 sc->sc_nvm_wordsize = 64;
13741 if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
13742 aprint_error_dev(sc->sc_dev,
13743 "%s: failed to read EEPROM size\n", __func__);
13744 }
13745 reg = data;
13746 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13747 if (size == 0)
13748 size = 6; /* 64 word size */
13749 else
13750 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
13751 break;
13752 case WM_T_80003:
13753 case WM_T_82571:
13754 case WM_T_82572:
13755 case WM_T_82573: /* SPI case */
13756 case WM_T_82574: /* SPI case */
13757 case WM_T_82583: /* SPI case */
13758 size += NVM_WORD_SIZE_BASE_SHIFT;
13759 if (size > 14)
13760 size = 14;
13761 break;
13762 case WM_T_82575:
13763 case WM_T_82576:
13764 case WM_T_82580:
13765 case WM_T_I350:
13766 case WM_T_I354:
13767 case WM_T_I210:
13768 case WM_T_I211:
13769 size += NVM_WORD_SIZE_BASE_SHIFT;
13770 if (size > 15)
13771 size = 15;
13772 break;
13773 default:
13774 aprint_error_dev(sc->sc_dev,
13775 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
13778 }
13779
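	/* The size is an exponent; e.g. size 6 yields a 64 word NVM. */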
13780 sc->sc_nvm_wordsize = 1 << size;
13781
13782 return 0;
13783 }
13784
13785 /*
13786 * wm_nvm_ready_spi:
13787 *
13788 * Wait for a SPI EEPROM to be ready for commands.
13789 */
13790 static int
13791 wm_nvm_ready_spi(struct wm_softc *sc)
13792 {
13793 uint32_t val;
13794 int usec;
13795
13796 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13797 device_xname(sc->sc_dev), __func__));
13798
13799 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
13800 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
13801 wm_eeprom_recvbits(sc, &val, 8);
13802 if ((val & SPI_SR_RDY) == 0)
13803 break;
13804 }
13805 if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
13807 return -1;
13808 }
13809 return 0;
13810 }
13811
13812 /*
13813 * wm_nvm_read_spi:
13814 *
 * Read a word from the EEPROM using the SPI protocol.
13816 */
13817 static int
13818 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13819 {
13820 uint32_t reg, val;
13821 int i;
13822 uint8_t opc;
13823 int rv;
13824
13825 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13826 device_xname(sc->sc_dev), __func__));
13827
13828 rv = sc->nvm.acquire(sc);
13829 if (rv != 0)
13830 return rv;
13831
13832 /* Clear SK and CS. */
13833 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
13834 CSR_WRITE(sc, WMREG_EECD, reg);
13835 CSR_WRITE_FLUSH(sc);
13836 delay(2);
13837
13838 if ((rv = wm_nvm_ready_spi(sc)) != 0)
13839 goto out;
13840
13841 /* Toggle CS to flush commands. */
13842 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
13843 CSR_WRITE_FLUSH(sc);
13844 delay(2);
13845 CSR_WRITE(sc, WMREG_EECD, reg);
13846 CSR_WRITE_FLUSH(sc);
13847 delay(2);
13848
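	/*
	 * The EEPROM is byte addressed, so the word address is shifted
	 * left by one; small parts use 8 address bits and carry the ninth
	 * address bit (A8) in the opcode.
	 */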
13849 opc = SPI_OPC_READ;
13850 if (sc->sc_nvm_addrbits == 8 && word >= 128)
13851 opc |= SPI_OPC_A8;
13852
13853 wm_eeprom_sendbits(sc, opc, 8);
13854 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
13855
13856 for (i = 0; i < wordcnt; i++) {
13857 wm_eeprom_recvbits(sc, &val, 16);
13858 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
13859 }
13860
13861 /* Raise CS and clear SK. */
13862 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
13863 CSR_WRITE(sc, WMREG_EECD, reg);
13864 CSR_WRITE_FLUSH(sc);
13865 delay(2);
13866
13867 out:
13868 sc->nvm.release(sc);
13869 return rv;
13870 }
13871
13872 /* Using with EERD */
13873
13874 static int
13875 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
13876 {
13877 uint32_t attempts = 100000;
13878 uint32_t i, reg = 0;
13879 int32_t done = -1;
13880
13881 for (i = 0; i < attempts; i++) {
13882 reg = CSR_READ(sc, rw);
13883
13884 if (reg & EERD_DONE) {
13885 done = 0;
13886 break;
13887 }
13888 delay(5);
13889 }
13890
13891 return done;
13892 }
13893
13894 static int
13895 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
13896 {
13897 int i, eerd = 0;
13898 int rv;
13899
13900 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13901 device_xname(sc->sc_dev), __func__));
13902
13903 rv = sc->nvm.acquire(sc);
13904 if (rv != 0)
13905 return rv;
13906
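	/*
	 * Read each word by writing its address and the START bit to
	 * EERD, polling for DONE, then extracting the data field.
	 */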
13907 for (i = 0; i < wordcnt; i++) {
13908 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
13909 CSR_WRITE(sc, WMREG_EERD, eerd);
13910 rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
13911 if (rv != 0) {
13912 aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
13914 break;
13915 }
13916 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
13917 }
13918
13919 sc->nvm.release(sc);
13920 return rv;
13921 }
13922
13923 /* Flash */
13924
13925 static int
13926 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
13927 {
13928 uint32_t eecd;
13929 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
13930 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
13931 uint32_t nvm_dword = 0;
13932 uint8_t sig_byte = 0;
13933 int rv;
13934
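	/*
	 * Each flash bank carries a signature byte in the NVM signature
	 * word; the bank whose signature matches ICH_NVM_SIG_VALUE is the
	 * valid one.
	 */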
13935 switch (sc->sc_type) {
13936 case WM_T_PCH_SPT:
13937 case WM_T_PCH_CNP:
13938 bank1_offset = sc->sc_ich8_flash_bank_size * 2;
13939 act_offset = ICH_NVM_SIG_WORD * 2;
13940
13941 /* Set bank to 0 in case flash read fails. */
13942 *bank = 0;
13943
13944 /* Check bank 0 */
13945 rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
13946 if (rv != 0)
13947 return rv;
13948 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13949 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13950 *bank = 0;
13951 return 0;
13952 }
13953
13954 /* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13958 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13959 *bank = 1;
13960 return 0;
13961 }
13962 aprint_error_dev(sc->sc_dev,
13963 "%s: no valid NVM bank present (%u)\n", __func__, *bank);
13964 return -1;
13965 case WM_T_ICH8:
13966 case WM_T_ICH9:
13967 eecd = CSR_READ(sc, WMREG_EECD);
13968 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
13969 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
13970 return 0;
13971 }
13972 /* FALLTHROUGH */
13973 default:
13974 /* Default to 0 */
13975 *bank = 0;
13976
13977 /* Check bank 0 */
13978 wm_read_ich8_byte(sc, act_offset, &sig_byte);
13979 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13980 *bank = 0;
13981 return 0;
13982 }
13983
13984 /* Check bank 1 */
13985 wm_read_ich8_byte(sc, act_offset + bank1_offset,
13986 &sig_byte);
13987 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13988 *bank = 1;
13989 return 0;
13990 }
13991 }
13992
13993 DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
13994 device_xname(sc->sc_dev)));
13995 return -1;
13996 }
13997
13998 /******************************************************************************
13999 * This function does initial flash setup so that a new read/write/erase cycle
14000 * can be started.
14001 *
14002 * sc - The pointer to the hw structure
14003 ****************************************************************************/
14004 static int32_t
14005 wm_ich8_cycle_init(struct wm_softc *sc)
14006 {
14007 uint16_t hsfsts;
14008 int32_t error = 1;
14009 int32_t i = 0;
14010
14011 if (sc->sc_type >= WM_T_PCH_SPT)
14012 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
14013 else
14014 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14015
	/* Check that the Flash Descriptor Valid bit is set in HW status */
14017 if ((hsfsts & HSFSTS_FLDVAL) == 0)
14018 return error;
14019
	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
14022 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
14023
14024 if (sc->sc_type >= WM_T_PCH_SPT)
14025 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
14026 else
14027 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14028
	/*
	 * Either there should be a hardware SPI cycle-in-progress bit to
	 * check before starting a new cycle, or FDONE should be set by
	 * hardware after reset so that it indicates whether a cycle is in
	 * progress or has completed.  There should also be a software
	 * semaphore guarding FDONE or the in-progress bit so that two
	 * threads cannot start a cycle at the same time.
	 */
14039
14040 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14041 /*
14042 * There is no cycle running at present, so we can start a
14043 * cycle
14044 */
14045
14046 /* Begin by setting Flash Cycle Done. */
14047 hsfsts |= HSFSTS_DONE;
14048 if (sc->sc_type >= WM_T_PCH_SPT)
14049 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14050 hsfsts & 0xffffUL);
14051 else
14052 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14053 error = 0;
14054 } else {
14055 /*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before we give up.
14058 */
14059 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
14060 if (sc->sc_type >= WM_T_PCH_SPT)
14061 hsfsts = ICH8_FLASH_READ32(sc,
14062 ICH_FLASH_HSFSTS) & 0xffffUL;
14063 else
14064 hsfsts = ICH8_FLASH_READ16(sc,
14065 ICH_FLASH_HSFSTS);
14066 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14067 error = 0;
14068 break;
14069 }
14070 delay(1);
14071 }
14072 if (error == 0) {
14073 /*
			 * The previous cycle ended within the timeout; now
			 * set the Flash Cycle Done.
14076 */
14077 hsfsts |= HSFSTS_DONE;
14078 if (sc->sc_type >= WM_T_PCH_SPT)
14079 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14080 hsfsts & 0xffffUL);
14081 else
14082 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
14083 hsfsts);
14084 }
14085 }
14086 return error;
14087 }
14088
14089 /******************************************************************************
14090 * This function starts a flash cycle and waits for its completion
14091 *
14092 * sc - The pointer to the hw structure
14093 ****************************************************************************/
14094 static int32_t
14095 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
14096 {
14097 uint16_t hsflctl;
14098 uint16_t hsfsts;
14099 int32_t error = 1;
14100 uint32_t i = 0;
14101
14102 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
14103 if (sc->sc_type >= WM_T_PCH_SPT)
14104 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
14105 else
14106 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14107 hsflctl |= HSFCTL_GO;
14108 if (sc->sc_type >= WM_T_PCH_SPT)
14109 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14110 (uint32_t)hsflctl << 16);
14111 else
14112 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14113
14114 /* Wait till FDONE bit is set to 1 */
14115 do {
14116 if (sc->sc_type >= WM_T_PCH_SPT)
14117 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14118 & 0xffffUL;
14119 else
14120 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14121 if (hsfsts & HSFSTS_DONE)
14122 break;
14123 delay(1);
14124 i++;
14125 } while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
14127 error = 0;
14128
14129 return error;
14130 }
14131
14132 /******************************************************************************
14133 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
14134 *
14135 * sc - The pointer to the hw structure
14136 * index - The index of the byte or word to read.
14137 * size - Size of data to read, 1=byte 2=word, 4=dword
14138 * data - Pointer to the word to store the value read.
14139 *****************************************************************************/
14140 static int32_t
14141 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
14142 uint32_t size, uint32_t *data)
14143 {
14144 uint16_t hsfsts;
14145 uint16_t hsflctl;
14146 uint32_t flash_linear_address;
14147 uint32_t flash_data = 0;
14148 int32_t error = 1;
14149 int32_t count = 0;
14150
	if (size < 1 || size > 4 || data == NULL ||
14152 index > ICH_FLASH_LINEAR_ADDR_MASK)
14153 return error;
14154
14155 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
14156 sc->sc_ich8_flash_base;
14157
14158 do {
14159 delay(1);
14160 /* Steps */
14161 error = wm_ich8_cycle_init(sc);
14162 if (error)
14163 break;
14164
14165 if (sc->sc_type >= WM_T_PCH_SPT)
14166 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14167 >> 16;
14168 else
14169 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/*
		 * The byte count field holds size - 1:
		 * 0 = byte, 1 = word, 3 = dword.
		 */
14171 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
14172 & HSFCTL_BCOUNT_MASK;
14173 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
14174 if (sc->sc_type >= WM_T_PCH_SPT) {
14175 /*
14176 * In SPT, This register is in Lan memory space, not
14177 * flash. Therefore, only 32 bit access is supported.
14178 */
14179 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14180 (uint32_t)hsflctl << 16);
14181 } else
14182 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14183
14184 /*
14185 * Write the last 24 bits of index into Flash Linear address
14186 * field in Flash Address
14187 */
14188 /* TODO: TBD maybe check the index against the size of flash */
14189
14190 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
14191
14192 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
14193
14194 /*
		 * If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read the result out of Flash
		 * Data0, least significant byte first.
14199 */
14200 if (error == 0) {
14201 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
14202 if (size == 1)
14203 *data = (uint8_t)(flash_data & 0x000000FF);
14204 else if (size == 2)
14205 *data = (uint16_t)(flash_data & 0x0000FFFF);
14206 else if (size == 4)
14207 *data = (uint32_t)flash_data;
14208 break;
14209 } else {
14210 /*
14211 * If we've gotten here, then things are probably
14212 * completely hosed, but if the error condition is
14213 * detected, it won't hurt to give it another try...
14214 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
14215 */
14216 if (sc->sc_type >= WM_T_PCH_SPT)
14217 hsfsts = ICH8_FLASH_READ32(sc,
14218 ICH_FLASH_HSFSTS) & 0xffffUL;
14219 else
14220 hsfsts = ICH8_FLASH_READ16(sc,
14221 ICH_FLASH_HSFSTS);
14222
14223 if (hsfsts & HSFSTS_ERR) {
14224 /* Repeat for some time before giving up. */
14225 continue;
14226 } else if ((hsfsts & HSFSTS_DONE) == 0)
14227 break;
14228 }
14229 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
14230
14231 return error;
14232 }
14233
14234 /******************************************************************************
14235 * Reads a single byte from the NVM using the ICH8 flash access registers.
14236 *
14237 * sc - pointer to wm_hw structure
14238 * index - The index of the byte to read.
14239 * data - Pointer to a byte to store the value read.
14240 *****************************************************************************/
14241 static int32_t
14242 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
14243 {
14244 int32_t status;
14245 uint32_t word = 0;
14246
14247 status = wm_read_ich8_data(sc, index, 1, &word);
14248 if (status == 0)
14249 *data = (uint8_t)word;
14250 else
14251 *data = 0;
14252
14253 return status;
14254 }
14255
14256 /******************************************************************************
14257 * Reads a word from the NVM using the ICH8 flash access registers.
14258 *
14259 * sc - pointer to wm_hw structure
14260 * index - The starting byte index of the word to read.
14261 * data - Pointer to a word to store the value read.
14262 *****************************************************************************/
14263 static int32_t
14264 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
14265 {
14266 int32_t status;
14267 uint32_t word = 0;
14268
14269 status = wm_read_ich8_data(sc, index, 2, &word);
14270 if (status == 0)
14271 *data = (uint16_t)word;
14272 else
14273 *data = 0;
14274
14275 return status;
14276 }
14277
14278 /******************************************************************************
14279 * Reads a dword from the NVM using the ICH8 flash access registers.
14280 *
14281 * sc - pointer to wm_hw structure
14282 * index - The starting byte index of the word to read.
14283 * data - Pointer to a word to store the value read.
14284 *****************************************************************************/
14285 static int32_t
14286 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
14287 {
14288 int32_t status;
14289
14290 status = wm_read_ich8_data(sc, index, 4, data);
14291 return status;
14292 }
14293
14294 /******************************************************************************
14295 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
14296 * register.
14297 *
14298 * sc - Struct containing variables accessed by shared code
14299 * offset - offset of word in the EEPROM to read
14300 * data - word read from the EEPROM
14301 * words - number of words to read
14302 *****************************************************************************/
14303 static int
14304 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
14305 {
14306 int rv;
14307 uint32_t flash_bank = 0;
14308 uint32_t act_offset = 0;
14309 uint32_t bank_offset = 0;
14310 uint16_t word = 0;
14311 uint16_t i = 0;
14312
14313 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14314 device_xname(sc->sc_dev), __func__));
14315
14316 rv = sc->nvm.acquire(sc);
14317 if (rv != 0)
14318 return rv;
14319
14320 /*
14321 * We need to know which is the valid flash bank. In the event
14322 * that we didn't allocate eeprom_shadow_ram, we may not be
14323 * managing flash_bank. So it cannot be trusted and needs
14324 * to be updated with each read.
14325 */
14326 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14327 if (rv) {
14328 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14329 device_xname(sc->sc_dev)));
14330 flash_bank = 0;
14331 }
14332
14333 /*
14334 * Adjust offset appropriately if we're on bank 1 - adjust for word
14335 * size
14336 */
14337 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14338
14339 for (i = 0; i < words; i++) {
14340 /* The NVM part needs a byte offset, hence * 2 */
14341 act_offset = bank_offset + ((offset + i) * 2);
14342 rv = wm_read_ich8_word(sc, act_offset, &word);
14343 if (rv) {
14344 aprint_error_dev(sc->sc_dev,
14345 "%s: failed to read NVM\n", __func__);
14346 break;
14347 }
14348 data[i] = word;
14349 }
14350
14351 sc->nvm.release(sc);
14352 return rv;
14353 }
14354
14355 /******************************************************************************
14356 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
14357 * register.
14358 *
14359 * sc - Struct containing variables accessed by shared code
14360 * offset - offset of word in the EEPROM to read
14361 * data - word read from the EEPROM
14362 * words - number of words to read
14363 *****************************************************************************/
14364 static int
14365 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
14366 {
14367 int rv;
14368 uint32_t flash_bank = 0;
14369 uint32_t act_offset = 0;
14370 uint32_t bank_offset = 0;
14371 uint32_t dword = 0;
14372 uint16_t i = 0;
14373
14374 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14375 device_xname(sc->sc_dev), __func__));
14376
14377 rv = sc->nvm.acquire(sc);
14378 if (rv != 0)
14379 return rv;
14380
14381 /*
14382 * We need to know which is the valid flash bank. In the event
14383 * that we didn't allocate eeprom_shadow_ram, we may not be
14384 * managing flash_bank. So it cannot be trusted and needs
14385 * to be updated with each read.
14386 */
14387 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14388 if (rv) {
14389 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14390 device_xname(sc->sc_dev)));
14391 flash_bank = 0;
14392 }
14393
14394 /*
14395 * Adjust offset appropriately if we're on bank 1 - adjust for word
14396 * size
14397 */
14398 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14399
14400 for (i = 0; i < words; i++) {
14401 /* The NVM part needs a byte offset, hence * 2 */
14402 act_offset = bank_offset + ((offset + i) * 2);
14403 /* but we must read dword aligned, so mask ... */
14404 rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
14405 if (rv) {
14406 aprint_error_dev(sc->sc_dev,
14407 "%s: failed to read NVM\n", __func__);
14408 break;
14409 }
14410 /* ... and pick out low or high word */
14411 if ((act_offset & 0x2) == 0)
14412 data[i] = (uint16_t)(dword & 0xFFFF);
14413 else
14414 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
14415 }
14416
14417 sc->nvm.release(sc);
14418 return rv;
14419 }
14420
14421 /* iNVM */
14422
14423 static int
14424 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
14425 {
	int32_t rv = -1;	/* Assume "not found" until the word is located */
14427 uint32_t invm_dword;
14428 uint16_t i;
14429 uint8_t record_type, word_address;
14430
14431 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14432 device_xname(sc->sc_dev), __func__));
14433
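	/*
	 * iNVM is a sequence of dwords, each tagged with a record type:
	 * skip CSR and RSA key autoload records until a word autoload
	 * record matching the requested address is found.
	 */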
14434 for (i = 0; i < INVM_SIZE; i++) {
14435 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
14436 /* Get record type */
14437 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
14438 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
14439 break;
14440 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
14441 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
14442 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
14443 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
14444 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
14445 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
14446 if (word_address == address) {
14447 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
14448 rv = 0;
14449 break;
14450 }
14451 }
14452 }
14453
14454 return rv;
14455 }
14456
14457 static int
14458 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
14459 {
14460 int i, rv;
14461
14462 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14463 device_xname(sc->sc_dev), __func__));
14464
14465 rv = sc->nvm.acquire(sc);
14466 if (rv != 0)
14467 return rv;
14468
14469 for (i = 0; i < words; i++) {
14470 switch (offset + i) {
14471 case NVM_OFF_MACADDR:
14472 case NVM_OFF_MACADDR1:
14473 case NVM_OFF_MACADDR2:
14474 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14475 if (rv != 0) {
14476 data[i] = 0xffff;
14477 rv = -1;
14478 }
14479 break;
14480 case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
14481 rv = wm_nvm_read_word_invm(sc, offset, data);
14482 if (rv != 0) {
14483 *data = INVM_DEFAULT_AL;
14484 rv = 0;
14485 }
14486 break;
14487 case NVM_OFF_CFG2:
14488 rv = wm_nvm_read_word_invm(sc, offset, data);
14489 if (rv != 0) {
14490 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
14491 rv = 0;
14492 }
14493 break;
14494 case NVM_OFF_CFG4:
14495 rv = wm_nvm_read_word_invm(sc, offset, data);
14496 if (rv != 0) {
14497 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
14498 rv = 0;
14499 }
14500 break;
14501 case NVM_OFF_LED_1_CFG:
14502 rv = wm_nvm_read_word_invm(sc, offset, data);
14503 if (rv != 0) {
14504 *data = NVM_LED_1_CFG_DEFAULT_I211;
14505 rv = 0;
14506 }
14507 break;
14508 case NVM_OFF_LED_0_2_CFG:
14509 rv = wm_nvm_read_word_invm(sc, offset, data);
14510 if (rv != 0) {
14511 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
14512 rv = 0;
14513 }
14514 break;
14515 case NVM_OFF_ID_LED_SETTINGS:
14516 rv = wm_nvm_read_word_invm(sc, offset, data);
14517 if (rv != 0) {
14518 *data = ID_LED_RESERVED_FFFF;
14519 rv = 0;
14520 }
14521 break;
14522 default:
14523 DPRINTF(sc, WM_DEBUG_NVM,
14524 ("NVM word 0x%02x is not mapped.\n", offset));
14525 *data = NVM_RESERVED_WORD;
14526 break;
14527 }
14528 }
14529
14530 sc->nvm.release(sc);
14531 return rv;
14532 }
14533
14534 /* Lock, detecting NVM type, validate checksum, version and read */
14535
14536 static int
14537 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
14538 {
14539 uint32_t eecd = 0;
14540
14541 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
14542 || sc->sc_type == WM_T_82583) {
14543 eecd = CSR_READ(sc, WMREG_EECD);
14544
14545 /* Isolate bits 15 & 16 */
14546 eecd = ((eecd >> 15) & 0x03);
14547
14548 /* If both bits are set, device is Flash type */
14549 if (eecd == 0x03)
14550 return 0;
14551 }
14552 return 1;
14553 }
14554
14555 static int
14556 wm_nvm_flash_presence_i210(struct wm_softc *sc)
14557 {
14558 uint32_t eec;
14559
14560 eec = CSR_READ(sc, WMREG_EEC);
14561 if ((eec & EEC_FLASH_DETECTED) != 0)
14562 return 1;
14563
14564 return 0;
14565 }
14566
14567 /*
14568 * wm_nvm_validate_checksum
14569 *
 * The checksum is defined as the sum of the first 64 (16 bit) words;
 * the sum must equal NVM_CHECKSUM (0xbaba).
 */
14572 static int
14573 wm_nvm_validate_checksum(struct wm_softc *sc)
14574 {
14575 uint16_t checksum;
14576 uint16_t eeprom_data;
14577 #ifdef WM_DEBUG
14578 uint16_t csum_wordaddr, valid_checksum;
14579 #endif
14580 int i;
14581
14582 checksum = 0;
14583
14584 /* Don't check for I211 */
14585 if (sc->sc_type == WM_T_I211)
14586 return 0;
14587
14588 #ifdef WM_DEBUG
14589 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
14590 || (sc->sc_type == WM_T_PCH_CNP)) {
14591 csum_wordaddr = NVM_OFF_COMPAT;
14592 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
14593 } else {
14594 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
14595 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
14596 }
14597
14598 /* Dump EEPROM image for debug */
14599 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
14600 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
14601 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
14602 /* XXX PCH_SPT? */
14603 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
14604 if ((eeprom_data & valid_checksum) == 0)
14605 DPRINTF(sc, WM_DEBUG_NVM,
14606 ("%s: NVM need to be updated (%04x != %04x)\n",
14607 device_xname(sc->sc_dev), eeprom_data,
14608 valid_checksum));
14609 }
14610
14611 if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
14612 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
14613 for (i = 0; i < NVM_SIZE; i++) {
14614 if (wm_nvm_read(sc, i, 1, &eeprom_data))
14615 printf("XXXX ");
14616 else
14617 printf("%04hx ", eeprom_data);
14618 if (i % 8 == 7)
14619 printf("\n");
14620 }
14621 }
14622
14623 #endif /* WM_DEBUG */
14624
14625 for (i = 0; i < NVM_SIZE; i++) {
14626 if (wm_nvm_read(sc, i, 1, &eeprom_data))
14627 return -1;
14628 checksum += eeprom_data;
14629 }
14630
14631 if (checksum != (uint16_t) NVM_CHECKSUM) {
14632 #ifdef WM_DEBUG
14633 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
14634 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
14635 #endif
14636 }
14637
14638 return 0;
14639 }
14640
14641 static void
14642 wm_nvm_version_invm(struct wm_softc *sc)
14643 {
14644 uint32_t dword;
14645
14646 /*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and instead use word 61 as the document
	 * describes.  Perhaps it's not perfect though...
14650 *
14651 * Example:
14652 *
14653 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
14654 */
14655 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
14656 dword = __SHIFTOUT(dword, INVM_VER_1);
14657 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
14658 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
14659 }
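
/*
 * Worked decode of the example above, assuming the INVM_VER_1,
 * INVM_MAJOR and INVM_MINOR field layouts from if_wmreg.h:
 * word 61 = 0x00800030 -> __SHIFTOUT(dword, INVM_VER_1) = 0x006,
 * whose major field is 0 and minor field is 6, i.e. version 0.6.
 */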
14660
14661 static void
14662 wm_nvm_version(struct wm_softc *sc)
14663 {
14664 uint16_t major, minor, build, patch;
14665 uint16_t uid0, uid1;
14666 uint16_t nvm_data;
14667 uint16_t off;
14668 bool check_version = false;
14669 bool check_optionrom = false;
14670 bool have_build = false;
14671 bool have_uid = true;
14672
14673 /*
14674 * Version format:
14675 *
14676 * XYYZ
14677 * X0YZ
14678 * X0YY
14679 *
14680 * Example:
14681 *
14682 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
14683 * 82571 0x50a6 5.10.6?
14684 * 82572 0x506a 5.6.10?
14685 * 82572EI 0x5069 5.6.9?
14686 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
14687 * 0x2013 2.1.3?
14688 * 82583 0x10a0 1.10.0? (document says it's default value)
14689 * ICH8+82567 0x0040 0.4.0?
14690 * ICH9+82566 0x1040 1.4.0?
14691 * ICH10+82567 0x0043 0.4.3?
14692 * PCH+82577 0x00c1 0.12.1?
14693 * PCH2+82579 0x00d3 0.13.3?
14694 * 0x00d4 0.13.4?
14695 * LPT+I218 0x0023 0.2.3?
14696 * SPT+I219 0x0084 0.8.4?
14697 * CNP+I219 0x0054 0.5.4?
14698 */
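
/*
 * Worked decode for the table above (illustrative; this is how the
 * code below interprets the word, treating the minor field as BCD):
 * nvm_data = 0x50a2 -> major = 0x5, minor = 0x0a, build = 0x2; the
 * BCD-to-decimal step turns the minor into (0x0a / 16) * 10 +
 * (0x0a % 16) = 10, so the version prints as "5.10.2".
 */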
14699
14700 /*
14701 * XXX
14702 * The SPI ROM of Qemu's e1000e emulation (82574L) has only 64 words.
14703 * I've never seen real 82574 hardware with such a small SPI ROM.
14704 */
14705 if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
14706 || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
14707 have_uid = false;
14708
14709 switch (sc->sc_type) {
14710 case WM_T_82571:
14711 case WM_T_82572:
14712 case WM_T_82574:
14713 case WM_T_82583:
14714 check_version = true;
14715 check_optionrom = true;
14716 have_build = true;
14717 break;
14718 case WM_T_ICH8:
14719 case WM_T_ICH9:
14720 case WM_T_ICH10:
14721 case WM_T_PCH:
14722 case WM_T_PCH2:
14723 case WM_T_PCH_LPT:
14724 case WM_T_PCH_SPT:
14725 case WM_T_PCH_CNP:
14726 check_version = true;
14727 have_build = true;
14728 have_uid = false;
14729 break;
14730 case WM_T_82575:
14731 case WM_T_82576:
14732 case WM_T_82580:
14733 if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
14734 check_version = true;
14735 break;
14736 case WM_T_I211:
14737 wm_nvm_version_invm(sc);
14738 have_uid = false;
14739 goto printver;
14740 case WM_T_I210:
14741 if (!wm_nvm_flash_presence_i210(sc)) {
14742 wm_nvm_version_invm(sc);
14743 have_uid = false;
14744 goto printver;
14745 }
14746 /* FALLTHROUGH */
14747 case WM_T_I350:
14748 case WM_T_I354:
14749 check_version = true;
14750 check_optionrom = true;
14751 break;
14752 default:
14753 return;
14754 }
14755 if (check_version
14756 && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
14757 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
14758 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
14759 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
14760 build = nvm_data & NVM_BUILD_MASK;
14761 have_build = true;
14762 } else
14763 minor = nvm_data & 0x00ff;
14764
14765 /* Convert the BCD minor to decimal */
14766 minor = (minor / 16) * 10 + (minor % 16);
14767 sc->sc_nvm_ver_major = major;
14768 sc->sc_nvm_ver_minor = minor;
14769
14770 printver:
14771 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
14772 sc->sc_nvm_ver_minor);
14773 if (have_build) {
14774 sc->sc_nvm_ver_build = build;
14775 aprint_verbose(".%d", build);
14776 }
14777 }
14778
14779 /* Assume the Option ROM area starts above NVM_SIZE */
14780 if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
14781 && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
14782 /* Option ROM Version */
14783 if ((off != 0x0000) && (off != 0xffff)) {
14784 int rv;
14785
14786 off += NVM_COMBO_VER_OFF;
14787 rv = wm_nvm_read(sc, off + 1, 1, &uid1);
14788 rv |= wm_nvm_read(sc, off, 1, &uid0);
14789 if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
14790 && (uid1 != 0) && (uid1 != 0xffff)) {
14791 /* 16bits */
14792 major = uid0 >> 8;
14793 build = (uid0 << 8) | (uid1 >> 8);
14794 patch = uid1 & 0x00ff;
14795 aprint_verbose(", option ROM Version %d.%d.%d",
14796 major, build, patch);
14797 }
14798 }
14799 }
14800
14801 if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
14802 aprint_verbose(", Image Unique ID %08x",
14803 ((uint32_t)uid1 << 16) | uid0);
14804 }
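
/*
 * Worked example of the option ROM version decode in wm_nvm_version()
 * above (illustrative words, not from a real image): uid0 = 0x0102
 * and uid1 = 0x0304 give major = 0x01, build = (uint16_t)
 * ((0x0102 << 8) | (0x0304 >> 8)) = 0x0203 and patch = 0x04, printed
 * in decimal as "1.515.4".
 */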
14805
14806 /*
14807 * wm_nvm_read:
14808 *
14809 * Read data from the serial EEPROM.
14810 */
14811 static int
14812 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14813 {
14814 int rv;
14815
14816 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14817 device_xname(sc->sc_dev), __func__));
14818
14819 if (sc->sc_flags & WM_F_EEPROM_INVALID)
14820 return -1;
14821
14822 rv = sc->nvm.read(sc, word, wordcnt, data);
14823
14824 return rv;
14825 }
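
/*
 * Minimal usage sketch (illustrative only): read a single word and
 * print it.
 */
#if 0
	uint16_t ver;

	if (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &ver) == 0)
		printf("%s: version word = 0x%04hx\n",
		    device_xname(sc->sc_dev), ver);
#endif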
14826
14827 /*
14828 * Hardware semaphores.
14829 * Very complex...
14830 */
14831
14832 static int
14833 wm_get_null(struct wm_softc *sc)
14834 {
14835
14836 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14837 device_xname(sc->sc_dev), __func__));
14838 return 0;
14839 }
14840
14841 static void
14842 wm_put_null(struct wm_softc *sc)
14843 {
14844
14845 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14846 device_xname(sc->sc_dev), __func__));
14847 return;
14848 }
14849
14850 static int
14851 wm_get_eecd(struct wm_softc *sc)
14852 {
14853 uint32_t reg;
14854 int x;
14855
14856 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14857 device_xname(sc->sc_dev), __func__));
14858
14859 reg = CSR_READ(sc, WMREG_EECD);
14860
14861 /* Request EEPROM access. */
14862 reg |= EECD_EE_REQ;
14863 CSR_WRITE(sc, WMREG_EECD, reg);
14864
14865 /* ..and wait for it to be granted. */
14866 for (x = 0; x < 1000; x++) {
14867 reg = CSR_READ(sc, WMREG_EECD);
14868 if (reg & EECD_EE_GNT)
14869 break;
14870 delay(5);
14871 }
14872 if ((reg & EECD_EE_GNT) == 0) {
14873 aprint_error_dev(sc->sc_dev,
14874 "could not acquire EEPROM GNT\n");
14875 reg &= ~EECD_EE_REQ;
14876 CSR_WRITE(sc, WMREG_EECD, reg);
14877 return -1;
14878 }
14879
14880 return 0;
14881 }
14882
14883 static void
14884 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
14885 {
14886
14887 *eecd |= EECD_SK;
14888 CSR_WRITE(sc, WMREG_EECD, *eecd);
14889 CSR_WRITE_FLUSH(sc);
14890 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14891 delay(1);
14892 else
14893 delay(50);
14894 }
14895
14896 static void
14897 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
14898 {
14899
14900 *eecd &= ~EECD_SK;
14901 CSR_WRITE(sc, WMREG_EECD, *eecd);
14902 CSR_WRITE_FLUSH(sc);
14903 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14904 delay(1);
14905 else
14906 delay(50);
14907 }
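
/*
 * A full serial clock cycle is one raise/lower pair. A minimal sketch
 * (illustrative only, assuming the EECD_DO data-out bit from
 * if_wmreg.h) of shifting a single bit out of the NVM:
 */
#if 0
	uint32_t eecd = CSR_READ(sc, WMREG_EECD);
	int bit;

	wm_nvm_eec_clock_raise(sc, &eecd);
	bit = (CSR_READ(sc, WMREG_EECD) & EECD_DO) != 0;
	wm_nvm_eec_clock_lower(sc, &eecd);
#endif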
14908
14909 static void
14910 wm_put_eecd(struct wm_softc *sc)
14911 {
14912 uint32_t reg;
14913
14914 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14915 device_xname(sc->sc_dev), __func__));
14916
14917 /* Stop nvm */
14918 reg = CSR_READ(sc, WMREG_EECD);
14919 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
14920 /* Pull CS high */
14921 reg |= EECD_CS;
14922 wm_nvm_eec_clock_lower(sc, &reg);
14923 } else {
14924 /* CS on Microwire is active-high */
14925 reg &= ~(EECD_CS | EECD_DI);
14926 CSR_WRITE(sc, WMREG_EECD, reg);
14927 wm_nvm_eec_clock_raise(sc, &reg);
14928 wm_nvm_eec_clock_lower(sc, &reg);
14929 }
14930
14931 reg = CSR_READ(sc, WMREG_EECD);
14932 reg &= ~EECD_EE_REQ;
14933 CSR_WRITE(sc, WMREG_EECD, reg);
14934
14935 return;
14936 }
14937
14938 /*
14939 * Get hardware semaphore.
14940 * Same as e1000_get_hw_semaphore_generic()
14941 */
14942 static int
14943 wm_get_swsm_semaphore(struct wm_softc *sc)
14944 {
14945 int32_t timeout;
14946 uint32_t swsm;
14947
14948 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14949 device_xname(sc->sc_dev), __func__));
14950 KASSERT(sc->sc_nvm_wordsize > 0);
14951
14952 retry:
14953 /* Get the SW semaphore. */
14954 timeout = sc->sc_nvm_wordsize + 1;
14955 while (timeout) {
14956 swsm = CSR_READ(sc, WMREG_SWSM);
14957
14958 if ((swsm & SWSM_SMBI) == 0)
14959 break;
14960
14961 delay(50);
14962 timeout--;
14963 }
14964
14965 if (timeout == 0) {
14966 if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
14967 /*
14968 * In rare circumstances, the SW semaphore may already
14969 * be held unintentionally. Clear the semaphore once
14970 * before giving up.
14971 */
14972 sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
14973 wm_put_swsm_semaphore(sc);
14974 goto retry;
14975 }
14976 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
14977 return -1;
14978 }
14979
14980 /* Get the FW semaphore. */
14981 timeout = sc->sc_nvm_wordsize + 1;
14982 while (timeout) {
14983 swsm = CSR_READ(sc, WMREG_SWSM);
14984 swsm |= SWSM_SWESMBI;
14985 CSR_WRITE(sc, WMREG_SWSM, swsm);
14986 /* If we managed to set the bit we got the semaphore. */
14987 swsm = CSR_READ(sc, WMREG_SWSM);
14988 if (swsm & SWSM_SWESMBI)
14989 break;
14990
14991 delay(50);
14992 timeout--;
14993 }
14994
14995 if (timeout == 0) {
14996 aprint_error_dev(sc->sc_dev,
14997 "could not acquire SWSM SWESMBI\n");
14998 /* Release semaphores */
14999 wm_put_swsm_semaphore(sc);
15000 return -1;
15001 }
15002 return 0;
15003 }
15004
15005 /*
15006 * Put hardware semaphore.
15007 * Same as e1000_put_hw_semaphore_generic()
15008 */
15009 static void
15010 wm_put_swsm_semaphore(struct wm_softc *sc)
15011 {
15012 uint32_t swsm;
15013
15014 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15015 device_xname(sc->sc_dev), __func__));
15016
15017 swsm = CSR_READ(sc, WMREG_SWSM);
15018 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
15019 CSR_WRITE(sc, WMREG_SWSM, swsm);
15020 }
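
/*
 * Both stages taken in wm_get_swsm_semaphore() (SMBI and SWESMBI) are
 * dropped here, so callers simply pair the two functions; a minimal
 * sketch (illustrative only):
 */
#if 0
	if (wm_get_swsm_semaphore(sc) != 0)
		return -1;
	/* ... access hardware guarded by the semaphore ... */
	wm_put_swsm_semaphore(sc);
#endif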
15021
15022 /*
15023 * Get SW/FW semaphore.
15024 * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
15025 */
15026 static int
15027 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15028 {
15029 uint32_t swfw_sync;
15030 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
15031 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
15032 int timeout;
15033
15034 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15035 device_xname(sc->sc_dev), __func__));
15036
15037 if (sc->sc_type == WM_T_80003)
15038 timeout = 50;
15039 else
15040 timeout = 200;
15041
15042 while (timeout) {
15043 if (wm_get_swsm_semaphore(sc)) {
15044 aprint_error_dev(sc->sc_dev,
15045 "%s: failed to get semaphore\n",
15046 __func__);
15047 return -1;
15048 }
15049 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15050 if ((swfw_sync & (swmask | fwmask)) == 0) {
15051 swfw_sync |= swmask;
15052 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15053 wm_put_swsm_semaphore(sc);
15054 return 0;
15055 }
15056 wm_put_swsm_semaphore(sc);
15057 delay(5000);
15058 timeout--;
15059 }
15060 device_printf(sc->sc_dev,
15061 "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
15062 mask, swfw_sync);
15063 return -1;
15064 }
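
/*
 * Worked example of the mask arithmetic above (illustrative): for a
 * resource bit such as SWFW_PHY0_SM, the software-owned copy in
 * SW_FW_SYNC is (mask << SWFW_SOFT_SHIFT) and the firmware-owned copy
 * is (mask << SWFW_FIRM_SHIFT); the SW bit may be set, and the lock
 * taken, only while both copies are clear.
 */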
15065
15066 static void
15067 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15068 {
15069 uint32_t swfw_sync;
15070
15071 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15072 device_xname(sc->sc_dev), __func__));
15073
15074 while (wm_get_swsm_semaphore(sc) != 0)
15075 continue;
15076
15077 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15078 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
15079 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15080
15081 wm_put_swsm_semaphore(sc);
15082 }
15083
15084 static int
15085 wm_get_nvm_80003(struct wm_softc *sc)
15086 {
15087 int rv;
15088
15089 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
15090 device_xname(sc->sc_dev), __func__));
15091
15092 if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
15093 aprint_error_dev(sc->sc_dev,
15094 "%s: failed to get semaphore(SWFW)\n", __func__);
15095 return rv;
15096 }
15097
15098 if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15099 && (rv = wm_get_eecd(sc)) != 0) {
15100 aprint_error_dev(sc->sc_dev,
15101 "%s: failed to get semaphore(EECD)\n", __func__);
15102 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15103 return rv;
15104 }
15105
15106 return 0;
15107 }
15108
15109 static void
15110 wm_put_nvm_80003(struct wm_softc *sc)
15111 {
15112
15113 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15114 device_xname(sc->sc_dev), __func__));
15115
15116 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15117 wm_put_eecd(sc);
15118 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15119 }
15120
15121 static int
15122 wm_get_nvm_82571(struct wm_softc *sc)
15123 {
15124 int rv;
15125
15126 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15127 device_xname(sc->sc_dev), __func__));
15128
15129 if ((rv = wm_get_swsm_semaphore(sc)) != 0)
15130 return rv;
15131
15132 switch (sc->sc_type) {
15133 case WM_T_82573:
15134 break;
15135 default:
15136 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15137 rv = wm_get_eecd(sc);
15138 break;
15139 }
15140
15141 if (rv != 0) {
15142 aprint_error_dev(sc->sc_dev,
15143 "%s: failed to get semaphore\n",
15144 __func__);
15145 wm_put_swsm_semaphore(sc);
15146 }
15147
15148 return rv;
15149 }
15150
15151 static void
15152 wm_put_nvm_82571(struct wm_softc *sc)
15153 {
15154
15155 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15156 device_xname(sc->sc_dev), __func__));
15157
15158 switch (sc->sc_type) {
15159 case WM_T_82573:
15160 break;
15161 default:
15162 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15163 wm_put_eecd(sc);
15164 break;
15165 }
15166
15167 wm_put_swsm_semaphore(sc);
15168 }
15169
15170 static int
15171 wm_get_phy_82575(struct wm_softc *sc)
15172 {
15173
15174 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15175 device_xname(sc->sc_dev), __func__));
15176 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15177 }
15178
15179 static void
15180 wm_put_phy_82575(struct wm_softc *sc)
15181 {
15182
15183 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15184 device_xname(sc->sc_dev), __func__));
15185 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15186 }
15187
15188 static int
15189 wm_get_swfwhw_semaphore(struct wm_softc *sc)
15190 {
15191 uint32_t ext_ctrl;
15192 int timeout = 200;
15193
15194 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15195 device_xname(sc->sc_dev), __func__));
15196
15197 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15198 for (timeout = 0; timeout < 200; timeout++) {
15199 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15200 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15201 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15202
15203 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15204 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15205 return 0;
15206 delay(5000);
15207 }
15208 device_printf(sc->sc_dev,
15209 "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
15210 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15211 return -1;
15212 }
15213
15214 static void
15215 wm_put_swfwhw_semaphore(struct wm_softc *sc)
15216 {
15217 uint32_t ext_ctrl;
15218
15219 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15220 device_xname(sc->sc_dev), __func__));
15221
15222 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15223 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15224 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15225
15226 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15227 }
15228
15229 static int
15230 wm_get_swflag_ich8lan(struct wm_softc *sc)
15231 {
15232 uint32_t ext_ctrl;
15233 int timeout;
15234
15235 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15236 device_xname(sc->sc_dev), __func__));
15237 mutex_enter(sc->sc_ich_phymtx);
15238 for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
15239 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15240 if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
15241 break;
15242 delay(1000);
15243 }
15244 if (timeout >= WM_PHY_CFG_TIMEOUT) {
15245 device_printf(sc->sc_dev,
15246 "SW has already locked the resource\n");
15247 goto out;
15248 }
15249
15250 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15251 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15252 for (timeout = 0; timeout < 1000; timeout++) {
15253 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15254 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15255 break;
15256 delay(1000);
15257 }
15258 if (timeout >= 1000) {
15259 device_printf(sc->sc_dev, "failed to acquire semaphore\n");
15260 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15261 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15262 goto out;
15263 }
15264 return 0;
15265
15266 out:
15267 mutex_exit(sc->sc_ich_phymtx);
15268 return -1;
15269 }
15270
15271 static void
15272 wm_put_swflag_ich8lan(struct wm_softc *sc)
15273 {
15274 uint32_t ext_ctrl;
15275
15276 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15277 device_xname(sc->sc_dev), __func__));
15278 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15279 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
15280 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15281 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15282 } else
15283 device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
15284
15285 mutex_exit(sc->sc_ich_phymtx);
15286 }
15287
15288 static int
15289 wm_get_nvm_ich8lan(struct wm_softc *sc)
15290 {
15291
15292 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15293 device_xname(sc->sc_dev), __func__));
15294 mutex_enter(sc->sc_ich_nvmmtx);
15295
15296 return 0;
15297 }
15298
15299 static void
15300 wm_put_nvm_ich8lan(struct wm_softc *sc)
15301 {
15302
15303 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15304 device_xname(sc->sc_dev), __func__));
15305 mutex_exit(sc->sc_ich_nvmmtx);
15306 }
15307
15308 static int
15309 wm_get_hw_semaphore_82573(struct wm_softc *sc)
15310 {
15311 int i = 0;
15312 uint32_t reg;
15313
15314 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15315 device_xname(sc->sc_dev), __func__));
15316
15317 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15318 do {
15319 CSR_WRITE(sc, WMREG_EXTCNFCTR,
15320 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
15321 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15322 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
15323 break;
15324 delay(2*1000);
15325 i++;
15326 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
15327
15328 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
15329 wm_put_hw_semaphore_82573(sc);
15330 log(LOG_ERR, "%s: Driver can't access the PHY\n",
15331 device_xname(sc->sc_dev));
15332 return -1;
15333 }
15334
15335 return 0;
15336 }
15337
15338 static void
15339 wm_put_hw_semaphore_82573(struct wm_softc *sc)
15340 {
15341 uint32_t reg;
15342
15343 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15344 device_xname(sc->sc_dev), __func__));
15345
15346 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15347 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15348 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15349 }
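
/*
 * Usage sketch for the EXTCNFCTR MDIO ownership pair above
 * (illustrative only):
 */
#if 0
	if (wm_get_hw_semaphore_82573(sc) != 0)
		return -1;
	/* ... MDIO accesses ... */
	wm_put_hw_semaphore_82573(sc);
#endif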
15350
15351 /*
15352 * Management mode and power management related subroutines.
15353 * BMC, AMT, suspend/resume and EEE.
15354 */
15355
15356 #ifdef WM_WOL
15357 static int
15358 wm_check_mng_mode(struct wm_softc *sc)
15359 {
15360 int rv;
15361
15362 switch (sc->sc_type) {
15363 case WM_T_ICH8:
15364 case WM_T_ICH9:
15365 case WM_T_ICH10:
15366 case WM_T_PCH:
15367 case WM_T_PCH2:
15368 case WM_T_PCH_LPT:
15369 case WM_T_PCH_SPT:
15370 case WM_T_PCH_CNP:
15371 rv = wm_check_mng_mode_ich8lan(sc);
15372 break;
15373 case WM_T_82574:
15374 case WM_T_82583:
15375 rv = wm_check_mng_mode_82574(sc);
15376 break;
15377 case WM_T_82571:
15378 case WM_T_82572:
15379 case WM_T_82573:
15380 case WM_T_80003:
15381 rv = wm_check_mng_mode_generic(sc);
15382 break;
15383 default:
15384 /* Nothing to do */
15385 rv = 0;
15386 break;
15387 }
15388
15389 return rv;
15390 }
15391
15392 static int
15393 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
15394 {
15395 uint32_t fwsm;
15396
15397 fwsm = CSR_READ(sc, WMREG_FWSM);
15398
15399 if (((fwsm & FWSM_FW_VALID) != 0)
15400 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15401 return 1;
15402
15403 return 0;
15404 }
15405
15406 static int
15407 wm_check_mng_mode_82574(struct wm_softc *sc)
15408 {
15409 uint16_t data;
15410
15411 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15412
15413 if ((data & NVM_CFG2_MNGM_MASK) != 0)
15414 return 1;
15415
15416 return 0;
15417 }
15418
15419 static int
15420 wm_check_mng_mode_generic(struct wm_softc *sc)
15421 {
15422 uint32_t fwsm;
15423
15424 fwsm = CSR_READ(sc, WMREG_FWSM);
15425
15426 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
15427 return 1;
15428
15429 return 0;
15430 }
15431 #endif /* WM_WOL */
15432
15433 static int
15434 wm_enable_mng_pass_thru(struct wm_softc *sc)
15435 {
15436 uint32_t manc, fwsm, factps;
15437
15438 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
15439 return 0;
15440
15441 manc = CSR_READ(sc, WMREG_MANC);
15442
15443 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
15444 device_xname(sc->sc_dev), manc));
15445 if ((manc & MANC_RECV_TCO_EN) == 0)
15446 return 0;
15447
15448 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
15449 fwsm = CSR_READ(sc, WMREG_FWSM);
15450 factps = CSR_READ(sc, WMREG_FACTPS);
15451 if (((factps & FACTPS_MNGCG) == 0)
15452 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15453 return 1;
15454 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
15455 uint16_t data;
15456
15457 factps = CSR_READ(sc, WMREG_FACTPS);
15458 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15459 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
15460 device_xname(sc->sc_dev), factps, data));
15461 if (((factps & FACTPS_MNGCG) == 0)
15462 && ((data & NVM_CFG2_MNGM_MASK)
15463 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
15464 return 1;
15465 } else if (((manc & MANC_SMBUS_EN) != 0)
15466 && ((manc & MANC_ASF_EN) == 0))
15467 return 1;
15468
15469 return 0;
15470 }
15471
15472 static bool
15473 wm_phy_resetisblocked(struct wm_softc *sc)
15474 {
15475 bool blocked = false;
15476 uint32_t reg;
15477 int i = 0;
15478
15479 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15480 device_xname(sc->sc_dev), __func__));
15481
15482 switch (sc->sc_type) {
15483 case WM_T_ICH8:
15484 case WM_T_ICH9:
15485 case WM_T_ICH10:
15486 case WM_T_PCH:
15487 case WM_T_PCH2:
15488 case WM_T_PCH_LPT:
15489 case WM_T_PCH_SPT:
15490 case WM_T_PCH_CNP:
15491 do {
15492 reg = CSR_READ(sc, WMREG_FWSM);
15493 if ((reg & FWSM_RSPCIPHY) == 0) {
15494 blocked = true;
15495 delay(10*1000);
15496 continue;
15497 }
15498 blocked = false;
15499 } while (blocked && (i++ < 30));
15500 return blocked;
15502 case WM_T_82571:
15503 case WM_T_82572:
15504 case WM_T_82573:
15505 case WM_T_82574:
15506 case WM_T_82583:
15507 case WM_T_80003:
15508 reg = CSR_READ(sc, WMREG_MANC);
15509 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
15510 return true;
15511 else
15512 return false;
15513 break;
15514 default:
15515 /* No problem */
15516 break;
15517 }
15518
15519 return false;
15520 }
15521
15522 static void
15523 wm_get_hw_control(struct wm_softc *sc)
15524 {
15525 uint32_t reg;
15526
15527 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15528 device_xname(sc->sc_dev), __func__));
15529
15530 if (sc->sc_type == WM_T_82573) {
15531 reg = CSR_READ(sc, WMREG_SWSM);
15532 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
15533 } else if (sc->sc_type >= WM_T_82571) {
15534 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15535 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
15536 }
15537 }
15538
15539 static void
15540 wm_release_hw_control(struct wm_softc *sc)
15541 {
15542 uint32_t reg;
15543
15544 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15545 device_xname(sc->sc_dev), __func__));
15546
15547 if (sc->sc_type == WM_T_82573) {
15548 reg = CSR_READ(sc, WMREG_SWSM);
15549 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
15550 } else if (sc->sc_type >= WM_T_82571) {
15551 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15552 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
15553 }
15554 }
15555
15556 static void
15557 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
15558 {
15559 uint32_t reg;
15560
15561 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15562 device_xname(sc->sc_dev), __func__));
15563
15564 if (sc->sc_type < WM_T_PCH2)
15565 return;
15566
15567 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15568
15569 if (gate)
15570 reg |= EXTCNFCTR_GATE_PHY_CFG;
15571 else
15572 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
15573
15574 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15575 }
15576
15577 static int
15578 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
15579 {
15580 uint32_t fwsm, reg;
15581 int rv;
15582
15583 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15584 device_xname(sc->sc_dev), __func__));
15585
15586 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
15587 wm_gate_hw_phy_config_ich8lan(sc, true);
15588
15589 /* Disable ULP */
15590 wm_ulp_disable(sc);
15591
15592 /* Acquire PHY semaphore */
15593 rv = sc->phy.acquire(sc);
15594 if (rv != 0) {
15595 DPRINTF(sc, WM_DEBUG_INIT,
15596 ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
15597 return rv;
15598 }
15599
15600 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
15601 * inaccessible and resetting the PHY is not blocked, toggle the
15602 * LANPHYPC Value bit to force the interconnect to PCIe mode.
15603 */
15604 fwsm = CSR_READ(sc, WMREG_FWSM);
15605 switch (sc->sc_type) {
15606 case WM_T_PCH_LPT:
15607 case WM_T_PCH_SPT:
15608 case WM_T_PCH_CNP:
15609 if (wm_phy_is_accessible_pchlan(sc))
15610 break;
15611
15612 /* Before toggling LANPHYPC, see if PHY is accessible by
15613 * forcing MAC to SMBus mode first.
15614 */
15615 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15616 reg |= CTRL_EXT_FORCE_SMBUS;
15617 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15618 #if 0
15619 /* XXX Isn't this required??? */
15620 CSR_WRITE_FLUSH(sc);
15621 #endif
15622 /* Wait 50 milliseconds for MAC to finish any retries
15623 * that it might be trying to perform from previous
15624 * attempts to acknowledge any phy read requests.
15625 */
15626 delay(50 * 1000);
15627 /* FALLTHROUGH */
15628 case WM_T_PCH2:
15629 if (wm_phy_is_accessible_pchlan(sc) == true)
15630 break;
15631 /* FALLTHROUGH */
15632 case WM_T_PCH:
15633 if (sc->sc_type == WM_T_PCH)
15634 if ((fwsm & FWSM_FW_VALID) != 0)
15635 break;
15636
15637 if (wm_phy_resetisblocked(sc) == true) {
15638 device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
15639 break;
15640 }
15641
15642 /* Toggle LANPHYPC Value bit */
15643 wm_toggle_lanphypc_pch_lpt(sc);
15644
15645 if (sc->sc_type >= WM_T_PCH_LPT) {
15646 if (wm_phy_is_accessible_pchlan(sc) == true)
15647 break;
15648
15649 /* Toggling LANPHYPC brings the PHY out of SMBus mode
15650 * so ensure that the MAC is also out of SMBus mode
15651 */
15652 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15653 reg &= ~CTRL_EXT_FORCE_SMBUS;
15654 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15655
15656 if (wm_phy_is_accessible_pchlan(sc) == true)
15657 break;
15658 rv = -1;
15659 }
15660 break;
15661 default:
15662 break;
15663 }
15664
15665 /* Release semaphore */
15666 sc->phy.release(sc);
15667
15668 if (rv == 0) {
15669 /* Check to see if able to reset PHY. Print error if not */
15670 if (wm_phy_resetisblocked(sc)) {
15671 device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
15672 goto out;
15673 }
15674
15675 /* Reset the PHY before any access to it. Doing so, ensures
15676 * that the PHY is in a known good state before we read/write
15677 * PHY registers. The generic reset is sufficient here,
15678 * because we haven't determined the PHY type yet.
15679 */
15680 if (wm_reset_phy(sc) != 0)
15681 goto out;
15682
15683 /* On a successful reset, possibly need to wait for the PHY
15684 * to quiesce to an accessible state before returning control
15685 * to the calling function. If the PHY does not quiesce, then
15686 * return E1000E_BLK_PHY_RESET, as this is the condition that
15687 * the PHY is in.
15688 */
15689 if (wm_phy_resetisblocked(sc))
15690 device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
15691 }
15692
15693 out:
15694 /* Ungate automatic PHY configuration on non-managed 82579 */
15695 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
15696 delay(10*1000);
15697 wm_gate_hw_phy_config_ich8lan(sc, false);
15698 }
15699
15700 return rv;
15701 }
15702
15703 static void
15704 wm_init_manageability(struct wm_softc *sc)
15705 {
15706
15707 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15708 device_xname(sc->sc_dev), __func__));
15709 KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
15710
15711 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15712 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
15713 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15714
15715 /* Disable hardware interception of ARP */
15716 manc &= ~MANC_ARP_EN;
15717
15718 /* Enable receiving management packets to the host */
15719 if (sc->sc_type >= WM_T_82571) {
15720 manc |= MANC_EN_MNG2HOST;
15721 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
15722 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
15723 }
15724
15725 CSR_WRITE(sc, WMREG_MANC, manc);
15726 }
15727 }
15728
15729 static void
15730 wm_release_manageability(struct wm_softc *sc)
15731 {
15732
15733 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15734 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15735
15736 manc |= MANC_ARP_EN;
15737 if (sc->sc_type >= WM_T_82571)
15738 manc &= ~MANC_EN_MNG2HOST;
15739
15740 CSR_WRITE(sc, WMREG_MANC, manc);
15741 }
15742 }
15743
15744 static void
15745 wm_get_wakeup(struct wm_softc *sc)
15746 {
15747
15748 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
15749 switch (sc->sc_type) {
15750 case WM_T_82573:
15751 case WM_T_82583:
15752 sc->sc_flags |= WM_F_HAS_AMT;
15753 /* FALLTHROUGH */
15754 case WM_T_80003:
15755 case WM_T_82575:
15756 case WM_T_82576:
15757 case WM_T_82580:
15758 case WM_T_I350:
15759 case WM_T_I354:
15760 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
15761 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
15762 /* FALLTHROUGH */
15763 case WM_T_82541:
15764 case WM_T_82541_2:
15765 case WM_T_82547:
15766 case WM_T_82547_2:
15767 case WM_T_82571:
15768 case WM_T_82572:
15769 case WM_T_82574:
15770 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15771 break;
15772 case WM_T_ICH8:
15773 case WM_T_ICH9:
15774 case WM_T_ICH10:
15775 case WM_T_PCH:
15776 case WM_T_PCH2:
15777 case WM_T_PCH_LPT:
15778 case WM_T_PCH_SPT:
15779 case WM_T_PCH_CNP:
15780 sc->sc_flags |= WM_F_HAS_AMT;
15781 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15782 break;
15783 default:
15784 break;
15785 }
15786
15787 /* 1: HAS_MANAGE */
15788 if (wm_enable_mng_pass_thru(sc) != 0)
15789 sc->sc_flags |= WM_F_HAS_MANAGE;
15790
15791 /*
15792 * Note that the WOL flag is set after the EEPROM has been reset.
15794 */
15795 }
15796
15797 /*
15798 * Unconfigure Ultra Low Power mode.
15799 * Only for I217 and newer (see below).
15800 */
15801 static int
15802 wm_ulp_disable(struct wm_softc *sc)
15803 {
15804 uint32_t reg;
15805 uint16_t phyreg;
15806 int i = 0, rv;
15807
15808 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15809 device_xname(sc->sc_dev), __func__));
15810 /* Exclude old devices */
15811 if ((sc->sc_type < WM_T_PCH_LPT)
15812 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
15813 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
15814 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
15815 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
15816 return 0;
15817
15818 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
15819 /* Request ME un-configure ULP mode in the PHY */
15820 reg = CSR_READ(sc, WMREG_H2ME);
15821 reg &= ~H2ME_ULP;
15822 reg |= H2ME_ENFORCE_SETTINGS;
15823 CSR_WRITE(sc, WMREG_H2ME, reg);
15824
15825 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
15826 while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
15827 if (i++ == 30) {
15828 device_printf(sc->sc_dev, "%s timed out\n",
15829 __func__);
15830 return -1;
15831 }
15832 delay(10 * 1000);
15833 }
15834 reg = CSR_READ(sc, WMREG_H2ME);
15835 reg &= ~H2ME_ENFORCE_SETTINGS;
15836 CSR_WRITE(sc, WMREG_H2ME, reg);
15837
15838 return 0;
15839 }
15840
15841 /* Acquire semaphore */
15842 rv = sc->phy.acquire(sc);
15843 if (rv != 0) {
15844 DPRINTF(sc, WM_DEBUG_INIT,
15845 ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
15846 return rv;
15847 }
15848
15849 /* Toggle LANPHYPC */
15850 wm_toggle_lanphypc_pch_lpt(sc);
15851
15852 /* Unforce SMBus mode in PHY */
15853 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
15854 if (rv != 0) {
15855 uint32_t reg2;
15856
15857 aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
15858 __func__);
15859 reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
15860 reg2 |= CTRL_EXT_FORCE_SMBUS;
15861 CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
15862 delay(50 * 1000);
15863
15864 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
15865 &phyreg);
15866 if (rv != 0)
15867 goto release;
15868 }
15869 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
15870 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
15871
15872 /* Unforce SMBus mode in MAC */
15873 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15874 reg &= ~CTRL_EXT_FORCE_SMBUS;
15875 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15876
15877 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
15878 if (rv != 0)
15879 goto release;
15880 phyreg |= HV_PM_CTRL_K1_ENA;
15881 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
15882
15883 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
15884 &phyreg);
15885 if (rv != 0)
15886 goto release;
15887 phyreg &= ~(I218_ULP_CONFIG1_IND
15888 | I218_ULP_CONFIG1_STICKY_ULP
15889 | I218_ULP_CONFIG1_RESET_TO_SMBUS
15890 | I218_ULP_CONFIG1_WOL_HOST
15891 | I218_ULP_CONFIG1_INBAND_EXIT
15892 | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
15893 | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
15894 | I218_ULP_CONFIG1_DIS_SMB_PERST);
15895 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15896 phyreg |= I218_ULP_CONFIG1_START;
15897 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15898
15899 reg = CSR_READ(sc, WMREG_FEXTNVM7);
15900 reg &= ~FEXTNVM7_DIS_SMB_PERST;
15901 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
15902
15903 release:
15904 /* Release semaphore */
15905 sc->phy.release(sc);
15906 wm_gmii_reset(sc);
15907 delay(50 * 1000);
15908
15909 return rv;
15910 }
15911
15912 /* WOL in the newer chipset interfaces (pchlan) */
15913 static int
15914 wm_enable_phy_wakeup(struct wm_softc *sc)
15915 {
15916 device_t dev = sc->sc_dev;
15917 uint32_t mreg, moff;
15918 uint16_t wuce, wuc, wufc, preg;
15919 int i, rv;
15920
15921 KASSERT(sc->sc_type >= WM_T_PCH);
15922
15923 /* Copy MAC RARs to PHY RARs */
15924 wm_copy_rx_addrs_to_phy_ich8lan(sc);
15925
15926 /* Activate PHY wakeup */
15927 rv = sc->phy.acquire(sc);
15928 if (rv != 0) {
15929 device_printf(dev, "%s: failed to acquire semaphore\n",
15930 __func__);
15931 return rv;
15932 }
15933
15934 /*
15935 * Enable access to PHY wakeup registers.
15936 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
15937 */
15938 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
15939 if (rv != 0) {
15940 device_printf(dev,
15941 "%s: Could not enable PHY wakeup reg access\n", __func__);
15942 goto release;
15943 }
15944
15945 /* Copy MAC MTA to PHY MTA */
15946 for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
15947 uint16_t lo, hi;
15948
15949 mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
15950 lo = (uint16_t)(mreg & 0xffff);
15951 hi = (uint16_t)((mreg >> 16) & 0xffff);
15952 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
15953 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
15954 }
15955
15956 /* Configure PHY Rx Control register */
15957 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
15958 mreg = CSR_READ(sc, WMREG_RCTL);
15959 if (mreg & RCTL_UPE)
15960 preg |= BM_RCTL_UPE;
15961 if (mreg & RCTL_MPE)
15962 preg |= BM_RCTL_MPE;
15963 preg &= ~(BM_RCTL_MO_MASK);
15964 moff = __SHIFTOUT(mreg, RCTL_MO);
15965 if (moff != 0)
15966 preg |= moff << BM_RCTL_MO_SHIFT;
15967 if (mreg & RCTL_BAM)
15968 preg |= BM_RCTL_BAM;
15969 if (mreg & RCTL_PMCF)
15970 preg |= BM_RCTL_PMCF;
15971 mreg = CSR_READ(sc, WMREG_CTRL);
15972 if (mreg & CTRL_RFCE)
15973 preg |= BM_RCTL_RFCE;
15974 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
15975
15976 wuc = WUC_APME | WUC_PME_EN;
15977 wufc = WUFC_MAG;
15978 /* Enable PHY wakeup in MAC register */
15979 CSR_WRITE(sc, WMREG_WUC,
15980 WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
15981 CSR_WRITE(sc, WMREG_WUFC, wufc);
15982
15983 /* Configure and enable PHY wakeup in PHY registers */
15984 wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
15985 wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
15986
15987 wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
15988 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15989
15990 release:
15991 sc->phy.release(sc);
15992
15993 return rv;
15994 }
15995
15996 /* Power down workaround on D3 */
15997 static void
15998 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
15999 {
16000 uint32_t reg;
16001 uint16_t phyreg;
16002 int i;
16003
16004 for (i = 0; i < 2; i++) {
16005 /* Disable link */
16006 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16007 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16008 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16009
16010 /*
16011 * Call gig speed drop workaround on Gig disable before
16012 * accessing any PHY registers
16013 */
16014 if (sc->sc_type == WM_T_ICH8)
16015 wm_gig_downshift_workaround_ich8lan(sc);
16016
16017 /* Write VR power-down enable */
16018 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16019 phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16020 phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
16021 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
16022
16023 /* Read it back and test */
16024 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16025 phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16026 if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
16027 break;
16028
16029 /* Issue PHY reset and repeat at most one more time */
16030 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
16031 }
16032 }
16033
16034 /*
16035 * wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
16036 * @sc: pointer to the HW structure
16037 *
16038 * During S0 to Sx transition, it is possible the link remains at gig
16039 * instead of negotiating to a lower speed. Before going to Sx, set
16040 * 'Gig Disable' to force link speed negotiation to a lower speed based on
16041 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
16042 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
16043 * needs to be written.
16044 * Parts that support (and are linked to a partner which supports) EEE in
16045 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
16046 * than 10Mbps w/o EEE.
16047 */
16048 static void
16049 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
16050 {
16051 device_t dev = sc->sc_dev;
16052 struct ethercom *ec = &sc->sc_ethercom;
16053 uint32_t phy_ctrl;
16054 int rv;
16055
16056 phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
16057 phy_ctrl |= PHY_CTRL_GBE_DIS;
16058
16059 KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
16060
16061 if (sc->sc_phytype == WMPHY_I217) {
16062 uint16_t devid = sc->sc_pcidevid;
16063
16064 if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
16065 (devid == PCI_PRODUCT_INTEL_I218_V) ||
16066 (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
16067 (devid == PCI_PRODUCT_INTEL_I218_V3) ||
16068 (sc->sc_type >= WM_T_PCH_SPT))
16069 CSR_WRITE(sc, WMREG_FEXTNVM6,
16070 CSR_READ(sc, WMREG_FEXTNVM6)
16071 & ~FEXTNVM6_REQ_PLL_CLK);
16072
16073 if (sc->phy.acquire(sc) != 0)
16074 goto out;
16075
16076 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16077 uint16_t eee_advert;
16078
16079 rv = wm_read_emi_reg_locked(dev,
16080 I217_EEE_ADVERTISEMENT, &eee_advert);
16081 if (rv)
16082 goto release;
16083
16084 /*
16085 * Disable LPLU if both link partners support 100BaseT
16086 * EEE and 100Full is advertised on both ends of the
16087 * link, and enable Auto Enable LPI since there will
16088 * be no driver to enable LPI while in Sx.
16089 */
16090 if ((eee_advert & AN_EEEADVERT_100_TX) &&
16091 (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
16092 uint16_t anar, phy_reg;
16093
16094 sc->phy.readreg_locked(dev, 2, MII_ANAR,
16095 &anar);
16096 if (anar & ANAR_TX_FD) {
16097 phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
16098 PHY_CTRL_NOND0A_LPLU);
16099
16100 /* Set Auto Enable LPI after link up */
16101 sc->phy.readreg_locked(dev, 2,
16102 I217_LPI_GPIO_CTRL, &phy_reg);
16103 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16104 sc->phy.writereg_locked(dev, 2,
16105 I217_LPI_GPIO_CTRL, phy_reg);
16106 }
16107 }
16108 }
16109
16110 /*
16111 * For i217 Intel Rapid Start Technology support,
16112 * when the system is going into Sx and no manageability engine
16113 * is present, the driver must configure proxy to reset only on
16114 * power good. LPI (Low Power Idle) state must also reset only
16115 * on power good, as well as the MTA (Multicast table array).
16116 * The SMBus release must also be disabled on LCD reset.
16117 */
16118
16119 /*
16120 * Enable MTA to reset for Intel Rapid Start Technology
16121 * Support
16122 */
16123
16124 release:
16125 sc->phy.release(sc);
16126 }
16127 out:
16128 CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
16129
16130 if (sc->sc_type == WM_T_ICH8)
16131 wm_gig_downshift_workaround_ich8lan(sc);
16132
16133 if (sc->sc_type >= WM_T_PCH) {
16134 wm_oem_bits_config_ich8lan(sc, false);
16135
16136 /* Reset PHY to activate OEM bits on 82577/8 */
16137 if (sc->sc_type == WM_T_PCH)
16138 wm_reset_phy(sc);
16139
16140 if (sc->phy.acquire(sc) != 0)
16141 return;
16142 wm_write_smbus_addr(sc);
16143 sc->phy.release(sc);
16144 }
16145 }
16146
16147 /*
16148 * wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
16149 * @sc: pointer to the HW structure
16150 *
16151 * During Sx to S0 transitions on non-managed devices or managed devices
16152 * on which PHY resets are not blocked, if the PHY registers cannot be
16153 * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
16154 * the PHY.
16155 * On i217, setup Intel Rapid Start Technology.
16156 */
16157 static int
16158 wm_resume_workarounds_pchlan(struct wm_softc *sc)
16159 {
16160 device_t dev = sc->sc_dev;
16161 int rv;
16162
16163 if (sc->sc_type < WM_T_PCH2)
16164 return 0;
16165
16166 rv = wm_init_phy_workarounds_pchlan(sc);
16167 if (rv != 0)
16168 return rv;
16169
16170 /* For i217 Intel Rapid Start Technology support when the system
16171 * is transitioning from Sx and no manageability engine is present
16172 * configure SMBus to restore on reset, disable proxy, and enable
16173 * the reset on MTA (Multicast table array).
16174 */
16175 if (sc->sc_phytype == WMPHY_I217) {
16176 uint16_t phy_reg;
16177
16178 rv = sc->phy.acquire(sc);
16179 if (rv != 0)
16180 return rv;
16181
16182 /* Clear Auto Enable LPI after link up */
16183 sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
16184 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16185 sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
16186
16187 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16188 /* Restore clear on SMB if no manageability engine
16189 * is present
16190 */
16191 rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
16192 &phy_reg);
16193 if (rv != 0)
16194 goto release;
16195 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
16196 sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
16197
16198 /* Disable Proxy */
16199 sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
16200 }
16201 /* Enable reset on MTA */
16202 rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
16203 if (rv != 0)
16204 goto release;
16205 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
16206 sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
16207
16208 release:
16209 sc->phy.release(sc);
16210 return rv;
16211 }
16212
16213 return 0;
16214 }
16215
16216 static void
16217 wm_enable_wakeup(struct wm_softc *sc)
16218 {
16219 uint32_t reg, pmreg;
16220 pcireg_t pmode;
16221 int rv = 0;
16222
16223 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16224 device_xname(sc->sc_dev), __func__));
16225
16226 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16227 &pmreg, NULL) == 0)
16228 return;
16229
16230 if ((sc->sc_flags & WM_F_WOL) == 0)
16231 goto pme;
16232
16233 /* Advertise the wakeup capability */
16234 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
16235 | CTRL_SWDPIN(3));
16236
16237 /* Keep the laser running on fiber adapters */
16238 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
16239 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
16240 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16241 reg |= CTRL_EXT_SWDPIN(3);
16242 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16243 }
16244
16245 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
16246 (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
16247 (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
16248 (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
16249 wm_suspend_workarounds_ich8lan(sc);
16250
16251 #if 0 /* For the multicast packet */
16252 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
16253 reg |= WUFC_MC;
16254 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
16255 #endif
16256
16257 if (sc->sc_type >= WM_T_PCH) {
16258 rv = wm_enable_phy_wakeup(sc);
16259 if (rv != 0)
16260 goto pme;
16261 } else {
16262 /* Enable wakeup by the MAC */
16263 CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
16264 CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
16265 }
16266
16267 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
16268 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
16269 || (sc->sc_type == WM_T_PCH2))
16270 && (sc->sc_phytype == WMPHY_IGP_3))
16271 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
16272
16273 pme:
16274 /* Request PME */
16275 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
16276 pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
16277 if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
16278 /* For WOL */
16279 pmode |= PCI_PMCSR_PME_EN;
16280 } else {
16281 /* Disable WOL */
16282 pmode &= ~PCI_PMCSR_PME_EN;
16283 }
16284 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
16285 }
16286
16287 /* Disable ASPM L0s and/or L1 for workaround */
16288 static void
16289 wm_disable_aspm(struct wm_softc *sc)
16290 {
16291 pcireg_t reg, mask = 0;
16292 const char *str = "";
16293
16294 /*
16295 * Only for PCIe device which has PCIe capability in the PCI config
16296 * space.
16297 */
16298 if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
16299 return;
16300
16301 switch (sc->sc_type) {
16302 case WM_T_82571:
16303 case WM_T_82572:
16304 /*
16305 * 8257[12] Errata 13: Device Does Not Support PCIe Active
16306 * State Power management L1 State (ASPM L1).
16307 */
16308 mask = PCIE_LCSR_ASPM_L1;
16309 str = "L1 is";
16310 break;
16311 case WM_T_82573:
16312 case WM_T_82574:
16313 case WM_T_82583:
16314 /*
16315 * The 82573 disappears when PCIe ASPM L0s is enabled.
16316 *
16317 * The 82574 and 82583 do not support PCIe ASPM L0s with
16318 * some chipsets. The documents for the 82574 and 82583 say
16319 * that disabling L0s on some specific chipsets is sufficient,
16320 * but we follow what the Intel em driver does.
16321 *
16322 * References:
16323 * Errata 8 of the Specification Update of i82573.
16324 * Errata 20 of the Specification Update of i82574.
16325 * Errata 9 of the Specification Update of i82583.
16326 */
16327 mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
16328 str = "L0s and L1 are";
16329 break;
16330 default:
16331 return;
16332 }
16333
16334 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16335 sc->sc_pcixe_capoff + PCIE_LCSR);
16336 reg &= ~mask;
16337 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16338 sc->sc_pcixe_capoff + PCIE_LCSR, reg);
16339
16340 /* Print only in wm_attach() */
16341 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
16342 aprint_verbose_dev(sc->sc_dev,
16343 "ASPM %s disabled to workaround the errata.\n", str);
16344 }
16345
16346 /* LPLU */
16347
16348 static void
16349 wm_lplu_d0_disable(struct wm_softc *sc)
16350 {
16351 struct mii_data *mii = &sc->sc_mii;
16352 uint32_t reg;
16353 uint16_t phyval;
16354
16355 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16356 device_xname(sc->sc_dev), __func__));
16357
16358 if (sc->sc_phytype == WMPHY_IFE)
16359 return;
16360
16361 switch (sc->sc_type) {
16362 case WM_T_82571:
16363 case WM_T_82572:
16364 case WM_T_82573:
16365 case WM_T_82575:
16366 case WM_T_82576:
16367 mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
16368 phyval &= ~PMR_D0_LPLU;
16369 mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
16370 break;
16371 case WM_T_82580:
16372 case WM_T_I350:
16373 case WM_T_I210:
16374 case WM_T_I211:
16375 reg = CSR_READ(sc, WMREG_PHPM);
16376 reg &= ~PHPM_D0A_LPLU;
16377 CSR_WRITE(sc, WMREG_PHPM, reg);
16378 break;
16379 case WM_T_82574:
16380 case WM_T_82583:
16381 case WM_T_ICH8:
16382 case WM_T_ICH9:
16383 case WM_T_ICH10:
16384 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16385 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
16386 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16387 CSR_WRITE_FLUSH(sc);
16388 break;
16389 case WM_T_PCH:
16390 case WM_T_PCH2:
16391 case WM_T_PCH_LPT:
16392 case WM_T_PCH_SPT:
16393 case WM_T_PCH_CNP:
16394 wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
16395 phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
16396 if (wm_phy_resetisblocked(sc) == false)
16397 phyval |= HV_OEM_BITS_ANEGNOW;
16398 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
16399 break;
16400 default:
16401 break;
16402 }
16403 }
16404
16405 /* EEE */
16406
16407 static int
16408 wm_set_eee_i350(struct wm_softc *sc)
16409 {
16410 struct ethercom *ec = &sc->sc_ethercom;
16411 uint32_t ipcnfg, eeer;
16412 uint32_t ipcnfg_mask
16413 = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
16414 uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
16415
16416 KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
16417
16418 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
16419 eeer = CSR_READ(sc, WMREG_EEER);
16420
16421 /* Enable or disable per user setting */
16422 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16423 ipcnfg |= ipcnfg_mask;
16424 eeer |= eeer_mask;
16425 } else {
16426 ipcnfg &= ~ipcnfg_mask;
16427 eeer &= ~eeer_mask;
16428 }
16429
16430 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
16431 CSR_WRITE(sc, WMREG_EEER, eeer);
16432 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
16433 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
16434
16435 return 0;
16436 }
16437
16438 static int
16439 wm_set_eee_pchlan(struct wm_softc *sc)
16440 {
16441 device_t dev = sc->sc_dev;
16442 struct ethercom *ec = &sc->sc_ethercom;
16443 uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
16444 int rv;
16445
16446 switch (sc->sc_phytype) {
16447 case WMPHY_82579:
16448 lpa = I82579_EEE_LP_ABILITY;
16449 pcs_status = I82579_EEE_PCS_STATUS;
16450 adv_addr = I82579_EEE_ADVERTISEMENT;
16451 break;
16452 case WMPHY_I217:
16453 lpa = I217_EEE_LP_ABILITY;
16454 pcs_status = I217_EEE_PCS_STATUS;
16455 adv_addr = I217_EEE_ADVERTISEMENT;
16456 break;
16457 default:
16458 return 0;
16459 }
16460
16461 rv = sc->phy.acquire(sc);
16462 if (rv != 0) {
16463 device_printf(dev, "%s: failed to get semaphore\n", __func__);
16464 return rv;
16465 }
16466
16467 rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
16468 if (rv != 0)
16469 goto release;
16470
16471 /* Clear bits that enable EEE in various speeds */
16472 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
16473
16474 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16475 /* Save off link partner's EEE ability */
16476 rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
16477 if (rv != 0)
16478 goto release;
16479
16480 /* Read EEE advertisement */
16481 if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
16482 goto release;
16483
16484 /*
16485 * Enable EEE only for speeds in which the link partner is
16486 * EEE capable and for which we advertise EEE.
16487 */
16488 if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
16489 lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
16490 if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
16491 sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
16492 if ((data & ANLPAR_TX_FD) != 0)
16493 lpi_ctrl |= I82579_LPI_CTRL_EN_100;
16494 else {
16495 /*
16496 * EEE is not supported in 100Half, so ignore
16497 * partner's EEE in 100 ability if full-duplex
16498 * is not advertised.
16499 */
16500 sc->eee_lp_ability
16501 &= ~AN_EEEADVERT_100_TX;
16502 }
16503 }
16504 }
16505
16506 if (sc->sc_phytype == WMPHY_82579) {
16507 rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
16508 if (rv != 0)
16509 goto release;
16510
16511 data &= ~I82579_LPI_PLL_SHUT_100;
16512 rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
16513 }
16514
16515 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
16516 if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
16517 goto release;
16518
16519 rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
16520 release:
16521 sc->phy.release(sc);
16522
16523 return rv;
16524 }
16525
16526 static int
16527 wm_set_eee(struct wm_softc *sc)
16528 {
16529 struct ethercom *ec = &sc->sc_ethercom;
16530
16531 if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
16532 return 0;
16533
16534 if (sc->sc_type == WM_T_I354) {
16535 /* I354 uses an external PHY */
16536 return 0; /* not yet */
16537 } else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
16538 return wm_set_eee_i350(sc);
16539 else if (sc->sc_type >= WM_T_PCH2)
16540 return wm_set_eee_pchlan(sc);
16541
16542 return 0;
16543 }
16544
16545 /*
16546 * Workarounds (mainly PHY related).
16547 * Basically, PHY's workarounds are in the PHY drivers.
16548 */
16549
16550 /* Workaround for 82566 Kumeran PCS lock loss */
16551 static int
16552 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
16553 {
16554 struct mii_data *mii = &sc->sc_mii;
16555 uint32_t status = CSR_READ(sc, WMREG_STATUS);
16556 int i, reg, rv;
16557 uint16_t phyreg;
16558
16559 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16560 device_xname(sc->sc_dev), __func__));
16561
16562 /* If the link is not up, do nothing */
16563 if ((status & STATUS_LU) == 0)
16564 return 0;
16565
16566 /* Nothing to do if the link is other than 1Gbps */
16567 if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
16568 return 0;
16569
16570 for (i = 0; i < 10; i++) {
16571 /* read twice */
16572 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16573 if (rv != 0)
16574 return rv;
16575 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16576 if (rv != 0)
16577 return rv;
16578
16579 if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
16580 goto out; /* GOOD! */
16581
16582 /* Reset the PHY */
16583 wm_reset_phy(sc);
16584 delay(5*1000);
16585 }
16586
16587 /* Disable GigE link negotiation */
16588 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16589 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16590 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16591
16592 /*
16593 * Call gig speed drop workaround on Gig disable before accessing
16594 * any PHY registers.
16595 */
16596 wm_gig_downshift_workaround_ich8lan(sc);
16597
16598 out:
16599 return 0;
16600 }
16601
16602 /*
16603 * wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
16604 * @sc: pointer to the HW structure
16605 *
16606 * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
16607 * LPLU, Gig disable, MDIC PHY reset):
16608 * 1) Set Kumeran Near-end loopback
16609 * 2) Clear Kumeran Near-end loopback
16610 * Should only be called for ICH8[m] devices with any 1G Phy.
16611 */
16612 static void
16613 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
16614 {
16615 uint16_t kmreg;
16616
16617 /* Only for igp3 */
16618 if (sc->sc_phytype == WMPHY_IGP_3) {
16619 if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
16620 return;
16621 kmreg |= KUMCTRLSTA_DIAG_NELPBK;
16622 if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
16623 return;
16624 kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
16625 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
16626 }
16627 }
16628
16629 /*
16630 * Workaround for pch's PHYs
16631 * XXX should be moved to new PHY driver?
16632 */
16633 static int
16634 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
16635 {
16636 device_t dev = sc->sc_dev;
16637 struct mii_data *mii = &sc->sc_mii;
16638 struct mii_softc *child;
16639 uint16_t phy_data, phyrev = 0;
16640 int phytype = sc->sc_phytype;
16641 int rv;
16642
16643 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16644 device_xname(dev), __func__));
16645 KASSERT(sc->sc_type == WM_T_PCH);
16646
16647 /* Set MDIO slow mode before any other MDIO access */
16648 if (phytype == WMPHY_82577)
16649 if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
16650 return rv;
16651
16652 child = LIST_FIRST(&mii->mii_phys);
16653 if (child != NULL)
16654 phyrev = child->mii_mpd_rev;
16655
16656 /* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
	if ((child != NULL) &&
	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
		((phytype == WMPHY_82578) && (phyrev == 1)))) {
		/* Disable generation of early preamble (0x4431) */
		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
		    &phy_data);
		if (rv != 0)
			return rv;
		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
		    phy_data);
		if (rv != 0)
			return rv;

		/* Preamble tuning for SSC */
		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
		if (rv != 0)
			return rv;
	}

	/* 82578 */
	if (phytype == WMPHY_82578) {
		/*
		 * Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register
		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
		 */
		if ((child != NULL) && (phyrev < 2)) {
			PHY_RESET(child);
			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
			if (rv != 0)
				return rv;
		}
	}

	/* Select page 0 */
	if ((rv = sc->phy.acquire(sc)) != 0)
		return rv;
	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
	sc->phy.release(sc);
	if (rv != 0)
		return rv;

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there is
	 * link, so that K1 is disabled when the link is at 1Gbps.
	 */
	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
		return rv;

	/* Workaround for link disconnects on a busy hub in half duplex */
	rv = sc->phy.acquire(sc);
	if (rv)
		return rv;
	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
	if (rv)
		goto release;
	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
	    phy_data & 0x00ff);
	if (rv)
		goto release;

	/* Set MSE higher to enable link to stay up when noise is high */
	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
release:
	sc->phy.release(sc);

	return rv;
}

/*
 * wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
 * @sc: pointer to the HW structure
 */
static void
wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
{

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	if (sc->phy.acquire(sc) != 0)
		return;

	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);

	sc->phy.release(sc);
}

static void
wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
{
	device_t dev = sc->sc_dev;
	uint32_t mac_reg;
	uint16_t i, wuce;
	int count;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(dev), __func__));

	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
		return;

	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
	count = wm_rar_count(sc);
	for (i = 0; i < count; i++) {
		uint16_t lo, hi;
		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
		lo = (uint16_t)(mac_reg & 0xffff);
		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);

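		/*
		 * For RAH, only the low 16 bits and the Address Valid
		 * flag are mirrored to the PHY; the AV bit lands in the
		 * PHY's RAR control word.
		 */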
		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
		lo = (uint16_t)(mac_reg & 0xffff);
		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
	}

	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
}

/*
 * wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
 * with 82579 PHY
 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
 */
static int
wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
{
	device_t dev = sc->sc_dev;
	int rar_count;
	int rv;
	uint32_t mac_reg;
	uint16_t dft_ctrl, data;
	uint16_t i;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(dev), __func__));

	if (sc->sc_type < WM_T_PCH2)
		return 0;

	/* Acquire PHY semaphore */
	rv = sc->phy.acquire(sc);
	if (rv != 0)
		return rv;

	/* Disable Rx path while enabling/disabling workaround */
	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
	if (rv != 0)
		goto out;
	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
	    dft_ctrl | (1 << 14));
	if (rv != 0)
		goto out;

	if (enable) {
		/* Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC
		 */
		rar_count = wm_rar_count(sc);
		for (i = 0; i < rar_count; i++) {
			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
			uint32_t addr_high, addr_low;

			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
			if (!(addr_high & RAL_AV))
				continue;
			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
			mac_addr[0] = (addr_low & 0xFF);
			mac_addr[1] = ((addr_low >> 8) & 0xFF);
			mac_addr[2] = ((addr_low >> 16) & 0xFF);
			mac_addr[3] = ((addr_low >> 24) & 0xFF);
			mac_addr[4] = (addr_high & 0xFF);
			mac_addr[5] = ((addr_high >> 8) & 0xFF);
			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
		}

		/* Write Rx addresses to the PHY */
		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
	}

	/*
	 * If enable ==
	 * true: Enable jumbo frame workaround in the MAC.
	 * false: Write MAC register values back to h/w defaults.
	 */
	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
	if (enable) {
		mac_reg &= ~(1 << 14);
		mac_reg |= (7 << 15);
	} else
		mac_reg &= ~(0xf << 14);
	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);

	mac_reg = CSR_READ(sc, WMREG_RCTL);
	if (enable) {
		mac_reg |= RCTL_SECRC;
		sc->sc_rctl |= RCTL_SECRC;
		sc->sc_flags |= WM_F_CRC_STRIP;
	} else {
		mac_reg &= ~RCTL_SECRC;
		sc->sc_rctl &= ~RCTL_SECRC;
		sc->sc_flags &= ~WM_F_CRC_STRIP;
	}
	CSR_WRITE(sc, WMREG_RCTL, mac_reg);

	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
	if (rv != 0)
		goto out;
	if (enable)
		data |= 1 << 0;
	else
		data &= ~(1 << 0);
	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
	if (rv != 0)
		goto out;

	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
	if (rv != 0)
		goto out;
	/*
	 * XXX Both FreeBSD and Linux write the same value here for the
	 * enable case and the disable case. Is that correct?
	 */
	data &= ~(0xf << 8);
	data |= (0xb << 8);
	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
	if (rv != 0)
		goto out;

	/*
	 * If enable ==
	 * true: Enable jumbo frame workaround in the PHY.
	 * false: Write PHY register values back to h/w defaults.
	 */
	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
	if (rv != 0)
		goto out;
	data &= ~(0x7F << 5);
	if (enable)
		data |= (0x37 << 5);
	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
	if (rv != 0)
		goto out;

	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
	if (rv != 0)
		goto out;
	if (enable)
		data &= ~(1 << 13);
	else
		data |= (1 << 13);
	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
	if (rv != 0)
		goto out;

	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
	if (rv != 0)
		goto out;
	data &= ~(0x3FF << 2);
	if (enable)
		data |= (I82579_TX_PTR_GAP << 2);
	else
		data |= (0x8 << 2);
	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
	if (rv != 0)
		goto out;

	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
	    enable ? 0xf100 : 0x7e00);
	if (rv != 0)
		goto out;

	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
	if (rv != 0)
		goto out;
	if (enable)
		data |= 1 << 10;
	else
		data &= ~(1 << 10);
	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
	if (rv != 0)
		goto out;

	/* Re-enable Rx path after enabling/disabling workaround */
	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
	    dft_ctrl & ~(1 << 14));

out:
	sc->phy.release(sc);

	return rv;
}

/*
 * wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 * done after every PHY reset.
 */
static int
wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
{
	device_t dev = sc->sc_dev;
	int rv;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH2);

	/* Set MDIO slow mode before any other MDIO access */
	rv = wm_set_mdio_slow_mode_hv(sc);
	if (rv != 0)
		return rv;

	rv = sc->phy.acquire(sc);
	if (rv != 0)
		return rv;
	/* Set MSE higher to enable link to stay up when noise is high */
	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
	if (rv != 0)
		goto release;
	/* Drop link after 5 times MSE threshold was reached */
	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
release:
	sc->phy.release(sc);

	return rv;
}

/**
 * wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 * @link: link up bool flag
 *
 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 * preventing further DMA write requests. Workaround the issue by disabling
 * the de-assertion of the clock request when in 1Gbps mode.
 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 * speeds in order to avoid Tx hangs.
 **/
static int
wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
{
	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
	uint32_t status = CSR_READ(sc, WMREG_STATUS);
	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
	uint16_t phyreg;

	if (link && (speed == STATUS_SPEED_1000)) {
		int rv;

		rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
		if (rv != 0)
			goto release;
		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
		if (rv != 0)
			goto release;
		delay(20);
		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);

		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
release:
		sc->phy.release(sc);
		return rv;
	}

	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;

	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (((child != NULL) && (child->mii_mpd_rev > 5))
	    || !link
	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
		goto update_fextnvm6;

	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);

	/* Clear link status transmit timeout */
	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
	if (speed == STATUS_SPEED_100) {
		/* Set inband Tx timeout to 5x10us for 100Half */
		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

		/* Do not extend the K1 entry latency for 100Half */
		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
	} else {
		/* Set inband Tx timeout to 50x10us for 10Full/Half */
		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

		/* Extend the K1 entry latency for 10 Mbps */
		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
	}

	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);

update_fextnvm6:
	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
	return 0;
}

/*
 * wm_k1_gig_workaround_hv - K1 Si workaround
 * @sc: pointer to the HW structure
 * @link: link up bool flag
 *
 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 * from a lower speed. This workaround disables K1 whenever the link is at 1Gig.
 * If link is down, the function will restore the default K1 setting located
 * in the NVM.
 */
static int
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;
	int rv;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	rv = sc->phy.acquire(sc);
	if (rv != 0)
		return rv;

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);
	sc->phy.release(sc);

	return 0;
}

/*
 * wm_k1_workaround_lv - K1 Si workaround
 * @sc: pointer to the HW structure
 *
 * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
 * Disable K1 for 1000 and 100 speeds.
 */
static int
wm_k1_workaround_lv(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t phyreg;
	int rv;

	if (sc->sc_type != WM_T_PCH2)
		return 0;

	/* Set K1 beacon duration based on 10Mbps speed */
	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
	if (rv != 0)
		return rv;

	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
		if (phyreg &
		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1Gbps/100Mbps packet drop issue workaround */
			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
			    &phyreg);
			if (rv != 0)
				return rv;
			phyreg &= ~HV_PM_CTRL_K1_ENA;
			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
			    phyreg);
			if (rv != 0)
				return rv;
		} else {
			/* For 10Mbps */
			reg = CSR_READ(sc, WMREG_FEXTNVM4);
			reg &= ~FEXTNVM4_BEACON_DURATION;
			reg |= FEXTNVM4_BEACON_DURATION_16US;
			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
		}
	}

	return 0;
}

/*
 * wm_link_stall_workaround_hv - Si workaround
 * @sc: pointer to the HW structure
 *
 * This function works around a Si bug where the link partner can get
 * a link up indication before the PHY does. If small packets are sent
 * by the link partner they can be placed in the packet buffer without
 * being properly accounted for by the PHY and will stall preventing
 * further packets from being received. The workaround is to clear the
 * packet buffer after the PHY detects link up.
 */
static int
wm_link_stall_workaround_hv(struct wm_softc *sc)
{
	uint16_t phyreg;

	if (sc->sc_phytype != WMPHY_82578)
		return 0;

	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
	if ((phyreg & BMCR_LOOP) != 0)
		return 0;

	/* Check if link is up and at 1Gbps */
	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
	    | BM_CS_STATUS_SPEED_MASK;
	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
		| BM_CS_STATUS_SPEED_1000))
		return 0;

	delay(200 * 1000); /* XXX too big */

	/* Flush the packets in the fifo buffer */
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
	    HV_MUX_DATA_CTRL_GEN_TO_MAC);

	return 0;
}

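/*
 * Put the PHY's MDIO interface into slow mode. As noted at the call sites
 * above, this must happen before any other MDIO access on the affected
 * PHYs.
 */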
static int
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	int rv;

	rv = sc->phy.acquire(sc);
	if (rv != 0) {
		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return rv;
	}

	rv = wm_set_mdio_slow_mode_hv_locked(sc);

	sc->phy.release(sc);

	return rv;
}

static int
wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
{
	int rv;
	uint16_t reg;

	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
	if (rv != 0)
		return rv;

	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

/*
 * wm_configure_k1_ich8lan - Configure K1 power state
 * @sc: pointer to the HW structure
 * @enable: K1 state to configure
 *
 * Configure the K1 power state based on the provided parameter.
 * Assumes semaphore already acquired.
 */
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmreg;
	int rv;

	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
	if (rv != 0)
		return;

	if (k1_enable)
		kmreg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmreg &= ~KUMCTRLSTA_K1_ENABLE;

	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
	if (rv != 0)
		return;

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	return;
}

/* special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

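/*
 * Restore the MDIO configuration after reset: on an 82580 in SGMII mode,
 * the external/shared MDIO selection is taken from the CFG3 word in the
 * port's NVM LAN-function area and written back to MDICNFG.
 */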
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if (sc->sc_type != WM_T_82580)
		return;
	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

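/*
 * Check whether the PHY answers on MDIO: read the PHY ID registers, retry
 * in MDIO slow mode if necessary, and on success (PCH_LPT and newer, with
 * ME inactive) unforce SMBus mode in both the PHY and the MAC.
 */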
static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t id1, id2;
	int i, rv;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
		    &id1);
		if ((rv != 0) || MII_INVALIDID(id1))
			continue;
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
		    &id2);
		if ((rv != 0) || MII_INVALIDID(id2))
			continue;
		break;
	}
	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	/*
	 * In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	rv = 0;
	if (sc->sc_type < WM_T_PCH_LPT) {
		wm_set_mdio_slow_mode_hv_locked(sc);
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
		    &id1);
		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
		    &id2);
	}
	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev, "XXX return with false\n");
		return false;
	}
out:
	if (sc->sc_type >= WM_T_PCH_LPT) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			uint16_t phyreg;

			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, phyreg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

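/*
 * Toggle the LANPHYPC value pin: drive it low under software override,
 * then release the override so the PHY is power-cycled. The PHY config
 * counter is first set to 50 msec to give the PHY time to reload its
 * configuration; PCH_LPT and newer poll CTRL_EXT_LPCD instead of relying
 * only on a fixed delay.
 */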
static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
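		/*
		 * The latency below is the time the Rx packet buffer (less
		 * two maximum-sized frames) takes to fill at the current
		 * link speed: bytes are converted to bits (* 8), and
		 * bits * 1000 divided by the speed in Mb/s gives ns.
		 */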
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
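		/*
		 * Worked example (hypothetical input; assuming LTRV_VALUE
		 * is the 10-bit value mask, 0x3ff): lat_ns = 100000 is
		 * reduced twice, to ceil(100000/32) = 3125 and then
		 * ceil(3125/32) = 98, so scale = 2 (2^10 ns units) and
		 * lat_enc encodes 98 * 1024 = 100352 ns >= 100000 ns.
		 */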

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d "
			    "(rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 *
 * Note that this function is called in both the FLASH and iNVM cases on
 * NetBSD.
 */
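/*
 * The workaround below reads the PHY's PLL frequency diagnostic register;
 * while it reads back as unconfigured, the internal PHY is reset, the
 * Initialization Control Word is rewritten with the PLL workaround value,
 * and the device is bounced through D3/D0, up to WM_MAX_PLL_TRIES times.
 */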
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	uint16_t phyval;
	bool wa_done = false;
	int i, rv = 0;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return -1;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
		/*
		 * The default value of the Initialization Control Word 1
		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
		 */
		nvmword = INVM_DEFAULT_AL;
	}
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

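/*
 * Quirk for legacy INTx interrupts on PCH_SPT/PCH_CNP: keep the side
 * clock ungated and disable IOSF sideband clock gating and clock
 * requests.
 */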
static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}

/* Sysctl functions */
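/*
 * The TDH/TDT handlers read the corresponding transmit descriptor head
 * and tail registers from the hardware each time the sysctl node is
 * queried, so the reported values are live rather than cached.
 */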
static int
wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

#ifdef WM_DEBUG
static int
wm_sysctl_debug(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
	uint32_t dflags;
	int error;

	dflags = sc->sc_debug;
	node.sysctl_data = &dflags;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error || newp == NULL)
		return error;

	sc->sc_debug = dflags;
	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));

	return 0;
}
#endif