/*	$NetBSD: if_wm.c,v 1.766 2022/10/26 06:36:39 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.766 2022/10/26 06:36:39 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define WM_DEBUG_LINK		__BIT(0)
#define WM_DEBUG_TX		__BIT(1)
#define WM_DEBUG_RX		__BIT(2)
#define WM_DEBUG_GMII		__BIT(3)
#define WM_DEBUG_MANAGE		__BIT(4)
#define WM_DEBUG_NVM		__BIT(5)
#define WM_DEBUG_INIT		__BIT(6)
#define WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	(WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK)
#endif

#define DPRINTF(sc, x, y)			\
	do {					\
		if ((sc)->sc_debug & (x))	\
			printf y;		\
	} while (0)
#else
#define DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
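
/*
 * Usage sketch (illustrative comment only, not driver code): the "y"
 * argument carries the whole printf() argument list in its own
 * parentheses, so the call compiles away to nothing when WM_DEBUG is
 * not defined:
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 */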

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts that this driver can use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define WM_NTXSEGS		64
#define WM_IFQUEUELEN		256
#define WM_TXQUEUELEN_MAX	64
#define WM_TXQUEUELEN_MAX_82547	16
#define WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define WM_NTXDESC_82542	256
#define WM_NTXDESC_82544	4096
#define WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
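
/*
 * A worked example of the ring arithmetic above (comment only): because
 * the ring sizes are powers of two, "(x + 1) & (n - 1)" equals
 * "(x + 1) % n" without a division.  With WM_NTXDESC(txq) == 4096:
 *
 *	WM_NEXTTX(txq, 0)    == 1
 *	WM_NEXTTX(txq, 4095) == 0	(wraps to the start of the ring)
 */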

#define WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

#define WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized packet.
 * We allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC		256U
#define WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif


typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC];	/* 82575 and newer */
} rxdescs_t;

#define WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};
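
/*
 * Note (an assumption drawn from the table's name, not from a datasheet):
 * wm_rxpbs_adjust_82580() below presumably indexes this table with the
 * RXPBS register field on 82580 to obtain the effective Rx packet buffer
 * size in KB, e.g. index 2 would map to 144.
 */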

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS	1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)
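
/*
 * A sketch of what the token-pasting macros above expand to (comment
 * only), assuming a member declared with WM_Q_EVCNT_DEFINE(txq, txdw):
 * the name buffer is sized by the literal string "qname##XX##evname"
 * (## is not expanded inside a string literal, giving 18 bytes), and
 *
 *	WM_Q_EVCNT_ATTACH(txq, txdw, txq, 0, xname, EVCNT_TYPE_INTR)
 *
 * formats "txq00txdw" into that buffer and attaches the counter
 * txq->txq_ev_txdw under that name.
 */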

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)			\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t txq_last_hw_cmd;
	uint8_t txq_last_hw_fields;
	uint16_t txq_last_hw_ipcs;
	uint16_t txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_sec;		/* Sequence Error */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx Count */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 Bytes) */
	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	struct evcnt sc_ev_icrxdmtc;	/* Intr. Cause Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */

#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define WM_RXCHAIN_RESET(rxq)						\
	do {								\
		(rxq)->rxq_tailp = &(rxq)->rxq_head;			\
		*(rxq)->rxq_tailp = NULL;				\
		(rxq)->rxq_len = 0;					\
	} while (/*CONSTCOND*/0)

#define WM_RXCHAIN_LINK(rxq, m)						\
	do {								\
		*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);		\
		(rxq)->rxq_tailp = &(m)->m_next;			\
	} while (/*CONSTCOND*/0)
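
/*
 * A short sketch of the tail-pointer technique above (comment only):
 * rxq_tailp always points at the m_next slot that currently terminates
 * the chain, so appending is O(1) with no list walk.  Assuming mbufs m1
 * and m2 belonging to one jumbo frame:
 *
 *	WM_RXCHAIN_RESET(rxq);		// rxq_head == NULL, rxq_len == 0
 *	WM_RXCHAIN_LINK(rxq, m1);	// rxq_head == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	// m1->m_next == m2
 */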

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)					\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)				\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define WM_EVCNT_INCR(ev)	/* nothing */
#define WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
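
/*
 * Usage sketch (comment only): PCI writes are posted, so reading back any
 * register pushes preceding writes out to the chip.  A typical pattern,
 * assuming the CTRL register and a value "ctrl":
 *
 *	CSR_WRITE(sc, WMREG_CTRL, ctrl);
 *	CSR_WRITE_FLUSH(sc);	// read STATUS to flush the posted write
 *	delay(10);
 */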

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	    (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	    (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
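
/*
 * A worked example of the LO/HI split above (comment only): descriptor
 * base addresses are programmed into the hardware as two 32-bit halves,
 * so a 64-bit DMA address such as 0x0000000123456000 yields
 *
 *	WM_CDTXADDR_LO(txq, 0) == 0x23456000
 *	WM_CDTXADDR_HI(txq, 0) == 0x00000001
 *
 * and the HI half is simply 0 where bus_addr_t is 32 bits wide.
 */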

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static bool	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
static void	wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and reading */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are implemented in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1477 "PCH2 LAN (82579V) Controller",
1478 WM_T_PCH2, WMP_F_COPPER },
1479 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
1480 "82575EB dual-1000baseT Ethernet",
1481 WM_T_82575, WMP_F_COPPER },
1482 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1483 "82575EB dual-1000baseX Ethernet (SERDES)",
1484 WM_T_82575, WMP_F_SERDES },
1485 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1486 "82575GB quad-1000baseT Ethernet",
1487 WM_T_82575, WMP_F_COPPER },
1488 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1489 "82575GB quad-1000baseT Ethernet (PM)",
1490 WM_T_82575, WMP_F_COPPER },
1491 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
1492 "82576 1000BaseT Ethernet",
1493 WM_T_82576, WMP_F_COPPER },
1494 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
1495 "82576 1000BaseX Ethernet",
1496 WM_T_82576, WMP_F_FIBER },
1497
1498 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
1499 "82576 gigabit Ethernet (SERDES)",
1500 WM_T_82576, WMP_F_SERDES },
1501
1502 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1503 "82576 quad-1000BaseT Ethernet",
1504 WM_T_82576, WMP_F_COPPER },
1505
1506 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1507 "82576 Gigabit ET2 Quad Port Server Adapter",
1508 WM_T_82576, WMP_F_COPPER },
1509
1510 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
1511 "82576 gigabit Ethernet",
1512 WM_T_82576, WMP_F_COPPER },
1513
1514 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
1515 "82576 gigabit Ethernet (SERDES)",
1516 WM_T_82576, WMP_F_SERDES },
1517 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1518 "82576 quad-gigabit Ethernet (SERDES)",
1519 WM_T_82576, WMP_F_SERDES },
1520
1521 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
1522 "82580 1000BaseT Ethernet",
1523 WM_T_82580, WMP_F_COPPER },
1524 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
1525 "82580 1000BaseX Ethernet",
1526 WM_T_82580, WMP_F_FIBER },
1527
1528 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
1529 "82580 1000BaseT Ethernet (SERDES)",
1530 WM_T_82580, WMP_F_SERDES },
1531
1532 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
1533 "82580 gigabit Ethernet (SGMII)",
1534 WM_T_82580, WMP_F_COPPER },
1535 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1536 "82580 dual-1000BaseT Ethernet",
1537 WM_T_82580, WMP_F_COPPER },
1538
1539 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1540 "82580 quad-1000BaseX Ethernet",
1541 WM_T_82580, WMP_F_FIBER },
1542
1543 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1544 "DH89XXCC Gigabit Ethernet (SGMII)",
1545 WM_T_82580, WMP_F_COPPER },
1546
1547 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1548 "DH89XXCC Gigabit Ethernet (SERDES)",
1549 WM_T_82580, WMP_F_SERDES },
1550
1551 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1552 "DH89XXCC 1000BASE-KX Ethernet",
1553 WM_T_82580, WMP_F_SERDES },
1554
1555 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1556 "DH89XXCC Gigabit Ethernet (SFP)",
1557 WM_T_82580, WMP_F_SERDES },
1558
1559 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1560 "I350 Gigabit Network Connection",
1561 WM_T_I350, WMP_F_COPPER },
1562
1563 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1564 "I350 Gigabit Fiber Network Connection",
1565 WM_T_I350, WMP_F_FIBER },
1566
1567 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1568 "I350 Gigabit Backplane Connection",
1569 WM_T_I350, WMP_F_SERDES },
1570
1571 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
1572 "I350 Quad Port Gigabit Ethernet",
1573 WM_T_I350, WMP_F_SERDES },
1574
1575 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1576 "I350 Gigabit Connection",
1577 WM_T_I350, WMP_F_COPPER },
1578
1579 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
1580 "I354 Gigabit Ethernet (KX)",
1581 WM_T_I354, WMP_F_SERDES },
1582
1583 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1584 "I354 Gigabit Ethernet (SGMII)",
1585 WM_T_I354, WMP_F_COPPER },
1586
1587 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
1588 "I354 Gigabit Ethernet (2.5G)",
1589 WM_T_I354, WMP_F_COPPER },
1590
1591 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1592 "I210-T1 Ethernet Server Adapter",
1593 WM_T_I210, WMP_F_COPPER },
1594
1595 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1596 "I210 Ethernet (Copper OEM)",
1597 WM_T_I210, WMP_F_COPPER },
1598
1599 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1600 "I210 Ethernet (Copper IT)",
1601 WM_T_I210, WMP_F_COPPER },
1602
1603 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1604 "I210 Ethernet (Copper, FLASH less)",
1605 WM_T_I210, WMP_F_COPPER },
1606
1607 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1608 "I210 Gigabit Ethernet (Fiber)",
1609 WM_T_I210, WMP_F_FIBER },
1610
1611 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1612 "I210 Gigabit Ethernet (SERDES)",
1613 WM_T_I210, WMP_F_SERDES },
1614
1615 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1616 "I210 Gigabit Ethernet (SERDES, FLASH less)",
1617 WM_T_I210, WMP_F_SERDES },
1618
1619 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1620 "I210 Gigabit Ethernet (SGMII)",
1621 WM_T_I210, WMP_F_COPPER },
1622
1623 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1624 "I210 Gigabit Ethernet (SGMII, FLASH less)",
1625 WM_T_I210, WMP_F_COPPER },
1626
1627 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1628 "I211 Ethernet (COPPER)",
1629 WM_T_I211, WMP_F_COPPER },
1630 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1631 "I217 V Ethernet Connection",
1632 WM_T_PCH_LPT, WMP_F_COPPER },
1633 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1634 "I217 LM Ethernet Connection",
1635 WM_T_PCH_LPT, WMP_F_COPPER },
1636 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1637 "I218 V Ethernet Connection",
1638 WM_T_PCH_LPT, WMP_F_COPPER },
1639 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
1640 "I218 V Ethernet Connection",
1641 WM_T_PCH_LPT, WMP_F_COPPER },
1642 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
1643 "I218 V Ethernet Connection",
1644 WM_T_PCH_LPT, WMP_F_COPPER },
1645 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1646 "I218 LM Ethernet Connection",
1647 WM_T_PCH_LPT, WMP_F_COPPER },
1648 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
1649 "I218 LM Ethernet Connection",
1650 WM_T_PCH_LPT, WMP_F_COPPER },
1651 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
1652 "I218 LM Ethernet Connection",
1653 WM_T_PCH_LPT, WMP_F_COPPER },
1654 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM,
1655 "I219 LM Ethernet Connection",
1656 WM_T_PCH_SPT, WMP_F_COPPER },
1657 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2,
1658 "I219 LM (2) Ethernet Connection",
1659 WM_T_PCH_SPT, WMP_F_COPPER },
1660 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3,
1661 "I219 LM (3) Ethernet Connection",
1662 WM_T_PCH_SPT, WMP_F_COPPER },
1663 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4,
1664 "I219 LM (4) Ethernet Connection",
1665 WM_T_PCH_SPT, WMP_F_COPPER },
1666 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5,
1667 "I219 LM (5) Ethernet Connection",
1668 WM_T_PCH_SPT, WMP_F_COPPER },
1669 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM6,
1670 "I219 LM (6) Ethernet Connection",
1671 WM_T_PCH_CNP, WMP_F_COPPER },
1672 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM7,
1673 "I219 LM (7) Ethernet Connection",
1674 WM_T_PCH_CNP, WMP_F_COPPER },
1675 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM8,
1676 "I219 LM (8) Ethernet Connection",
1677 WM_T_PCH_CNP, WMP_F_COPPER },
1678 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM9,
1679 "I219 LM (9) Ethernet Connection",
1680 WM_T_PCH_CNP, WMP_F_COPPER },
1681 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM10,
1682 "I219 LM (10) Ethernet Connection",
1683 WM_T_PCH_CNP, WMP_F_COPPER },
1684 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM11,
1685 "I219 LM (11) Ethernet Connection",
1686 WM_T_PCH_CNP, WMP_F_COPPER },
1687 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM12,
1688 "I219 LM (12) Ethernet Connection",
1689 WM_T_PCH_SPT, WMP_F_COPPER },
1690 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM13,
1691 "I219 LM (13) Ethernet Connection",
1692 WM_T_PCH_CNP, WMP_F_COPPER },
1693 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM14,
1694 "I219 LM (14) Ethernet Connection",
1695 WM_T_PCH_CNP, WMP_F_COPPER },
1696 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM15,
1697 "I219 LM (15) Ethernet Connection",
1698 WM_T_PCH_CNP, WMP_F_COPPER },
1699 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM16,
1700 "I219 LM (16) Ethernet Connection",
1701 WM_T_PCH_CNP, WMP_F_COPPER },
1702 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM17,
1703 "I219 LM (17) Ethernet Connection",
1704 WM_T_PCH_CNP, WMP_F_COPPER },
1705 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM18,
1706 "I219 LM (18) Ethernet Connection",
1707 WM_T_PCH_CNP, WMP_F_COPPER },
1708 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM19,
1709 "I219 LM (19) Ethernet Connection",
1710 WM_T_PCH_CNP, WMP_F_COPPER },
1711 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V,
1712 "I219 V Ethernet Connection",
1713 WM_T_PCH_SPT, WMP_F_COPPER },
1714 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2,
1715 "I219 V (2) Ethernet Connection",
1716 WM_T_PCH_SPT, WMP_F_COPPER },
1717 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4,
1718 "I219 V (4) Ethernet Connection",
1719 WM_T_PCH_SPT, WMP_F_COPPER },
1720 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5,
1721 "I219 V (5) Ethernet Connection",
1722 WM_T_PCH_SPT, WMP_F_COPPER },
1723 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6,
1724 "I219 V (6) Ethernet Connection",
1725 WM_T_PCH_CNP, WMP_F_COPPER },
1726 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7,
1727 "I219 V (7) Ethernet Connection",
1728 WM_T_PCH_CNP, WMP_F_COPPER },
1729 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V8,
1730 "I219 V (8) Ethernet Connection",
1731 WM_T_PCH_CNP, WMP_F_COPPER },
1732 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V9,
1733 "I219 V (9) Ethernet Connection",
1734 WM_T_PCH_CNP, WMP_F_COPPER },
1735 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V10,
1736 "I219 V (10) Ethernet Connection",
1737 WM_T_PCH_CNP, WMP_F_COPPER },
1738 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V11,
1739 "I219 V (11) Ethernet Connection",
1740 WM_T_PCH_CNP, WMP_F_COPPER },
1741 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V12,
1742 "I219 V (12) Ethernet Connection",
1743 WM_T_PCH_SPT, WMP_F_COPPER },
1744 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V13,
1745 "I219 V (13) Ethernet Connection",
1746 WM_T_PCH_CNP, WMP_F_COPPER },
1747 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V14,
1748 "I219 V (14) Ethernet Connection",
1749 WM_T_PCH_CNP, WMP_F_COPPER },
1750 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V15,
1751 "I219 V (15) Ethernet Connection",
1752 WM_T_PCH_CNP, WMP_F_COPPER },
1753 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V16,
1754 "I219 V (16) Ethernet Connection",
1755 WM_T_PCH_CNP, WMP_F_COPPER },
1756 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V17,
1757 "I219 V (17) Ethernet Connection",
1758 WM_T_PCH_CNP, WMP_F_COPPER },
1759 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V18,
1760 "I219 V (18) Ethernet Connection",
1761 WM_T_PCH_CNP, WMP_F_COPPER },
1762 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V19,
1763 "I219 V (19) Ethernet Connection",
1764 WM_T_PCH_CNP, WMP_F_COPPER },
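	/* Sentinel entry; wm_lookup() stops at the NULL wmp_name. */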
1765 { 0, 0,
1766 NULL,
1767 0, 0 },
1768 };
1769
1770 /*
1771 * Register read/write functions.
1772 * Other than CSR_{READ|WRITE}().
1773 */
1774
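/*
 * wm_io_read()/wm_io_write() below use the chip's indirect I/O window:
 * a write to I/O-space offset 0 selects the target register offset, and
 * offset 4 then carries the data. Each access costs two bus_space
 * operations, so this path is used only for chip-bug workarounds, never
 * for normal operation.
 */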
1775 #if 0 /* Not currently used */
1776 static inline uint32_t
1777 wm_io_read(struct wm_softc *sc, int reg)
1778 {
1779
1780 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1781 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1782 }
1783 #endif
1784
1785 static inline void
1786 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1787 {
1788
1789 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1790 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1791 }
1792
1793 static inline void
1794 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1795 uint32_t data)
1796 {
1797 uint32_t regval;
1798 int i;
1799
1800 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1801
1802 CSR_WRITE(sc, reg, regval);
1803
1804 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1805 delay(5);
1806 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1807 break;
1808 }
1809 if (i == SCTL_CTL_POLL_TIMEOUT) {
1810 aprint_error("%s: WARNING:"
1811 " i82575 reg 0x%08x setup did not indicate ready\n",
1812 device_xname(sc->sc_dev), reg);
1813 }
1814 }
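/*
 * The helper above serializes writes to 8-bit sub-registers behind a
 * control register (apparently SCTL, going by the SCTL_CTL_* names):
 * the data goes in the low byte, the sub-register offset in the address
 * field, and the READY bit is then polled in 5us steps, up to
 * SCTL_CTL_POLL_TIMEOUT times.
 */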
1815
1816 static inline void
1817 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1818 {
1819 wa->wa_low = htole32(BUS_ADDR_LO32(v));
1820 wa->wa_high = htole32(BUS_ADDR_HI32(v));
1821 }
1822
1823 /*
1824 * Descriptor sync/init functions.
1825 */
1826 static inline void
1827 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1828 {
1829 struct wm_softc *sc = txq->txq_sc;
1830
1831 /* If it will wrap around, sync to the end of the ring. */
1832 if ((start + num) > WM_NTXDESC(txq)) {
1833 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1834 WM_CDTXOFF(txq, start), txq->txq_descsize *
1835 (WM_NTXDESC(txq) - start), ops);
1836 num -= (WM_NTXDESC(txq) - start);
1837 start = 0;
1838 }
1839
1840 /* Now sync whatever is left. */
1841 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1842 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1843 }
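/*
 * Worked example for wm_cdtxsync() above: on a 256-descriptor ring,
 * syncing num = 10 descriptors starting at start = 250 first syncs
 * descriptors 250-255, then wraps and syncs descriptors 0-3.
 */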
1844
1845 static inline void
1846 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1847 {
1848 struct wm_softc *sc = rxq->rxq_sc;
1849
1850 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1851 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1852 }
1853
1854 static inline void
1855 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1856 {
1857 struct wm_softc *sc = rxq->rxq_sc;
1858 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1859 struct mbuf *m = rxs->rxs_mbuf;
1860
1861 /*
1862 * Note: We scoot the packet forward 2 bytes in the buffer
1863 * so that the payload after the Ethernet header is aligned
1864 * to a 4-byte boundary.
	 *
1866 * XXX BRAINDAMAGE ALERT!
1867 * The stupid chip uses the same size for every buffer, which
1868 * is set in the Receive Control register. We are using the 2K
1869 * size option, but what we REALLY want is (2K - 2)! For this
1870 * reason, we can't "scoot" packets longer than the standard
1871 * Ethernet MTU. On strict-alignment platforms, if the total
1872 * size exceeds (2K - 2) we set align_tweak to 0 and let
1873 * the upper layer copy the headers.
1874 */
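	/*
	 * Example: with sc_align_tweak == 2, the 14-byte Ethernet header
	 * occupies bytes 2-15 of the buffer, so the IP header that
	 * follows begins at byte 16, a 4-byte-aligned offset.
	 */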
1875 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1876
1877 if (sc->sc_type == WM_T_82574) {
1878 ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1879 rxd->erx_data.erxd_addr =
1880 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1881 rxd->erx_data.erxd_dd = 0;
1882 } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1883 nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1884
1885 rxd->nqrx_data.nrxd_paddr =
1886 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
		/* Currently, header splitting is not supported. */
1888 rxd->nqrx_data.nrxd_haddr = 0;
1889 } else {
1890 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1891
1892 wm_set_dma_addr(&rxd->wrx_addr,
1893 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1894 rxd->wrx_len = 0;
1895 rxd->wrx_cksum = 0;
1896 rxd->wrx_status = 0;
1897 rxd->wrx_errors = 0;
1898 rxd->wrx_special = 0;
1899 }
1900 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1901
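	/* Advance the receive descriptor tail (RDT) to hand the slot back to the chip. */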
1902 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1903 }
1904
1905 /*
1906 * Device driver interface functions and commonly used functions.
1907 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1908 */
1909
1910 /* Lookup supported device table */
1911 static const struct wm_product *
1912 wm_lookup(const struct pci_attach_args *pa)
1913 {
1914 const struct wm_product *wmp;
1915
1916 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1917 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1918 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1919 return wmp;
1920 }
1921 return NULL;
1922 }
1923
1924 /* The match function (ca_match) */
1925 static int
1926 wm_match(device_t parent, cfdata_t cf, void *aux)
1927 {
1928 struct pci_attach_args *pa = aux;
1929
1930 if (wm_lookup(pa) != NULL)
1931 return 1;
1932
1933 return 0;
1934 }
1935
1936 /* The attach function (ca_attach) */
1937 static void
1938 wm_attach(device_t parent, device_t self, void *aux)
1939 {
1940 struct wm_softc *sc = device_private(self);
1941 struct pci_attach_args *pa = aux;
1942 prop_dictionary_t dict;
1943 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1944 pci_chipset_tag_t pc = pa->pa_pc;
1945 int counts[PCI_INTR_TYPE_SIZE];
1946 pci_intr_type_t max_type;
1947 const char *eetype, *xname;
1948 bus_space_tag_t memt;
1949 bus_space_handle_t memh;
1950 bus_size_t memsize;
1951 int memh_valid;
1952 int i, error;
1953 const struct wm_product *wmp;
1954 prop_data_t ea;
1955 prop_number_t pn;
1956 uint8_t enaddr[ETHER_ADDR_LEN];
1957 char buf[256];
1958 char wqname[MAXCOMLEN];
1959 uint16_t cfg1, cfg2, swdpin, nvmword;
1960 pcireg_t preg, memtype;
1961 uint16_t eeprom_data, apme_mask;
1962 bool force_clear_smbi;
1963 uint32_t link_mode;
1964 uint32_t reg;
1965
1966 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
1967 sc->sc_debug = WM_DEBUG_DEFAULT;
1968 #endif
1969 sc->sc_dev = self;
1970 callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
1971 callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
1972 sc->sc_core_stopping = false;
1973
1974 wmp = wm_lookup(pa);
1975 #ifdef DIAGNOSTIC
1976 if (wmp == NULL) {
1977 printf("\n");
1978 panic("wm_attach: impossible");
1979 }
1980 #endif
1981 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1982
1983 sc->sc_pc = pa->pa_pc;
1984 sc->sc_pcitag = pa->pa_tag;
1985
1986 if (pci_dma64_available(pa)) {
1987 aprint_verbose(", 64-bit DMA");
1988 sc->sc_dmat = pa->pa_dmat64;
1989 } else {
1990 aprint_verbose(", 32-bit DMA");
1991 sc->sc_dmat = pa->pa_dmat;
1992 }
1993
1994 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1995 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1996 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1997
1998 sc->sc_type = wmp->wmp_type;
1999
2000 /* Set default function pointers */
2001 sc->phy.acquire = sc->nvm.acquire = wm_get_null;
2002 sc->phy.release = sc->nvm.release = wm_put_null;
2003 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
2004
2005 if (sc->sc_type < WM_T_82543) {
2006 if (sc->sc_rev < 2) {
2007 aprint_error_dev(sc->sc_dev,
2008 "i82542 must be at least rev. 2\n");
2009 return;
2010 }
2011 if (sc->sc_rev < 3)
2012 sc->sc_type = WM_T_82542_2_0;
2013 }
2014
2015 /*
2016 * Disable MSI for Errata:
2017 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
2018 *
2019 * 82544: Errata 25
2020 * 82540: Errata 6 (easy to reproduce device timeout)
2021 * 82545: Errata 4 (easy to reproduce device timeout)
2022 * 82546: Errata 26 (easy to reproduce device timeout)
2023 * 82541: Errata 7 (easy to reproduce device timeout)
2024 *
2025 * "Byte Enables 2 and 3 are not set on MSI writes"
2026 *
2027 * 82571 & 82572: Errata 63
2028 */
2029 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
2030 || (sc->sc_type == WM_T_82572))
2031 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
2032
2033 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2034 || (sc->sc_type == WM_T_82580)
2035 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2036 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2037 sc->sc_flags |= WM_F_NEWQUEUE;
2038
2039 /* Set device properties (mactype) */
2040 dict = device_properties(sc->sc_dev);
2041 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
2042
2043 /*
	 * Map the device. All devices support memory-mapped access,
2045 * and it is really required for normal operation.
2046 */
2047 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
2048 switch (memtype) {
2049 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2050 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2051 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
2052 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
2053 break;
2054 default:
2055 memh_valid = 0;
2056 break;
2057 }
2058
2059 if (memh_valid) {
2060 sc->sc_st = memt;
2061 sc->sc_sh = memh;
2062 sc->sc_ss = memsize;
2063 } else {
2064 aprint_error_dev(sc->sc_dev,
2065 "unable to map device registers\n");
2066 return;
2067 }
2068
2069 /*
2070 * In addition, i82544 and later support I/O mapped indirect
2071 * register access. It is not desirable (nor supported in
2072 * this driver) to use it for normal operation, though it is
2073 * required to work around bugs in some chip versions.
2074 */
2075 switch (sc->sc_type) {
2076 case WM_T_82544:
2077 case WM_T_82541:
2078 case WM_T_82541_2:
2079 case WM_T_82547:
2080 case WM_T_82547_2:
2081 /* First we have to find the I/O BAR. */
2082 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
2083 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
2084 if (memtype == PCI_MAPREG_TYPE_IO)
2085 break;
2086 if (PCI_MAPREG_MEM_TYPE(memtype) ==
2087 PCI_MAPREG_MEM_TYPE_64BIT)
2088 i += 4; /* skip high bits, too */
2089 }
2090 if (i < PCI_MAPREG_END) {
2091 /*
			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's not a problem, because those newer chips
			 * don't have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks as if it has not been
			 * configured.
2100 */
2101 preg = pci_conf_read(pc, pa->pa_tag, i);
2102 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
2103 aprint_error_dev(sc->sc_dev,
2104 "WARNING: I/O BAR at zero.\n");
2105 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
2106 0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
2107 == 0) {
2108 sc->sc_flags |= WM_F_IOH_VALID;
2109 } else
2110 aprint_error_dev(sc->sc_dev,
2111 "WARNING: unable to map I/O space\n");
2112 }
2113 break;
2114 default:
2115 break;
2116 }
2117
2118 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
2119 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2120 preg |= PCI_COMMAND_MASTER_ENABLE;
2121 if (sc->sc_type < WM_T_82542_2_1)
2122 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
2123 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
2124
2125 /* Power up chip */
2126 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
2127 && error != EOPNOTSUPP) {
2128 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
2129 return;
2130 }
2131
2132 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2133 /*
	 * If we can use only one queue, don't use MSI-X, to save
	 * interrupt resources.
2136 */
2137 if (sc->sc_nqueues > 1) {
2138 max_type = PCI_INTR_TYPE_MSIX;
2139 /*
		 * The 82583 advertises an MSI-X capability in its PCI
		 * configuration space, but it doesn't actually support it.
		 * At least the documentation says nothing about MSI-X.
2143 */
2144 counts[PCI_INTR_TYPE_MSIX]
2145 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2146 } else {
2147 max_type = PCI_INTR_TYPE_MSI;
2148 counts[PCI_INTR_TYPE_MSIX] = 0;
2149 }
2150
2151 /* Allocation settings */
2152 counts[PCI_INTR_TYPE_MSI] = 1;
2153 counts[PCI_INTR_TYPE_INTX] = 1;
2154 /* overridden by disable flags */
2155 if (wm_disable_msi != 0) {
2156 counts[PCI_INTR_TYPE_MSI] = 0;
2157 if (wm_disable_msix != 0) {
2158 max_type = PCI_INTR_TYPE_INTX;
2159 counts[PCI_INTR_TYPE_MSIX] = 0;
2160 }
2161 } else if (wm_disable_msix != 0) {
2162 max_type = PCI_INTR_TYPE_MSI;
2163 counts[PCI_INTR_TYPE_MSIX] = 0;
2164 }
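	/*
	 * Interrupt allocation falls back MSI-X -> MSI -> INTx. MSI-X
	 * asks for sc_nqueues + 1 vectors (one per queue plus one for
	 * link events); if the matching wm_setup_*() fails, the vectors
	 * are released and the next weaker type is retried via the
	 * alloc_retry label below.
	 */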
2165
2166 alloc_retry:
2167 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2168 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2169 return;
2170 }
2171
2172 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2173 error = wm_setup_msix(sc);
2174 if (error) {
2175 pci_intr_release(pc, sc->sc_intrs,
2176 counts[PCI_INTR_TYPE_MSIX]);
2177
2178 /* Setup for MSI: Disable MSI-X */
2179 max_type = PCI_INTR_TYPE_MSI;
2180 counts[PCI_INTR_TYPE_MSI] = 1;
2181 counts[PCI_INTR_TYPE_INTX] = 1;
2182 goto alloc_retry;
2183 }
2184 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2185 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2186 error = wm_setup_legacy(sc);
2187 if (error) {
2188 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2189 counts[PCI_INTR_TYPE_MSI]);
2190
2191 /* The next try is for INTx: Disable MSI */
2192 max_type = PCI_INTR_TYPE_INTX;
2193 counts[PCI_INTR_TYPE_INTX] = 1;
2194 goto alloc_retry;
2195 }
2196 } else {
2197 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2198 error = wm_setup_legacy(sc);
2199 if (error) {
2200 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2201 counts[PCI_INTR_TYPE_INTX]);
2202 return;
2203 }
2204 }
2205
2206 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2207 error = workqueue_create(&sc->sc_queue_wq, wqname,
2208 wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2209 WQ_PERCPU | WQ_MPSAFE);
2210 if (error) {
2211 aprint_error_dev(sc->sc_dev,
2212 "unable to create TxRx workqueue\n");
2213 goto out;
2214 }
2215
2216 snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
2217 error = workqueue_create(&sc->sc_reset_wq, wqname,
2218 wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
2219 WQ_MPSAFE);
2220 if (error) {
2221 workqueue_destroy(sc->sc_queue_wq);
2222 aprint_error_dev(sc->sc_dev,
2223 "unable to create reset workqueue\n");
2224 goto out;
2225 }
2226
2227 /*
2228 * Check the function ID (unit number of the chip).
2229 */
2230 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2231 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2232 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2233 || (sc->sc_type == WM_T_82580)
2234 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2235 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2236 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2237 else
2238 sc->sc_funcid = 0;
2239
2240 /*
2241 * Determine a few things about the bus we're connected to.
2242 */
2243 if (sc->sc_type < WM_T_82543) {
2244 /* We don't really know the bus characteristics here. */
2245 sc->sc_bus_speed = 33;
2246 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2247 /*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
2250 */
2251 sc->sc_flags |= WM_F_CSA;
2252 sc->sc_bus_speed = 66;
2253 aprint_verbose_dev(sc->sc_dev,
2254 "Communication Streaming Architecture\n");
2255 if (sc->sc_type == WM_T_82547) {
2256 callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
2257 callout_setfunc(&sc->sc_txfifo_ch,
2258 wm_82547_txfifo_stall, sc);
2259 aprint_verbose_dev(sc->sc_dev,
2260 "using 82547 Tx FIFO stall work-around\n");
2261 }
2262 } else if (sc->sc_type >= WM_T_82571) {
2263 sc->sc_flags |= WM_F_PCIE;
2264 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2265 && (sc->sc_type != WM_T_ICH10)
2266 && (sc->sc_type != WM_T_PCH)
2267 && (sc->sc_type != WM_T_PCH2)
2268 && (sc->sc_type != WM_T_PCH_LPT)
2269 && (sc->sc_type != WM_T_PCH_SPT)
2270 && (sc->sc_type != WM_T_PCH_CNP)) {
2271 /* ICH* and PCH* have no PCIe capability registers */
2272 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2273 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2274 NULL) == 0)
2275 aprint_error_dev(sc->sc_dev,
2276 "unable to find PCIe capability\n");
2277 }
2278 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2279 } else {
2280 reg = CSR_READ(sc, WMREG_STATUS);
2281 if (reg & STATUS_BUS64)
2282 sc->sc_flags |= WM_F_BUS64;
2283 if ((reg & STATUS_PCIX_MODE) != 0) {
2284 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2285
2286 sc->sc_flags |= WM_F_PCIX;
2287 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2288 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2289 aprint_error_dev(sc->sc_dev,
2290 "unable to find PCIX capability\n");
2291 else if (sc->sc_type != WM_T_82545_3 &&
2292 sc->sc_type != WM_T_82546_3) {
2293 /*
2294 * Work around a problem caused by the BIOS
2295 * setting the max memory read byte count
2296 * incorrectly.
2297 */
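				/*
				 * Both fields are log2-encoded as
				 * (512 << n) bytes; e.g. a BIOS-programmed
				 * MMRBC of 4096 (n = 3) against a device
				 * maximum of 1024 (n = 1) is clamped to
				 * 1024 below.
				 */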
2298 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2299 sc->sc_pcixe_capoff + PCIX_CMD);
2300 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2301 sc->sc_pcixe_capoff + PCIX_STATUS);
2302
2303 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2304 PCIX_CMD_BYTECNT_SHIFT;
2305 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2306 PCIX_STATUS_MAXB_SHIFT;
2307 if (bytecnt > maxb) {
2308 aprint_verbose_dev(sc->sc_dev,
2309 "resetting PCI-X MMRBC: %d -> %d\n",
2310 512 << bytecnt, 512 << maxb);
2311 pcix_cmd = (pcix_cmd &
2312 ~PCIX_CMD_BYTECNT_MASK) |
2313 (maxb << PCIX_CMD_BYTECNT_SHIFT);
2314 pci_conf_write(pa->pa_pc, pa->pa_tag,
2315 sc->sc_pcixe_capoff + PCIX_CMD,
2316 pcix_cmd);
2317 }
2318 }
2319 }
2320 /*
2321 * The quad port adapter is special; it has a PCIX-PCIX
2322 * bridge on the board, and can run the secondary bus at
2323 * a higher speed.
2324 */
2325 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2326 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2327 : 66;
2328 } else if (sc->sc_flags & WM_F_PCIX) {
2329 switch (reg & STATUS_PCIXSPD_MASK) {
2330 case STATUS_PCIXSPD_50_66:
2331 sc->sc_bus_speed = 66;
2332 break;
2333 case STATUS_PCIXSPD_66_100:
2334 sc->sc_bus_speed = 100;
2335 break;
2336 case STATUS_PCIXSPD_100_133:
2337 sc->sc_bus_speed = 133;
2338 break;
2339 default:
2340 aprint_error_dev(sc->sc_dev,
2341 "unknown PCIXSPD %d; assuming 66MHz\n",
2342 reg & STATUS_PCIXSPD_MASK);
2343 sc->sc_bus_speed = 66;
2344 break;
2345 }
2346 } else
2347 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2348 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2349 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2350 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2351 }
2352
2353 /* clear interesting stat counters */
2354 CSR_READ(sc, WMREG_COLC);
2355 CSR_READ(sc, WMREG_RXERRC);
2356
2357 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2358 || (sc->sc_type >= WM_T_ICH8))
2359 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2360 if (sc->sc_type >= WM_T_ICH8)
2361 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2362
2363 /* Set PHY, NVM mutex related stuff */
2364 switch (sc->sc_type) {
2365 case WM_T_82542_2_0:
2366 case WM_T_82542_2_1:
2367 case WM_T_82543:
2368 case WM_T_82544:
2369 /* Microwire */
2370 sc->nvm.read = wm_nvm_read_uwire;
2371 sc->sc_nvm_wordsize = 64;
2372 sc->sc_nvm_addrbits = 6;
2373 break;
2374 case WM_T_82540:
2375 case WM_T_82545:
2376 case WM_T_82545_3:
2377 case WM_T_82546:
2378 case WM_T_82546_3:
2379 /* Microwire */
2380 sc->nvm.read = wm_nvm_read_uwire;
2381 reg = CSR_READ(sc, WMREG_EECD);
2382 if (reg & EECD_EE_SIZE) {
2383 sc->sc_nvm_wordsize = 256;
2384 sc->sc_nvm_addrbits = 8;
2385 } else {
2386 sc->sc_nvm_wordsize = 64;
2387 sc->sc_nvm_addrbits = 6;
2388 }
2389 sc->sc_flags |= WM_F_LOCK_EECD;
2390 sc->nvm.acquire = wm_get_eecd;
2391 sc->nvm.release = wm_put_eecd;
2392 break;
2393 case WM_T_82541:
2394 case WM_T_82541_2:
2395 case WM_T_82547:
2396 case WM_T_82547_2:
2397 reg = CSR_READ(sc, WMREG_EECD);
2398 /*
		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI EEPROM
		 * only on the 8254[17], so set the flags and functions
		 * before calling it.
2401 */
2402 sc->sc_flags |= WM_F_LOCK_EECD;
2403 sc->nvm.acquire = wm_get_eecd;
2404 sc->nvm.release = wm_put_eecd;
2405 if (reg & EECD_EE_TYPE) {
2406 /* SPI */
2407 sc->nvm.read = wm_nvm_read_spi;
2408 sc->sc_flags |= WM_F_EEPROM_SPI;
2409 wm_nvm_set_addrbits_size_eecd(sc);
2410 } else {
2411 /* Microwire */
2412 sc->nvm.read = wm_nvm_read_uwire;
2413 if ((reg & EECD_EE_ABITS) != 0) {
2414 sc->sc_nvm_wordsize = 256;
2415 sc->sc_nvm_addrbits = 8;
2416 } else {
2417 sc->sc_nvm_wordsize = 64;
2418 sc->sc_nvm_addrbits = 6;
2419 }
2420 }
2421 break;
2422 case WM_T_82571:
2423 case WM_T_82572:
2424 /* SPI */
2425 sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
2427 sc->sc_flags |= WM_F_EEPROM_SPI;
2428 wm_nvm_set_addrbits_size_eecd(sc);
2429 sc->phy.acquire = wm_get_swsm_semaphore;
2430 sc->phy.release = wm_put_swsm_semaphore;
2431 sc->nvm.acquire = wm_get_nvm_82571;
2432 sc->nvm.release = wm_put_nvm_82571;
2433 break;
2434 case WM_T_82573:
2435 case WM_T_82574:
2436 case WM_T_82583:
2437 sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
2439 if (sc->sc_type == WM_T_82573) {
2440 sc->phy.acquire = wm_get_swsm_semaphore;
2441 sc->phy.release = wm_put_swsm_semaphore;
2442 sc->nvm.acquire = wm_get_nvm_82571;
2443 sc->nvm.release = wm_put_nvm_82571;
2444 } else {
2445 /* Both PHY and NVM use the same semaphore. */
2446 sc->phy.acquire = sc->nvm.acquire
2447 = wm_get_swfwhw_semaphore;
2448 sc->phy.release = sc->nvm.release
2449 = wm_put_swfwhw_semaphore;
2450 }
2451 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2452 sc->sc_flags |= WM_F_EEPROM_FLASH;
2453 sc->sc_nvm_wordsize = 2048;
2454 } else {
2455 /* SPI */
2456 sc->sc_flags |= WM_F_EEPROM_SPI;
2457 wm_nvm_set_addrbits_size_eecd(sc);
2458 }
2459 break;
2460 case WM_T_82575:
2461 case WM_T_82576:
2462 case WM_T_82580:
2463 case WM_T_I350:
2464 case WM_T_I354:
2465 case WM_T_80003:
2466 /* SPI */
2467 sc->sc_flags |= WM_F_EEPROM_SPI;
2468 wm_nvm_set_addrbits_size_eecd(sc);
2469 if ((sc->sc_type == WM_T_80003)
2470 || (sc->sc_nvm_wordsize < (1 << 15))) {
2471 sc->nvm.read = wm_nvm_read_eerd;
2472 /* Don't use WM_F_LOCK_EECD because we use EERD */
2473 } else {
2474 sc->nvm.read = wm_nvm_read_spi;
2475 sc->sc_flags |= WM_F_LOCK_EECD;
2476 }
2477 sc->phy.acquire = wm_get_phy_82575;
2478 sc->phy.release = wm_put_phy_82575;
2479 sc->nvm.acquire = wm_get_nvm_80003;
2480 sc->nvm.release = wm_put_nvm_80003;
2481 break;
2482 case WM_T_ICH8:
2483 case WM_T_ICH9:
2484 case WM_T_ICH10:
2485 case WM_T_PCH:
2486 case WM_T_PCH2:
2487 case WM_T_PCH_LPT:
2488 sc->nvm.read = wm_nvm_read_ich8;
2489 /* FLASH */
2490 sc->sc_flags |= WM_F_EEPROM_FLASH;
2491 sc->sc_nvm_wordsize = 2048;
2492 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2493 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2494 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2495 aprint_error_dev(sc->sc_dev,
2496 "can't map FLASH registers\n");
2497 goto out;
2498 }
2499 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2500 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2501 ICH_FLASH_SECTOR_SIZE;
2502 sc->sc_ich8_flash_bank_size =
2503 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2504 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2505 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2506 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
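		/*
		 * Worked example, assuming 4 KB flash sectors: if GFPREG
		 * says the NVM region spans sectors 1 through 16
		 * inclusive, that is 16 sectors = 64 KB; split into two
		 * banks of 2-byte words, each bank holds 16 K words.
		 */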
2507 sc->sc_flashreg_offset = 0;
2508 sc->phy.acquire = wm_get_swflag_ich8lan;
2509 sc->phy.release = wm_put_swflag_ich8lan;
2510 sc->nvm.acquire = wm_get_nvm_ich8lan;
2511 sc->nvm.release = wm_put_nvm_ich8lan;
2512 break;
2513 case WM_T_PCH_SPT:
2514 case WM_T_PCH_CNP:
2515 sc->nvm.read = wm_nvm_read_spt;
2516 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2517 sc->sc_flags |= WM_F_EEPROM_FLASH;
2518 sc->sc_flasht = sc->sc_st;
2519 sc->sc_flashh = sc->sc_sh;
2520 sc->sc_ich8_flash_base = 0;
2521 sc->sc_nvm_wordsize =
2522 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2523 * NVM_SIZE_MULTIPLIER;
		/* It is the size in bytes; we want words. */
2525 sc->sc_nvm_wordsize /= 2;
2526 /* Assume 2 banks */
2527 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
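		/*
		 * Example, assuming NVM_SIZE_MULTIPLIER is 4 KB: a strap
		 * field of 7 gives (7 + 1) * 4096 = 32 KB of NVM, i.e.
		 * 16 K words, i.e. two assumed banks of 8 K words each.
		 */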
2528 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2529 sc->phy.acquire = wm_get_swflag_ich8lan;
2530 sc->phy.release = wm_put_swflag_ich8lan;
2531 sc->nvm.acquire = wm_get_nvm_ich8lan;
2532 sc->nvm.release = wm_put_nvm_ich8lan;
2533 break;
2534 case WM_T_I210:
2535 case WM_T_I211:
		/* Allow a single clear of the SW semaphore on I210 and newer. */
2537 sc->sc_flags |= WM_F_WA_I210_CLSEM;
2538 if (wm_nvm_flash_presence_i210(sc)) {
2539 sc->nvm.read = wm_nvm_read_eerd;
2540 /* Don't use WM_F_LOCK_EECD because we use EERD */
2541 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2542 wm_nvm_set_addrbits_size_eecd(sc);
2543 } else {
2544 sc->nvm.read = wm_nvm_read_invm;
2545 sc->sc_flags |= WM_F_EEPROM_INVM;
2546 sc->sc_nvm_wordsize = INVM_SIZE;
2547 }
2548 sc->phy.acquire = wm_get_phy_82575;
2549 sc->phy.release = wm_put_phy_82575;
2550 sc->nvm.acquire = wm_get_nvm_80003;
2551 sc->nvm.release = wm_put_nvm_80003;
2552 break;
2553 default:
2554 break;
2555 }
2556
2557 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2558 switch (sc->sc_type) {
2559 case WM_T_82571:
2560 case WM_T_82572:
2561 reg = CSR_READ(sc, WMREG_SWSM2);
2562 if ((reg & SWSM2_LOCK) == 0) {
2563 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2564 force_clear_smbi = true;
2565 } else
2566 force_clear_smbi = false;
2567 break;
2568 case WM_T_82573:
2569 case WM_T_82574:
2570 case WM_T_82583:
2571 force_clear_smbi = true;
2572 break;
2573 default:
2574 force_clear_smbi = false;
2575 break;
2576 }
2577 if (force_clear_smbi) {
2578 reg = CSR_READ(sc, WMREG_SWSM);
2579 if ((reg & SWSM_SMBI) != 0)
2580 aprint_error_dev(sc->sc_dev,
2581 "Please update the Bootagent\n");
2582 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2583 }
2584
2585 /*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
2588 * that no EEPROM is attached.
2589 */
2590 /*
2591 * Validate the EEPROM checksum. If the checksum fails, flag
2592 * this for later, so we can fail future reads from the EEPROM.
2593 */
2594 if (wm_nvm_validate_checksum(sc)) {
2595 /*
		 * Check a second time, because some PCI-e parts fail the
		 * first check due to the link being in a sleep state.
2598 */
2599 if (wm_nvm_validate_checksum(sc))
2600 sc->sc_flags |= WM_F_EEPROM_INVALID;
2601 }
2602
2603 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2604 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2605 else {
2606 aprint_verbose_dev(sc->sc_dev, "%u words ",
2607 sc->sc_nvm_wordsize);
2608 if (sc->sc_flags & WM_F_EEPROM_INVM)
2609 aprint_verbose("iNVM");
2610 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2611 aprint_verbose("FLASH(HW)");
2612 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2613 aprint_verbose("FLASH");
2614 else {
2615 if (sc->sc_flags & WM_F_EEPROM_SPI)
2616 eetype = "SPI";
2617 else
2618 eetype = "MicroWire";
2619 aprint_verbose("(%d address bits) %s EEPROM",
2620 sc->sc_nvm_addrbits, eetype);
2621 }
2622 }
2623 wm_nvm_version(sc);
2624 aprint_verbose("\n");
2625
2626 /*
 * XXX This is the first call of wm_gmii_setup_phytype; the result
 * might be incorrect.
2629 */
2630 wm_gmii_setup_phytype(sc, 0, 0);
2631
2632 /* Check for WM_F_WOL on some chips before wm_reset() */
2633 switch (sc->sc_type) {
2634 case WM_T_ICH8:
2635 case WM_T_ICH9:
2636 case WM_T_ICH10:
2637 case WM_T_PCH:
2638 case WM_T_PCH2:
2639 case WM_T_PCH_LPT:
2640 case WM_T_PCH_SPT:
2641 case WM_T_PCH_CNP:
2642 apme_mask = WUC_APME;
2643 eeprom_data = CSR_READ(sc, WMREG_WUC);
2644 if ((eeprom_data & apme_mask) != 0)
2645 sc->sc_flags |= WM_F_WOL;
2646 break;
2647 default:
2648 break;
2649 }
2650
2651 /* Reset the chip to a known state. */
2652 wm_reset(sc);
2653
2654 /*
2655 * Check for I21[01] PLL workaround.
2656 *
2657 * Three cases:
2658 * a) Chip is I211.
2659 * b) Chip is I210 and it uses INVM (not FLASH).
2660 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2661 */
2662 if (sc->sc_type == WM_T_I211)
2663 sc->sc_flags |= WM_F_PLL_WA_I210;
2664 if (sc->sc_type == WM_T_I210) {
2665 if (!wm_nvm_flash_presence_i210(sc))
2666 sc->sc_flags |= WM_F_PLL_WA_I210;
2667 else if ((sc->sc_nvm_ver_major < 3)
2668 || ((sc->sc_nvm_ver_major == 3)
2669 && (sc->sc_nvm_ver_minor < 25))) {
2670 aprint_verbose_dev(sc->sc_dev,
2671 "ROM image version %d.%d is older than 3.25\n",
2672 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2673 sc->sc_flags |= WM_F_PLL_WA_I210;
2674 }
2675 }
2676 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2677 wm_pll_workaround_i210(sc);
2678
2679 wm_get_wakeup(sc);
2680
2681 /* Non-AMT based hardware can now take control from firmware */
2682 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2683 wm_get_hw_control(sc);
2684
2685 /*
2686 * Read the Ethernet address from the EEPROM, if not first found
2687 * in device properties.
2688 */
2689 ea = prop_dictionary_get(dict, "mac-address");
2690 if (ea != NULL) {
2691 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2692 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2693 memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
2694 } else {
2695 if (wm_read_mac_addr(sc, enaddr) != 0) {
2696 aprint_error_dev(sc->sc_dev,
2697 "unable to read Ethernet address\n");
2698 goto out;
2699 }
2700 }
2701
2702 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2703 ether_sprintf(enaddr));
2704
2705 /*
2706 * Read the config info from the EEPROM, and set up various
2707 * bits in the control registers based on their contents.
2708 */
2709 pn = prop_dictionary_get(dict, "i82543-cfg1");
2710 if (pn != NULL) {
2711 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2712 cfg1 = (uint16_t) prop_number_signed_value(pn);
2713 } else {
2714 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2715 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2716 goto out;
2717 }
2718 }
2719
2720 pn = prop_dictionary_get(dict, "i82543-cfg2");
2721 if (pn != NULL) {
2722 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2723 cfg2 = (uint16_t) prop_number_signed_value(pn);
2724 } else {
2725 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2726 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2727 goto out;
2728 }
2729 }
2730
2731 /* check for WM_F_WOL */
2732 switch (sc->sc_type) {
2733 case WM_T_82542_2_0:
2734 case WM_T_82542_2_1:
2735 case WM_T_82543:
2736 /* dummy? */
2737 eeprom_data = 0;
2738 apme_mask = NVM_CFG3_APME;
2739 break;
2740 case WM_T_82544:
2741 apme_mask = NVM_CFG2_82544_APM_EN;
2742 eeprom_data = cfg2;
2743 break;
2744 case WM_T_82546:
2745 case WM_T_82546_3:
2746 case WM_T_82571:
2747 case WM_T_82572:
2748 case WM_T_82573:
2749 case WM_T_82574:
2750 case WM_T_82583:
2751 case WM_T_80003:
2752 case WM_T_82575:
2753 case WM_T_82576:
2754 apme_mask = NVM_CFG3_APME;
2755 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2756 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2757 break;
2758 case WM_T_82580:
2759 case WM_T_I350:
2760 case WM_T_I354:
2761 case WM_T_I210:
2762 case WM_T_I211:
2763 apme_mask = NVM_CFG3_APME;
2764 wm_nvm_read(sc,
2765 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2766 1, &eeprom_data);
2767 break;
2768 case WM_T_ICH8:
2769 case WM_T_ICH9:
2770 case WM_T_ICH10:
2771 case WM_T_PCH:
2772 case WM_T_PCH2:
2773 case WM_T_PCH_LPT:
2774 case WM_T_PCH_SPT:
2775 case WM_T_PCH_CNP:
		/* Already checked before wm_reset() */
2777 apme_mask = eeprom_data = 0;
2778 break;
2779 default: /* XXX 82540 */
2780 apme_mask = NVM_CFG3_APME;
2781 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2782 break;
2783 }
2784 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2785 if ((eeprom_data & apme_mask) != 0)
2786 sc->sc_flags |= WM_F_WOL;
2787
2788 /*
	 * We have the EEPROM settings; now apply the special cases where
	 * the EEPROM may be wrong or the board won't support wake-on-LAN
	 * on a particular port.
2792 */
2793 switch (sc->sc_pcidevid) {
2794 case PCI_PRODUCT_INTEL_82546GB_PCIE:
2795 sc->sc_flags &= ~WM_F_WOL;
2796 break;
2797 case PCI_PRODUCT_INTEL_82546EB_FIBER:
2798 case PCI_PRODUCT_INTEL_82546GB_FIBER:
2799 /* Wake events only supported on port A for dual fiber
2800 * regardless of eeprom setting */
2801 if (sc->sc_funcid == 1)
2802 sc->sc_flags &= ~WM_F_WOL;
2803 break;
2804 case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2805 /* If quad port adapter, disable WoL on all but port A */
2806 if (sc->sc_funcid != 0)
2807 sc->sc_flags &= ~WM_F_WOL;
2808 break;
2809 case PCI_PRODUCT_INTEL_82571EB_FIBER:
2810 /* Wake events only supported on port A for dual fiber
2811 * regardless of eeprom setting */
2812 if (sc->sc_funcid == 1)
2813 sc->sc_flags &= ~WM_F_WOL;
2814 break;
2815 case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2816 case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2817 case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2818 /* If quad port adapter, disable WoL on all but port A */
2819 if (sc->sc_funcid != 0)
2820 sc->sc_flags &= ~WM_F_WOL;
2821 break;
2822 }
2823
2824 if (sc->sc_type >= WM_T_82575) {
2825 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2826 aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2827 nvmword);
2828 if ((sc->sc_type == WM_T_82575) ||
2829 (sc->sc_type == WM_T_82576)) {
2830 /* Check NVM for autonegotiation */
2831 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2832 != 0)
2833 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2834 }
2835 if ((sc->sc_type == WM_T_82575) ||
2836 (sc->sc_type == WM_T_I350)) {
2837 if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2838 sc->sc_flags |= WM_F_MAS;
2839 }
2840 }
2841 }
2842
2843 /*
 * XXX Need special handling for some multiple-port cards to
 * disable a particular port.
2846 */
2847
2848 if (sc->sc_type >= WM_T_82544) {
2849 pn = prop_dictionary_get(dict, "i82543-swdpin");
2850 if (pn != NULL) {
2851 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2852 swdpin = (uint16_t) prop_number_signed_value(pn);
2853 } else {
2854 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2855 aprint_error_dev(sc->sc_dev,
2856 "unable to read SWDPIN\n");
2857 goto out;
2858 }
2859 }
2860 }
2861
2862 if (cfg1 & NVM_CFG1_ILOS)
2863 sc->sc_ctrl |= CTRL_ILOS;
2864
2865 /*
2866 * XXX
	 * This code isn't correct because pins 2 and 3 are located at
	 * different positions on newer chips. Check all the datasheets.
	 *
	 * Until this is resolved, do this only on chips up to and
	 * including the 82580.
2871 */
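	/*
	 * The NVM SWDPIN word packs two 4-bit fields: a nibble of SWDPIO
	 * (pin direction) bits and a nibble of SWDPIN (initial pin value)
	 * bits; each is masked and shifted into the matching CTRL field
	 * unchanged.
	 */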
2872 if (sc->sc_type <= WM_T_82580) {
2873 if (sc->sc_type >= WM_T_82544) {
2874 sc->sc_ctrl |=
2875 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2876 CTRL_SWDPIO_SHIFT;
2877 sc->sc_ctrl |=
2878 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2879 CTRL_SWDPINS_SHIFT;
2880 } else {
2881 sc->sc_ctrl |=
2882 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2883 CTRL_SWDPIO_SHIFT;
2884 }
2885 }
2886
2887 if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2888 wm_nvm_read(sc,
2889 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2890 1, &nvmword);
2891 if (nvmword & NVM_CFG3_ILOS)
2892 sc->sc_ctrl |= CTRL_ILOS;
2893 }
2894
2895 #if 0
2896 if (sc->sc_type >= WM_T_82544) {
2897 if (cfg1 & NVM_CFG1_IPS0)
2898 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2899 if (cfg1 & NVM_CFG1_IPS1)
2900 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2901 sc->sc_ctrl_ext |=
2902 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2903 CTRL_EXT_SWDPIO_SHIFT;
2904 sc->sc_ctrl_ext |=
2905 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2906 CTRL_EXT_SWDPINS_SHIFT;
2907 } else {
2908 sc->sc_ctrl_ext |=
2909 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2910 CTRL_EXT_SWDPIO_SHIFT;
2911 }
2912 #endif
2913
2914 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2915 #if 0
2916 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2917 #endif
2918
2919 if (sc->sc_type == WM_T_PCH) {
2920 uint16_t val;
2921
2922 /* Save the NVM K1 bit setting */
2923 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2924
2925 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2926 sc->sc_nvm_k1_enabled = 1;
2927 else
2928 sc->sc_nvm_k1_enabled = 0;
2929 }
2930
2931 /* Determine if we're GMII, TBI, SERDES or SGMII mode */
2932 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2933 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2934 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2935 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2936 || sc->sc_type == WM_T_82573
2937 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2938 /* Copper only */
2939 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
2943 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2944 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2945 switch (link_mode) {
2946 case CTRL_EXT_LINK_MODE_1000KX:
2947 aprint_normal_dev(sc->sc_dev, "1000KX\n");
2948 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2949 break;
2950 case CTRL_EXT_LINK_MODE_SGMII:
2951 if (wm_sgmii_uses_mdio(sc)) {
2952 aprint_normal_dev(sc->sc_dev,
2953 "SGMII(MDIO)\n");
2954 sc->sc_flags |= WM_F_SGMII;
2955 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2956 break;
2957 }
2958 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2959 /*FALLTHROUGH*/
2960 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2961 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2962 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2963 if (link_mode
2964 == CTRL_EXT_LINK_MODE_SGMII) {
2965 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2966 sc->sc_flags |= WM_F_SGMII;
2967 aprint_verbose_dev(sc->sc_dev,
2968 "SGMII\n");
2969 } else {
2970 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2971 aprint_verbose_dev(sc->sc_dev,
2972 "SERDES\n");
2973 }
2974 break;
2975 }
2976 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2977 aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
2978 else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2979 aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
2980 sc->sc_flags |= WM_F_SGMII;
2981 }
2982 /* Do not change link mode for 100BaseFX */
2983 if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
2984 break;
2985
2986 /* Change current link mode setting */
2987 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2988 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2989 reg |= CTRL_EXT_LINK_MODE_SGMII;
2990 else
2991 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2992 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2993 break;
2994 case CTRL_EXT_LINK_MODE_GMII:
2995 default:
2996 aprint_normal_dev(sc->sc_dev, "Copper\n");
2997 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2998 break;
2999 }
3000
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
		else
			reg &= ~CTRL_EXT_I2C_ENA;
3006 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3007 if ((sc->sc_flags & WM_F_SGMII) != 0) {
3008 if (!wm_sgmii_uses_mdio(sc))
3009 wm_gmii_setup_phytype(sc, 0, 0);
3010 wm_reset_mdicnfg_82580(sc);
3011 }
3012 } else if (sc->sc_type < WM_T_82543 ||
3013 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
3014 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3015 aprint_error_dev(sc->sc_dev,
3016 "WARNING: TBIMODE set on 1000BASE-T product!\n");
3017 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
3018 }
3019 } else {
3020 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
3021 aprint_error_dev(sc->sc_dev,
3022 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
3023 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3024 }
3025 }
3026
3027 if (sc->sc_type >= WM_T_PCH2)
3028 sc->sc_flags |= WM_F_EEE;
3029 else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
3030 && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
3031 /* XXX: Need special handling for I354. (not yet) */
3032 if (sc->sc_type != WM_T_I354)
3033 sc->sc_flags |= WM_F_EEE;
3034 }
3035
3036 /*
3037 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not. So ask for the stripped CRC here and cope in rxeof.
3039 */
3040 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3041 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3042 sc->sc_flags |= WM_F_CRC_STRIP;
3043
3044 /* Set device properties (macflags) */
3045 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
3046
3047 if (sc->sc_flags != 0) {
3048 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
3049 aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
3050 }
3051
3052 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
3053
3054 /* Initialize the media structures accordingly. */
3055 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3056 wm_gmii_mediainit(sc, wmp->wmp_product);
3057 else
3058 wm_tbi_mediainit(sc); /* All others */
3059
3060 ifp = &sc->sc_ethercom.ec_if;
3061 xname = device_xname(sc->sc_dev);
3062 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
3063 ifp->if_softc = sc;
3064 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3065 ifp->if_extflags = IFEF_MPSAFE;
3066 ifp->if_ioctl = wm_ioctl;
3067 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3068 ifp->if_start = wm_nq_start;
3069 /*
3070 * When the number of CPUs is one and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
		 * the other for link status changes.
3074 * In this situation, wm_nq_transmit() is disadvantageous
3075 * because of wm_select_txqueue() and pcq(9) overhead.
3076 */
3077 if (wm_is_using_multiqueue(sc))
3078 ifp->if_transmit = wm_nq_transmit;
3079 } else {
3080 ifp->if_start = wm_start;
3081 /*
3082 * wm_transmit() has the same disadvantages as wm_nq_transmit()
3083 * described above.
3084 */
3085 if (wm_is_using_multiqueue(sc))
3086 ifp->if_transmit = wm_transmit;
3087 }
	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as its watchdog. */
3089 ifp->if_init = wm_init;
3090 ifp->if_stop = wm_stop;
3091 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
3092 IFQ_SET_READY(&ifp->if_snd);
3093
3094 /* Check for jumbo frame */
3095 switch (sc->sc_type) {
3096 case WM_T_82573:
3097 /* XXX limited to 9234 if ASPM is disabled */
3098 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
3099 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
3100 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3101 break;
3102 case WM_T_82571:
3103 case WM_T_82572:
3104 case WM_T_82574:
3105 case WM_T_82583:
3106 case WM_T_82575:
3107 case WM_T_82576:
3108 case WM_T_82580:
3109 case WM_T_I350:
3110 case WM_T_I354:
3111 case WM_T_I210:
3112 case WM_T_I211:
3113 case WM_T_80003:
3114 case WM_T_ICH9:
3115 case WM_T_ICH10:
3116 case WM_T_PCH2: /* PCH2 supports 9K frame size */
3117 case WM_T_PCH_LPT:
3118 case WM_T_PCH_SPT:
3119 case WM_T_PCH_CNP:
3120 /* XXX limited to 9234 */
3121 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3122 break;
3123 case WM_T_PCH:
3124 /* XXX limited to 4096 */
3125 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3126 break;
3127 case WM_T_82542_2_0:
3128 case WM_T_82542_2_1:
3129 case WM_T_ICH8:
3130 /* No support for jumbo frame */
3131 break;
3132 default:
3133 /* ETHER_MAX_LEN_JUMBO */
3134 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3135 break;
3136 }
3137
3138 	/* If we're an i82543 or greater, we can support VLANs. */
3139 if (sc->sc_type >= WM_T_82543) {
3140 sc->sc_ethercom.ec_capabilities |=
3141 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3142 sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3143 }
3144
3145 if ((sc->sc_flags & WM_F_EEE) != 0)
3146 sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
3147
3148 	/*
3149 	 * We can offload IPv4, TCPv4 and UDPv4 checksums (Tx and Rx),
3150 	 * plus TCPv6/UDPv6 Tx checksums, on i82543 and later.
3151 	 */
3152 if (sc->sc_type >= WM_T_82543) {
3153 ifp->if_capabilities |=
3154 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3155 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3156 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3157 IFCAP_CSUM_TCPv6_Tx |
3158 IFCAP_CSUM_UDPv6_Tx;
3159 }
3160
3161 /*
3162 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
3163 *
3164 * 82541GI (8086:1076) ... no
3165 * 82572EI (8086:10b9) ... yes
3166 */
3167 if (sc->sc_type >= WM_T_82571) {
3168 ifp->if_capabilities |=
3169 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3170 }
3171
3172 /*
3173 	 * If we're an i82544 or greater (except i82547), we can do
3174 * TCP segmentation offload.
3175 */
3176 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
3177 ifp->if_capabilities |= IFCAP_TSOv4;
3178
3179 if (sc->sc_type >= WM_T_82571)
3180 ifp->if_capabilities |= IFCAP_TSOv6;
3181
3182 sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3183 sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3184 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3185 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3186
3187 /* Attach the interface. */
3188 if_initialize(ifp);
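	/* if_percpuq(9) defers input packet processing to per-CPU softints. */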
3189 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3190 ether_ifattach(ifp, enaddr);
3191 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3192 if_register(ifp);
3193 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3194 RND_FLAG_DEFAULT);
3195
3196 #ifdef WM_EVENT_COUNTERS
3197 /* Attach event counters. */
3198 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3199 NULL, xname, "linkintr");
3200
3201 if (sc->sc_type >= WM_T_82542_2_1) {
3202 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3203 NULL, xname, "tx_xoff");
3204 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3205 NULL, xname, "tx_xon");
3206 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3207 NULL, xname, "rx_xoff");
3208 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3209 NULL, xname, "rx_xon");
3210 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3211 NULL, xname, "rx_macctl");
3212 }
3213
3214 evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
3215 NULL, xname, "CRC Error");
3216 evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
3217 NULL, xname, "Symbol Error");
3218
3219 if (sc->sc_type >= WM_T_82543) {
3220 evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
3221 NULL, xname, "Alignment Error");
3222 evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
3223 NULL, xname, "Receive Error");
3224 evcnt_attach_dynamic(&sc->sc_ev_cexterr, EVCNT_TYPE_MISC,
3225 NULL, xname, "Carrier Extension Error");
3226 }
3227
3228 evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
3229 NULL, xname, "Missed Packets");
3230 evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
3231 NULL, xname, "Collision");
3232 evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
3233 NULL, xname, "Sequence Error");
3234 evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
3235 NULL, xname, "Receive Length Error");
3236 evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
3237 NULL, xname, "Single Collision");
3238 evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
3239 NULL, xname, "Excessive Collisions");
3240 evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
3241 NULL, xname, "Multiple Collision");
3242 evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
3243 NULL, xname, "Late Collisions");
3244 evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
3245 NULL, xname, "Defer");
3246 evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
3247 NULL, xname, "Good Packets Rx");
3248 evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
3249 NULL, xname, "Broadcast Packets Rx");
3250 evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
3251 NULL, xname, "Multicast Packets Rx");
3252 evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
3253 NULL, xname, "Good Packets Tx");
3254 evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
3255 NULL, xname, "Good Octets Rx");
3256 evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
3257 NULL, xname, "Good Octets Tx");
3258 evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
3259 NULL, xname, "Rx No Buffers");
3260 evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
3261 NULL, xname, "Rx Undersize");
3262 evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
3263 NULL, xname, "Rx Fragment");
3264 evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
3265 NULL, xname, "Rx Oversize");
3266 evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
3267 NULL, xname, "Rx Jabber");
3268 evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
3269 NULL, xname, "Total Octets Rx");
3270 evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
3271 NULL, xname, "Total Octets Tx");
3272 evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
3273 NULL, xname, "Total Packets Rx");
3274 evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
3275 NULL, xname, "Total Packets Tx");
3276 evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
3277 NULL, xname, "Multicast Packets Tx");
3278 evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
3279 NULL, xname, "Broadcast Packets Tx Count");
3280 evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
3281 NULL, xname, "Packets Rx (64 bytes)");
3282 evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
3283 NULL, xname, "Packets Rx (65-127 bytes)");
3284 evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
3285 NULL, xname, "Packets Rx (128-255 bytes)");
3286 evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
3287 NULL, xname, "Packets Rx (255-511 bytes)");
3288 evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
3289 NULL, xname, "Packets Rx (512-1023 bytes)");
3290 evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
3291 NULL, xname, "Packets Rx (1024-1522 bytes)");
3292 evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
3293 NULL, xname, "Packets Tx (64 bytes)");
3294 evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
3295 NULL, xname, "Packets Tx (65-127 bytes)");
3296 evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
3297 NULL, xname, "Packets Tx (128-255 bytes)");
3298 evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
3299 NULL, xname, "Packets Tx (256-511 bytes)");
3300 evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
3301 NULL, xname, "Packets Tx (512-1023 bytes)");
3302 evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
3303 	    NULL, xname, "Packets Tx (1024-1522 bytes)");
3304 evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
3305 NULL, xname, "Interrupt Assertion");
3306 evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
3307 NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
3308 evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
3309 NULL, xname, "Intr. Cause Rx Abs Timer Expire");
3310 evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
3311 NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
3312 evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
3313 NULL, xname, "Intr. Cause Tx Abs Timer Expire");
3314 evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
3315 NULL, xname, "Intr. Cause Tx Queue Empty");
3316 evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
3317 NULL, xname, "Intr. Cause Tx Queue Min Thresh");
3318 evcnt_attach_dynamic(&sc->sc_ev_icrxdmtc, EVCNT_TYPE_MISC,
3319 NULL, xname, "Intr. Cause Rx Desc Min Thresh");
3320 evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
3321 NULL, xname, "Interrupt Cause Receiver Overrun");
3322 if (sc->sc_type >= WM_T_82543) {
3323 evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
3324 NULL, xname, "Tx with No CRS");
3325 evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
3326 NULL, xname, "TCP Segmentation Context Tx");
3327 evcnt_attach_dynamic(&sc->sc_ev_tsctfc, EVCNT_TYPE_MISC,
3328 NULL, xname, "TCP Segmentation Context Tx Fail");
3329 }
3330 if (sc->sc_type >= WM_T_82540) {
3331 evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
3332 NULL, xname, "Management Packets RX");
3333 evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
3334 NULL, xname, "Management Packets Dropped");
3335 evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
3336 NULL, xname, "Management Packets TX");
3337 }
3338 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
3339 evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
3340 NULL, xname, "BMC2OS Packets received by host");
3341 evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
3342 NULL, xname, "OS2BMC Packets transmitted by host");
3343 evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
3344 NULL, xname, "BMC2OS Packets sent by BMC");
3345 evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
3346 NULL, xname, "OS2BMC Packets received by BMC");
3347 }
3348 #endif /* WM_EVENT_COUNTERS */
3349
3350 sc->sc_txrx_use_workqueue = false;
3351
3352 if (wm_phy_need_linkdown_discard(sc)) {
3353 DPRINTF(sc, WM_DEBUG_LINK,
3354 ("%s: %s: Set linkdown discard flag\n",
3355 device_xname(sc->sc_dev), __func__));
3356 wm_set_linkdown_discard(sc);
3357 }
3358
3359 wm_init_sysctls(sc);
3360
3361 if (pmf_device_register(self, wm_suspend, wm_resume))
3362 pmf_class_network_register(self, ifp);
3363 else
3364 aprint_error_dev(self, "couldn't establish power handler\n");
3365
3366 sc->sc_flags |= WM_F_ATTACHED;
3367 out:
3368 return;
3369 }
3370
3371 /* The detach function (ca_detach) */
3372 static int
3373 wm_detach(device_t self, int flags __unused)
3374 {
3375 struct wm_softc *sc = device_private(self);
3376 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3377 int i;
3378
3379 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3380 return 0;
3381
3382 	/* Stop the interface; callouts are stopped inside wm_stop(). */
3383 IFNET_LOCK(ifp);
3384 sc->sc_dying = true;
3385 wm_stop(ifp, 1);
3386 IFNET_UNLOCK(ifp);
3387
3388 pmf_device_deregister(self);
3389
3390 sysctl_teardown(&sc->sc_sysctllog);
3391
3392 #ifdef WM_EVENT_COUNTERS
3393 evcnt_detach(&sc->sc_ev_linkintr);
3394
3395 if (sc->sc_type >= WM_T_82542_2_1) {
3396 evcnt_detach(&sc->sc_ev_tx_xoff);
3397 evcnt_detach(&sc->sc_ev_tx_xon);
3398 evcnt_detach(&sc->sc_ev_rx_xoff);
3399 evcnt_detach(&sc->sc_ev_rx_xon);
3400 evcnt_detach(&sc->sc_ev_rx_macctl);
3401 }
3402
3403 evcnt_detach(&sc->sc_ev_crcerrs);
3404 evcnt_detach(&sc->sc_ev_symerrc);
3405
3406 if (sc->sc_type >= WM_T_82543) {
3407 evcnt_detach(&sc->sc_ev_algnerrc);
3408 evcnt_detach(&sc->sc_ev_rxerrc);
3409 evcnt_detach(&sc->sc_ev_cexterr);
3410 }
3411 evcnt_detach(&sc->sc_ev_mpc);
3412 evcnt_detach(&sc->sc_ev_colc);
3413 evcnt_detach(&sc->sc_ev_sec);
3414 evcnt_detach(&sc->sc_ev_rlec);
3415 evcnt_detach(&sc->sc_ev_scc);
3416 evcnt_detach(&sc->sc_ev_ecol);
3417 evcnt_detach(&sc->sc_ev_mcc);
3418 evcnt_detach(&sc->sc_ev_latecol);
3419 evcnt_detach(&sc->sc_ev_dc);
3420 evcnt_detach(&sc->sc_ev_gprc);
3421 evcnt_detach(&sc->sc_ev_bprc);
3422 evcnt_detach(&sc->sc_ev_mprc);
3423 evcnt_detach(&sc->sc_ev_gptc);
3424 evcnt_detach(&sc->sc_ev_gorc);
3425 evcnt_detach(&sc->sc_ev_gotc);
3426 evcnt_detach(&sc->sc_ev_rnbc);
3427 evcnt_detach(&sc->sc_ev_ruc);
3428 evcnt_detach(&sc->sc_ev_rfc);
3429 evcnt_detach(&sc->sc_ev_roc);
3430 evcnt_detach(&sc->sc_ev_rjc);
3431 evcnt_detach(&sc->sc_ev_tor);
3432 evcnt_detach(&sc->sc_ev_tot);
3433 evcnt_detach(&sc->sc_ev_tpr);
3434 evcnt_detach(&sc->sc_ev_tpt);
3435 evcnt_detach(&sc->sc_ev_mptc);
3436 evcnt_detach(&sc->sc_ev_bptc);
3437 evcnt_detach(&sc->sc_ev_prc64);
3438 evcnt_detach(&sc->sc_ev_prc127);
3439 evcnt_detach(&sc->sc_ev_prc255);
3440 evcnt_detach(&sc->sc_ev_prc511);
3441 evcnt_detach(&sc->sc_ev_prc1023);
3442 evcnt_detach(&sc->sc_ev_prc1522);
3443 evcnt_detach(&sc->sc_ev_ptc64);
3444 evcnt_detach(&sc->sc_ev_ptc127);
3445 evcnt_detach(&sc->sc_ev_ptc255);
3446 evcnt_detach(&sc->sc_ev_ptc511);
3447 evcnt_detach(&sc->sc_ev_ptc1023);
3448 evcnt_detach(&sc->sc_ev_ptc1522);
3449 evcnt_detach(&sc->sc_ev_iac);
3450 evcnt_detach(&sc->sc_ev_icrxptc);
3451 evcnt_detach(&sc->sc_ev_icrxatc);
3452 evcnt_detach(&sc->sc_ev_ictxptc);
3453 evcnt_detach(&sc->sc_ev_ictxact);
3454 evcnt_detach(&sc->sc_ev_ictxqec);
3455 evcnt_detach(&sc->sc_ev_ictxqmtc);
3456 evcnt_detach(&sc->sc_ev_icrxdmtc);
3457 evcnt_detach(&sc->sc_ev_icrxoc);
3458 if (sc->sc_type >= WM_T_82543) {
3459 evcnt_detach(&sc->sc_ev_tncrs);
3460 evcnt_detach(&sc->sc_ev_tsctc);
3461 evcnt_detach(&sc->sc_ev_tsctfc);
3462 }
3463 if (sc->sc_type >= WM_T_82540) {
3464 evcnt_detach(&sc->sc_ev_mgtprc);
3465 evcnt_detach(&sc->sc_ev_mgtpdc);
3466 evcnt_detach(&sc->sc_ev_mgtptc);
3467 }
3468 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
3469 evcnt_detach(&sc->sc_ev_b2ogprc);
3470 evcnt_detach(&sc->sc_ev_o2bspc);
3471 evcnt_detach(&sc->sc_ev_b2ospc);
3472 evcnt_detach(&sc->sc_ev_o2bgptc);
3473 }
3474 #endif /* WM_EVENT_COUNTERS */
3475
3476 rnd_detach_source(&sc->rnd_source);
3477
3478 /* Tell the firmware about the release */
3479 mutex_enter(sc->sc_core_lock);
3480 wm_release_manageability(sc);
3481 wm_release_hw_control(sc);
3482 wm_enable_wakeup(sc);
3483 mutex_exit(sc->sc_core_lock);
3484
3485 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3486
3487 ether_ifdetach(ifp);
3488 if_detach(ifp);
3489 if_percpuq_destroy(sc->sc_ipq);
3490
3491 /* Delete all remaining media. */
3492 ifmedia_fini(&sc->sc_mii.mii_media);
3493
3494 /* Unload RX dmamaps and free mbufs */
3495 for (i = 0; i < sc->sc_nqueues; i++) {
3496 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3497 mutex_enter(rxq->rxq_lock);
3498 wm_rxdrain(rxq);
3499 mutex_exit(rxq->rxq_lock);
3500 }
3501 /* Must unlock here */
3502
3503 /* Disestablish the interrupt handler */
3504 for (i = 0; i < sc->sc_nintrs; i++) {
3505 if (sc->sc_ihs[i] != NULL) {
3506 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3507 sc->sc_ihs[i] = NULL;
3508 }
3509 }
3510 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3511
3512 /* wm_stop() ensured that the workqueues are stopped. */
3513 workqueue_destroy(sc->sc_queue_wq);
3514 workqueue_destroy(sc->sc_reset_wq);
3515
3516 for (i = 0; i < sc->sc_nqueues; i++)
3517 softint_disestablish(sc->sc_queue[i].wmq_si);
3518
3519 wm_free_txrx_queues(sc);
3520
3521 /* Unmap the registers */
3522 if (sc->sc_ss) {
3523 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3524 sc->sc_ss = 0;
3525 }
3526 if (sc->sc_ios) {
3527 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3528 sc->sc_ios = 0;
3529 }
3530 if (sc->sc_flashs) {
3531 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3532 sc->sc_flashs = 0;
3533 }
3534
3535 if (sc->sc_core_lock)
3536 mutex_obj_free(sc->sc_core_lock);
3537 if (sc->sc_ich_phymtx)
3538 mutex_obj_free(sc->sc_ich_phymtx);
3539 if (sc->sc_ich_nvmmtx)
3540 mutex_obj_free(sc->sc_ich_nvmmtx);
3541
3542 return 0;
3543 }
3544
3545 static bool
3546 wm_suspend(device_t self, const pmf_qual_t *qual)
3547 {
3548 struct wm_softc *sc = device_private(self);
3549
3550 wm_release_manageability(sc);
3551 wm_release_hw_control(sc);
3552 wm_enable_wakeup(sc);
3553
3554 return true;
3555 }
3556
3557 static bool
3558 wm_resume(device_t self, const pmf_qual_t *qual)
3559 {
3560 struct wm_softc *sc = device_private(self);
3561 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3562 pcireg_t reg;
3563 char buf[256];
3564
3565 reg = CSR_READ(sc, WMREG_WUS);
3566 if (reg != 0) {
3567 snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3568 device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3569 CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3570 }
3571
3572 if (sc->sc_type >= WM_T_PCH2)
3573 wm_resume_workarounds_pchlan(sc);
3574 IFNET_LOCK(ifp);
3575 if ((ifp->if_flags & IFF_UP) == 0) {
3576 /* >= PCH_SPT hardware workaround before reset. */
3577 if (sc->sc_type >= WM_T_PCH_SPT)
3578 wm_flush_desc_rings(sc);
3579
3580 wm_reset(sc);
3581 /* Non-AMT based hardware can now take control from firmware */
3582 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3583 wm_get_hw_control(sc);
3584 wm_init_manageability(sc);
3585 } else {
3586 /*
3587 * We called pmf_class_network_register(), so if_init() is
3588 * automatically called when IFF_UP. wm_reset(),
3589 * wm_get_hw_control() and wm_init_manageability() are called
3590 * via wm_init().
3591 */
3592 }
3593 IFNET_UNLOCK(ifp);
3594
3595 return true;
3596 }
3597
3598 /*
3599 * wm_watchdog:
3600 *
3601 * Watchdog checker.
3602 */
3603 static bool
3604 wm_watchdog(struct ifnet *ifp)
3605 {
3606 int qid;
3607 struct wm_softc *sc = ifp->if_softc;
3608 	uint16_t hang_queue = 0; /* Bitmap of hung queues; at most 16 (82576). */
3609
3610 for (qid = 0; qid < sc->sc_nqueues; qid++) {
3611 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3612
3613 wm_watchdog_txq(ifp, txq, &hang_queue);
3614 }
3615
3616 #ifdef WM_DEBUG
3617 if (sc->sc_trigger_reset) {
3618 /* debug operation, no need for atomicity or reliability */
3619 sc->sc_trigger_reset = 0;
3620 hang_queue++;
3621 }
3622 #endif
3623
3624 if (hang_queue == 0)
3625 return true;
3626
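	/* Enqueue the reset work only once; later hangs see the pending flag. */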
3627 if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
3628 workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
3629
3630 return false;
3631 }
3632
3633 /*
3634 * Perform an interface watchdog reset.
3635 */
3636 static void
3637 wm_handle_reset_work(struct work *work, void *arg)
3638 {
3639 struct wm_softc * const sc = arg;
3640 struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
3641
3642 /* Don't want ioctl operations to happen */
3643 IFNET_LOCK(ifp);
3644
3645 	/* Reset the interface. */
3646 wm_init(ifp);
3647
3648 IFNET_UNLOCK(ifp);
3649
3650 	/*
3651 	 * Some upper-layer processing may still call ifp->if_start(),
3652 	 * e.g. ALTQ or single-CPU systems.
3653 	 */
3654 /* Try to get more packets going. */
3655 ifp->if_start(ifp);
3656
3657 atomic_store_relaxed(&sc->sc_reset_pending, 0);
3658 }
3659
3660
3661 static void
3662 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3663 {
3664
3665 mutex_enter(txq->txq_lock);
3666 if (txq->txq_sending &&
3667 time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3668 wm_watchdog_txq_locked(ifp, txq, hang);
3669
3670 mutex_exit(txq->txq_lock);
3671 }
3672
3673 static void
3674 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3675 uint16_t *hang)
3676 {
3677 struct wm_softc *sc = ifp->if_softc;
3678 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3679
3680 KASSERT(mutex_owned(txq->txq_lock));
3681
3682 /*
3683 * Since we're using delayed interrupts, sweep up
3684 * before we report an error.
3685 */
3686 wm_txeof(txq, UINT_MAX);
3687
3688 if (txq->txq_sending)
3689 *hang |= __BIT(wmq->wmq_id);
3690
3691 if (txq->txq_free == WM_NTXDESC(txq)) {
3692 log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3693 device_xname(sc->sc_dev));
3694 } else {
3695 #ifdef WM_DEBUG
3696 int i, j;
3697 struct wm_txsoft *txs;
3698 #endif
3699 log(LOG_ERR,
3700 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3701 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3702 txq->txq_next);
3703 if_statinc(ifp, if_oerrors);
3704 #ifdef WM_DEBUG
3705 for (i = txq->txq_sdirty; i != txq->txq_snext;
3706 i = WM_NEXTTXS(txq, i)) {
3707 txs = &txq->txq_soft[i];
3708 printf("txs %d tx %d -> %d\n",
3709 i, txs->txs_firstdesc, txs->txs_lastdesc);
3710 for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3711 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3712 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3713 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3714 printf("\t %#08x%08x\n",
3715 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3716 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3717 } else {
3718 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3719 (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3720 txq->txq_descs[j].wtx_addr.wa_low);
3721 printf("\t %#04x%02x%02x%08x\n",
3722 txq->txq_descs[j].wtx_fields.wtxu_vlan,
3723 txq->txq_descs[j].wtx_fields.wtxu_options,
3724 txq->txq_descs[j].wtx_fields.wtxu_status,
3725 txq->txq_descs[j].wtx_cmdlen);
3726 }
3727 if (j == txs->txs_lastdesc)
3728 break;
3729 }
3730 }
3731 #endif
3732 }
3733 }
3734
3735 /*
3736 * wm_tick:
3737 *
3738 * One second timer, used to check link status, sweep up
3739 * completed transmit jobs, etc.
3740 */
3741 static void
3742 wm_tick(void *arg)
3743 {
3744 struct wm_softc *sc = arg;
3745 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3746 uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
3747 cexterr;
3748
3749 mutex_enter(sc->sc_core_lock);
3750
3751 if (sc->sc_core_stopping) {
3752 mutex_exit(sc->sc_core_lock);
3753 return;
3754 }
3755
3756 crcerrs = CSR_READ(sc, WMREG_CRCERRS);
3757 symerrc = CSR_READ(sc, WMREG_SYMERRC);
3758 mpc = CSR_READ(sc, WMREG_MPC);
3759 colc = CSR_READ(sc, WMREG_COLC);
3760 sec = CSR_READ(sc, WMREG_SEC);
3761 rlec = CSR_READ(sc, WMREG_RLEC);
3762
3763 WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
3764 WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
3765 WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
3766 WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
3767 WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
3768 WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
3769
3770 if (sc->sc_type >= WM_T_82542_2_1) {
3771 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3772 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3773 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3774 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3775 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3776 }
3777 WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
3778 WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
3779 WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
3780 WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
3781 WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
3782 WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
3783 WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
3784 WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
3785 WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
3786
3787 	WM_EVCNT_ADD(&sc->sc_ev_gorc, CSR_READ(sc, WMREG_GORCL) +
3788 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
3789 	WM_EVCNT_ADD(&sc->sc_ev_gotc, CSR_READ(sc, WMREG_GOTCL) +
3790 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
3791
3792 WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
3793 WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
3794 WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
3795 WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
3796 WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
3797
3798 	WM_EVCNT_ADD(&sc->sc_ev_tor, CSR_READ(sc, WMREG_TORL) +
3799 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
3800 	WM_EVCNT_ADD(&sc->sc_ev_tot, CSR_READ(sc, WMREG_TOTL) +
3801 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
3802
3803 WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
3804 WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
3805 WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
3806 WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
3807 WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
3808 WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
3809 WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
3810 WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
3811 WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
3812 WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
3813 WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
3814 WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
3815 WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
3816 WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
3817 WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
3818 WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
3819 WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
3820 WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
3821 WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
3822 WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
3823 WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
3824 WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
3825 WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc, CSR_READ(sc, WMREG_ICTXQMTC));
3826 WM_EVCNT_ADD(&sc->sc_ev_icrxdmtc, CSR_READ(sc, WMREG_ICRXDMTC));
3827 WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
3828
3829 if (sc->sc_type >= WM_T_82543) {
3830 algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
3831 rxerrc = CSR_READ(sc, WMREG_RXERRC);
3832 cexterr = CSR_READ(sc, WMREG_CEXTERR);
3833 WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
3834 WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
3835 WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
3836
3837 WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
3838 WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
3839 WM_EVCNT_ADD(&sc->sc_ev_tsctfc, CSR_READ(sc, WMREG_TSCTFC));
3840 } else
3841 algnerrc = rxerrc = cexterr = 0;
3842
3843 if (sc->sc_type >= WM_T_82540) {
3844 WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
3845 WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
3846 WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
3847 }
3848 if (((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003))
3849 && ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0)) {
3850 WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, CSR_READ(sc, WMREG_B2OGPRC));
3851 WM_EVCNT_ADD(&sc->sc_ev_o2bspc, CSR_READ(sc, WMREG_O2BSPC));
3852 WM_EVCNT_ADD(&sc->sc_ev_b2ospc, CSR_READ(sc, WMREG_B2OSPC));
3853 WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, CSR_READ(sc, WMREG_O2BGPTC));
3854 }
3855 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3856 if_statadd_ref(nsr, if_collisions, colc);
3857 if_statadd_ref(nsr, if_ierrors,
3858 crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
3859 	/*
3860 	 * WMREG_RNBC is incremented when there is no available buffer
3861 	 * in host memory. It does not count dropped packets, because
3862 	 * an Ethernet controller can still receive packets in that
3863 	 * case as long as there is space in the PHY's FIFO.
3864 	 *
3865 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT
3866 	 * instead of if_iqdrops.
3867 	 */
3868 if_statadd_ref(nsr, if_iqdrops, mpc);
3869 IF_STAT_PUTREF(ifp);
3870
3871 if (sc->sc_flags & WM_F_HAS_MII)
3872 mii_tick(&sc->sc_mii);
3873 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3874 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3875 wm_serdes_tick(sc);
3876 else
3877 wm_tbi_tick(sc);
3878
3879 mutex_exit(sc->sc_core_lock);
3880
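	/* Reschedule the tick only when no watchdog reset was scheduled. */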
3881 if (wm_watchdog(ifp))
3882 callout_schedule(&sc->sc_tick_ch, hz);
3883 }
3884
3885 static int
3886 wm_ifflags_cb(struct ethercom *ec)
3887 {
3888 struct ifnet *ifp = &ec->ec_if;
3889 struct wm_softc *sc = ifp->if_softc;
3890 u_short iffchange;
3891 int ecchange;
3892 bool needreset = false;
3893 int rc = 0;
3894
3895 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3896 device_xname(sc->sc_dev), __func__));
3897
3898 KASSERT(IFNET_LOCKED(ifp));
3899
3900 mutex_enter(sc->sc_core_lock);
3901
3902 	/*
3903 	 * Check for changes in if_flags.
3904 	 * The main purpose is to avoid a link-down when opening bpf.
3905 	 */
3906 iffchange = ifp->if_flags ^ sc->sc_if_flags;
3907 sc->sc_if_flags = ifp->if_flags;
3908 if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3909 needreset = true;
3910 goto ec;
3911 }
3912
3913 /* iff related updates */
3914 if ((iffchange & IFF_PROMISC) != 0)
3915 wm_set_filter(sc);
3916
3917 wm_set_vlan(sc);
3918
3919 ec:
3920 /* Check for ec_capenable. */
3921 ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
3922 sc->sc_ec_capenable = ec->ec_capenable;
3923 if ((ecchange & ~ETHERCAP_EEE) != 0) {
3924 needreset = true;
3925 goto out;
3926 }
3927
3928 /* ec related updates */
3929 wm_set_eee(sc);
3930
3931 out:
3932 if (needreset)
3933 rc = ENETRESET;
3934 mutex_exit(sc->sc_core_lock);
3935
3936 return rc;
3937 }
3938
3939 static bool
3940 wm_phy_need_linkdown_discard(struct wm_softc *sc)
3941 {
3942
3943 switch (sc->sc_phytype) {
3944 case WMPHY_82577: /* ihphy */
3945 case WMPHY_82578: /* atphy */
3946 case WMPHY_82579: /* ihphy */
3947 case WMPHY_I217: /* ihphy */
3948 case WMPHY_82580: /* ihphy */
3949 case WMPHY_I350: /* ihphy */
3950 return true;
3951 default:
3952 return false;
3953 }
3954 }
3955
3956 static void
3957 wm_set_linkdown_discard(struct wm_softc *sc)
3958 {
3959
3960 for (int i = 0; i < sc->sc_nqueues; i++) {
3961 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3962
3963 mutex_enter(txq->txq_lock);
3964 txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
3965 mutex_exit(txq->txq_lock);
3966 }
3967 }
3968
3969 static void
3970 wm_clear_linkdown_discard(struct wm_softc *sc)
3971 {
3972
3973 for (int i = 0; i < sc->sc_nqueues; i++) {
3974 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3975
3976 mutex_enter(txq->txq_lock);
3977 txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
3978 mutex_exit(txq->txq_lock);
3979 }
3980 }
3981
3982 /*
3983 * wm_ioctl: [ifnet interface function]
3984 *
3985 * Handle control requests from the operator.
3986 */
3987 static int
3988 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3989 {
3990 struct wm_softc *sc = ifp->if_softc;
3991 struct ifreq *ifr = (struct ifreq *)data;
3992 struct ifaddr *ifa = (struct ifaddr *)data;
3993 struct sockaddr_dl *sdl;
3994 int error;
3995
3996 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3997 device_xname(sc->sc_dev), __func__));
3998
3999 switch (cmd) {
4000 case SIOCADDMULTI:
4001 case SIOCDELMULTI:
4002 break;
4003 default:
4004 KASSERT(IFNET_LOCKED(ifp));
4005 }
4006
4007 switch (cmd) {
4008 case SIOCSIFMEDIA:
4009 mutex_enter(sc->sc_core_lock);
4010 /* Flow control requires full-duplex mode. */
4011 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4012 (ifr->ifr_media & IFM_FDX) == 0)
4013 ifr->ifr_media &= ~IFM_ETH_FMASK;
4014 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4015 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4016 /* We can do both TXPAUSE and RXPAUSE. */
4017 ifr->ifr_media |=
4018 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4019 }
4020 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4021 }
4022 mutex_exit(sc->sc_core_lock);
4023 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
4024 if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
4025 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
4026 DPRINTF(sc, WM_DEBUG_LINK,
4027 ("%s: %s: Set linkdown discard flag\n",
4028 device_xname(sc->sc_dev), __func__));
4029 wm_set_linkdown_discard(sc);
4030 }
4031 }
4032 break;
4033 case SIOCINITIFADDR:
4034 mutex_enter(sc->sc_core_lock);
4035 if (ifa->ifa_addr->sa_family == AF_LINK) {
4036 sdl = satosdl(ifp->if_dl->ifa_addr);
4037 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
4038 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
4039 /* Unicast address is the first multicast entry */
4040 wm_set_filter(sc);
4041 error = 0;
4042 mutex_exit(sc->sc_core_lock);
4043 break;
4044 }
4045 mutex_exit(sc->sc_core_lock);
4046 /*FALLTHROUGH*/
4047 default:
4048 if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
4049 if (((ifp->if_flags & IFF_UP) != 0) &&
4050 ((ifr->ifr_flags & IFF_UP) == 0)) {
4051 DPRINTF(sc, WM_DEBUG_LINK,
4052 ("%s: %s: Set linkdown discard flag\n",
4053 device_xname(sc->sc_dev), __func__));
4054 wm_set_linkdown_discard(sc);
4055 }
4056 }
4057 const int s = splnet();
4058 /* It may call wm_start, so unlock here */
4059 error = ether_ioctl(ifp, cmd, data);
4060 splx(s);
4061 if (error != ENETRESET)
4062 break;
4063
4064 error = 0;
4065
4066 if (cmd == SIOCSIFCAP)
4067 error = if_init(ifp);
4068 else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
4069 mutex_enter(sc->sc_core_lock);
4070 if (sc->sc_if_flags & IFF_RUNNING) {
4071 /*
4072 * Multicast list has changed; set the
4073 * hardware filter accordingly.
4074 */
4075 wm_set_filter(sc);
4076 }
4077 mutex_exit(sc->sc_core_lock);
4078 }
4079 break;
4080 }
4081
4082 return error;
4083 }
4084
4085 /* MAC address related */
4086
4087 /*
4088  * Get the offset of the MAC address and return it.
4089  * If an error occurs, use offset 0.
4090  */
4091 static uint16_t
4092 wm_check_alt_mac_addr(struct wm_softc *sc)
4093 {
4094 uint16_t myea[ETHER_ADDR_LEN / 2];
4095 uint16_t offset = NVM_OFF_MACADDR;
4096
4097 /* Try to read alternative MAC address pointer */
4098 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
4099 return 0;
4100
4101 	/* Check whether the pointer is valid. */
4102 if ((offset == 0x0000) || (offset == 0xffff))
4103 return 0;
4104
4105 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
4106 	/*
4107 	 * Check whether the alternative MAC address is valid.
4108 	 * Some cards have a non-0xffff pointer but don't actually
4109 	 * use an alternative MAC address.
4110 	 *
4111 	 * A valid unicast address must have the multicast (group) bit clear.
4112 	 */
4113 if (wm_nvm_read(sc, offset, 1, myea) == 0)
4114 if (((myea[0] & 0xff) & 0x01) == 0)
4115 return offset; /* Found */
4116
4117 /* Not found */
4118 return 0;
4119 }
4120
4121 static int
4122 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4123 {
4124 uint16_t myea[ETHER_ADDR_LEN / 2];
4125 uint16_t offset = NVM_OFF_MACADDR;
4126 int do_invert = 0;
4127
4128 switch (sc->sc_type) {
4129 case WM_T_82580:
4130 case WM_T_I350:
4131 case WM_T_I354:
4132 /* EEPROM Top Level Partitioning */
4133 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
4134 break;
4135 case WM_T_82571:
4136 case WM_T_82575:
4137 case WM_T_82576:
4138 case WM_T_80003:
4139 case WM_T_I210:
4140 case WM_T_I211:
4141 offset = wm_check_alt_mac_addr(sc);
4142 if (offset == 0)
4143 if ((sc->sc_funcid & 0x01) == 1)
4144 do_invert = 1;
4145 break;
4146 default:
4147 if ((sc->sc_funcid & 0x01) == 1)
4148 do_invert = 1;
4149 break;
4150 }
4151
4152 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
4153 goto bad;
4154
4155 enaddr[0] = myea[0] & 0xff;
4156 enaddr[1] = myea[0] >> 8;
4157 enaddr[2] = myea[1] & 0xff;
4158 enaddr[3] = myea[1] >> 8;
4159 enaddr[4] = myea[2] & 0xff;
4160 enaddr[5] = myea[2] >> 8;
4161
4162 /*
4163 * Toggle the LSB of the MAC address on the second port
4164 * of some dual port cards.
4165 */
4166 if (do_invert != 0)
4167 enaddr[5] ^= 1;
4168
4169 return 0;
4170
4171 bad:
4172 return -1;
4173 }
4174
4175 /*
4176 * wm_set_ral:
4177 *
4178  *	Set an entry in the receive address list.
4179 */
4180 static void
4181 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4182 {
4183 uint32_t ral_lo, ral_hi, addrl, addrh;
4184 uint32_t wlock_mac;
4185 int rv;
4186
4187 if (enaddr != NULL) {
4188 ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
4189 ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
4190 ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
4191 ral_hi |= RAL_AV;
4192 } else {
4193 ral_lo = 0;
4194 ral_hi = 0;
4195 }
4196
4197 switch (sc->sc_type) {
4198 case WM_T_82542_2_0:
4199 case WM_T_82542_2_1:
4200 case WM_T_82543:
4201 CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
4202 CSR_WRITE_FLUSH(sc);
4203 CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
4204 CSR_WRITE_FLUSH(sc);
4205 break;
4206 case WM_T_PCH2:
4207 case WM_T_PCH_LPT:
4208 case WM_T_PCH_SPT:
4209 case WM_T_PCH_CNP:
4210 if (idx == 0) {
4211 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4212 CSR_WRITE_FLUSH(sc);
4213 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4214 CSR_WRITE_FLUSH(sc);
4215 return;
4216 }
4217 if (sc->sc_type != WM_T_PCH2) {
4218 wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
4219 FWSM_WLOCK_MAC);
4220 addrl = WMREG_SHRAL(idx - 1);
4221 addrh = WMREG_SHRAH(idx - 1);
4222 } else {
4223 wlock_mac = 0;
4224 addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
4225 addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
4226 }
4227
4228 if ((wlock_mac == 0) || (idx <= wlock_mac)) {
4229 rv = wm_get_swflag_ich8lan(sc);
4230 if (rv != 0)
4231 return;
4232 CSR_WRITE(sc, addrl, ral_lo);
4233 CSR_WRITE_FLUSH(sc);
4234 CSR_WRITE(sc, addrh, ral_hi);
4235 CSR_WRITE_FLUSH(sc);
4236 wm_put_swflag_ich8lan(sc);
4237 }
4238
4239 break;
4240 default:
4241 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4242 CSR_WRITE_FLUSH(sc);
4243 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4244 CSR_WRITE_FLUSH(sc);
4245 break;
4246 }
4247 }
4248
4249 /*
4250 * wm_mchash:
4251 *
4252 * Compute the hash of the multicast address for the 4096-bit
4253 * multicast filter.
4254 */
4255 static uint32_t
4256 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4257 {
4258 static const int lo_shift[4] = { 4, 3, 2, 0 };
4259 static const int hi_shift[4] = { 4, 5, 6, 8 };
4260 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4261 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
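	/*
	 * The shift tables pick which address bits feed the hash;
	 * sc_mchash_type mirrors the RCTL MO (multicast offset) setting.
	 */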
4262 uint32_t hash;
4263
4264 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4265 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4266 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4267 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
4268 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4269 (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4270 return (hash & 0x3ff);
4271 }
4272 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4273 (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4274
4275 return (hash & 0xfff);
4276 }
4277
4278 /*
4279  * wm_rar_count:
4280  *	Return the number of entries in the receive address list.
4281  */
4282 static int
4283 wm_rar_count(struct wm_softc *sc)
4284 {
4285 int size;
4286
4287 switch (sc->sc_type) {
4288 case WM_T_ICH8:
4289 		size = WM_RAL_TABSIZE_ICH8 - 1;
4290 break;
4291 case WM_T_ICH9:
4292 case WM_T_ICH10:
4293 case WM_T_PCH:
4294 size = WM_RAL_TABSIZE_ICH8;
4295 break;
4296 case WM_T_PCH2:
4297 size = WM_RAL_TABSIZE_PCH2;
4298 break;
4299 case WM_T_PCH_LPT:
4300 case WM_T_PCH_SPT:
4301 case WM_T_PCH_CNP:
4302 size = WM_RAL_TABSIZE_PCH_LPT;
4303 break;
4304 case WM_T_82575:
4305 case WM_T_I210:
4306 case WM_T_I211:
4307 size = WM_RAL_TABSIZE_82575;
4308 break;
4309 case WM_T_82576:
4310 case WM_T_82580:
4311 size = WM_RAL_TABSIZE_82576;
4312 break;
4313 case WM_T_I350:
4314 case WM_T_I354:
4315 size = WM_RAL_TABSIZE_I350;
4316 break;
4317 default:
4318 size = WM_RAL_TABSIZE;
4319 }
4320
4321 return size;
4322 }
4323
4324 /*
4325 * wm_set_filter:
4326 *
4327 * Set up the receive filter.
4328 */
4329 static void
4330 wm_set_filter(struct wm_softc *sc)
4331 {
4332 struct ethercom *ec = &sc->sc_ethercom;
4333 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4334 struct ether_multi *enm;
4335 struct ether_multistep step;
4336 bus_addr_t mta_reg;
4337 uint32_t hash, reg, bit;
4338 int i, size, ralmax, rv;
4339
4340 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4341 device_xname(sc->sc_dev), __func__));
4342 KASSERT(mutex_owned(sc->sc_core_lock));
4343
4344 if (sc->sc_type >= WM_T_82544)
4345 mta_reg = WMREG_CORDOVA_MTA;
4346 else
4347 mta_reg = WMREG_MTA;
4348
4349 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4350
4351 if (sc->sc_if_flags & IFF_BROADCAST)
4352 sc->sc_rctl |= RCTL_BAM;
4353 if (sc->sc_if_flags & IFF_PROMISC) {
4354 sc->sc_rctl |= RCTL_UPE;
4355 ETHER_LOCK(ec);
4356 ec->ec_flags |= ETHER_F_ALLMULTI;
4357 ETHER_UNLOCK(ec);
4358 goto allmulti;
4359 }
4360
4361 /*
4362 * Set the station address in the first RAL slot, and
4363 * clear the remaining slots.
4364 */
4365 size = wm_rar_count(sc);
4366 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4367
4368 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
4369 || (sc->sc_type == WM_T_PCH_CNP)) {
4370 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
4371 switch (i) {
4372 case 0:
4373 /* We can use all entries */
4374 ralmax = size;
4375 break;
4376 case 1:
4377 /* Only RAR[0] */
4378 ralmax = 1;
4379 break;
4380 default:
4381 /* Available SHRA + RAR[0] */
4382 ralmax = i + 1;
4383 }
4384 } else
4385 ralmax = size;
4386 for (i = 1; i < size; i++) {
4387 if (i < ralmax)
4388 wm_set_ral(sc, NULL, i);
4389 }
4390
4391 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4392 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4393 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4394 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
4395 size = WM_ICH8_MC_TABSIZE;
4396 else
4397 size = WM_MC_TABSIZE;
4398 /* Clear out the multicast table. */
4399 for (i = 0; i < size; i++) {
4400 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4401 CSR_WRITE_FLUSH(sc);
4402 }
4403
4404 ETHER_LOCK(ec);
4405 ETHER_FIRST_MULTI(step, ec, enm);
4406 while (enm != NULL) {
4407 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4408 ec->ec_flags |= ETHER_F_ALLMULTI;
4409 ETHER_UNLOCK(ec);
4410 /*
4411 * We must listen to a range of multicast addresses.
4412 * For now, just accept all multicasts, rather than
4413 * trying to set only those filter bits needed to match
4414 * the range. (At this time, the only use of address
4415 * ranges is for IP multicast routing, for which the
4416 * range is big enough to require all bits set.)
4417 */
4418 goto allmulti;
4419 }
4420
4421 hash = wm_mchash(sc, enm->enm_addrlo);
4422
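		/* hash selects a 32-bit MTA word (reg) and a bit within it. */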
4423 reg = (hash >> 5);
4424 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4425 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4426 || (sc->sc_type == WM_T_PCH2)
4427 || (sc->sc_type == WM_T_PCH_LPT)
4428 || (sc->sc_type == WM_T_PCH_SPT)
4429 || (sc->sc_type == WM_T_PCH_CNP))
4430 reg &= 0x1f;
4431 else
4432 reg &= 0x7f;
4433 bit = hash & 0x1f;
4434
4435 hash = CSR_READ(sc, mta_reg + (reg << 2));
4436 hash |= 1U << bit;
4437
4438 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
4439 /*
4440 			 * 82544 Errata 9: Certain registers cannot be written
4441 * with particular alignments in PCI-X bus operation
4442 * (FCAH, MTA and VFTA).
4443 */
4444 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4445 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4446 CSR_WRITE_FLUSH(sc);
4447 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4448 CSR_WRITE_FLUSH(sc);
4449 } else {
4450 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4451 CSR_WRITE_FLUSH(sc);
4452 }
4453
4454 ETHER_NEXT_MULTI(step, enm);
4455 }
4456 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4457 ETHER_UNLOCK(ec);
4458
4459 goto setit;
4460
4461 allmulti:
4462 sc->sc_rctl |= RCTL_MPE;
4463
4464 setit:
4465 if (sc->sc_type >= WM_T_PCH2) {
4466 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4467 && (ifp->if_mtu > ETHERMTU))
4468 rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
4469 else
4470 rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
4471 if (rv != 0)
4472 device_printf(sc->sc_dev,
4473 "Failed to do workaround for jumbo frame.\n");
4474 }
4475
4476 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4477 }
4478
4479 /* Reset and init related */
4480
4481 static void
4482 wm_set_vlan(struct wm_softc *sc)
4483 {
4484
4485 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4486 device_xname(sc->sc_dev), __func__));
4487
4488 /* Deal with VLAN enables. */
4489 if (VLAN_ATTACHED(&sc->sc_ethercom))
4490 sc->sc_ctrl |= CTRL_VME;
4491 else
4492 sc->sc_ctrl &= ~CTRL_VME;
4493
4494 /* Write the control registers. */
4495 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4496 }
4497
4498 static void
4499 wm_set_pcie_completion_timeout(struct wm_softc *sc)
4500 {
4501 uint32_t gcr;
4502 pcireg_t ctrl2;
4503
4504 gcr = CSR_READ(sc, WMREG_GCR);
4505
4506 /* Only take action if timeout value is defaulted to 0 */
4507 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
4508 goto out;
4509
4510 if ((gcr & GCR_CAP_VER2) == 0) {
4511 gcr |= GCR_CMPL_TMOUT_10MS;
4512 goto out;
4513 }
4514
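	/* Capability version 2 devices take the timeout from the standard DCSR2. */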
4515 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4516 sc->sc_pcixe_capoff + PCIE_DCSR2);
4517 ctrl2 |= WM_PCIE_DCSR2_16MS;
4518 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4519 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
4520
4521 out:
4522 /* Disable completion timeout resend */
4523 gcr &= ~GCR_CMPL_TMOUT_RESEND;
4524
4525 CSR_WRITE(sc, WMREG_GCR, gcr);
4526 }
4527
4528 void
4529 wm_get_auto_rd_done(struct wm_softc *sc)
4530 {
4531 int i;
4532
4533 	/* Wait for eeprom to reload */
4534 switch (sc->sc_type) {
4535 case WM_T_82571:
4536 case WM_T_82572:
4537 case WM_T_82573:
4538 case WM_T_82574:
4539 case WM_T_82583:
4540 case WM_T_82575:
4541 case WM_T_82576:
4542 case WM_T_82580:
4543 case WM_T_I350:
4544 case WM_T_I354:
4545 case WM_T_I210:
4546 case WM_T_I211:
4547 case WM_T_80003:
4548 case WM_T_ICH8:
4549 case WM_T_ICH9:
4550 for (i = 0; i < 10; i++) {
4551 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4552 break;
4553 delay(1000);
4554 }
4555 if (i == 10) {
4556 log(LOG_ERR, "%s: auto read from eeprom failed to "
4557 "complete\n", device_xname(sc->sc_dev));
4558 }
4559 break;
4560 default:
4561 break;
4562 }
4563 }
4564
4565 void
4566 wm_lan_init_done(struct wm_softc *sc)
4567 {
4568 uint32_t reg = 0;
4569 int i;
4570
4571 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4572 device_xname(sc->sc_dev), __func__));
4573
4574 /* Wait for eeprom to reload */
4575 switch (sc->sc_type) {
4576 case WM_T_ICH10:
4577 case WM_T_PCH:
4578 case WM_T_PCH2:
4579 case WM_T_PCH_LPT:
4580 case WM_T_PCH_SPT:
4581 case WM_T_PCH_CNP:
4582 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4583 reg = CSR_READ(sc, WMREG_STATUS);
4584 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4585 break;
4586 delay(100);
4587 }
4588 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4589 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4590 "complete\n", device_xname(sc->sc_dev), __func__);
4591 }
4592 break;
4593 default:
4594 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4595 __func__);
4596 break;
4597 }
4598
4599 reg &= ~STATUS_LAN_INIT_DONE;
4600 CSR_WRITE(sc, WMREG_STATUS, reg);
4601 }
4602
4603 void
4604 wm_get_cfg_done(struct wm_softc *sc)
4605 {
4606 int mask;
4607 uint32_t reg;
4608 int i;
4609
4610 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4611 device_xname(sc->sc_dev), __func__));
4612
4613 /* Wait for eeprom to reload */
4614 switch (sc->sc_type) {
4615 case WM_T_82542_2_0:
4616 case WM_T_82542_2_1:
4617 /* null */
4618 break;
4619 case WM_T_82543:
4620 case WM_T_82544:
4621 case WM_T_82540:
4622 case WM_T_82545:
4623 case WM_T_82545_3:
4624 case WM_T_82546:
4625 case WM_T_82546_3:
4626 case WM_T_82541:
4627 case WM_T_82541_2:
4628 case WM_T_82547:
4629 case WM_T_82547_2:
4630 case WM_T_82573:
4631 case WM_T_82574:
4632 case WM_T_82583:
4633 /* generic */
4634 delay(10*1000);
4635 break;
4636 case WM_T_80003:
4637 case WM_T_82571:
4638 case WM_T_82572:
4639 case WM_T_82575:
4640 case WM_T_82576:
4641 case WM_T_82580:
4642 case WM_T_I350:
4643 case WM_T_I354:
4644 case WM_T_I210:
4645 case WM_T_I211:
4646 if (sc->sc_type == WM_T_82571) {
4647 /* Only 82571 shares port 0 */
4648 mask = EEMNGCTL_CFGDONE_0;
4649 } else
4650 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4651 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4652 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4653 break;
4654 delay(1000);
4655 }
4656 if (i >= WM_PHY_CFG_TIMEOUT)
4657 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
4658 device_xname(sc->sc_dev), __func__));
4659 break;
4660 case WM_T_ICH8:
4661 case WM_T_ICH9:
4662 case WM_T_ICH10:
4663 case WM_T_PCH:
4664 case WM_T_PCH2:
4665 case WM_T_PCH_LPT:
4666 case WM_T_PCH_SPT:
4667 case WM_T_PCH_CNP:
4668 delay(10*1000);
4669 if (sc->sc_type >= WM_T_ICH10)
4670 wm_lan_init_done(sc);
4671 else
4672 wm_get_auto_rd_done(sc);
4673
4674 /* Clear PHY Reset Asserted bit */
4675 reg = CSR_READ(sc, WMREG_STATUS);
4676 if ((reg & STATUS_PHYRA) != 0)
4677 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4678 break;
4679 default:
4680 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4681 __func__);
4682 break;
4683 }
4684 }
4685
4686 int
4687 wm_phy_post_reset(struct wm_softc *sc)
4688 {
4689 device_t dev = sc->sc_dev;
4690 uint16_t reg;
4691 int rv = 0;
4692
4693 /* This function is only for ICH8 and newer. */
4694 if (sc->sc_type < WM_T_ICH8)
4695 return 0;
4696
4697 if (wm_phy_resetisblocked(sc)) {
4698 /* XXX */
4699 device_printf(dev, "PHY is blocked\n");
4700 return -1;
4701 }
4702
4703 /* Allow time for h/w to get to quiescent state after reset */
4704 delay(10*1000);
4705
4706 /* Perform any necessary post-reset workarounds */
4707 if (sc->sc_type == WM_T_PCH)
4708 rv = wm_hv_phy_workarounds_ich8lan(sc);
4709 else if (sc->sc_type == WM_T_PCH2)
4710 rv = wm_lv_phy_workarounds_ich8lan(sc);
4711 if (rv != 0)
4712 return rv;
4713
4714 /* Clear the host wakeup bit after lcd reset */
4715 if (sc->sc_type >= WM_T_PCH) {
4716 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4717 reg &= ~BM_WUC_HOST_WU_BIT;
4718 wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4719 }
4720
4721 /* Configure the LCD with the extended configuration region in NVM */
4722 if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4723 return rv;
4724
4725 /* Configure the LCD with the OEM bits in NVM */
4726 rv = wm_oem_bits_config_ich8lan(sc, true);
4727
4728 if (sc->sc_type == WM_T_PCH2) {
4729 /* Ungate automatic PHY configuration on non-managed 82579 */
4730 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4731 delay(10 * 1000);
4732 wm_gate_hw_phy_config_ich8lan(sc, false);
4733 }
4734 /* Set EEE LPI Update Timer to 200usec */
4735 rv = sc->phy.acquire(sc);
4736 if (rv)
4737 return rv;
4738 rv = wm_write_emi_reg_locked(dev,
4739 I82579_LPI_UPDATE_TIMER, 0x1387);
4740 sc->phy.release(sc);
4741 }
4742
4743 return rv;
4744 }
4745
4746 /* Only for PCH and newer */
4747 static int
4748 wm_write_smbus_addr(struct wm_softc *sc)
4749 {
4750 uint32_t strap, freq;
4751 uint16_t phy_data;
4752 int rv;
4753
4754 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4755 device_xname(sc->sc_dev), __func__));
4756 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4757
4758 strap = CSR_READ(sc, WMREG_STRAP);
4759 freq = __SHIFTOUT(strap, STRAP_FREQ);
4760
4761 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4762 if (rv != 0)
4763 return rv;
4764
4765 phy_data &= ~HV_SMB_ADDR_ADDR;
4766 phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4767 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4768
4769 if (sc->sc_phytype == WMPHY_I217) {
4770 /* Restore SMBus frequency */
4771 		if (freq--) {
4772 phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4773 | HV_SMB_ADDR_FREQ_HIGH);
4774 phy_data |= __SHIFTIN((freq & 0x01) != 0,
4775 HV_SMB_ADDR_FREQ_LOW);
4776 phy_data |= __SHIFTIN((freq & 0x02) != 0,
4777 HV_SMB_ADDR_FREQ_HIGH);
4778 } else
4779 DPRINTF(sc, WM_DEBUG_INIT,
4780 ("%s: %s Unsupported SMB frequency in PHY\n",
4781 device_xname(sc->sc_dev), __func__));
4782 }
4783
4784 return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4785 phy_data);
4786 }
4787
4788 static int
4789 wm_init_lcd_from_nvm(struct wm_softc *sc)
4790 {
4791 uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4792 uint16_t phy_page = 0;
4793 int rv = 0;
4794
4795 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4796 device_xname(sc->sc_dev), __func__));
4797
4798 switch (sc->sc_type) {
4799 case WM_T_ICH8:
4800 if ((sc->sc_phytype == WMPHY_UNKNOWN)
4801 || (sc->sc_phytype != WMPHY_IGP_3))
4802 return 0;
4803
4804 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4805 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4806 sw_cfg_mask = FEXTNVM_SW_CONFIG;
4807 break;
4808 }
4809 /* FALLTHROUGH */
4810 case WM_T_PCH:
4811 case WM_T_PCH2:
4812 case WM_T_PCH_LPT:
4813 case WM_T_PCH_SPT:
4814 case WM_T_PCH_CNP:
4815 sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4816 break;
4817 default:
4818 return 0;
4819 }
4820
4821 if ((rv = sc->phy.acquire(sc)) != 0)
4822 return rv;
4823
4824 reg = CSR_READ(sc, WMREG_FEXTNVM);
4825 if ((reg & sw_cfg_mask) == 0)
4826 goto release;
4827
4828 /*
4829 * Make sure HW does not configure LCD from PHY extended configuration
4830 * before SW configuration
4831 */
4832 extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4833 if ((sc->sc_type < WM_T_PCH2)
4834 && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4835 goto release;
4836
4837 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4838 device_xname(sc->sc_dev), __func__));
4839 	/* The extended config pointer is in DWORDs; convert to a word address. */
4840 word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4841
4842 reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4843 cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4844 if (cnf_size == 0)
4845 goto release;
4846
4847 if (((sc->sc_type == WM_T_PCH)
4848 && ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4849 || (sc->sc_type > WM_T_PCH)) {
4850 /*
4851 * HW configures the SMBus address and LEDs when the OEM and
4852 * LCD Write Enable bits are set in the NVM. When both NVM bits
4853 * are cleared, SW will configure them instead.
4854 */
4855 DPRINTF(sc, WM_DEBUG_INIT,
4856 ("%s: %s: Configure SMBus and LED\n",
4857 device_xname(sc->sc_dev), __func__));
4858 if ((rv = wm_write_smbus_addr(sc)) != 0)
4859 goto release;
4860
4861 reg = CSR_READ(sc, WMREG_LEDCTL);
4862 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4863 (uint16_t)reg);
4864 if (rv != 0)
4865 goto release;
4866 }
4867
4868 /* Configure LCD from extended configuration region. */
4869 for (i = 0; i < cnf_size; i++) {
4870 uint16_t reg_data, reg_addr;
4871
4872 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
4873 goto release;
4874
4875 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
4876 goto release;
4877
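		/* Remember page selects so in-page addresses can be rebased below. */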
4878 if (reg_addr == IGPHY_PAGE_SELECT)
4879 phy_page = reg_data;
4880
4881 reg_addr &= IGPHY_MAXREGADDR;
4882 reg_addr |= phy_page;
4883
4884 KASSERT(sc->phy.writereg_locked != NULL);
4885 rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4886 reg_data);
4887 }
4888
4889 release:
4890 sc->phy.release(sc);
4891 return rv;
4892 }
4893
4894 /*
4895 * wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4896 * @sc: pointer to the HW structure
4897 * @d0_state: boolean if entering d0 or d3 device state
4898 *
4899 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
4900 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
4901 * in NVM determines whether HW should configure LPLU and Gbe Disable.
4902 */
4903 int
4904 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
4905 {
4906 uint32_t mac_reg;
4907 uint16_t oem_reg;
4908 int rv;
4909
4910 if (sc->sc_type < WM_T_PCH)
4911 return 0;
4912
4913 rv = sc->phy.acquire(sc);
4914 if (rv != 0)
4915 return rv;
4916
4917 if (sc->sc_type == WM_T_PCH) {
4918 mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4919 if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
4920 goto release;
4921 }
4922
4923 mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
4924 if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
4925 goto release;
4926
4927 mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
4928
4929 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
4930 if (rv != 0)
4931 goto release;
4932 oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
4933
4934 if (d0_state) {
4935 if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
4936 oem_reg |= HV_OEM_BITS_A1KDIS;
4937 if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
4938 oem_reg |= HV_OEM_BITS_LPLU;
4939 } else {
4940 if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
4941 != 0)
4942 oem_reg |= HV_OEM_BITS_A1KDIS;
4943 if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
4944 != 0)
4945 oem_reg |= HV_OEM_BITS_LPLU;
4946 }
4947
4948 /* Set Restart auto-neg to activate the bits */
4949 if ((d0_state || (sc->sc_type != WM_T_PCH))
4950 && (wm_phy_resetisblocked(sc) == false))
4951 oem_reg |= HV_OEM_BITS_ANEGNOW;
4952
4953 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
4954
4955 release:
4956 sc->phy.release(sc);
4957
4958 return rv;
4959 }
4960
4961 /* Init hardware bits */
4962 void
4963 wm_initialize_hardware_bits(struct wm_softc *sc)
4964 {
4965 uint32_t tarc0, tarc1, reg;
4966
4967 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4968 device_xname(sc->sc_dev), __func__));
4969
4970 /* For 82571 variant, 80003 and ICHs */
4971 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4972 || (sc->sc_type >= WM_T_80003)) {
4973
4974 /* Transmit Descriptor Control 0 */
4975 reg = CSR_READ(sc, WMREG_TXDCTL(0));
4976 reg |= TXDCTL_COUNT_DESC;
4977 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
4978
4979 /* Transmit Descriptor Control 1 */
4980 reg = CSR_READ(sc, WMREG_TXDCTL(1));
4981 reg |= TXDCTL_COUNT_DESC;
4982 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
4983
4984 /* TARC0 */
4985 tarc0 = CSR_READ(sc, WMREG_TARC0);
4986 switch (sc->sc_type) {
4987 case WM_T_82571:
4988 case WM_T_82572:
4989 case WM_T_82573:
4990 case WM_T_82574:
4991 case WM_T_82583:
4992 case WM_T_80003:
4993 /* Clear bits 30..27 */
4994 tarc0 &= ~__BITS(30, 27);
4995 break;
4996 default:
4997 break;
4998 }
4999
5000 switch (sc->sc_type) {
5001 case WM_T_82571:
5002 case WM_T_82572:
5003 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
5004
5005 tarc1 = CSR_READ(sc, WMREG_TARC1);
5006 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
5007 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
5008 /* 8257[12] Errata No.7 */
5009 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
5010
5011 /* TARC1 bit 28 */
5012 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5013 tarc1 &= ~__BIT(28);
5014 else
5015 tarc1 |= __BIT(28);
5016 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5017
5018 /*
5019 * 8257[12] Errata No.13
5020 			 * Disable Dynamic Clock Gating.
5021 */
5022 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5023 reg &= ~CTRL_EXT_DMA_DYN_CLK;
5024 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5025 break;
5026 case WM_T_82573:
5027 case WM_T_82574:
5028 case WM_T_82583:
5029 if ((sc->sc_type == WM_T_82574)
5030 || (sc->sc_type == WM_T_82583))
5031 tarc0 |= __BIT(26); /* TARC0 bit 26 */
5032
5033 /* Extended Device Control */
5034 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5035 reg &= ~__BIT(23); /* Clear bit 23 */
5036 reg |= __BIT(22); /* Set bit 22 */
5037 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5038
5039 /* Device Control */
5040 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
5041 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5042
5043 /* PCIe Control Register */
5044 /*
5045 * 82573 Errata (unknown).
5046 *
5047 * 82574 Errata 25 and 82583 Errata 12
5048 * "Dropped Rx Packets":
5049 			 * NVM Image Version 2.1.4 and newer does not have this bug.
5050 */
5051 reg = CSR_READ(sc, WMREG_GCR);
5052 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
5053 CSR_WRITE(sc, WMREG_GCR, reg);
5054
5055 if ((sc->sc_type == WM_T_82574)
5056 || (sc->sc_type == WM_T_82583)) {
5057 /*
5058 * Document says this bit must be set for
5059 * proper operation.
5060 */
5061 reg = CSR_READ(sc, WMREG_GCR);
5062 reg |= __BIT(22);
5063 CSR_WRITE(sc, WMREG_GCR, reg);
5064
5065 				/*
5066 				 * Apply a workaround for a hardware erratum
5067 				 * documented in the errata docs. It fixes an
5068 				 * issue where some error-prone or unreliable
5069 				 * PCIe completions occur, particularly with
5070 				 * ASPM enabled. Without the fix, the issue
5071 				 * can cause Tx timeouts.
5072 				 */
5073 reg = CSR_READ(sc, WMREG_GCR2);
5074 reg |= __BIT(0);
5075 CSR_WRITE(sc, WMREG_GCR2, reg);
5076 }
5077 break;
5078 case WM_T_80003:
5079 /* TARC0 */
5080 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
5081 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
5082 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
5083
5084 /* TARC1 bit 28 */
5085 tarc1 = CSR_READ(sc, WMREG_TARC1);
5086 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5087 tarc1 &= ~__BIT(28);
5088 else
5089 tarc1 |= __BIT(28);
5090 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5091 break;
5092 case WM_T_ICH8:
5093 case WM_T_ICH9:
5094 case WM_T_ICH10:
5095 case WM_T_PCH:
5096 case WM_T_PCH2:
5097 case WM_T_PCH_LPT:
5098 case WM_T_PCH_SPT:
5099 case WM_T_PCH_CNP:
5100 /* TARC0 */
5101 if (sc->sc_type == WM_T_ICH8) {
5102 /* Set TARC0 bits 29 and 28 */
5103 tarc0 |= __BITS(29, 28);
5104 } else if (sc->sc_type == WM_T_PCH_SPT) {
5105 tarc0 |= __BIT(29);
5106 /*
5107 * Drop bit 28. From Linux.
5108 * See I218/I219 spec update
5109 * "5. Buffer Overrun While the I219 is
5110 * Processing DMA Transactions"
5111 */
5112 tarc0 &= ~__BIT(28);
5113 }
5114 /* Set TARC0 bits 23,24,26,27 */
5115 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
5116
5117 /* CTRL_EXT */
5118 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5119 reg |= __BIT(22); /* Set bit 22 */
5120 /*
5121 * Enable PHY low-power state when MAC is at D3
5122 * w/o WoL
5123 */
5124 if (sc->sc_type >= WM_T_PCH)
5125 reg |= CTRL_EXT_PHYPDEN;
5126 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5127
5128 /* TARC1 */
5129 tarc1 = CSR_READ(sc, WMREG_TARC1);
5130 /* bit 28 */
5131 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5132 tarc1 &= ~__BIT(28);
5133 else
5134 tarc1 |= __BIT(28);
5135 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
5136 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5137
5138 /* Device Status */
5139 if (sc->sc_type == WM_T_ICH8) {
5140 reg = CSR_READ(sc, WMREG_STATUS);
5141 reg &= ~__BIT(31);
5142 CSR_WRITE(sc, WMREG_STATUS, reg);
5143
5144 }
5145
5146 /* IOSFPC */
5147 if (sc->sc_type == WM_T_PCH_SPT) {
5148 reg = CSR_READ(sc, WMREG_IOSFPC);
5149 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
5150 CSR_WRITE(sc, WMREG_IOSFPC, reg);
5151 }
5152 /*
5153 			 * To work around a descriptor data corruption issue
5154 			 * during NFS v2 UDP traffic, just disable the NFS
5155 			 * filtering capability.
5156 */
5157 reg = CSR_READ(sc, WMREG_RFCTL);
5158 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
5159 CSR_WRITE(sc, WMREG_RFCTL, reg);
5160 break;
5161 default:
5162 break;
5163 }
5164 CSR_WRITE(sc, WMREG_TARC0, tarc0);
5165
5166 switch (sc->sc_type) {
5167 case WM_T_82571:
5168 case WM_T_82572:
5169 case WM_T_82573:
5170 case WM_T_80003:
5171 case WM_T_ICH8:
5172 /*
5173 * 8257[12] Errata No.52, 82573 Errata No.43 and some
5174 * others to avoid RSS Hash Value bug.
5175 */
5176 reg = CSR_READ(sc, WMREG_RFCTL);
5177 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
5178 CSR_WRITE(sc, WMREG_RFCTL, reg);
5179 break;
5180 case WM_T_82574:
5181 			/* Use extended Rx descriptors. */
5182 reg = CSR_READ(sc, WMREG_RFCTL);
5183 reg |= WMREG_RFCTL_EXSTEN;
5184 CSR_WRITE(sc, WMREG_RFCTL, reg);
5185 break;
5186 default:
5187 break;
5188 }
5189 } else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
5190 /*
5191 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
5192 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
5193 * "Certain Malformed IPv6 Extension Headers are Not Processed
5194 * Correctly by the Device"
5195 *
5196 * I354(C2000) Errata AVR53:
5197 * "Malformed IPv6 Extension Headers May Result in LAN Device
5198 * Hang"
5199 */
5200 reg = CSR_READ(sc, WMREG_RFCTL);
5201 reg |= WMREG_RFCTL_IPV6EXDIS;
5202 CSR_WRITE(sc, WMREG_RFCTL, reg);
5203 }
5204 }
5205
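/*
 * wm_rxpbs_adjust_82580:
 *
 *	Translate a raw RXPBS register value through the 82580 lookup
 *	table. Out-of-range values fall back to 0, so the caller never
 *	uses a packet buffer size taken from a garbage register read.
 */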
5206 static uint32_t
5207 wm_rxpbs_adjust_82580(uint32_t val)
5208 {
5209 uint32_t rv = 0;
5210
5211 if (val < __arraycount(wm_82580_rxpbs_table))
5212 rv = wm_82580_rxpbs_table[val];
5213
5214 return rv;
5215 }
5216
5217 /*
5218 * wm_reset_phy:
5219 *
5220 * generic PHY reset function.
5221 * Same as e1000_phy_hw_reset_generic()
5222 */
5223 static int
5224 wm_reset_phy(struct wm_softc *sc)
5225 {
5226 uint32_t reg;
5227 int rv;
5228
5229 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5230 device_xname(sc->sc_dev), __func__));
5231 if (wm_phy_resetisblocked(sc))
5232 return -1;
5233
5234 rv = sc->phy.acquire(sc);
5235 if (rv) {
5236 device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
5237 __func__, rv);
5238 return rv;
5239 }
5240
5241 reg = CSR_READ(sc, WMREG_CTRL);
5242 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
5243 CSR_WRITE_FLUSH(sc);
5244
5245 delay(sc->phy.reset_delay_us);
5246
5247 CSR_WRITE(sc, WMREG_CTRL, reg);
5248 CSR_WRITE_FLUSH(sc);
5249
5250 delay(150);
5251
5252 sc->phy.release(sc);
5253
5254 wm_get_cfg_done(sc);
5255 wm_phy_post_reset(sc);
5256
5257 return 0;
5258 }
5259
5260 /*
5261 * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
5262 *
5263 * In i219, the descriptor rings must be emptied before resetting the HW
5264 * or before changing the device state to D3 during runtime (runtime PM).
5265 *
5266  * Failure to do this will cause the HW to enter a unit-hang state
5267  * that can only be released by a PCI reset of the device.
5268 *
5269 * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
5270 */
5271 static void
5272 wm_flush_desc_rings(struct wm_softc *sc)
5273 {
5274 pcireg_t preg;
5275 uint32_t reg;
5276 struct wm_txqueue *txq;
5277 wiseman_txdesc_t *txd;
5278 int nexttx;
5279 uint32_t rctl;
5280
5281 KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
5282
5283 /* First, disable MULR fix in FEXTNVM11 */
5284 reg = CSR_READ(sc, WMREG_FEXTNVM11);
5285 reg |= FEXTNVM11_DIS_MULRFIX;
5286 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5287
5288 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5289 reg = CSR_READ(sc, WMREG_TDLEN(0));
5290 if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
5291 return;
5292
5293 /*
5294 * Remove all descriptors from the tx_ring.
5295 *
5296  * We want to clear all pending descriptors from the TX ring. Zeroing
5297  * happens when the HW reads the regs. We assign the ring itself as
5298  * the data of the next descriptor. We don't care about the data; we
5299  * are about to reset the HW anyway.
5300 */
5301 #ifdef WM_DEBUG
5302 device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
5303 #endif
5304 reg = CSR_READ(sc, WMREG_TCTL);
5305 CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
5306
5307 txq = &sc->sc_queue[0].wmq_txq;
5308 nexttx = txq->txq_next;
5309 txd = &txq->txq_descs[nexttx];
5310 wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
5311 txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
5312 txd->wtx_fields.wtxu_status = 0;
5313 txd->wtx_fields.wtxu_options = 0;
5314 txd->wtx_fields.wtxu_vlan = 0;
5315
5316 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5317 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5318
5319 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
5320 CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
5321 CSR_WRITE_FLUSH(sc);
5322 delay(250);
5323
5324 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5325 if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
5326 return;
5327
5328 /*
5329 * Mark all descriptors in the RX ring as consumed and disable the
5330 * rx ring.
5331 */
5332 #ifdef WM_DEBUG
5333 device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
5334 #endif
5335 rctl = CSR_READ(sc, WMREG_RCTL);
5336 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5337 CSR_WRITE_FLUSH(sc);
5338 delay(150);
5339
5340 reg = CSR_READ(sc, WMREG_RXDCTL(0));
5341 /* Zero the lower 14 bits (prefetch and host thresholds) */
5342 reg &= 0xffffc000;
5343 /*
5344 * Update thresholds: prefetch threshold to 31, host threshold
5345 * to 1 and make sure the granularity is "descriptors" and not
5346 * "cache lines"
5347 */
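	/*
	 * Per the mask above, the prefetch threshold occupies the low
	 * bits and the host threshold field starts at bit 8, so
	 * (0x1f | (1 << 8)) encodes PTHRESH = 31 and HTHRESH = 1.
	 */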
5348 reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
5349 CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
5350
5351 /* Momentarily enable the RX ring for the changes to take effect */
5352 CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
5353 CSR_WRITE_FLUSH(sc);
5354 delay(150);
5355 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5356 }
5357
5358 /*
5359 * wm_reset:
5360 *
5361 * Reset the i82542 chip.
5362 */
5363 static void
5364 wm_reset(struct wm_softc *sc)
5365 {
5366 int phy_reset = 0;
5367 int i, error = 0;
5368 uint32_t reg;
5369 uint16_t kmreg;
5370 int rv;
5371
5372 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5373 device_xname(sc->sc_dev), __func__));
5374 KASSERT(sc->sc_type != 0);
5375
5376 /*
5377 * Allocate on-chip memory according to the MTU size.
5378 * The Packet Buffer Allocation register must be written
5379 * before the chip is reset.
5380 */
5381 switch (sc->sc_type) {
5382 case WM_T_82547:
5383 case WM_T_82547_2:
5384 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5385 PBA_22K : PBA_30K;
5386 for (i = 0; i < sc->sc_nqueues; i++) {
5387 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5388 txq->txq_fifo_head = 0;
5389 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
5390 txq->txq_fifo_size =
5391 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
5392 txq->txq_fifo_stall = 0;
5393 }
5394 break;
5395 case WM_T_82571:
5396 case WM_T_82572:
5397 	case WM_T_82575: /* XXX need special handling for jumbo frames */
5398 case WM_T_80003:
5399 sc->sc_pba = PBA_32K;
5400 break;
5401 case WM_T_82573:
5402 sc->sc_pba = PBA_12K;
5403 break;
5404 case WM_T_82574:
5405 case WM_T_82583:
5406 sc->sc_pba = PBA_20K;
5407 break;
5408 case WM_T_82576:
5409 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
5410 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
5411 break;
5412 case WM_T_82580:
5413 case WM_T_I350:
5414 case WM_T_I354:
5415 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
5416 break;
5417 case WM_T_I210:
5418 case WM_T_I211:
5419 sc->sc_pba = PBA_34K;
5420 break;
5421 case WM_T_ICH8:
5422 /* Workaround for a bit corruption issue in FIFO memory */
5423 sc->sc_pba = PBA_8K;
5424 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
5425 break;
5426 case WM_T_ICH9:
5427 case WM_T_ICH10:
5428 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
5429 PBA_14K : PBA_10K;
5430 break;
5431 case WM_T_PCH:
5432 case WM_T_PCH2: /* XXX 14K? */
5433 case WM_T_PCH_LPT:
5434 case WM_T_PCH_SPT:
5435 case WM_T_PCH_CNP:
5436 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
5437 PBA_12K : PBA_26K;
5438 break;
5439 default:
5440 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5441 PBA_40K : PBA_48K;
5442 break;
5443 }
5444 /*
5445 * Only old or non-multiqueue devices have the PBA register
5446 * XXX Need special handling for 82575.
5447 */
5448 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5449 || (sc->sc_type == WM_T_82575))
5450 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
5451
5452 /* Prevent the PCI-E bus from sticking */
5453 if (sc->sc_flags & WM_F_PCIE) {
5454 int timeout = 800;
5455
5456 sc->sc_ctrl |= CTRL_GIO_M_DIS;
5457 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5458
5459 while (timeout--) {
5460 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
5461 == 0)
5462 break;
5463 delay(100);
5464 }
5465 if (timeout == 0)
5466 device_printf(sc->sc_dev,
5467 "failed to disable bus mastering\n");
5468 }
5469
5470 /* Set the completion timeout for interface */
5471 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5472 || (sc->sc_type == WM_T_82580)
5473 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5474 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
5475 wm_set_pcie_completion_timeout(sc);
5476
5477 /* Clear interrupt */
5478 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5479 if (wm_is_using_msix(sc)) {
5480 if (sc->sc_type != WM_T_82574) {
5481 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5482 CSR_WRITE(sc, WMREG_EIAC, 0);
5483 } else
5484 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5485 }
5486
5487 /* Stop the transmit and receive processes. */
5488 CSR_WRITE(sc, WMREG_RCTL, 0);
5489 sc->sc_rctl &= ~RCTL_EN;
5490 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
5491 CSR_WRITE_FLUSH(sc);
5492
5493 /* XXX set_tbi_sbp_82543() */
5494
5495 delay(10*1000);
5496
5497 /* Must acquire the MDIO ownership before MAC reset */
5498 switch (sc->sc_type) {
5499 case WM_T_82573:
5500 case WM_T_82574:
5501 case WM_T_82583:
5502 error = wm_get_hw_semaphore_82573(sc);
5503 break;
5504 default:
5505 break;
5506 }
5507
5508 /*
5509 * 82541 Errata 29? & 82547 Errata 28?
5510 * See also the description about PHY_RST bit in CTRL register
5511 * in 8254x_GBe_SDM.pdf.
5512 */
5513 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
5514 CSR_WRITE(sc, WMREG_CTRL,
5515 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
5516 CSR_WRITE_FLUSH(sc);
5517 delay(5000);
5518 }
5519
5520 switch (sc->sc_type) {
5521 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
5522 case WM_T_82541:
5523 case WM_T_82541_2:
5524 case WM_T_82547:
5525 case WM_T_82547_2:
5526 /*
5527 * On some chipsets, a reset through a memory-mapped write
5528 * cycle can cause the chip to reset before completing the
5529 		 * write cycle. This causes a major headache that can be avoided
5530 * by issuing the reset via indirect register writes through
5531 * I/O space.
5532 *
5533 * So, if we successfully mapped the I/O BAR at attach time,
5534 * use that. Otherwise, try our luck with a memory-mapped
5535 * reset.
5536 */
5537 if (sc->sc_flags & WM_F_IOH_VALID)
5538 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
5539 else
5540 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
5541 break;
5542 case WM_T_82545_3:
5543 case WM_T_82546_3:
5544 /* Use the shadow control register on these chips. */
5545 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
5546 break;
5547 case WM_T_80003:
5548 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5549 if (sc->phy.acquire(sc) != 0)
5550 break;
5551 CSR_WRITE(sc, WMREG_CTRL, reg);
5552 sc->phy.release(sc);
5553 break;
5554 case WM_T_ICH8:
5555 case WM_T_ICH9:
5556 case WM_T_ICH10:
5557 case WM_T_PCH:
5558 case WM_T_PCH2:
5559 case WM_T_PCH_LPT:
5560 case WM_T_PCH_SPT:
5561 case WM_T_PCH_CNP:
5562 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5563 if (wm_phy_resetisblocked(sc) == false) {
5564 /*
5565 * Gate automatic PHY configuration by hardware on
5566 * non-managed 82579
5567 */
5568 if ((sc->sc_type == WM_T_PCH2)
5569 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5570 == 0))
5571 wm_gate_hw_phy_config_ich8lan(sc, true);
5572
5573 reg |= CTRL_PHY_RESET;
5574 phy_reset = 1;
5575 } else
5576 device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5577 if (sc->phy.acquire(sc) != 0)
5578 break;
5579 CSR_WRITE(sc, WMREG_CTRL, reg);
5580 		/* Don't insert a completion barrier during reset */
5581 delay(20*1000);
5582 /*
5583 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
5584 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
5585 * only. See also wm_get_swflag_ich8lan().
5586 */
5587 mutex_exit(sc->sc_ich_phymtx);
5588 break;
5589 case WM_T_82580:
5590 case WM_T_I350:
5591 case WM_T_I354:
5592 case WM_T_I210:
5593 case WM_T_I211:
5594 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5595 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5596 CSR_WRITE_FLUSH(sc);
5597 delay(5000);
5598 break;
5599 case WM_T_82542_2_0:
5600 case WM_T_82542_2_1:
5601 case WM_T_82543:
5602 case WM_T_82540:
5603 case WM_T_82545:
5604 case WM_T_82546:
5605 case WM_T_82571:
5606 case WM_T_82572:
5607 case WM_T_82573:
5608 case WM_T_82574:
5609 case WM_T_82575:
5610 case WM_T_82576:
5611 case WM_T_82583:
5612 default:
5613 /* Everything else can safely use the documented method. */
5614 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5615 break;
5616 }
5617
5618 /* Must release the MDIO ownership after MAC reset */
5619 switch (sc->sc_type) {
5620 case WM_T_82573:
5621 case WM_T_82574:
5622 case WM_T_82583:
5623 if (error == 0)
5624 wm_put_hw_semaphore_82573(sc);
5625 break;
5626 default:
5627 break;
5628 }
5629
5630 /* Set Phy Config Counter to 50msec */
5631 if (sc->sc_type == WM_T_PCH2) {
5632 reg = CSR_READ(sc, WMREG_FEXTNVM3);
5633 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5634 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5635 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5636 }
5637
5638 if (phy_reset != 0)
5639 wm_get_cfg_done(sc);
5640
5641 /* Reload EEPROM */
5642 switch (sc->sc_type) {
5643 case WM_T_82542_2_0:
5644 case WM_T_82542_2_1:
5645 case WM_T_82543:
5646 case WM_T_82544:
5647 delay(10);
5648 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5649 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5650 CSR_WRITE_FLUSH(sc);
5651 delay(2000);
5652 break;
5653 case WM_T_82540:
5654 case WM_T_82545:
5655 case WM_T_82545_3:
5656 case WM_T_82546:
5657 case WM_T_82546_3:
5658 delay(5*1000);
5659 /* XXX Disable HW ARPs on ASF enabled adapters */
5660 break;
5661 case WM_T_82541:
5662 case WM_T_82541_2:
5663 case WM_T_82547:
5664 case WM_T_82547_2:
5665 delay(20000);
5666 /* XXX Disable HW ARPs on ASF enabled adapters */
5667 break;
5668 case WM_T_82571:
5669 case WM_T_82572:
5670 case WM_T_82573:
5671 case WM_T_82574:
5672 case WM_T_82583:
5673 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5674 delay(10);
5675 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5676 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5677 CSR_WRITE_FLUSH(sc);
5678 }
5679 /* check EECD_EE_AUTORD */
5680 wm_get_auto_rd_done(sc);
5681 /*
5682 * Phy configuration from NVM just starts after EECD_AUTO_RD
5683 * is set.
5684 */
5685 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5686 || (sc->sc_type == WM_T_82583))
5687 delay(25*1000);
5688 break;
5689 case WM_T_82575:
5690 case WM_T_82576:
5691 case WM_T_82580:
5692 case WM_T_I350:
5693 case WM_T_I354:
5694 case WM_T_I210:
5695 case WM_T_I211:
5696 case WM_T_80003:
5697 /* check EECD_EE_AUTORD */
5698 wm_get_auto_rd_done(sc);
5699 break;
5700 case WM_T_ICH8:
5701 case WM_T_ICH9:
5702 case WM_T_ICH10:
5703 case WM_T_PCH:
5704 case WM_T_PCH2:
5705 case WM_T_PCH_LPT:
5706 case WM_T_PCH_SPT:
5707 case WM_T_PCH_CNP:
5708 break;
5709 default:
5710 panic("%s: unknown type\n", __func__);
5711 }
5712
5713 /* Check whether EEPROM is present or not */
5714 switch (sc->sc_type) {
5715 case WM_T_82575:
5716 case WM_T_82576:
5717 case WM_T_82580:
5718 case WM_T_I350:
5719 case WM_T_I354:
5720 case WM_T_ICH8:
5721 case WM_T_ICH9:
5722 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5723 /* Not found */
5724 sc->sc_flags |= WM_F_EEPROM_INVALID;
5725 if (sc->sc_type == WM_T_82575)
5726 wm_reset_init_script_82575(sc);
5727 }
5728 break;
5729 default:
5730 break;
5731 }
5732
5733 if (phy_reset != 0)
5734 wm_phy_post_reset(sc);
5735
5736 if ((sc->sc_type == WM_T_82580)
5737 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5738 /* Clear global device reset status bit */
5739 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5740 }
5741
5742 /* Clear any pending interrupt events. */
5743 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5744 reg = CSR_READ(sc, WMREG_ICR);
5745 if (wm_is_using_msix(sc)) {
5746 if (sc->sc_type != WM_T_82574) {
5747 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5748 CSR_WRITE(sc, WMREG_EIAC, 0);
5749 } else
5750 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5751 }
5752
5753 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5754 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5755 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5756 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
5757 reg = CSR_READ(sc, WMREG_KABGTXD);
5758 reg |= KABGTXD_BGSQLBIAS;
5759 CSR_WRITE(sc, WMREG_KABGTXD, reg);
5760 }
5761
5762 /* Reload sc_ctrl */
5763 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5764
5765 wm_set_eee(sc);
5766
5767 /*
5768 * For PCH, this write will make sure that any noise will be detected
5769 * as a CRC error and be dropped rather than show up as a bad packet
5770 * to the DMA engine
5771 */
5772 if (sc->sc_type == WM_T_PCH)
5773 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5774
5775 if (sc->sc_type >= WM_T_82544)
5776 CSR_WRITE(sc, WMREG_WUC, 0);
5777
5778 if (sc->sc_type < WM_T_82575)
5779 wm_disable_aspm(sc); /* Workaround for some chips */
5780
5781 wm_reset_mdicnfg_82580(sc);
5782
5783 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5784 wm_pll_workaround_i210(sc);
5785
5786 if (sc->sc_type == WM_T_80003) {
5787 /* Default to TRUE to enable the MDIC W/A */
5788 sc->sc_flags |= WM_F_80003_MDIC_WA;
5789
5790 rv = wm_kmrn_readreg(sc,
5791 KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5792 if (rv == 0) {
5793 if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5794 == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5795 sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5796 else
5797 sc->sc_flags |= WM_F_80003_MDIC_WA;
5798 }
5799 }
5800 }
5801
5802 /*
5803 * wm_add_rxbuf:
5804 *
5805  * Add a receive buffer to the indicated descriptor.
5806 */
5807 static int
5808 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5809 {
5810 struct wm_softc *sc = rxq->rxq_sc;
5811 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5812 struct mbuf *m;
5813 int error;
5814
5815 KASSERT(mutex_owned(rxq->rxq_lock));
5816
5817 MGETHDR(m, M_DONTWAIT, MT_DATA);
5818 if (m == NULL)
5819 return ENOBUFS;
5820
5821 MCLGET(m, M_DONTWAIT);
5822 if ((m->m_flags & M_EXT) == 0) {
5823 m_freem(m);
5824 return ENOBUFS;
5825 }
5826
5827 if (rxs->rxs_mbuf != NULL)
5828 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5829
5830 rxs->rxs_mbuf = m;
5831
5832 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5833 /*
5834 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5835 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5836 */
5837 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5838 m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5839 if (error) {
5840 /* XXX XXX XXX */
5841 aprint_error_dev(sc->sc_dev,
5842 "unable to load rx DMA map %d, error = %d\n", idx, error);
5843 panic("wm_add_rxbuf");
5844 }
5845
5846 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5847 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5848
5849 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5850 if ((sc->sc_rctl & RCTL_EN) != 0)
5851 wm_init_rxdesc(rxq, idx);
5852 } else
5853 wm_init_rxdesc(rxq, idx);
5854
5855 return 0;
5856 }
5857
5858 /*
5859 * wm_rxdrain:
5860 *
5861 * Drain the receive queue.
5862 */
5863 static void
5864 wm_rxdrain(struct wm_rxqueue *rxq)
5865 {
5866 struct wm_softc *sc = rxq->rxq_sc;
5867 struct wm_rxsoft *rxs;
5868 int i;
5869
5870 KASSERT(mutex_owned(rxq->rxq_lock));
5871
5872 for (i = 0; i < WM_NRXDESC; i++) {
5873 rxs = &rxq->rxq_soft[i];
5874 if (rxs->rxs_mbuf != NULL) {
5875 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5876 m_freem(rxs->rxs_mbuf);
5877 rxs->rxs_mbuf = NULL;
5878 }
5879 }
5880 }
5881
5882 /*
5883  * Set up registers for RSS.
5884  *
5885  * XXX VMDq is not yet supported.
5886 */
5887 static void
5888 wm_init_rss(struct wm_softc *sc)
5889 {
5890 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5891 int i;
5892
5893 CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5894
5895 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5896 unsigned int qid, reta_ent;
5897
5898 qid = i % sc->sc_nqueues;
5899 switch (sc->sc_type) {
5900 case WM_T_82574:
5901 reta_ent = __SHIFTIN(qid,
5902 RETA_ENT_QINDEX_MASK_82574);
5903 break;
5904 case WM_T_82575:
5905 reta_ent = __SHIFTIN(qid,
5906 RETA_ENT_QINDEX1_MASK_82575);
5907 break;
5908 default:
5909 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5910 break;
5911 }
5912
5913 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5914 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5915 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5916 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5917 }
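	/*
	 * The loop above fills the table round-robin: with sc_nqueues = 4,
	 * for example, the RETA entries map to queues 0,1,2,3,0,1,2,3,...,
	 * spreading RSS hash values evenly across the active queues.
	 */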
5918
5919 rss_getkey((uint8_t *)rss_key);
5920 for (i = 0; i < RSSRK_NUM_REGS; i++)
5921 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5922
5923 if (sc->sc_type == WM_T_82574)
5924 mrqc = MRQC_ENABLE_RSS_MQ_82574;
5925 else
5926 mrqc = MRQC_ENABLE_RSS_MQ;
5927
5928 /*
5929 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
5930 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5931 */
5932 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5933 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5934 #if 0
5935 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5936 mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5937 #endif
5938 mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5939
5940 CSR_WRITE(sc, WMREG_MRQC, mrqc);
5941 }
5942
5943 /*
5944  * Adjust the TX and RX queue numbers that the system actually uses.
5945  *
5946  * The numbers are affected by the parameters below:
5947  * - The number of hardware queues
5948 * - The number of MSI-X vectors (= "nvectors" argument)
5949 * - ncpu
5950 */
5951 static void
5952 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5953 {
5954 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5955
5956 if (nvectors < 2) {
5957 sc->sc_nqueues = 1;
5958 return;
5959 }
5960
5961 switch (sc->sc_type) {
5962 case WM_T_82572:
5963 hw_ntxqueues = 2;
5964 hw_nrxqueues = 2;
5965 break;
5966 case WM_T_82574:
5967 hw_ntxqueues = 2;
5968 hw_nrxqueues = 2;
5969 break;
5970 case WM_T_82575:
5971 hw_ntxqueues = 4;
5972 hw_nrxqueues = 4;
5973 break;
5974 case WM_T_82576:
5975 hw_ntxqueues = 16;
5976 hw_nrxqueues = 16;
5977 break;
5978 case WM_T_82580:
5979 case WM_T_I350:
5980 case WM_T_I354:
5981 hw_ntxqueues = 8;
5982 hw_nrxqueues = 8;
5983 break;
5984 case WM_T_I210:
5985 hw_ntxqueues = 4;
5986 hw_nrxqueues = 4;
5987 break;
5988 case WM_T_I211:
5989 hw_ntxqueues = 2;
5990 hw_nrxqueues = 2;
5991 break;
5992 /*
5993 * The below Ethernet controllers do not support MSI-X;
5994 * this driver doesn't let them use multiqueue.
5995 * - WM_T_80003
5996 * - WM_T_ICH8
5997 * - WM_T_ICH9
5998 * - WM_T_ICH10
5999 * - WM_T_PCH
6000 * - WM_T_PCH2
6001 * - WM_T_PCH_LPT
6002 */
6003 default:
6004 hw_ntxqueues = 1;
6005 hw_nrxqueues = 1;
6006 break;
6007 }
6008
6009 hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
6010
6011 	/*
6012 	 * Since more queues than MSI-X vectors cannot improve scaling, we
6013 	 * limit the number of queues actually used.
6014 	 */
6015 if (nvectors < hw_nqueues + 1)
6016 sc->sc_nqueues = nvectors - 1;
6017 else
6018 sc->sc_nqueues = hw_nqueues;
6019
6020 	/*
6021 	 * Since more queues than CPUs cannot improve scaling, we limit
6022 	 * the number of queues actually used.
6023 	 */
6024 if (ncpu < sc->sc_nqueues)
6025 sc->sc_nqueues = ncpu;
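	/*
	 * Worked example (hypothetical numbers): an 82576 (16 hardware
	 * queue pairs) attached with nvectors = 5 on an 8-CPU machine
	 * ends up with sc_nqueues = min(16, 5 - 1, 8) = 4, because one
	 * MSI-X vector is reserved for the link interrupt.
	 */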
6026 }
6027
6028 static inline bool
6029 wm_is_using_msix(struct wm_softc *sc)
6030 {
6031
6032 return (sc->sc_nintrs > 1);
6033 }
6034
6035 static inline bool
6036 wm_is_using_multiqueue(struct wm_softc *sc)
6037 {
6038
6039 return (sc->sc_nqueues > 1);
6040 }
6041
6042 static int
6043 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
6044 {
6045 struct wm_queue *wmq = &sc->sc_queue[qidx];
6046
6047 wmq->wmq_id = qidx;
6048 wmq->wmq_intr_idx = intr_idx;
6049 wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
6050 wm_handle_queue, wmq);
6051 if (wmq->wmq_si != NULL)
6052 return 0;
6053
6054 aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
6055 wmq->wmq_id);
6056 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6057 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6058 return ENOMEM;
6059 }
6060
6061 /*
6062  * Both single-interrupt MSI and INTx can use this function.
6063 */
6064 static int
6065 wm_setup_legacy(struct wm_softc *sc)
6066 {
6067 pci_chipset_tag_t pc = sc->sc_pc;
6068 const char *intrstr = NULL;
6069 char intrbuf[PCI_INTRSTR_LEN];
6070 int error;
6071
6072 error = wm_alloc_txrx_queues(sc);
6073 if (error) {
6074 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6075 error);
6076 return ENOMEM;
6077 }
6078 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
6079 sizeof(intrbuf));
6080 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
6081 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
6082 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
6083 if (sc->sc_ihs[0] == NULL) {
6084 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
6085 (pci_intr_type(pc, sc->sc_intrs[0])
6086 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6087 return ENOMEM;
6088 }
6089
6090 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
6091 sc->sc_nintrs = 1;
6092
6093 return wm_softint_establish_queue(sc, 0, 0);
6094 }
6095
6096 static int
6097 wm_setup_msix(struct wm_softc *sc)
6098 {
6099 void *vih;
6100 kcpuset_t *affinity;
6101 int qidx, error, intr_idx, txrx_established;
6102 pci_chipset_tag_t pc = sc->sc_pc;
6103 const char *intrstr = NULL;
6104 char intrbuf[PCI_INTRSTR_LEN];
6105 char intr_xname[INTRDEVNAMEBUF];
6106
6107 if (sc->sc_nqueues < ncpu) {
6108 /*
6109 * To avoid other devices' interrupts, the affinity of Tx/Rx
6110 		 * interrupts starts from CPU#1.
6111 */
6112 sc->sc_affinity_offset = 1;
6113 } else {
6114 /*
6115 		 * In this case, this device uses all CPUs, so we unify the
6116 		 * affinitized cpu_index with the MSI-X vector number for readability.
6117 */
6118 sc->sc_affinity_offset = 0;
6119 }
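	/*
	 * Example (hypothetical numbers): with 4 queues on an 8-CPU
	 * system, sc_affinity_offset is 1 and the TXRX0..TXRX3 vectors
	 * are bound to CPU#1..CPU#4 below, leaving CPU#0 for other
	 * devices; the link vector keeps its default affinity.
	 */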
6120
6121 error = wm_alloc_txrx_queues(sc);
6122 if (error) {
6123 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6124 error);
6125 return ENOMEM;
6126 }
6127
6128 kcpuset_create(&affinity, false);
6129 intr_idx = 0;
6130
6131 /*
6132 * TX and RX
6133 */
6134 txrx_established = 0;
6135 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6136 struct wm_queue *wmq = &sc->sc_queue[qidx];
6137 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
6138
6139 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6140 sizeof(intrbuf));
6141 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
6142 PCI_INTR_MPSAFE, true);
6143 memset(intr_xname, 0, sizeof(intr_xname));
6144 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
6145 device_xname(sc->sc_dev), qidx);
6146 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6147 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
6148 if (vih == NULL) {
6149 aprint_error_dev(sc->sc_dev,
6150 "unable to establish MSI-X(for TX and RX)%s%s\n",
6151 intrstr ? " at " : "",
6152 intrstr ? intrstr : "");
6153
6154 goto fail;
6155 }
6156 kcpuset_zero(affinity);
6157 /* Round-robin affinity */
6158 kcpuset_set(affinity, affinity_to);
6159 error = interrupt_distribute(vih, affinity, NULL);
6160 if (error == 0) {
6161 aprint_normal_dev(sc->sc_dev,
6162 "for TX and RX interrupting at %s affinity to %u\n",
6163 intrstr, affinity_to);
6164 } else {
6165 aprint_normal_dev(sc->sc_dev,
6166 "for TX and RX interrupting at %s\n", intrstr);
6167 }
6168 sc->sc_ihs[intr_idx] = vih;
6169 if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
6170 goto fail;
6171 txrx_established++;
6172 intr_idx++;
6173 }
6174
6175 /* LINK */
6176 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6177 sizeof(intrbuf));
6178 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
6179 memset(intr_xname, 0, sizeof(intr_xname));
6180 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
6181 device_xname(sc->sc_dev));
6182 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6183 IPL_NET, wm_linkintr_msix, sc, intr_xname);
6184 if (vih == NULL) {
6185 aprint_error_dev(sc->sc_dev,
6186 "unable to establish MSI-X(for LINK)%s%s\n",
6187 intrstr ? " at " : "",
6188 intrstr ? intrstr : "");
6189
6190 goto fail;
6191 }
6192 /* Keep default affinity to LINK interrupt */
6193 aprint_normal_dev(sc->sc_dev,
6194 "for LINK interrupting at %s\n", intrstr);
6195 sc->sc_ihs[intr_idx] = vih;
6196 sc->sc_link_intr_idx = intr_idx;
6197
6198 sc->sc_nintrs = sc->sc_nqueues + 1;
6199 kcpuset_destroy(affinity);
6200 return 0;
6201
6202 fail:
6203 for (qidx = 0; qidx < txrx_established; qidx++) {
6204 struct wm_queue *wmq = &sc->sc_queue[qidx];
6205 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6206 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6207 }
6208
6209 kcpuset_destroy(affinity);
6210 return ENOMEM;
6211 }
6212
6213 static void
6214 wm_unset_stopping_flags(struct wm_softc *sc)
6215 {
6216 int i;
6217
6218 KASSERT(mutex_owned(sc->sc_core_lock));
6219
6220 /* Must unset stopping flags in ascending order. */
6221 for (i = 0; i < sc->sc_nqueues; i++) {
6222 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6223 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6224
6225 mutex_enter(txq->txq_lock);
6226 txq->txq_stopping = false;
6227 mutex_exit(txq->txq_lock);
6228
6229 mutex_enter(rxq->rxq_lock);
6230 rxq->rxq_stopping = false;
6231 mutex_exit(rxq->rxq_lock);
6232 }
6233
6234 sc->sc_core_stopping = false;
6235 }
6236
6237 static void
6238 wm_set_stopping_flags(struct wm_softc *sc)
6239 {
6240 int i;
6241
6242 KASSERT(mutex_owned(sc->sc_core_lock));
6243
6244 sc->sc_core_stopping = true;
6245
6246 /* Must set stopping flags in ascending order. */
6247 for (i = 0; i < sc->sc_nqueues; i++) {
6248 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6249 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6250
6251 mutex_enter(rxq->rxq_lock);
6252 rxq->rxq_stopping = true;
6253 mutex_exit(rxq->rxq_lock);
6254
6255 mutex_enter(txq->txq_lock);
6256 txq->txq_stopping = true;
6257 mutex_exit(txq->txq_lock);
6258 }
6259 }
6260
6261 /*
6262 * Write interrupt interval value to ITR or EITR
6263 */
6264 static void
6265 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
6266 {
6267
6268 if (!wmq->wmq_set_itr)
6269 return;
6270
6271 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6272 uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
6273
6274 		/*
6275 		 * The 82575 doesn't have the CNT_INGR field, so overwrite
6276 		 * the counter field by software.
6277 		 */
6278 if (sc->sc_type == WM_T_82575)
6279 eitr |= __SHIFTIN(wmq->wmq_itr,
6280 EITR_COUNTER_MASK_82575);
6281 else
6282 eitr |= EITR_CNT_INGR;
6283
6284 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
6285 } else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
6286 		/*
6287 		 * The 82574 has both ITR and EITR. Set EITR when we use
6288 		 * the multiqueue function with MSI-X.
6289 		 */
6290 CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
6291 wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
6292 } else {
6293 KASSERT(wmq->wmq_id == 0);
6294 CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
6295 }
6296
6297 wmq->wmq_set_itr = false;
6298 }
6299
6300 /*
6301 * TODO
6302  * The dynamic ITR calculation below is almost the same as Linux igb's,
6303  * but it does not fit wm(4) well, so AIM is disabled until we find an
6304  * appropriate ITR calculation.
6305 */
6306 /*
6307  * Calculate the interrupt interval value that wm_itrs_writereg() will
6308  * write to the register. This function itself does not write ITR/EITR.
6309 */
6310 static void
6311 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
6312 {
6313 #ifdef NOTYET
6314 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6315 struct wm_txqueue *txq = &wmq->wmq_txq;
6316 uint32_t avg_size = 0;
6317 uint32_t new_itr;
6318
6319 if (rxq->rxq_packets)
6320 avg_size = rxq->rxq_bytes / rxq->rxq_packets;
6321 if (txq->txq_packets)
6322 avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
6323
6324 if (avg_size == 0) {
6325 new_itr = 450; /* restore default value */
6326 goto out;
6327 }
6328
6329 /* Add 24 bytes to size to account for CRC, preamble, and gap */
6330 avg_size += 24;
6331
6332 /* Don't starve jumbo frames */
6333 avg_size = uimin(avg_size, 3000);
6334
6335 /* Give a little boost to mid-size frames */
6336 if ((avg_size > 300) && (avg_size < 1200))
6337 new_itr = avg_size / 3;
6338 else
6339 new_itr = avg_size / 2;
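	/*
	 * Example: an average frame of 576 bytes becomes 600 after the
	 * 24-byte adjustment above; 600 is in the mid-size range, so
	 * new_itr = 600 / 3 = 200 before the x4 scaling below.
	 */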
6340
6341 out:
6342 /*
6343 	 * The usage of EITR on the 82574 and 82575 differs from other NEWQUEUE
6344 * controllers. See sc->sc_itr_init setting in wm_init_locked().
6345 */
6346 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
6347 new_itr *= 4;
6348
6349 if (new_itr != wmq->wmq_itr) {
6350 wmq->wmq_itr = new_itr;
6351 wmq->wmq_set_itr = true;
6352 } else
6353 wmq->wmq_set_itr = false;
6354
6355 rxq->rxq_packets = 0;
6356 rxq->rxq_bytes = 0;
6357 txq->txq_packets = 0;
6358 txq->txq_bytes = 0;
6359 #endif
6360 }
6361
6362 static void
6363 wm_init_sysctls(struct wm_softc *sc)
6364 {
6365 struct sysctllog **log;
6366 const struct sysctlnode *rnode, *qnode, *cnode;
6367 int i, rv;
6368 const char *dvname;
6369
6370 log = &sc->sc_sysctllog;
6371 dvname = device_xname(sc->sc_dev);
6372
6373 rv = sysctl_createv(log, 0, NULL, &rnode,
6374 0, CTLTYPE_NODE, dvname,
6375 SYSCTL_DESCR("wm information and settings"),
6376 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6377 if (rv != 0)
6378 goto err;
6379
6380 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6381 CTLTYPE_BOOL, "txrx_workqueue",
6382 SYSCTL_DESCR("Use workqueue for packet processing"),
6383 NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
6384 if (rv != 0)
6385 goto teardown;
6386
6387 for (i = 0; i < sc->sc_nqueues; i++) {
6388 struct wm_queue *wmq = &sc->sc_queue[i];
6389 struct wm_txqueue *txq = &wmq->wmq_txq;
6390 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6391
6392 snprintf(sc->sc_queue[i].sysctlname,
6393 sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
6394
6395 if (sysctl_createv(log, 0, &rnode, &qnode,
6396 0, CTLTYPE_NODE,
6397 sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
6398 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
6399 break;
6400
6401 if (sysctl_createv(log, 0, &qnode, &cnode,
6402 CTLFLAG_READONLY, CTLTYPE_INT,
6403 "txq_free", SYSCTL_DESCR("TX queue free"),
6404 NULL, 0, &txq->txq_free,
6405 0, CTL_CREATE, CTL_EOL) != 0)
6406 break;
6407 if (sysctl_createv(log, 0, &qnode, &cnode,
6408 CTLFLAG_READONLY, CTLTYPE_INT,
6409 "txd_head", SYSCTL_DESCR("TX descriptor head"),
6410 wm_sysctl_tdh_handler, 0, (void *)txq,
6411 0, CTL_CREATE, CTL_EOL) != 0)
6412 break;
6413 if (sysctl_createv(log, 0, &qnode, &cnode,
6414 CTLFLAG_READONLY, CTLTYPE_INT,
6415 "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
6416 wm_sysctl_tdt_handler, 0, (void *)txq,
6417 0, CTL_CREATE, CTL_EOL) != 0)
6418 break;
6419 if (sysctl_createv(log, 0, &qnode, &cnode,
6420 CTLFLAG_READONLY, CTLTYPE_INT,
6421 "txq_next", SYSCTL_DESCR("TX queue next"),
6422 NULL, 0, &txq->txq_next,
6423 0, CTL_CREATE, CTL_EOL) != 0)
6424 break;
6425 if (sysctl_createv(log, 0, &qnode, &cnode,
6426 CTLFLAG_READONLY, CTLTYPE_INT,
6427 "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
6428 NULL, 0, &txq->txq_sfree,
6429 0, CTL_CREATE, CTL_EOL) != 0)
6430 break;
6431 if (sysctl_createv(log, 0, &qnode, &cnode,
6432 CTLFLAG_READONLY, CTLTYPE_INT,
6433 "txq_snext", SYSCTL_DESCR("TX queue snext"),
6434 NULL, 0, &txq->txq_snext,
6435 0, CTL_CREATE, CTL_EOL) != 0)
6436 break;
6437 if (sysctl_createv(log, 0, &qnode, &cnode,
6438 CTLFLAG_READONLY, CTLTYPE_INT,
6439 "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
6440 NULL, 0, &txq->txq_sdirty,
6441 0, CTL_CREATE, CTL_EOL) != 0)
6442 break;
6443 if (sysctl_createv(log, 0, &qnode, &cnode,
6444 CTLFLAG_READONLY, CTLTYPE_INT,
6445 "txq_flags", SYSCTL_DESCR("TX queue flags"),
6446 NULL, 0, &txq->txq_flags,
6447 0, CTL_CREATE, CTL_EOL) != 0)
6448 break;
6449 if (sysctl_createv(log, 0, &qnode, &cnode,
6450 CTLFLAG_READONLY, CTLTYPE_BOOL,
6451 "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
6452 NULL, 0, &txq->txq_stopping,
6453 0, CTL_CREATE, CTL_EOL) != 0)
6454 break;
6455 if (sysctl_createv(log, 0, &qnode, &cnode,
6456 CTLFLAG_READONLY, CTLTYPE_BOOL,
6457 "txq_sending", SYSCTL_DESCR("TX queue sending"),
6458 NULL, 0, &txq->txq_sending,
6459 0, CTL_CREATE, CTL_EOL) != 0)
6460 break;
6461
6462 if (sysctl_createv(log, 0, &qnode, &cnode,
6463 CTLFLAG_READONLY, CTLTYPE_INT,
6464 "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
6465 NULL, 0, &rxq->rxq_ptr,
6466 0, CTL_CREATE, CTL_EOL) != 0)
6467 break;
6468 }
6469
6470 #ifdef WM_DEBUG
6471 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6472 CTLTYPE_INT, "debug_flags",
6473 SYSCTL_DESCR(
6474 "Debug flags:\n" \
6475 "\t0x01 LINK\n" \
6476 "\t0x02 TX\n" \
6477 "\t0x04 RX\n" \
6478 "\t0x08 GMII\n" \
6479 "\t0x10 MANAGE\n" \
6480 "\t0x20 NVM\n" \
6481 "\t0x40 INIT\n" \
6482 "\t0x80 LOCK"),
6483 wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
6484 if (rv != 0)
6485 goto teardown;
6486 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6487 CTLTYPE_BOOL, "trigger_reset",
6488 SYSCTL_DESCR("Trigger an interface reset"),
6489 NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
6490 if (rv != 0)
6491 goto teardown;
6492 #endif
6493
6494 return;
6495
6496 teardown:
6497 sysctl_teardown(log);
6498 err:
6499 sc->sc_sysctllog = NULL;
6500 device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
6501 __func__, rv);
6502 }
6503
6504 /*
6505 * wm_init: [ifnet interface function]
6506 *
6507 * Initialize the interface.
6508 */
6509 static int
6510 wm_init(struct ifnet *ifp)
6511 {
6512 struct wm_softc *sc = ifp->if_softc;
6513 int ret;
6514
6515 KASSERT(IFNET_LOCKED(ifp));
6516
6517 if (sc->sc_dying)
6518 return ENXIO;
6519
6520 mutex_enter(sc->sc_core_lock);
6521 ret = wm_init_locked(ifp);
6522 mutex_exit(sc->sc_core_lock);
6523
6524 return ret;
6525 }
6526
6527 static int
6528 wm_init_locked(struct ifnet *ifp)
6529 {
6530 struct wm_softc *sc = ifp->if_softc;
6531 struct ethercom *ec = &sc->sc_ethercom;
6532 int i, j, trynum, error = 0;
6533 uint32_t reg, sfp_mask = 0;
6534
6535 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6536 device_xname(sc->sc_dev), __func__));
6537 KASSERT(IFNET_LOCKED(ifp));
6538 KASSERT(mutex_owned(sc->sc_core_lock));
6539
6540 /*
6541 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
6542 	 * There is a small but measurable benefit to avoiding the adjustment
6543 * of the descriptor so that the headers are aligned, for normal mtu,
6544 * on such platforms. One possibility is that the DMA itself is
6545 * slightly more efficient if the front of the entire packet (instead
6546 * of the front of the headers) is aligned.
6547 *
6548 * Note we must always set align_tweak to 0 if we are using
6549 * jumbo frames.
6550 */
6551 #ifdef __NO_STRICT_ALIGNMENT
6552 sc->sc_align_tweak = 0;
6553 #else
6554 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
6555 sc->sc_align_tweak = 0;
6556 else
6557 sc->sc_align_tweak = 2;
6558 #endif /* __NO_STRICT_ALIGNMENT */
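	/*
	 * With a tweak of 2, the 14-byte Ethernet header ends on a 4-byte
	 * boundary (2 + 14 = 16), so the IP header that follows is
	 * naturally aligned on strict-alignment platforms.
	 */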
6559
6560 /* Cancel any pending I/O. */
6561 wm_stop_locked(ifp, false, false);
6562
6563 /* Update statistics before reset */
6564 if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
6565 if_ierrors, CSR_READ(sc, WMREG_RXERRC));
6566
6567 /* >= PCH_SPT hardware workaround before reset. */
6568 if (sc->sc_type >= WM_T_PCH_SPT)
6569 wm_flush_desc_rings(sc);
6570
6571 /* Reset the chip to a known state. */
6572 wm_reset(sc);
6573
6574 /*
6575 * AMT based hardware can now take control from firmware
6576 * Do this after reset.
6577 */
6578 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
6579 wm_get_hw_control(sc);
6580
6581 if ((sc->sc_type >= WM_T_PCH_SPT) &&
6582 pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
6583 wm_legacy_irq_quirk_spt(sc);
6584
6585 /* Init hardware bits */
6586 wm_initialize_hardware_bits(sc);
6587
6588 /* Reset the PHY. */
6589 if (sc->sc_flags & WM_F_HAS_MII)
6590 wm_gmii_reset(sc);
6591
6592 if (sc->sc_type >= WM_T_ICH8) {
6593 reg = CSR_READ(sc, WMREG_GCR);
6594 /*
6595 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
6596 * default after reset.
6597 */
6598 if (sc->sc_type == WM_T_ICH8)
6599 reg |= GCR_NO_SNOOP_ALL;
6600 else
6601 reg &= ~GCR_NO_SNOOP_ALL;
6602 CSR_WRITE(sc, WMREG_GCR, reg);
6603 }
6604
6605 if ((sc->sc_type >= WM_T_ICH8)
6606 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
6607 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
6608
6609 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6610 reg |= CTRL_EXT_RO_DIS;
6611 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6612 }
6613
6614 /* Calculate (E)ITR value */
6615 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
6616 /*
6617 * For NEWQUEUE's EITR (except for 82575).
6618 		 * The 82575's EITR should be set to the same throttling value
6619 		 * as other old controllers' ITR because the interrupts/sec
6620 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
6621 		 *
6622 		 * The 82574's EITR should be set to the same value as the ITR.
6623 		 *
6624 		 * For N interrupts/sec, set this value to:
6625 		 * 1,000,000 / N, in contrast to the ITR throttling value.
6626 */
6627 sc->sc_itr_init = 450;
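		/* 450 yields roughly 1,000,000 / 450 =~ 2222 interrupts/sec. */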
6628 } else if (sc->sc_type >= WM_T_82543) {
6629 /*
6630 * Set up the interrupt throttling register (units of 256ns)
6631 * Note that a footnote in Intel's documentation says this
6632 * ticker runs at 1/4 the rate when the chip is in 100Mbit
6633 * or 10Mbit mode. Empirically, it appears to be the case
6634 * that that is also true for the 1024ns units of the other
6635 * interrupt-related timer registers -- so, really, we ought
6636 * to divide this value by 4 when the link speed is low.
6637 *
6638 * XXX implement this division at link speed change!
6639 */
6640
6641 /*
6642 * For N interrupts/sec, set this value to:
6643 * 1,000,000,000 / (N * 256). Note that we set the
6644 * absolute and packet timer values to this value
6645 * divided by 4 to get "simple timer" behavior.
6646 */
6647 sc->sc_itr_init = 1500; /* 2604 ints/sec */
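		/*
		 * Check: 1,000,000,000 / (1500 * 256) =~ 2604 interrupts/sec,
		 * matching the figure above.
		 */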
6648 }
6649
6650 error = wm_init_txrx_queues(sc);
6651 if (error)
6652 goto out;
6653
6654 if (((sc->sc_flags & WM_F_SGMII) == 0) &&
6655 (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
6656 (sc->sc_type >= WM_T_82575))
6657 wm_serdes_power_up_link_82575(sc);
6658
6659 /* Clear out the VLAN table -- we don't use it (yet). */
6660 CSR_WRITE(sc, WMREG_VET, 0);
6661 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
6662 trynum = 10; /* Due to hw errata */
6663 else
6664 trynum = 1;
6665 for (i = 0; i < WM_VLAN_TABSIZE; i++)
6666 for (j = 0; j < trynum; j++)
6667 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
6668
6669 /*
6670 * Set up flow-control parameters.
6671 *
6672 * XXX Values could probably stand some tuning.
6673 */
6674 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
6675 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
6676 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
6677 && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
6678 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
6679 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
6680 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
6681 }
6682
6683 sc->sc_fcrtl = FCRTL_DFLT;
6684 if (sc->sc_type < WM_T_82543) {
6685 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
6686 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
6687 } else {
6688 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
6689 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
6690 }
6691
6692 if (sc->sc_type == WM_T_80003)
6693 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
6694 else
6695 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
6696
6697 /* Writes the control register. */
6698 wm_set_vlan(sc);
6699
6700 if (sc->sc_flags & WM_F_HAS_MII) {
6701 uint16_t kmreg;
6702
6703 switch (sc->sc_type) {
6704 case WM_T_80003:
6705 case WM_T_ICH8:
6706 case WM_T_ICH9:
6707 case WM_T_ICH10:
6708 case WM_T_PCH:
6709 case WM_T_PCH2:
6710 case WM_T_PCH_LPT:
6711 case WM_T_PCH_SPT:
6712 case WM_T_PCH_CNP:
6713 /*
6714 * Set the mac to wait the maximum time between each
6715 * iteration and increase the max iterations when
6716 * polling the phy; this fixes erroneous timeouts at
6717 * 10Mbps.
6718 */
6719 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
6720 0xFFFF);
6721 wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6722 &kmreg);
6723 kmreg |= 0x3F;
6724 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6725 kmreg);
6726 break;
6727 default:
6728 break;
6729 }
6730
6731 if (sc->sc_type == WM_T_80003) {
6732 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6733 reg &= ~CTRL_EXT_LINK_MODE_MASK;
6734 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6735
6736 /* Bypass RX and TX FIFOs */
6737 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
6738 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
6739 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
6740 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
6741 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
6742 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
6743 }
6744 }
6745 #if 0
6746 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
6747 #endif
6748
6749 /* Set up checksum offload parameters. */
6750 reg = CSR_READ(sc, WMREG_RXCSUM);
6751 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
6752 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6753 reg |= RXCSUM_IPOFL;
6754 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
6755 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
6756 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
6757 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
6758 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6759
6760 /* Set registers about MSI-X */
6761 if (wm_is_using_msix(sc)) {
6762 uint32_t ivar, qintr_idx;
6763 struct wm_queue *wmq;
6764 unsigned int qid;
6765
6766 if (sc->sc_type == WM_T_82575) {
6767 /* Interrupt control */
6768 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6769 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
6770 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6771
6772 /* TX and RX */
6773 for (i = 0; i < sc->sc_nqueues; i++) {
6774 wmq = &sc->sc_queue[i];
6775 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
6776 EITR_TX_QUEUE(wmq->wmq_id)
6777 | EITR_RX_QUEUE(wmq->wmq_id));
6778 }
6779 /* Link status */
6780 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
6781 EITR_OTHER);
6782 } else if (sc->sc_type == WM_T_82574) {
6783 /* Interrupt control */
6784 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6785 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
6786 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6787
6788 /*
6789 * Work around issue with spurious interrupts
6790 * in MSI-X mode.
6791 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
6792 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
6793 */
6794 reg = CSR_READ(sc, WMREG_RFCTL);
6795 reg |= WMREG_RFCTL_ACKDIS;
6796 CSR_WRITE(sc, WMREG_RFCTL, reg);
6797
6798 ivar = 0;
6799 /* TX and RX */
6800 for (i = 0; i < sc->sc_nqueues; i++) {
6801 wmq = &sc->sc_queue[i];
6802 qid = wmq->wmq_id;
6803 qintr_idx = wmq->wmq_intr_idx;
6804
6805 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6806 IVAR_TX_MASK_Q_82574(qid));
6807 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6808 IVAR_RX_MASK_Q_82574(qid));
6809 }
6810 /* Link status */
6811 ivar |= __SHIFTIN((IVAR_VALID_82574
6812 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
6813 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
6814 } else {
6815 /* Interrupt control */
6816 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
6817 | GPIE_EIAME | GPIE_PBA);
6818
6819 switch (sc->sc_type) {
6820 case WM_T_82580:
6821 case WM_T_I350:
6822 case WM_T_I354:
6823 case WM_T_I210:
6824 case WM_T_I211:
6825 /* TX and RX */
6826 for (i = 0; i < sc->sc_nqueues; i++) {
6827 wmq = &sc->sc_queue[i];
6828 qid = wmq->wmq_id;
6829 qintr_idx = wmq->wmq_intr_idx;
6830
6831 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
6832 ivar &= ~IVAR_TX_MASK_Q(qid);
6833 ivar |= __SHIFTIN((qintr_idx
6834 | IVAR_VALID),
6835 IVAR_TX_MASK_Q(qid));
6836 ivar &= ~IVAR_RX_MASK_Q(qid);
6837 ivar |= __SHIFTIN((qintr_idx
6838 | IVAR_VALID),
6839 IVAR_RX_MASK_Q(qid));
6840 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
6841 }
6842 break;
6843 case WM_T_82576:
6844 /* TX and RX */
6845 for (i = 0; i < sc->sc_nqueues; i++) {
6846 wmq = &sc->sc_queue[i];
6847 qid = wmq->wmq_id;
6848 qintr_idx = wmq->wmq_intr_idx;
6849
6850 ivar = CSR_READ(sc,
6851 WMREG_IVAR_Q_82576(qid));
6852 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
6853 ivar |= __SHIFTIN((qintr_idx
6854 | IVAR_VALID),
6855 IVAR_TX_MASK_Q_82576(qid));
6856 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
6857 ivar |= __SHIFTIN((qintr_idx
6858 | IVAR_VALID),
6859 IVAR_RX_MASK_Q_82576(qid));
6860 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
6861 ivar);
6862 }
6863 break;
6864 default:
6865 break;
6866 }
6867
6868 /* Link status */
6869 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
6870 IVAR_MISC_OTHER);
6871 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
6872 }
6873
6874 if (wm_is_using_multiqueue(sc)) {
6875 wm_init_rss(sc);
6876
6877 			/*
6878 			 * NOTE: Receive Full-Packet Checksum Offload is
6879 			 * mutually exclusive with Multiqueue. However, this
6880 			 * is not the same as TCP/IP checksums, which still
6881 			 * work.
6882 			 */
6883 reg = CSR_READ(sc, WMREG_RXCSUM);
6884 reg |= RXCSUM_PCSD;
6885 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6886 }
6887 }
6888
6889 /* Set up the interrupt registers. */
6890 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6891
6892 /* Enable SFP module insertion interrupt if it's required */
6893 if ((sc->sc_flags & WM_F_SFP) != 0) {
6894 sc->sc_ctrl |= CTRL_EXTLINK_EN;
6895 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6896 sfp_mask = ICR_GPI(0);
6897 }
6898
6899 if (wm_is_using_msix(sc)) {
6900 uint32_t mask;
6901 struct wm_queue *wmq;
6902
6903 switch (sc->sc_type) {
6904 case WM_T_82574:
6905 mask = 0;
6906 for (i = 0; i < sc->sc_nqueues; i++) {
6907 wmq = &sc->sc_queue[i];
6908 mask |= ICR_TXQ(wmq->wmq_id);
6909 mask |= ICR_RXQ(wmq->wmq_id);
6910 }
6911 mask |= ICR_OTHER;
6912 CSR_WRITE(sc, WMREG_EIAC_82574, mask);
6913 CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
6914 break;
6915 default:
6916 if (sc->sc_type == WM_T_82575) {
6917 mask = 0;
6918 for (i = 0; i < sc->sc_nqueues; i++) {
6919 wmq = &sc->sc_queue[i];
6920 mask |= EITR_TX_QUEUE(wmq->wmq_id);
6921 mask |= EITR_RX_QUEUE(wmq->wmq_id);
6922 }
6923 mask |= EITR_OTHER;
6924 } else {
6925 mask = 0;
6926 for (i = 0; i < sc->sc_nqueues; i++) {
6927 wmq = &sc->sc_queue[i];
6928 mask |= 1 << wmq->wmq_intr_idx;
6929 }
6930 mask |= 1 << sc->sc_link_intr_idx;
6931 }
6932 CSR_WRITE(sc, WMREG_EIAC, mask);
6933 CSR_WRITE(sc, WMREG_EIAM, mask);
6934 CSR_WRITE(sc, WMREG_EIMS, mask);
6935
6936 /* For other interrupts */
6937 CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
6938 break;
6939 }
6940 } else {
6941 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
6942 ICR_RXO | ICR_RXT0 | sfp_mask;
6943 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
6944 }
6945
6946 /* Set up the inter-packet gap. */
6947 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6948
6949 if (sc->sc_type >= WM_T_82543) {
6950 for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6951 struct wm_queue *wmq = &sc->sc_queue[qidx];
6952 wm_itrs_writereg(sc, wmq);
6953 }
6954 /*
		 * Link interrupts occur much less frequently than TX and
		 * RX interrupts, so we don't tune the
		 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's if_igb
		 * does.
6959 */
6960 }
6961
6962 /* Set the VLAN EtherType. */
6963 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
6964
6965 /*
6966 * Set up the transmit control register; we start out with
6967 * a collision distance suitable for FDX, but update it when
6968 * we resolve the media type.
6969 */
6970 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
6971 | TCTL_CT(TX_COLLISION_THRESHOLD)
6972 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6973 if (sc->sc_type >= WM_T_82571)
6974 sc->sc_tctl |= TCTL_MULR;
6975 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6976
6977 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* Write TDT after TCTL.EN is set. See the documentation. */
6979 CSR_WRITE(sc, WMREG_TDT(0), 0);
6980 }
6981
6982 if (sc->sc_type == WM_T_80003) {
6983 reg = CSR_READ(sc, WMREG_TCTL_EXT);
6984 reg &= ~TCTL_EXT_GCEX_MASK;
6985 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
6986 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
6987 }
6988
6989 /* Set the media. */
6990 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
6991 goto out;
6992
6993 /* Configure for OS presence */
6994 wm_init_manageability(sc);
6995
6996 /*
6997 * Set up the receive control register; we actually program the
6998 * register when we set the receive filter. Use multicast address
6999 * offset type 0.
7000 *
7001 * Only the i82544 has the ability to strip the incoming CRC, so we
7002 * don't enable that feature.
7003 */
7004 sc->sc_mchash_type = 0;
7005 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
7006 | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
7007
	/* The 82574 uses the one-buffer extended Rx descriptor format. */
7009 if (sc->sc_type == WM_T_82574)
7010 sc->sc_rctl |= RCTL_DTYP_ONEBUF;
7011
7012 if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
7013 sc->sc_rctl |= RCTL_SECRC;
7014
7015 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
7016 && (ifp->if_mtu > ETHERMTU)) {
7017 sc->sc_rctl |= RCTL_LPE;
7018 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7019 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
7020 }
7021
7022 if (MCLBYTES == 2048)
7023 sc->sc_rctl |= RCTL_2k;
7024 else {
7025 if (sc->sc_type >= WM_T_82543) {
7026 switch (MCLBYTES) {
7027 case 4096:
7028 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
7029 break;
7030 case 8192:
7031 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
7032 break;
7033 case 16384:
7034 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
7035 break;
7036 default:
7037 panic("wm_init: MCLBYTES %d unsupported",
7038 MCLBYTES);
7039 break;
7040 }
7041 } else
7042 panic("wm_init: i82542 requires MCLBYTES = 2048");
7043 }
7044
7045 /* Enable ECC */
7046 switch (sc->sc_type) {
7047 case WM_T_82571:
7048 reg = CSR_READ(sc, WMREG_PBA_ECC);
7049 reg |= PBA_ECC_CORR_EN;
7050 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
7051 break;
7052 case WM_T_PCH_LPT:
7053 case WM_T_PCH_SPT:
7054 case WM_T_PCH_CNP:
7055 reg = CSR_READ(sc, WMREG_PBECCSTS);
7056 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
7057 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
7058
7059 sc->sc_ctrl |= CTRL_MEHE;
7060 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7061 break;
7062 default:
7063 break;
7064 }
7065
7066 /*
7067 * Set the receive filter.
7068 *
7069 * For 82575 and 82576, the RX descriptors must be initialized after
7070 * the setting of RCTL.EN in wm_set_filter()
7071 */
7072 wm_set_filter(sc);
7073
	/* On 82575 and later, set RDT only after RX is enabled. */
7075 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7076 int qidx;
7077 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7078 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
7079 for (i = 0; i < WM_NRXDESC; i++) {
7080 mutex_enter(rxq->rxq_lock);
7081 wm_init_rxdesc(rxq, i);
				mutex_exit(rxq->rxq_lock);
			}
7085 }
7086 }
7087
7088 wm_unset_stopping_flags(sc);
7089
7090 /* Start the one second link check clock. */
7091 callout_schedule(&sc->sc_tick_ch, hz);
7092
7093 /*
7094 * ...all done! (IFNET_LOCKED asserted above.)
7095 */
7096 ifp->if_flags |= IFF_RUNNING;
7097
7098 out:
7099 /* Save last flags for the callback */
7100 sc->sc_if_flags = ifp->if_flags;
7101 sc->sc_ec_capenable = ec->ec_capenable;
7102 if (error)
7103 log(LOG_ERR, "%s: interface not running\n",
7104 device_xname(sc->sc_dev));
7105 return error;
7106 }
7107
7108 /*
7109 * wm_stop: [ifnet interface function]
7110 *
7111 * Stop transmission on the interface.
7112 */
7113 static void
7114 wm_stop(struct ifnet *ifp, int disable)
7115 {
7116 struct wm_softc *sc = ifp->if_softc;
7117
7118 ASSERT_SLEEPABLE();
7119 KASSERT(IFNET_LOCKED(ifp));
7120
7121 mutex_enter(sc->sc_core_lock);
7122 wm_stop_locked(ifp, disable ? true : false, true);
7123 mutex_exit(sc->sc_core_lock);
7124
7125 /*
7126 * After wm_set_stopping_flags(), it is guaranteed that
7127 * wm_handle_queue_work() does not call workqueue_enqueue().
	 * However, workqueue_wait() cannot be called in wm_stop_locked()
	 * because it can sleep, so call workqueue_wait() here.
7131 */
7132 for (int i = 0; i < sc->sc_nqueues; i++)
7133 workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
7134 workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
7135 }
7136
7137 static void
7138 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
7139 {
7140 struct wm_softc *sc = ifp->if_softc;
7141 struct wm_txsoft *txs;
7142 int i, qidx;
7143
7144 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7145 device_xname(sc->sc_dev), __func__));
7146 KASSERT(IFNET_LOCKED(ifp));
7147 KASSERT(mutex_owned(sc->sc_core_lock));
7148
7149 wm_set_stopping_flags(sc);
7150
7151 if (sc->sc_flags & WM_F_HAS_MII) {
7152 /* Down the MII. */
7153 mii_down(&sc->sc_mii);
7154 } else {
7155 #if 0
7156 /* Should we clear PHY's status properly? */
7157 wm_reset(sc);
7158 #endif
7159 }
7160
7161 /* Stop the transmit and receive processes. */
7162 CSR_WRITE(sc, WMREG_TCTL, 0);
7163 CSR_WRITE(sc, WMREG_RCTL, 0);
7164 sc->sc_rctl &= ~RCTL_EN;
7165
7166 /*
7167 * Clear the interrupt mask to ensure the device cannot assert its
7168 * interrupt line.
7169 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
7170 * service any currently pending or shared interrupt.
7171 */
7172 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7173 sc->sc_icr = 0;
7174 if (wm_is_using_msix(sc)) {
7175 if (sc->sc_type != WM_T_82574) {
7176 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
7177 CSR_WRITE(sc, WMREG_EIAC, 0);
7178 } else
7179 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
7180 }
7181
7182 /*
7183 * Stop callouts after interrupts are disabled; if we have
7184 * to wait for them, we will be releasing the CORE_LOCK
7185 * briefly, which will unblock interrupts on the current CPU.
7186 */
7187
7188 /* Stop the one second clock. */
7189 if (wait)
7190 callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
7191 else
7192 callout_stop(&sc->sc_tick_ch);
7193
7194 /* Stop the 82547 Tx FIFO stall check timer. */
7195 if (sc->sc_type == WM_T_82547) {
7196 if (wait)
7197 callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
7198 else
7199 callout_stop(&sc->sc_txfifo_ch);
7200 }
7201
7202 /* Release any queued transmit buffers. */
7203 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7204 struct wm_queue *wmq = &sc->sc_queue[qidx];
7205 struct wm_txqueue *txq = &wmq->wmq_txq;
7206 struct mbuf *m;
7207
7208 mutex_enter(txq->txq_lock);
7209 txq->txq_sending = false; /* Ensure watchdog disabled */
7210 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7211 txs = &txq->txq_soft[i];
7212 if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat,
				    txs->txs_dmamap);
7214 m_freem(txs->txs_mbuf);
7215 txs->txs_mbuf = NULL;
7216 }
7217 }
7218 /* Drain txq_interq */
7219 while ((m = pcq_get(txq->txq_interq)) != NULL)
7220 m_freem(m);
7221 mutex_exit(txq->txq_lock);
7222 }
7223
7224 /* Mark the interface as down and cancel the watchdog timer. */
7225 ifp->if_flags &= ~IFF_RUNNING;
7226 sc->sc_if_flags = ifp->if_flags;
7227
7228 if (disable) {
7229 for (i = 0; i < sc->sc_nqueues; i++) {
7230 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7231 mutex_enter(rxq->rxq_lock);
7232 wm_rxdrain(rxq);
7233 mutex_exit(rxq->rxq_lock);
7234 }
7235 }
7236
7237 #if 0 /* notyet */
7238 if (sc->sc_type >= WM_T_82544)
7239 CSR_WRITE(sc, WMREG_WUC, 0);
7240 #endif
7241 }
7242
7243 static void
7244 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
7245 {
7246 struct mbuf *m;
7247 int i;
7248
7249 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
7250 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
7251 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
7252 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
7253 m->m_data, m->m_len, m->m_flags);
7254 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
7255 i, i == 1 ? "" : "s");
7256 }
7257
7258 /*
7259 * wm_82547_txfifo_stall:
7260 *
7261 * Callout used to wait for the 82547 Tx FIFO to drain,
7262 * reset the FIFO pointers, and restart packet transmission.
7263 */
7264 static void
7265 wm_82547_txfifo_stall(void *arg)
7266 {
7267 struct wm_softc *sc = arg;
7268 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7269
7270 mutex_enter(txq->txq_lock);
7271
7272 if (txq->txq_stopping)
7273 goto out;
7274
7275 if (txq->txq_fifo_stall) {
7276 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
7277 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
7278 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
7279 /*
7280 * Packets have drained. Stop transmitter, reset
7281 * FIFO pointers, restart transmitter, and kick
7282 * the packet queue.
7283 */
7284 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
7285 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
7286 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
7287 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
7288 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
7289 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
7290 CSR_WRITE(sc, WMREG_TCTL, tctl);
7291 CSR_WRITE_FLUSH(sc);
7292
7293 txq->txq_fifo_head = 0;
7294 txq->txq_fifo_stall = 0;
7295 wm_start_locked(&sc->sc_ethercom.ec_if);
7296 } else {
7297 /*
7298 * Still waiting for packets to drain; try again in
7299 * another tick.
7300 */
7301 callout_schedule(&sc->sc_txfifo_ch, 1);
7302 }
7303 }
7304
7305 out:
7306 mutex_exit(txq->txq_lock);
7307 }
7308
7309 /*
7310 * wm_82547_txfifo_bugchk:
7311 *
7312 * Check for bug condition in the 82547 Tx FIFO. We need to
7313 * prevent enqueueing a packet that would wrap around the end
 * of the Tx FIFO ring buffer, otherwise the chip will croak.
7315 *
7316 * We do this by checking the amount of space before the end
7317 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
7318 * the Tx FIFO, wait for all remaining packets to drain, reset
7319 * the internal FIFO pointers to the beginning, and restart
7320 * transmission on the interface.
7321 */
7322 #define WM_FIFO_HDR 0x10
7323 #define WM_82547_PAD_LEN 0x3e0
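/*
 * Example: a 1514 byte frame consumes
 * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO space;
 * the extra 22 bytes are the 16 byte FIFO header plus alignment padding.
 */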
7324 static int
7325 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
7326 {
7327 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7328 int space = txq->txq_fifo_size - txq->txq_fifo_head;
7329 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
7330
7331 /* Just return if already stalled. */
7332 if (txq->txq_fifo_stall)
7333 return 1;
7334
7335 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7336 /* Stall only occurs in half-duplex mode. */
7337 goto send_packet;
7338 }
7339
7340 if (len >= WM_82547_PAD_LEN + space) {
7341 txq->txq_fifo_stall = 1;
7342 callout_schedule(&sc->sc_txfifo_ch, 1);
7343 return 1;
7344 }
7345
7346 send_packet:
7347 txq->txq_fifo_head += len;
7348 if (txq->txq_fifo_head >= txq->txq_fifo_size)
7349 txq->txq_fifo_head -= txq->txq_fifo_size;
7350
7351 return 0;
7352 }
7353
7354 static int
7355 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7356 {
7357 int error;
7358
7359 /*
7360 * Allocate the control data structures, and create and load the
7361 * DMA map for it.
7362 *
7363 * NOTE: All Tx descriptors must be in the same 4G segment of
7364 * memory. So must Rx descriptors. We simplify by allocating
7365 * both sets within the same 4G segment.
7366 */
7367 if (sc->sc_type < WM_T_82544)
7368 WM_NTXDESC(txq) = WM_NTXDESC_82542;
7369 else
7370 WM_NTXDESC(txq) = WM_NTXDESC_82544;
7371 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7372 txq->txq_descsize = sizeof(nq_txdesc_t);
7373 else
7374 txq->txq_descsize = sizeof(wiseman_txdesc_t);
7375
7376 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
7377 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
7378 1, &txq->txq_desc_rseg, 0)) != 0) {
7379 aprint_error_dev(sc->sc_dev,
7380 "unable to allocate TX control data, error = %d\n",
7381 error);
7382 goto fail_0;
7383 }
7384
7385 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
7386 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
7387 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
7388 aprint_error_dev(sc->sc_dev,
7389 "unable to map TX control data, error = %d\n", error);
7390 goto fail_1;
7391 }
7392
7393 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
7394 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
7395 aprint_error_dev(sc->sc_dev,
7396 "unable to create TX control data DMA map, error = %d\n",
7397 error);
7398 goto fail_2;
7399 }
7400
7401 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
7402 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
7403 aprint_error_dev(sc->sc_dev,
7404 "unable to load TX control data DMA map, error = %d\n",
7405 error);
7406 goto fail_3;
7407 }
7408
7409 return 0;
7410
7411 fail_3:
7412 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7413 fail_2:
7414 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7415 WM_TXDESCS_SIZE(txq));
7416 fail_1:
7417 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7418 fail_0:
7419 return error;
7420 }
7421
7422 static void
7423 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7424 {
7425
7426 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
7427 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7428 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7429 WM_TXDESCS_SIZE(txq));
7430 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7431 }
7432
7433 static int
7434 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7435 {
7436 int error;
7437 size_t rxq_descs_size;
7438
7439 /*
7440 * Allocate the control data structures, and create and load the
7441 * DMA map for it.
7442 *
7443 * NOTE: All Tx descriptors must be in the same 4G segment of
7444 * memory. So must Rx descriptors. We simplify by allocating
7445 * both sets within the same 4G segment.
7446 */
7447 rxq->rxq_ndesc = WM_NRXDESC;
7448 if (sc->sc_type == WM_T_82574)
7449 rxq->rxq_descsize = sizeof(ext_rxdesc_t);
7450 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7451 rxq->rxq_descsize = sizeof(nq_rxdesc_t);
7452 else
7453 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
7454 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
7455
7456 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
7457 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
7458 1, &rxq->rxq_desc_rseg, 0)) != 0) {
7459 aprint_error_dev(sc->sc_dev,
7460 "unable to allocate RX control data, error = %d\n",
7461 error);
7462 goto fail_0;
7463 }
7464
7465 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
7466 rxq->rxq_desc_rseg, rxq_descs_size,
7467 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
7468 aprint_error_dev(sc->sc_dev,
7469 "unable to map RX control data, error = %d\n", error);
7470 goto fail_1;
7471 }
7472
7473 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
7474 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
7475 aprint_error_dev(sc->sc_dev,
7476 "unable to create RX control data DMA map, error = %d\n",
7477 error);
7478 goto fail_2;
7479 }
7480
7481 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
7482 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
7483 aprint_error_dev(sc->sc_dev,
7484 "unable to load RX control data DMA map, error = %d\n",
7485 error);
7486 goto fail_3;
7487 }
7488
7489 return 0;
7490
7491 fail_3:
7492 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7493 fail_2:
7494 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7495 rxq_descs_size);
7496 fail_1:
7497 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7498 fail_0:
7499 return error;
7500 }
7501
7502 static void
7503 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7504 {
7505
7506 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
7507 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7508 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7509 rxq->rxq_descsize * rxq->rxq_ndesc);
7510 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7511 }
7512
7513
7514 static int
7515 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7516 {
7517 int i, error;
7518
7519 /* Create the transmit buffer DMA maps. */
7520 WM_TXQUEUELEN(txq) =
7521 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
7522 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
7523 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7524 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
7525 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
7526 &txq->txq_soft[i].txs_dmamap)) != 0) {
7527 aprint_error_dev(sc->sc_dev,
7528 "unable to create Tx DMA map %d, error = %d\n",
7529 i, error);
7530 goto fail;
7531 }
7532 }
7533
7534 return 0;
7535
7536 fail:
7537 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7538 if (txq->txq_soft[i].txs_dmamap != NULL)
7539 bus_dmamap_destroy(sc->sc_dmat,
7540 txq->txq_soft[i].txs_dmamap);
7541 }
7542 return error;
7543 }
7544
7545 static void
7546 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7547 {
7548 int i;
7549
7550 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7551 if (txq->txq_soft[i].txs_dmamap != NULL)
7552 bus_dmamap_destroy(sc->sc_dmat,
7553 txq->txq_soft[i].txs_dmamap);
7554 }
7555 }
7556
7557 static int
7558 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7559 {
7560 int i, error;
7561
7562 /* Create the receive buffer DMA maps. */
7563 for (i = 0; i < rxq->rxq_ndesc; i++) {
7564 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
7565 MCLBYTES, 0, 0,
7566 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
7567 aprint_error_dev(sc->sc_dev,
7568 "unable to create Rx DMA map %d error = %d\n",
7569 i, error);
7570 goto fail;
7571 }
7572 rxq->rxq_soft[i].rxs_mbuf = NULL;
7573 }
7574
7575 return 0;
7576
7577 fail:
7578 for (i = 0; i < rxq->rxq_ndesc; i++) {
7579 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7580 bus_dmamap_destroy(sc->sc_dmat,
7581 rxq->rxq_soft[i].rxs_dmamap);
7582 }
7583 return error;
7584 }
7585
7586 static void
7587 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7588 {
7589 int i;
7590
7591 for (i = 0; i < rxq->rxq_ndesc; i++) {
7592 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7593 bus_dmamap_destroy(sc->sc_dmat,
7594 rxq->rxq_soft[i].rxs_dmamap);
7595 }
7596 }
7597
7598 /*
 * wm_alloc_txrx_queues:
 *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
7601 */
7602 static int
7603 wm_alloc_txrx_queues(struct wm_softc *sc)
7604 {
7605 int i, error, tx_done, rx_done;
7606
7607 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
7608 KM_SLEEP);
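	/* With KM_SLEEP the allocation cannot fail; this check is defensive. */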
7609 if (sc->sc_queue == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate wm_queue\n");
7611 error = ENOMEM;
7612 goto fail_0;
7613 }
7614
7615 /* For transmission */
7616 error = 0;
7617 tx_done = 0;
7618 for (i = 0; i < sc->sc_nqueues; i++) {
7619 #ifdef WM_EVENT_COUNTERS
7620 int j;
7621 const char *xname;
7622 #endif
7623 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7624 txq->txq_sc = sc;
7625 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7626
7627 error = wm_alloc_tx_descs(sc, txq);
7628 if (error)
7629 break;
7630 error = wm_alloc_tx_buffer(sc, txq);
7631 if (error) {
7632 wm_free_tx_descs(sc, txq);
7633 break;
7634 }
7635 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
7636 if (txq->txq_interq == NULL) {
7637 wm_free_tx_descs(sc, txq);
7638 wm_free_tx_buffer(sc, txq);
7639 error = ENOMEM;
7640 break;
7641 }
7642
7643 #ifdef WM_EVENT_COUNTERS
7644 xname = device_xname(sc->sc_dev);
7645
7646 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
7647 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
7648 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
7649 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
7650 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
7651 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
7652 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
7653 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
7654 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
7655 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
7656 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
7657
7658 for (j = 0; j < WM_NTXSEGS; j++) {
7659 snprintf(txq->txq_txseg_evcnt_names[j],
7660 sizeof(txq->txq_txseg_evcnt_names[j]),
7661 "txq%02dtxseg%d", i, j);
7662 evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
7663 EVCNT_TYPE_MISC,
7664 NULL, xname, txq->txq_txseg_evcnt_names[j]);
7665 }
7666
7667 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
7668 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
7669 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
7670 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
7671 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
7672 WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
7673 #endif /* WM_EVENT_COUNTERS */
7674
7675 tx_done++;
7676 }
7677 if (error)
7678 goto fail_1;
7679
7680 /* For receive */
7681 error = 0;
7682 rx_done = 0;
7683 for (i = 0; i < sc->sc_nqueues; i++) {
7684 #ifdef WM_EVENT_COUNTERS
7685 const char *xname;
7686 #endif
7687 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7688 rxq->rxq_sc = sc;
7689 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7690
7691 error = wm_alloc_rx_descs(sc, rxq);
7692 if (error)
7693 break;
7694
7695 error = wm_alloc_rx_buffer(sc, rxq);
7696 if (error) {
7697 wm_free_rx_descs(sc, rxq);
7698 break;
7699 }
7700
7701 #ifdef WM_EVENT_COUNTERS
7702 xname = device_xname(sc->sc_dev);
7703
7704 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
7705 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
7706
7707 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
7708 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
7709 #endif /* WM_EVENT_COUNTERS */
7710
7711 rx_done++;
7712 }
7713 if (error)
7714 goto fail_2;
7715
7716 return 0;
7717
7718 fail_2:
7719 for (i = 0; i < rx_done; i++) {
7720 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7721 wm_free_rx_buffer(sc, rxq);
7722 wm_free_rx_descs(sc, rxq);
7723 if (rxq->rxq_lock)
7724 mutex_obj_free(rxq->rxq_lock);
7725 }
7726 fail_1:
7727 for (i = 0; i < tx_done; i++) {
7728 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7729 pcq_destroy(txq->txq_interq);
7730 wm_free_tx_buffer(sc, txq);
7731 wm_free_tx_descs(sc, txq);
7732 if (txq->txq_lock)
7733 mutex_obj_free(txq->txq_lock);
7734 }
7735
7736 kmem_free(sc->sc_queue,
7737 sizeof(struct wm_queue) * sc->sc_nqueues);
7738 fail_0:
7739 return error;
7740 }
7741
7742 /*
 * wm_free_txrx_queues:
 *	Free {tx,rx} descriptors and {tx,rx} buffers.
7745 */
7746 static void
7747 wm_free_txrx_queues(struct wm_softc *sc)
7748 {
7749 int i;
7750
7751 for (i = 0; i < sc->sc_nqueues; i++) {
7752 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7753
7754 #ifdef WM_EVENT_COUNTERS
7755 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
7756 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
7757 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
7758 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
7759 #endif /* WM_EVENT_COUNTERS */
7760
7761 wm_free_rx_buffer(sc, rxq);
7762 wm_free_rx_descs(sc, rxq);
7763 if (rxq->rxq_lock)
7764 mutex_obj_free(rxq->rxq_lock);
7765 }
7766
7767 for (i = 0; i < sc->sc_nqueues; i++) {
7768 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7769 struct mbuf *m;
7770 #ifdef WM_EVENT_COUNTERS
7771 int j;
7772
7773 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
7774 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
7775 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
7776 WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
7777 WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
7778 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
7779 WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
7780 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
7781 WM_Q_EVCNT_DETACH(txq, tso, txq, i);
7782 WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
7783 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
7784
7785 for (j = 0; j < WM_NTXSEGS; j++)
7786 evcnt_detach(&txq->txq_ev_txseg[j]);
7787
7788 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
7789 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
7790 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
7791 WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
7792 WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
7793 WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
7794 #endif /* WM_EVENT_COUNTERS */
7795
7796 /* Drain txq_interq */
7797 while ((m = pcq_get(txq->txq_interq)) != NULL)
7798 m_freem(m);
7799 pcq_destroy(txq->txq_interq);
7800
7801 wm_free_tx_buffer(sc, txq);
7802 wm_free_tx_descs(sc, txq);
7803 if (txq->txq_lock)
7804 mutex_obj_free(txq->txq_lock);
7805 }
7806
7807 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
7808 }
7809
7810 static void
7811 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7812 {
7813
7814 KASSERT(mutex_owned(txq->txq_lock));
7815
7816 /* Initialize the transmit descriptor ring. */
7817 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
7818 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
7819 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7820 txq->txq_free = WM_NTXDESC(txq);
7821 txq->txq_next = 0;
7822 }
7823
7824 static void
7825 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7826 struct wm_txqueue *txq)
7827 {
7828
7829 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7830 device_xname(sc->sc_dev), __func__));
7831 KASSERT(mutex_owned(txq->txq_lock));
7832
7833 if (sc->sc_type < WM_T_82543) {
7834 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
7835 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
7836 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
7837 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
7838 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
7839 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
7840 } else {
7841 int qid = wmq->wmq_id;
7842
7843 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
7844 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
7845 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
7846 CSR_WRITE(sc, WMREG_TDH(qid), 0);
7847
7848 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7849 /*
7850 * Don't write TDT before TCTL.EN is set.
			 * See the documentation.
7852 */
7853 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
7854 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
7855 | TXDCTL_WTHRESH(0));
7856 else {
7857 /* XXX should update with AIM? */
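			/*
			 * wmq_itr is kept in the ITR/EITR register's 256 ns
			 * units; TIDV and TADV count 1.024 us ticks, hence
			 * the division by 4.
			 */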
7858 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
7859 if (sc->sc_type >= WM_T_82540) {
7860 /* Should be the same */
7861 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
7862 }
7863
7864 CSR_WRITE(sc, WMREG_TDT(qid), 0);
7865 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
7866 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
7867 }
7868 }
7869 }
7870
7871 static void
7872 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7873 {
7874 int i;
7875
7876 KASSERT(mutex_owned(txq->txq_lock));
7877
7878 /* Initialize the transmit job descriptors. */
7879 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
7880 txq->txq_soft[i].txs_mbuf = NULL;
7881 txq->txq_sfree = WM_TXQUEUELEN(txq);
7882 txq->txq_snext = 0;
7883 txq->txq_sdirty = 0;
7884 }
7885
7886 static void
7887 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7888 struct wm_txqueue *txq)
7889 {
7890
7891 KASSERT(mutex_owned(txq->txq_lock));
7892
7893 /*
7894 * Set up some register offsets that are different between
7895 * the i82542 and the i82543 and later chips.
7896 */
7897 if (sc->sc_type < WM_T_82543)
7898 txq->txq_tdt_reg = WMREG_OLD_TDT;
7899 else
7900 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
7901
7902 wm_init_tx_descs(sc, txq);
7903 wm_init_tx_regs(sc, wmq, txq);
7904 wm_init_tx_buffer(sc, txq);
7905
	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD. */
7907 txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
7908
7909 txq->txq_sending = false;
7910 }
7911
7912 static void
7913 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7914 struct wm_rxqueue *rxq)
7915 {
7916
7917 KASSERT(mutex_owned(rxq->rxq_lock));
7918
7919 /*
7920 * Initialize the receive descriptor and receive job
7921 * descriptor rings.
7922 */
7923 if (sc->sc_type < WM_T_82543) {
7924 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
7925 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
7926 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
7927 rxq->rxq_descsize * rxq->rxq_ndesc);
7928 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
7929 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
7930 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
7931
7932 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
7933 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
7934 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
7935 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
7936 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
7937 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
7938 } else {
7939 int qid = wmq->wmq_id;
7940
7941 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
7942 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
7943 CSR_WRITE(sc, WMREG_RDLEN(qid),
7944 rxq->rxq_descsize * rxq->rxq_ndesc);
7945
7946 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7947 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
7948 panic("%s: MCLBYTES %d unsupported for 82575 "
7949 "or higher\n", __func__, MCLBYTES);
7950
7951 /*
			 * Currently, we support only
			 * SRRCTL_DESCTYPE_ADV_ONEBUF.
7954 */
7955 CSR_WRITE(sc, WMREG_SRRCTL(qid),
7956 SRRCTL_DESCTYPE_ADV_ONEBUF
7957 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
7958 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
7959 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
7960 | RXDCTL_WTHRESH(1));
7961 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7962 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7963 } else {
7964 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7965 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7966 /* XXX should update with AIM? */
7967 CSR_WRITE(sc, WMREG_RDTR,
7968 (wmq->wmq_itr / 4) | RDTR_FPD);
			/* MUST be the same */
7970 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
7971 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
7972 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
7973 }
7974 }
7975 }
7976
7977 static int
7978 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7979 {
7980 struct wm_rxsoft *rxs;
7981 int error, i;
7982
7983 KASSERT(mutex_owned(rxq->rxq_lock));
7984
7985 for (i = 0; i < rxq->rxq_ndesc; i++) {
7986 rxs = &rxq->rxq_soft[i];
7987 if (rxs->rxs_mbuf == NULL) {
7988 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
7989 log(LOG_ERR, "%s: unable to allocate or map "
7990 "rx buffer %d, error = %d\n",
7991 device_xname(sc->sc_dev), i, error);
7992 /*
7993 * XXX Should attempt to run with fewer receive
7994 * XXX buffers instead of just failing.
7995 */
7996 wm_rxdrain(rxq);
7997 return ENOMEM;
7998 }
7999 } else {
8000 /*
8001 * For 82575 and 82576, the RX descriptors must be
8002 * initialized after the setting of RCTL.EN in
8003 * wm_set_filter()
8004 */
8005 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
8006 wm_init_rxdesc(rxq, i);
8007 }
8008 }
8009 rxq->rxq_ptr = 0;
8010 rxq->rxq_discard = 0;
8011 WM_RXCHAIN_RESET(rxq);
8012
8013 return 0;
8014 }
8015
8016 static int
8017 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8018 struct wm_rxqueue *rxq)
8019 {
8020
8021 KASSERT(mutex_owned(rxq->rxq_lock));
8022
8023 /*
8024 * Set up some register offsets that are different between
8025 * the i82542 and the i82543 and later chips.
8026 */
8027 if (sc->sc_type < WM_T_82543)
8028 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
8029 else
8030 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
8031
8032 wm_init_rx_regs(sc, wmq, rxq);
8033 return wm_init_rx_buffer(sc, rxq);
8034 }
8035
8036 /*
 * wm_init_txrx_queues:
 *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
8039 */
8040 static int
8041 wm_init_txrx_queues(struct wm_softc *sc)
8042 {
8043 int i, error = 0;
8044
8045 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8046 device_xname(sc->sc_dev), __func__));
8047
8048 for (i = 0; i < sc->sc_nqueues; i++) {
8049 struct wm_queue *wmq = &sc->sc_queue[i];
8050 struct wm_txqueue *txq = &wmq->wmq_txq;
8051 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8052
8053 /*
		 * TODO
		 * Currently, we use a constant value instead of AIM
		 * (Adaptive Interrupt Moderation).
		 * Furthermore, the interrupt interval for multiqueue,
		 * which uses polling mode, is lower than the default
		 * value. More tuning and AIM are required.
8059 */
8060 if (wm_is_using_multiqueue(sc))
8061 wmq->wmq_itr = 50;
8062 else
8063 wmq->wmq_itr = sc->sc_itr_init;
8064 wmq->wmq_set_itr = true;
8065
8066 mutex_enter(txq->txq_lock);
8067 wm_init_tx_queue(sc, wmq, txq);
8068 mutex_exit(txq->txq_lock);
8069
8070 mutex_enter(rxq->rxq_lock);
8071 error = wm_init_rx_queue(sc, wmq, rxq);
8072 mutex_exit(rxq->rxq_lock);
8073 if (error)
8074 break;
8075 }
8076
8077 return error;
8078 }
8079
8080 /*
8081 * wm_tx_offload:
8082 *
8083 * Set up TCP/IP checksumming parameters for the
8084 * specified packet.
8085 */
8086 static void
8087 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8088 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
8089 {
8090 struct mbuf *m0 = txs->txs_mbuf;
8091 struct livengood_tcpip_ctxdesc *t;
8092 uint32_t ipcs, tucs, cmd, cmdlen, seg;
8093 uint32_t ipcse;
8094 struct ether_header *eh;
8095 int offset, iphl;
8096 uint8_t fields;
8097
8098 /*
8099 * XXX It would be nice if the mbuf pkthdr had offset
8100 * fields for the protocol headers.
8101 */
8102
8103 eh = mtod(m0, struct ether_header *);
8104 switch (htons(eh->ether_type)) {
8105 case ETHERTYPE_IP:
8106 case ETHERTYPE_IPV6:
8107 offset = ETHER_HDR_LEN;
8108 break;
8109
8110 case ETHERTYPE_VLAN:
8111 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8112 break;
8113
8114 default:
8115 /* Don't support this protocol or encapsulation. */
8116 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8117 txq->txq_last_hw_ipcs = 0;
8118 txq->txq_last_hw_tucs = 0;
8119 *fieldsp = 0;
8120 *cmdp = 0;
8121 return;
8122 }
8123
8124 if ((m0->m_pkthdr.csum_flags &
8125 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8126 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8127 } else
8128 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8129
8130 ipcse = offset + iphl - 1;
8131
8132 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
8133 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
8134 seg = 0;
8135 fields = 0;
8136
8137 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8138 int hlen = offset + iphl;
8139 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8140
8141 if (__predict_false(m0->m_len <
8142 (hlen + sizeof(struct tcphdr)))) {
8143 /*
8144 * TCP/IP headers are not in the first mbuf; we need
8145 * to do this the slow and painful way. Let's just
8146 * hope this doesn't happen very often.
8147 */
8148 struct tcphdr th;
8149
8150 WM_Q_EVCNT_INCR(txq, tsopain);
8151
8152 m_copydata(m0, hlen, sizeof(th), &th);
8153 if (v4) {
8154 struct ip ip;
8155
8156 m_copydata(m0, offset, sizeof(ip), &ip);
8157 ip.ip_len = 0;
8158 m_copyback(m0,
8159 offset + offsetof(struct ip, ip_len),
8160 sizeof(ip.ip_len), &ip.ip_len);
8161 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8162 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8163 } else {
8164 struct ip6_hdr ip6;
8165
8166 m_copydata(m0, offset, sizeof(ip6), &ip6);
8167 ip6.ip6_plen = 0;
8168 m_copyback(m0,
8169 offset + offsetof(struct ip6_hdr, ip6_plen),
8170 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8171 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8172 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8173 }
8174 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8175 sizeof(th.th_sum), &th.th_sum);
8176
8177 hlen += th.th_off << 2;
8178 } else {
8179 /*
8180 * TCP/IP headers are in the first mbuf; we can do
8181 * this the easy way.
8182 */
8183 struct tcphdr *th;
8184
8185 if (v4) {
8186 struct ip *ip =
8187 (void *)(mtod(m0, char *) + offset);
8188 th = (void *)(mtod(m0, char *) + hlen);
8189
8190 ip->ip_len = 0;
8191 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8192 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8193 } else {
8194 struct ip6_hdr *ip6 =
8195 (void *)(mtod(m0, char *) + offset);
8196 th = (void *)(mtod(m0, char *) + hlen);
8197
8198 ip6->ip6_plen = 0;
8199 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8200 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8201 }
8202 hlen += th->th_off << 2;
8203 }
8204
8205 if (v4) {
8206 WM_Q_EVCNT_INCR(txq, tso);
8207 cmdlen |= WTX_TCPIP_CMD_IP;
8208 } else {
8209 WM_Q_EVCNT_INCR(txq, tso6);
8210 ipcse = 0;
8211 }
8212 cmd |= WTX_TCPIP_CMD_TSE;
8213 cmdlen |= WTX_TCPIP_CMD_TSE |
8214 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
8215 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
8216 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
8217 }
8218
8219 /*
8220 * NOTE: Even if we're not using the IP or TCP/UDP checksum
8221 * offload feature, if we load the context descriptor, we
8222 * MUST provide valid values for IPCSS and TUCSS fields.
8223 */
8224
8225 ipcs = WTX_TCPIP_IPCSS(offset) |
8226 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
8227 WTX_TCPIP_IPCSE(ipcse);
8228 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
8229 WM_Q_EVCNT_INCR(txq, ipsum);
8230 fields |= WTX_IXSM;
8231 }
8232
8233 offset += iphl;
8234
8235 if (m0->m_pkthdr.csum_flags &
8236 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
8237 WM_Q_EVCNT_INCR(txq, tusum);
8238 fields |= WTX_TXSM;
8239 tucs = WTX_TCPIP_TUCSS(offset) |
8240 WTX_TCPIP_TUCSO(offset +
8241 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
8242 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8243 } else if ((m0->m_pkthdr.csum_flags &
8244 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
8245 WM_Q_EVCNT_INCR(txq, tusum6);
8246 fields |= WTX_TXSM;
8247 tucs = WTX_TCPIP_TUCSS(offset) |
8248 WTX_TCPIP_TUCSO(offset +
8249 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
8250 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8251 } else {
8252 /* Just initialize it to a valid TCP context. */
8253 tucs = WTX_TCPIP_TUCSS(offset) |
8254 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
8255 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8256 }
8257
8258 *cmdp = cmd;
8259 *fieldsp = fields;
8260
8261 /*
	 * We don't have to write a context descriptor for every packet,
	 * except on the 82574. For the 82574, we must write a context
	 * descriptor for every packet when we use two descriptor queues.
	 *
	 * The 82574L can only remember the *last* context used,
	 * regardless of which queue it was used for. We cannot reuse
	 * contexts on this hardware platform and must generate a new
	 * context every time. 82574L hardware spec, section 7.2.6,
	 * second note.
8271 */
8272 if (sc->sc_nqueues < 2) {
8273 /*
		 * Setting up a new checksum offload context for every
		 * frame takes a lot of processing time for hardware.
		 * It also reduces performance a lot for small sized
		 * frames, so avoid it if the driver can use a previously
		 * configured checksum offload context.
		 * For TSO, in theory we could reuse the same TSO context
		 * only if the frame is of the same type (IP/TCP) and has
		 * the same MSS. However, checking whether a frame has the
		 * same IP/TCP structure is hard, so just ignore that and
		 * always establish a new TSO context.
8284 */
8285 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
8286 == 0) {
8287 if (txq->txq_last_hw_cmd == cmd &&
8288 txq->txq_last_hw_fields == fields &&
8289 txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
8290 txq->txq_last_hw_tucs == (tucs & 0xffff)) {
8291 WM_Q_EVCNT_INCR(txq, skipcontext);
8292 return;
8293 }
8294 }
8295
8296 txq->txq_last_hw_cmd = cmd;
8297 txq->txq_last_hw_fields = fields;
8298 txq->txq_last_hw_ipcs = (ipcs & 0xffff);
8299 txq->txq_last_hw_tucs = (tucs & 0xffff);
8300 }
8301
8302 /* Fill in the context descriptor. */
8303 t = (struct livengood_tcpip_ctxdesc *)
8304 &txq->txq_descs[txq->txq_next];
8305 t->tcpip_ipcs = htole32(ipcs);
8306 t->tcpip_tucs = htole32(tucs);
8307 t->tcpip_cmdlen = htole32(cmdlen);
8308 t->tcpip_seg = htole32(seg);
8309 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8310
8311 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8312 txs->txs_ndesc++;
8313 }
8314
8315 static inline int
8316 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
8317 {
8318 struct wm_softc *sc = ifp->if_softc;
8319 u_int cpuid = cpu_index(curcpu());
8320
8321 /*
	 * Currently, a simple distribution strategy.
	 * TODO:
	 * Distribute by flowid (when RSS provides one).
8325 */
	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu)
	    % sc->sc_nqueues;
8327 }
8328
8329 static inline bool
8330 wm_linkdown_discard(struct wm_txqueue *txq)
8331 {
8332
8333 if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
8334 return true;
8335
8336 return false;
8337 }
8338
8339 /*
8340 * wm_start: [ifnet interface function]
8341 *
8342 * Start packet transmission on the interface.
8343 */
8344 static void
8345 wm_start(struct ifnet *ifp)
8346 {
8347 struct wm_softc *sc = ifp->if_softc;
8348 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8349
8350 KASSERT(if_is_mpsafe(ifp));
8351 /*
8352 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8353 */
8354
8355 mutex_enter(txq->txq_lock);
8356 if (!txq->txq_stopping)
8357 wm_start_locked(ifp);
8358 mutex_exit(txq->txq_lock);
8359 }
8360
8361 static void
8362 wm_start_locked(struct ifnet *ifp)
8363 {
8364 struct wm_softc *sc = ifp->if_softc;
8365 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8366
8367 wm_send_common_locked(ifp, txq, false);
8368 }
8369
8370 static int
8371 wm_transmit(struct ifnet *ifp, struct mbuf *m)
8372 {
8373 int qid;
8374 struct wm_softc *sc = ifp->if_softc;
8375 struct wm_txqueue *txq;
8376
8377 qid = wm_select_txqueue(ifp, m);
8378 txq = &sc->sc_queue[qid].wmq_txq;
8379
8380 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8381 m_freem(m);
8382 WM_Q_EVCNT_INCR(txq, pcqdrop);
8383 return ENOBUFS;
8384 }
8385
8386 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8387 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8388 if (m->m_flags & M_MCAST)
8389 if_statinc_ref(nsr, if_omcasts);
8390 IF_STAT_PUTREF(ifp);
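
	/*
	 * mutex_tryenter() is sufficient here: if another thread holds
	 * txq_lock, that holder will drain txq_interq, including the
	 * packet we just queued.
	 */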
8391
8392 if (mutex_tryenter(txq->txq_lock)) {
8393 if (!txq->txq_stopping)
8394 wm_transmit_locked(ifp, txq);
8395 mutex_exit(txq->txq_lock);
8396 }
8397
8398 return 0;
8399 }
8400
8401 static void
8402 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8403 {
8404
8405 wm_send_common_locked(ifp, txq, true);
8406 }
8407
8408 static void
8409 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8410 bool is_transmit)
8411 {
8412 struct wm_softc *sc = ifp->if_softc;
8413 struct mbuf *m0;
8414 struct wm_txsoft *txs;
8415 bus_dmamap_t dmamap;
8416 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
8417 bus_addr_t curaddr;
8418 bus_size_t seglen, curlen;
8419 uint32_t cksumcmd;
8420 uint8_t cksumfields;
8421 bool remap = true;
8422
8423 KASSERT(mutex_owned(txq->txq_lock));
8424 KASSERT(!txq->txq_stopping);
8425
8426 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8427 return;
8428
8429 if (__predict_false(wm_linkdown_discard(txq))) {
8430 do {
8431 if (is_transmit)
8432 m0 = pcq_get(txq->txq_interq);
8433 else
8434 IFQ_DEQUEUE(&ifp->if_snd, m0);
8435 /*
			 * Increment the sent-packet counter, as in the
			 * case where the packet is discarded by a PHY
			 * whose link is down.
8438 */
8439 if (m0 != NULL) {
8440 if_statinc(ifp, if_opackets);
8441 m_freem(m0);
8442 }
8443 } while (m0 != NULL);
8444 return;
8445 }
8446
8447 /* Remember the previous number of free descriptors. */
8448 ofree = txq->txq_free;
8449
8450 /*
8451 * Loop through the send queue, setting up transmit descriptors
8452 * until we drain the queue, or use up all available transmit
8453 * descriptors.
8454 */
8455 for (;;) {
8456 m0 = NULL;
8457
8458 /* Get a work queue entry. */
8459 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8460 wm_txeof(txq, UINT_MAX);
8461 if (txq->txq_sfree == 0) {
8462 DPRINTF(sc, WM_DEBUG_TX,
8463 ("%s: TX: no free job descriptors\n",
8464 device_xname(sc->sc_dev)));
8465 WM_Q_EVCNT_INCR(txq, txsstall);
8466 break;
8467 }
8468 }
8469
8470 /* Grab a packet off the queue. */
8471 if (is_transmit)
8472 m0 = pcq_get(txq->txq_interq);
8473 else
8474 IFQ_DEQUEUE(&ifp->if_snd, m0);
8475 if (m0 == NULL)
8476 break;
8477
8478 DPRINTF(sc, WM_DEBUG_TX,
8479 ("%s: TX: have packet to transmit: %p\n",
8480 device_xname(sc->sc_dev), m0));
8481
8482 txs = &txq->txq_soft[txq->txq_snext];
8483 dmamap = txs->txs_dmamap;
8484
8485 use_tso = (m0->m_pkthdr.csum_flags &
8486 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
8487
8488 /*
8489 * So says the Linux driver:
8490 * The controller does a simple calculation to make sure
8491 * there is enough room in the FIFO before initiating the
8492 * DMA for each buffer. The calc is:
8493 * 4 = ceil(buffer len / MSS)
8494 * To make sure we don't overrun the FIFO, adjust the max
8495 * buffer len if the MSS drops.
8496 */
8497 dmamap->dm_maxsegsz =
8498 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
8499 ? m0->m_pkthdr.segsz << 2
8500 : WTX_MAX_LEN;
8501
8502 /*
8503 * Load the DMA map. If this fails, the packet either
8504 * didn't fit in the allotted number of segments, or we
8505 * were short on resources. For the too-many-segments
8506 * case, we simply report an error and drop the packet,
8507 * since we can't sanely copy a jumbo packet to a single
8508 * buffer.
8509 */
8510 retry:
8511 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8512 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8513 if (__predict_false(error)) {
8514 if (error == EFBIG) {
8515 if (remap == true) {
8516 struct mbuf *m;
8517
8518 remap = false;
8519 m = m_defrag(m0, M_NOWAIT);
8520 if (m != NULL) {
8521 WM_Q_EVCNT_INCR(txq, defrag);
8522 m0 = m;
8523 goto retry;
8524 }
8525 }
8526 WM_Q_EVCNT_INCR(txq, toomanyseg);
8527 log(LOG_ERR, "%s: Tx packet consumes too many "
8528 "DMA segments, dropping...\n",
8529 device_xname(sc->sc_dev));
8530 wm_dump_mbuf_chain(sc, m0);
8531 m_freem(m0);
8532 continue;
8533 }
8534 /* Short on resources, just stop for now. */
8535 DPRINTF(sc, WM_DEBUG_TX,
8536 ("%s: TX: dmamap load failed: %d\n",
8537 device_xname(sc->sc_dev), error));
8538 break;
8539 }
8540
8541 segs_needed = dmamap->dm_nsegs;
8542 if (use_tso) {
8543 /* For sentinel descriptor; see below. */
8544 segs_needed++;
8545 }
8546
8547 /*
8548 * Ensure we have enough descriptors free to describe
8549 * the packet. Note, we always reserve one descriptor
8550 * at the end of the ring due to the semantics of the
8551 * TDT register, plus one more in the event we need
8552 * to load offload context.
8553 */
8554 if (segs_needed > txq->txq_free - 2) {
8555 /*
8556 * Not enough free descriptors to transmit this
8557 * packet. We haven't committed anything yet,
8558 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
8560 * layer that there are no more slots left.
8561 */
8562 DPRINTF(sc, WM_DEBUG_TX,
8563 ("%s: TX: need %d (%d) descriptors, have %d\n",
8564 device_xname(sc->sc_dev), dmamap->dm_nsegs,
8565 segs_needed, txq->txq_free - 1));
8566 txq->txq_flags |= WM_TXQ_NO_SPACE;
8567 bus_dmamap_unload(sc->sc_dmat, dmamap);
8568 WM_Q_EVCNT_INCR(txq, txdstall);
8569 break;
8570 }
8571
8572 /*
8573 * Check for 82547 Tx FIFO bug. We need to do this
8574 * once we know we can transmit the packet, since we
8575 * do some internal FIFO space accounting here.
8576 */
8577 if (sc->sc_type == WM_T_82547 &&
8578 wm_82547_txfifo_bugchk(sc, m0)) {
8579 DPRINTF(sc, WM_DEBUG_TX,
8580 ("%s: TX: 82547 Tx FIFO bug detected\n",
8581 device_xname(sc->sc_dev)));
8582 txq->txq_flags |= WM_TXQ_NO_SPACE;
8583 bus_dmamap_unload(sc->sc_dmat, dmamap);
8584 WM_Q_EVCNT_INCR(txq, fifo_stall);
8585 break;
8586 }
8587
8588 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8589
8590 DPRINTF(sc, WM_DEBUG_TX,
8591 ("%s: TX: packet has %d (%d) DMA segments\n",
8592 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8593
8594 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8595
8596 /*
8597 * Store a pointer to the packet so that we can free it
8598 * later.
8599 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments. This may be
8602 * incremented by 1 if we do checksum offload (a descriptor
8603 * is used to set the checksum context).
8604 */
8605 txs->txs_mbuf = m0;
8606 txs->txs_firstdesc = txq->txq_next;
8607 txs->txs_ndesc = segs_needed;
8608
8609 /* Set up offload parameters for this packet. */
8610 if (m0->m_pkthdr.csum_flags &
8611 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8612 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8613 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8614 wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
8615 } else {
8616 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8617 txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
8618 cksumcmd = 0;
8619 cksumfields = 0;
8620 }
8621
8622 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
8623
8624 /* Sync the DMA map. */
8625 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8626 BUS_DMASYNC_PREWRITE);
8627
8628 /* Initialize the transmit descriptor. */
8629 for (nexttx = txq->txq_next, seg = 0;
8630 seg < dmamap->dm_nsegs; seg++) {
8631 for (seglen = dmamap->dm_segs[seg].ds_len,
8632 curaddr = dmamap->dm_segs[seg].ds_addr;
8633 seglen != 0;
8634 curaddr += curlen, seglen -= curlen,
8635 nexttx = WM_NEXTTX(txq, nexttx)) {
8636 curlen = seglen;
8637
8638 /*
8639 * So says the Linux driver:
8640 * Work around for premature descriptor
8641 * write-backs in TSO mode. Append a
8642 * 4-byte sentinel descriptor.
8643 */
8644 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
8645 curlen > 8)
8646 curlen -= 4;
8647
8648 wm_set_dma_addr(
8649 &txq->txq_descs[nexttx].wtx_addr, curaddr);
8650 txq->txq_descs[nexttx].wtx_cmdlen
8651 = htole32(cksumcmd | curlen);
8652 txq->txq_descs[nexttx].wtx_fields.wtxu_status
8653 = 0;
8654 txq->txq_descs[nexttx].wtx_fields.wtxu_options
8655 = cksumfields;
8656 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
8657 lasttx = nexttx;
8658
8659 DPRINTF(sc, WM_DEBUG_TX,
8660 ("%s: TX: desc %d: low %#" PRIx64 ", "
8661 "len %#04zx\n",
8662 device_xname(sc->sc_dev), nexttx,
8663 (uint64_t)curaddr, curlen));
8664 }
8665 }
8666
8667 KASSERT(lasttx != -1);
8668
8669 /*
8670 * Set up the command byte on the last descriptor of
8671 * the packet. If we're in the interrupt delay window,
8672 * delay the interrupt.
8673 */
8674 txq->txq_descs[lasttx].wtx_cmdlen |=
8675 htole32(WTX_CMD_EOP | WTX_CMD_RS);
8676
8677 /*
8678 * If VLANs are enabled and the packet has a VLAN tag, set
8679 * up the descriptor to encapsulate the packet for us.
8680 *
8681 * This is only valid on the last descriptor of the packet.
8682 */
8683 if (vlan_has_tag(m0)) {
8684 txq->txq_descs[lasttx].wtx_cmdlen |=
8685 htole32(WTX_CMD_VLE);
8686 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
8687 = htole16(vlan_get_tag(m0));
8688 }
8689
8690 txs->txs_lastdesc = lasttx;
8691
8692 DPRINTF(sc, WM_DEBUG_TX,
8693 ("%s: TX: desc %d: cmdlen 0x%08x\n",
8694 device_xname(sc->sc_dev),
8695 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8696
8697 /* Sync the descriptors we're using. */
8698 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8699 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8700
8701 /* Give the packet to the chip. */
8702 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8703
8704 DPRINTF(sc, WM_DEBUG_TX,
8705 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8706
8707 DPRINTF(sc, WM_DEBUG_TX,
8708 ("%s: TX: finished transmitting packet, job %d\n",
8709 device_xname(sc->sc_dev), txq->txq_snext));
8710
8711 /* Advance the tx pointer. */
8712 txq->txq_free -= txs->txs_ndesc;
8713 txq->txq_next = nexttx;
8714
8715 txq->txq_sfree--;
8716 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8717
8718 /* Pass the packet to any BPF listeners. */
8719 bpf_mtap(ifp, m0, BPF_D_OUT);
8720 }
8721
8722 if (m0 != NULL) {
8723 txq->txq_flags |= WM_TXQ_NO_SPACE;
8724 WM_Q_EVCNT_INCR(txq, descdrop);
8725 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8726 __func__));
8727 m_freem(m0);
8728 }
8729
8730 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8731 /* No more slots; notify upper layer. */
8732 txq->txq_flags |= WM_TXQ_NO_SPACE;
8733 }
8734
8735 if (txq->txq_free != ofree) {
8736 /* Set a watchdog timer in case the chip flakes out. */
8737 txq->txq_lastsent = time_uptime;
8738 txq->txq_sending = true;
8739 }
8740 }
8741
8742 /*
8743 * wm_nq_tx_offload:
8744 *
8745 * Set up TCP/IP checksumming parameters for the
8746 * specified packet, for NEWQUEUE devices
8747 */
8748 static void
8749 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8750 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
8751 {
8752 struct mbuf *m0 = txs->txs_mbuf;
8753 uint32_t vl_len, mssidx, cmdc;
8754 struct ether_header *eh;
8755 int offset, iphl;
8756
8757 /*
8758 * XXX It would be nice if the mbuf pkthdr had offset
8759 * fields for the protocol headers.
8760 */
8761 *cmdlenp = 0;
8762 *fieldsp = 0;
8763
8764 eh = mtod(m0, struct ether_header *);
8765 switch (htons(eh->ether_type)) {
8766 case ETHERTYPE_IP:
8767 case ETHERTYPE_IPV6:
8768 offset = ETHER_HDR_LEN;
8769 break;
8770
8771 case ETHERTYPE_VLAN:
8772 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8773 break;
8774
8775 default:
8776 /* Don't support this protocol or encapsulation. */
8777 *do_csum = false;
8778 return;
8779 }
8780 *do_csum = true;
8781 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
8782 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
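
	/*
	 * vl_len packs the MAC header length, IP header length and the
	 * VLAN tag into the context descriptor's single VLLEN word.
	 */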
8783
8784 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
8785 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
8786
8787 if ((m0->m_pkthdr.csum_flags &
8788 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8789 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8790 } else {
8791 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8792 }
8793 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
8794 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
8795
8796 if (vlan_has_tag(m0)) {
8797 vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
8798 << NQTXC_VLLEN_VLAN_SHIFT);
8799 *cmdlenp |= NQTX_CMD_VLE;
8800 }
8801
8802 mssidx = 0;
8803
8804 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8805 int hlen = offset + iphl;
8806 int tcp_hlen;
8807 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8808
8809 if (__predict_false(m0->m_len <
8810 (hlen + sizeof(struct tcphdr)))) {
8811 /*
8812 * TCP/IP headers are not in the first mbuf; we need
8813 * to do this the slow and painful way. Let's just
8814 * hope this doesn't happen very often.
8815 */
8816 struct tcphdr th;
8817
8818 WM_Q_EVCNT_INCR(txq, tsopain);
8819
8820 m_copydata(m0, hlen, sizeof(th), &th);
8821 if (v4) {
8822 struct ip ip;
8823
8824 m_copydata(m0, offset, sizeof(ip), &ip);
8825 ip.ip_len = 0;
8826 m_copyback(m0,
8827 offset + offsetof(struct ip, ip_len),
8828 sizeof(ip.ip_len), &ip.ip_len);
8829 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8830 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8831 } else {
8832 struct ip6_hdr ip6;
8833
8834 m_copydata(m0, offset, sizeof(ip6), &ip6);
8835 ip6.ip6_plen = 0;
8836 m_copyback(m0,
8837 offset + offsetof(struct ip6_hdr, ip6_plen),
8838 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8839 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8840 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8841 }
8842 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8843 sizeof(th.th_sum), &th.th_sum);
8844
8845 tcp_hlen = th.th_off << 2;
8846 } else {
8847 /*
8848 * TCP/IP headers are in the first mbuf; we can do
8849 * this the easy way.
8850 */
8851 struct tcphdr *th;
8852
8853 if (v4) {
8854 struct ip *ip =
8855 (void *)(mtod(m0, char *) + offset);
8856 th = (void *)(mtod(m0, char *) + hlen);
8857
8858 ip->ip_len = 0;
8859 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8860 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8861 } else {
8862 struct ip6_hdr *ip6 =
8863 (void *)(mtod(m0, char *) + offset);
8864 th = (void *)(mtod(m0, char *) + hlen);
8865
8866 ip6->ip6_plen = 0;
8867 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8868 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8869 }
8870 tcp_hlen = th->th_off << 2;
8871 }
8872 hlen += tcp_hlen;
8873 *cmdlenp |= NQTX_CMD_TSE;
8874
8875 if (v4) {
8876 WM_Q_EVCNT_INCR(txq, tso);
8877 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
8878 } else {
8879 WM_Q_EVCNT_INCR(txq, tso6);
8880 *fieldsp |= NQTXD_FIELDS_TUXSM;
8881 }
8882 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
8883 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8884 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
8885 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
8886 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
8887 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
8888 } else {
8889 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
8890 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8891 }
8892
8893 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
8894 *fieldsp |= NQTXD_FIELDS_IXSM;
8895 cmdc |= NQTXC_CMD_IP4;
8896 }
8897
8898 if (m0->m_pkthdr.csum_flags &
8899 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
8900 WM_Q_EVCNT_INCR(txq, tusum);
8901 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
8902 cmdc |= NQTXC_CMD_TCP;
8903 else
8904 cmdc |= NQTXC_CMD_UDP;
8905
8906 cmdc |= NQTXC_CMD_IP4;
8907 *fieldsp |= NQTXD_FIELDS_TUXSM;
8908 }
8909 if (m0->m_pkthdr.csum_flags &
8910 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
8911 WM_Q_EVCNT_INCR(txq, tusum6);
8912 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
8913 cmdc |= NQTXC_CMD_TCP;
8914 else
8915 cmdc |= NQTXC_CMD_UDP;
8916
8917 cmdc |= NQTXC_CMD_IP6;
8918 *fieldsp |= NQTXD_FIELDS_TUXSM;
8919 }
8920
8921	/*
8922	 * On NEWQUEUE controllers, that is 82575, 82576, 82580, I350,
8923	 * I354, I210 and I211, we don't have to write a context
8924	 * descriptor for every packet; writing one per Tx queue would
8925	 * be enough for these controllers.
8926	 * Writing a context descriptor for every packet adds some
8927	 * overhead, but it does not cause problems.
8928	 */
8929 /* Fill in the context descriptor. */
8930 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
8931 htole32(vl_len);
8932 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
8933 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
8934 htole32(cmdc);
8935 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
8936 htole32(mssidx);
8937 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8938 DPRINTF(sc, WM_DEBUG_TX,
8939 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
8940 txq->txq_next, 0, vl_len));
8941 DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
8942 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8943 txs->txs_ndesc++;
8944 }
8945
8946 /*
8947 * wm_nq_start: [ifnet interface function]
8948 *
8949 * Start packet transmission on the interface for NEWQUEUE devices
8950 */
8951 static void
8952 wm_nq_start(struct ifnet *ifp)
8953 {
8954 struct wm_softc *sc = ifp->if_softc;
8955 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8956
8957 KASSERT(if_is_mpsafe(ifp));
8958 /*
8959 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8960 */
8961
8962 mutex_enter(txq->txq_lock);
8963 if (!txq->txq_stopping)
8964 wm_nq_start_locked(ifp);
8965 mutex_exit(txq->txq_lock);
8966 }
8967
8968 static void
8969 wm_nq_start_locked(struct ifnet *ifp)
8970 {
8971 struct wm_softc *sc = ifp->if_softc;
8972 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8973
8974 wm_nq_send_common_locked(ifp, txq, false);
8975 }
8976
8977 static int
8978 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
8979 {
8980 int qid;
8981 struct wm_softc *sc = ifp->if_softc;
8982 struct wm_txqueue *txq;
8983
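	/*
	 * Pick a Tx queue for this packet; wm_select_txqueue()
	 * distributes packets across the queues by the sending CPU.
	 */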
8984 qid = wm_select_txqueue(ifp, m);
8985 txq = &sc->sc_queue[qid].wmq_txq;
8986
8987 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8988 m_freem(m);
8989 WM_Q_EVCNT_INCR(txq, pcqdrop);
8990 return ENOBUFS;
8991 }
8992
8993 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8994 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8995 if (m->m_flags & M_MCAST)
8996 if_statinc_ref(nsr, if_omcasts);
8997 IF_STAT_PUTREF(ifp);
8998
8999	/*
9000	 * This mutex_tryenter() can fail at run time in two ways:
9001	 * (1) contention with the interrupt handler (wm_txrxintr_msix())
9002	 * (2) contention with the deferred if_start softint
9003	 *     (wm_handle_queue())
9004	 * In case (1), the last packet enqueued to txq->txq_interq is
9005	 * dequeued by wm_deferred_start_locked(), so it does not get
9006	 * stuck. In case (2), the last packet enqueued to
9007	 * txq->txq_interq is likewise dequeued by
9008	 * wm_deferred_start_locked(), so it does not get stuck either.
9009	 */
9010 if (mutex_tryenter(txq->txq_lock)) {
9011 if (!txq->txq_stopping)
9012 wm_nq_transmit_locked(ifp, txq);
9013 mutex_exit(txq->txq_lock);
9014 }
9015
9016 return 0;
9017 }
9018
9019 static void
9020 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
9021 {
9022
9023 wm_nq_send_common_locked(ifp, txq, true);
9024 }
9025
9026 static void
9027 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
9028 bool is_transmit)
9029 {
9030 struct wm_softc *sc = ifp->if_softc;
9031 struct mbuf *m0;
9032 struct wm_txsoft *txs;
9033 bus_dmamap_t dmamap;
9034 int error, nexttx, lasttx = -1, seg, segs_needed;
9035 bool do_csum, sent;
9036 bool remap = true;
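	/*
	 * "remap" allows one retry of bus_dmamap_load_mbuf(): on EFBIG
	 * the chain is condensed with m_defrag() and loaded again
	 * (see the retry: label below).
	 */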
9037
9038 KASSERT(mutex_owned(txq->txq_lock));
9039 KASSERT(!txq->txq_stopping);
9040
9041 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
9042 return;
9043
9044 if (__predict_false(wm_linkdown_discard(txq))) {
9045 do {
9046 if (is_transmit)
9047 m0 = pcq_get(txq->txq_interq);
9048 else
9049 IFQ_DEQUEUE(&ifp->if_snd, m0);
9050			/*
9051			 * Count the packet as sent even though it is
9052			 * discarded here because the PHY link is down.
9053			 */
9054 if (m0 != NULL) {
9055 if_statinc(ifp, if_opackets);
9056 m_freem(m0);
9057 }
9058 } while (m0 != NULL);
9059 return;
9060 }
9061
9062 sent = false;
9063
9064 /*
9065 * Loop through the send queue, setting up transmit descriptors
9066 * until we drain the queue, or use up all available transmit
9067 * descriptors.
9068 */
9069 for (;;) {
9070 m0 = NULL;
9071
9072 /* Get a work queue entry. */
9073 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
9074 wm_txeof(txq, UINT_MAX);
9075 if (txq->txq_sfree == 0) {
9076 DPRINTF(sc, WM_DEBUG_TX,
9077 ("%s: TX: no free job descriptors\n",
9078 device_xname(sc->sc_dev)));
9079 WM_Q_EVCNT_INCR(txq, txsstall);
9080 break;
9081 }
9082 }
9083
9084 /* Grab a packet off the queue. */
9085 if (is_transmit)
9086 m0 = pcq_get(txq->txq_interq);
9087 else
9088 IFQ_DEQUEUE(&ifp->if_snd, m0);
9089 if (m0 == NULL)
9090 break;
9091
9092 DPRINTF(sc, WM_DEBUG_TX,
9093 ("%s: TX: have packet to transmit: %p\n",
9094 device_xname(sc->sc_dev), m0));
9095
9096 txs = &txq->txq_soft[txq->txq_snext];
9097 dmamap = txs->txs_dmamap;
9098
9099 /*
9100 * Load the DMA map. If this fails, the packet either
9101 * didn't fit in the allotted number of segments, or we
9102 * were short on resources. For the too-many-segments
9103 * case, we simply report an error and drop the packet,
9104 * since we can't sanely copy a jumbo packet to a single
9105 * buffer.
9106 */
9107 retry:
9108 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
9109 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
9110 if (__predict_false(error)) {
9111 if (error == EFBIG) {
9112 if (remap == true) {
9113 struct mbuf *m;
9114
9115 remap = false;
9116 m = m_defrag(m0, M_NOWAIT);
9117 if (m != NULL) {
9118 WM_Q_EVCNT_INCR(txq, defrag);
9119 m0 = m;
9120 goto retry;
9121 }
9122 }
9123 WM_Q_EVCNT_INCR(txq, toomanyseg);
9124 log(LOG_ERR, "%s: Tx packet consumes too many "
9125 "DMA segments, dropping...\n",
9126 device_xname(sc->sc_dev));
9127 wm_dump_mbuf_chain(sc, m0);
9128 m_freem(m0);
9129 continue;
9130 }
9131 /* Short on resources, just stop for now. */
9132 DPRINTF(sc, WM_DEBUG_TX,
9133 ("%s: TX: dmamap load failed: %d\n",
9134 device_xname(sc->sc_dev), error));
9135 break;
9136 }
9137
9138 segs_needed = dmamap->dm_nsegs;
9139
9140 /*
9141 * Ensure we have enough descriptors free to describe
9142 * the packet. Note, we always reserve one descriptor
9143 * at the end of the ring due to the semantics of the
9144 * TDT register, plus one more in the event we need
9145 * to load offload context.
9146 */
9147 if (segs_needed > txq->txq_free - 2) {
9148 /*
9149 * Not enough free descriptors to transmit this
9150 * packet. We haven't committed anything yet,
9151 * so just unload the DMA map, put the packet
9152			 * back on the queue, and punt. Notify the upper
9153 * layer that there are no more slots left.
9154 */
9155 DPRINTF(sc, WM_DEBUG_TX,
9156 ("%s: TX: need %d (%d) descriptors, have %d\n",
9157 device_xname(sc->sc_dev), dmamap->dm_nsegs,
9158 segs_needed, txq->txq_free - 1));
9159 txq->txq_flags |= WM_TXQ_NO_SPACE;
9160 bus_dmamap_unload(sc->sc_dmat, dmamap);
9161 WM_Q_EVCNT_INCR(txq, txdstall);
9162 break;
9163 }
9164
9165 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
9166
9167 DPRINTF(sc, WM_DEBUG_TX,
9168 ("%s: TX: packet has %d (%d) DMA segments\n",
9169 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
9170
9171 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
9172
9173 /*
9174 * Store a pointer to the packet so that we can free it
9175 * later.
9176 *
9177		 * Initially, we take the number of descriptors the
9178		 * packet uses to be the number of DMA segments. This may be
9179 * incremented by 1 if we do checksum offload (a descriptor
9180 * is used to set the checksum context).
9181 */
9182 txs->txs_mbuf = m0;
9183 txs->txs_firstdesc = txq->txq_next;
9184 txs->txs_ndesc = segs_needed;
9185
9186 /* Set up offload parameters for this packet. */
9187 uint32_t cmdlen, fields, dcmdlen;
9188 if (m0->m_pkthdr.csum_flags &
9189 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
9190 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9191 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
9192 wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
9193 &do_csum);
9194 } else {
9195 do_csum = false;
9196 cmdlen = 0;
9197 fields = 0;
9198 }
9199
9200 /* Sync the DMA map. */
9201 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
9202 BUS_DMASYNC_PREWRITE);
9203
9204 /* Initialize the first transmit descriptor. */
9205 nexttx = txq->txq_next;
9206 if (!do_csum) {
9207 /* Set up a legacy descriptor */
9208 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
9209 dmamap->dm_segs[0].ds_addr);
9210 txq->txq_descs[nexttx].wtx_cmdlen =
9211 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
9212 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
9213 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
9214 if (vlan_has_tag(m0)) {
9215 txq->txq_descs[nexttx].wtx_cmdlen |=
9216 htole32(WTX_CMD_VLE);
9217 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
9218 htole16(vlan_get_tag(m0));
9219 } else
9220 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
9221
9222 dcmdlen = 0;
9223 } else {
9224 /* Set up an advanced data descriptor */
9225 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9226 htole64(dmamap->dm_segs[0].ds_addr);
9227 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
9228 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9229 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
9230 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
9231 htole32(fields);
9232 DPRINTF(sc, WM_DEBUG_TX,
9233 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
9234 device_xname(sc->sc_dev), nexttx,
9235 (uint64_t)dmamap->dm_segs[0].ds_addr));
9236 DPRINTF(sc, WM_DEBUG_TX,
9237 ("\t 0x%08x%08x\n", fields,
9238 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
9239 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
9240 }
9241
9242 lasttx = nexttx;
9243 nexttx = WM_NEXTTX(txq, nexttx);
9244 /*
9245 * Fill in the next descriptors. Legacy or advanced format
9246 * is the same here.
9247 */
9248 for (seg = 1; seg < dmamap->dm_nsegs;
9249 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
9250 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9251 htole64(dmamap->dm_segs[seg].ds_addr);
9252 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9253 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
9254 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
9255 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
9256 lasttx = nexttx;
9257
9258 DPRINTF(sc, WM_DEBUG_TX,
9259 ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
9260 device_xname(sc->sc_dev), nexttx,
9261 (uint64_t)dmamap->dm_segs[seg].ds_addr,
9262 dmamap->dm_segs[seg].ds_len));
9263 }
9264
9265 KASSERT(lasttx != -1);
9266
9267		/*
9268		 * Set up the command byte on the last descriptor of the
9269		 * packet. EOP and RS sit at the same bit positions in the
9270		 * legacy and advanced formats, so the legacy view works.
9271		 */
9272 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
9273 (NQTX_CMD_EOP | NQTX_CMD_RS));
9274 txq->txq_descs[lasttx].wtx_cmdlen |=
9275 htole32(WTX_CMD_EOP | WTX_CMD_RS);
9276
9277 txs->txs_lastdesc = lasttx;
9278
9279 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
9280 device_xname(sc->sc_dev),
9281 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9282
9283 /* Sync the descriptors we're using. */
9284 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9285 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
9286
9287 /* Give the packet to the chip. */
9288 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9289 sent = true;
9290
9291 DPRINTF(sc, WM_DEBUG_TX,
9292 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
9293
9294 DPRINTF(sc, WM_DEBUG_TX,
9295 ("%s: TX: finished transmitting packet, job %d\n",
9296 device_xname(sc->sc_dev), txq->txq_snext));
9297
9298 /* Advance the tx pointer. */
9299 txq->txq_free -= txs->txs_ndesc;
9300 txq->txq_next = nexttx;
9301
9302 txq->txq_sfree--;
9303 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9304
9305 /* Pass the packet to any BPF listeners. */
9306 bpf_mtap(ifp, m0, BPF_D_OUT);
9307 }
9308
9309 if (m0 != NULL) {
9310 txq->txq_flags |= WM_TXQ_NO_SPACE;
9311 WM_Q_EVCNT_INCR(txq, descdrop);
9312 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
9313 __func__));
9314 m_freem(m0);
9315 }
9316
9317 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9318 /* No more slots; notify upper layer. */
9319 txq->txq_flags |= WM_TXQ_NO_SPACE;
9320 }
9321
9322 if (sent) {
9323 /* Set a watchdog timer in case the chip flakes out. */
9324 txq->txq_lastsent = time_uptime;
9325 txq->txq_sending = true;
9326 }
9327 }
9328
9329 static void
9330 wm_deferred_start_locked(struct wm_txqueue *txq)
9331 {
9332 struct wm_softc *sc = txq->txq_sc;
9333 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9334 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
9335 int qid = wmq->wmq_id;
9336
9337 KASSERT(mutex_owned(txq->txq_lock));
9338 KASSERT(!txq->txq_stopping);
9339
9340 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
9341		/* XXX needed for ALTQ or single-CPU systems */
9342 if (qid == 0)
9343 wm_nq_start_locked(ifp);
9344 wm_nq_transmit_locked(ifp, txq);
9345 } else {
9346		/* XXX needed for ALTQ or single-CPU systems */
9347 if (qid == 0)
9348 wm_start_locked(ifp);
9349 wm_transmit_locked(ifp, txq);
9350 }
9351 }
9352
9353 /* Interrupt */
9354
9355 /*
9356 * wm_txeof:
9357 *
9358 * Helper; handle transmit interrupts.
9359 */
9360 static bool
9361 wm_txeof(struct wm_txqueue *txq, u_int limit)
9362 {
9363 struct wm_softc *sc = txq->txq_sc;
9364 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9365 struct wm_txsoft *txs;
9366 int count = 0;
9367 int i;
9368 uint8_t status;
9369 bool more = false;
9370
9371 KASSERT(mutex_owned(txq->txq_lock));
9372
9373 if (txq->txq_stopping)
9374 return false;
9375
9376 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
9377
9378 /*
9379 * Go through the Tx list and free mbufs for those
9380 * frames which have been transmitted.
9381 */
9382 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
9383 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
9384 txs = &txq->txq_soft[i];
9385
9386 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
9387 device_xname(sc->sc_dev), i));
9388
9389 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
9390 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9391
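		/*
		 * If the descriptor-done (DD) bit is not yet set, the
		 * chip still owns this job; re-sync the descriptor for
		 * the next poll and stop here.
		 */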
9392 status =
9393 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
9394 if ((status & WTX_ST_DD) == 0) {
9395 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
9396 BUS_DMASYNC_PREREAD);
9397 break;
9398 }
9399
9400 if (limit-- == 0) {
9401 more = true;
9402 DPRINTF(sc, WM_DEBUG_TX,
9403 ("%s: TX: loop limited, job %d is not processed\n",
9404 device_xname(sc->sc_dev), i));
9405 break;
9406 }
9407
9408 count++;
9409 DPRINTF(sc, WM_DEBUG_TX,
9410 ("%s: TX: job %d done: descs %d..%d\n",
9411 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
9412 txs->txs_lastdesc));
9413
9414 /*
9415 * XXX We should probably be using the statistics
9416 * XXX registers, but I don't know if they exist
9417 * XXX on chips before the i82544.
9418 */
9419
9420 #ifdef WM_EVENT_COUNTERS
9421 if (status & WTX_ST_TU)
9422 WM_Q_EVCNT_INCR(txq, underrun);
9423 #endif /* WM_EVENT_COUNTERS */
9424
9425		/*
9426		 * The documentation for the 82574 and newer says the status
9427		 * field has neither the EC (Excessive Collision) bit nor the
9428		 * LC (Late Collision) bit (both reserved); see the "PCIe GbE
9429		 * Controller Open Source Software Developer's Manual" and
9430		 * the 82574 and newer datasheets.
9431		 *
9432		 * XXX The LC bit has been seen set on I218 even on full-duplex
9433		 * media, so it might have some other, undocumented meaning.
9434		 */
9435
9436 if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
9437 && ((sc->sc_type < WM_T_82574)
9438 || (sc->sc_type == WM_T_80003))) {
9439 if_statinc(ifp, if_oerrors);
9440 if (status & WTX_ST_LC)
9441 log(LOG_WARNING, "%s: late collision\n",
9442 device_xname(sc->sc_dev));
9443 else if (status & WTX_ST_EC) {
9444 if_statadd(ifp, if_collisions,
9445 TX_COLLISION_THRESHOLD + 1);
9446 log(LOG_WARNING, "%s: excessive collisions\n",
9447 device_xname(sc->sc_dev));
9448 }
9449 } else
9450 if_statinc(ifp, if_opackets);
9451
9452 txq->txq_packets++;
9453 txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
9454
9455 txq->txq_free += txs->txs_ndesc;
9456 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
9457 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
9458 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
9459 m_freem(txs->txs_mbuf);
9460 txs->txs_mbuf = NULL;
9461 }
9462
9463 /* Update the dirty transmit buffer pointer. */
9464 txq->txq_sdirty = i;
9465 DPRINTF(sc, WM_DEBUG_TX,
9466 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
9467
9468 if (count != 0)
9469 rnd_add_uint32(&sc->rnd_source, count);
9470
9471 /*
9472 * If there are no more pending transmissions, cancel the watchdog
9473 * timer.
9474 */
9475 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
9476 txq->txq_sending = false;
9477
9478 return more;
9479 }
9480
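/*
 * Rx descriptor accessors.
 *
 * Three Rx descriptor layouts are handled: the legacy format, the
 * 82574 extended format and the NEWQUEUE (82575 and newer) advanced
 * format. The helpers below normalize access to the status, error,
 * VLAN, length and RSS fields.
 */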
9481 static inline uint32_t
9482 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
9483 {
9484 struct wm_softc *sc = rxq->rxq_sc;
9485
9486 if (sc->sc_type == WM_T_82574)
9487 return EXTRXC_STATUS(
9488 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9489 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9490 return NQRXC_STATUS(
9491 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9492 else
9493 return rxq->rxq_descs[idx].wrx_status;
9494 }
9495
9496 static inline uint32_t
9497 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
9498 {
9499 struct wm_softc *sc = rxq->rxq_sc;
9500
9501 if (sc->sc_type == WM_T_82574)
9502 return EXTRXC_ERROR(
9503 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9504 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9505 return NQRXC_ERROR(
9506 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9507 else
9508 return rxq->rxq_descs[idx].wrx_errors;
9509 }
9510
9511 static inline uint16_t
9512 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
9513 {
9514 struct wm_softc *sc = rxq->rxq_sc;
9515
9516 if (sc->sc_type == WM_T_82574)
9517 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
9518 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9519 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
9520 else
9521 return rxq->rxq_descs[idx].wrx_special;
9522 }
9523
9524 static inline int
9525 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
9526 {
9527 struct wm_softc *sc = rxq->rxq_sc;
9528
9529 if (sc->sc_type == WM_T_82574)
9530 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
9531 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9532 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
9533 else
9534 return rxq->rxq_descs[idx].wrx_len;
9535 }
9536
9537 #ifdef WM_DEBUG
9538 static inline uint32_t
9539 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
9540 {
9541 struct wm_softc *sc = rxq->rxq_sc;
9542
9543 if (sc->sc_type == WM_T_82574)
9544 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
9545 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9546 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
9547 else
9548 return 0;
9549 }
9550
9551 static inline uint8_t
9552 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
9553 {
9554 struct wm_softc *sc = rxq->rxq_sc;
9555
9556 if (sc->sc_type == WM_T_82574)
9557 return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
9558 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9559 return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
9560 else
9561 return 0;
9562 }
9563 #endif /* WM_DEBUG */
9564
9565 static inline bool
9566 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
9567 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9568 {
9569
9570 if (sc->sc_type == WM_T_82574)
9571 return (status & ext_bit) != 0;
9572 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9573 return (status & nq_bit) != 0;
9574 else
9575 return (status & legacy_bit) != 0;
9576 }
9577
9578 static inline bool
9579 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
9580 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9581 {
9582
9583 if (sc->sc_type == WM_T_82574)
9584 return (error & ext_bit) != 0;
9585 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9586 return (error & nq_bit) != 0;
9587 else
9588 return (error & legacy_bit) != 0;
9589 }
9590
9591 static inline bool
9592 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
9593 {
9594
9595 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9596 WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
9597 return true;
9598 else
9599 return false;
9600 }
9601
9602 static inline bool
9603 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
9604 {
9605 struct wm_softc *sc = rxq->rxq_sc;
9606
9607 /* XXX missing error bit for newqueue? */
9608 if (wm_rxdesc_is_set_error(sc, errors,
9609 WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
9610 EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
9611 | EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
9612 NQRXC_ERROR_RXE)) {
9613 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
9614 EXTRXC_ERROR_SE, 0))
9615 log(LOG_WARNING, "%s: symbol error\n",
9616 device_xname(sc->sc_dev));
9617 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
9618 EXTRXC_ERROR_SEQ, 0))
9619 log(LOG_WARNING, "%s: receive sequence error\n",
9620 device_xname(sc->sc_dev));
9621 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
9622 EXTRXC_ERROR_CE, 0))
9623 log(LOG_WARNING, "%s: CRC error\n",
9624 device_xname(sc->sc_dev));
9625 return true;
9626 }
9627
9628 return false;
9629 }
9630
9631 static inline bool
9632 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
9633 {
9634 struct wm_softc *sc = rxq->rxq_sc;
9635
9636 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
9637 NQRXC_STATUS_DD)) {
9638 /* We have processed all of the receive descriptors. */
9639 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
9640 return false;
9641 }
9642
9643 return true;
9644 }
9645
9646 static inline bool
9647 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
9648 uint16_t vlantag, struct mbuf *m)
9649 {
9650
9651 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9652 WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
9653 vlan_set_tag(m, le16toh(vlantag));
9654 }
9655
9656 return true;
9657 }
9658
9659 static inline void
9660 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
9661 uint32_t errors, struct mbuf *m)
9662 {
9663 struct wm_softc *sc = rxq->rxq_sc;
9664
9665 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
9666 if (wm_rxdesc_is_set_status(sc, status,
9667 WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
9668 WM_Q_EVCNT_INCR(rxq, ipsum);
9669 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
9670 if (wm_rxdesc_is_set_error(sc, errors,
9671 WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
9672 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
9673 }
9674 if (wm_rxdesc_is_set_status(sc, status,
9675 WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
9676 /*
9677 * Note: we don't know if this was TCP or UDP,
9678 * so we just set both bits, and expect the
9679 * upper layers to deal.
9680 */
9681 WM_Q_EVCNT_INCR(rxq, tusum);
9682 m->m_pkthdr.csum_flags |=
9683 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9684 M_CSUM_TCPv6 | M_CSUM_UDPv6;
9685 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
9686 EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
9687 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
9688 }
9689 }
9690 }
9691
9692 /*
9693 * wm_rxeof:
9694 *
9695 * Helper; handle receive interrupts.
9696 */
9697 static bool
9698 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
9699 {
9700 struct wm_softc *sc = rxq->rxq_sc;
9701 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9702 struct wm_rxsoft *rxs;
9703 struct mbuf *m;
9704 int i, len;
9705 int count = 0;
9706 uint32_t status, errors;
9707 uint16_t vlantag;
9708 bool more = false;
9709
9710 KASSERT(mutex_owned(rxq->rxq_lock));
9711
9712 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
9713 rxs = &rxq->rxq_soft[i];
9714
9715 DPRINTF(sc, WM_DEBUG_RX,
9716 ("%s: RX: checking descriptor %d\n",
9717 device_xname(sc->sc_dev), i));
9718 wm_cdrxsync(rxq, i,
9719 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9720
9721 status = wm_rxdesc_get_status(rxq, i);
9722 errors = wm_rxdesc_get_errors(rxq, i);
9723 len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
9724 vlantag = wm_rxdesc_get_vlantag(rxq, i);
9725 #ifdef WM_DEBUG
9726 uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
9727 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
9728 #endif
9729
9730 if (!wm_rxdesc_dd(rxq, i, status))
9731 break;
9732
9733 if (limit-- == 0) {
9734 more = true;
9735 DPRINTF(sc, WM_DEBUG_RX,
9736 ("%s: RX: loop limited, descriptor %d is not processed\n",
9737 device_xname(sc->sc_dev), i));
9738 break;
9739 }
9740
9741 count++;
9742 if (__predict_false(rxq->rxq_discard)) {
9743 DPRINTF(sc, WM_DEBUG_RX,
9744 ("%s: RX: discarding contents of descriptor %d\n",
9745 device_xname(sc->sc_dev), i));
9746 wm_init_rxdesc(rxq, i);
9747 if (wm_rxdesc_is_eop(rxq, status)) {
9748 /* Reset our state. */
9749 DPRINTF(sc, WM_DEBUG_RX,
9750 ("%s: RX: resetting rxdiscard -> 0\n",
9751 device_xname(sc->sc_dev)));
9752 rxq->rxq_discard = 0;
9753 }
9754 continue;
9755 }
9756
9757 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9758 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
9759
9760 m = rxs->rxs_mbuf;
9761
9762 /*
9763 * Add a new receive buffer to the ring, unless of
9764 * course the length is zero. Treat the latter as a
9765 * failed mapping.
9766 */
9767 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
9768 /*
9769 * Failed, throw away what we've done so
9770 * far, and discard the rest of the packet.
9771 */
9772 if_statinc(ifp, if_ierrors);
9773 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9774 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
9775 wm_init_rxdesc(rxq, i);
9776 if (!wm_rxdesc_is_eop(rxq, status))
9777 rxq->rxq_discard = 1;
9778 if (rxq->rxq_head != NULL)
9779 m_freem(rxq->rxq_head);
9780 WM_RXCHAIN_RESET(rxq);
9781 DPRINTF(sc, WM_DEBUG_RX,
9782 ("%s: RX: Rx buffer allocation failed, "
9783 "dropping packet%s\n", device_xname(sc->sc_dev),
9784 rxq->rxq_discard ? " (discard)" : ""));
9785 continue;
9786 }
9787
9788 m->m_len = len;
9789 rxq->rxq_len += len;
9790 DPRINTF(sc, WM_DEBUG_RX,
9791 ("%s: RX: buffer at %p len %d\n",
9792 device_xname(sc->sc_dev), m->m_data, len));
9793
9794 /* If this is not the end of the packet, keep looking. */
9795 if (!wm_rxdesc_is_eop(rxq, status)) {
9796 WM_RXCHAIN_LINK(rxq, m);
9797 DPRINTF(sc, WM_DEBUG_RX,
9798 ("%s: RX: not yet EOP, rxlen -> %d\n",
9799 device_xname(sc->sc_dev), rxq->rxq_len));
9800 continue;
9801 }
9802
9803		/*
9804		 * Okay, we have the entire packet now. The chip includes
9805		 * the FCS (not all chips can be configured to strip it),
9806		 * so we normally need to trim it. The I35[04] and I21[01]
9807		 * are exceptions: due to an erratum, the RCTL_SECRC bit in
9808		 * the RCTL register is always set there, so we don't trim.
9809		 * PCH2 and newer chips also omit the FCS when jumbo frames
9810		 * are used, to work around an erratum. We may need to
9811		 * adjust the length of the previous mbuf in the chain if
9812		 * the current mbuf is too short.
9813		 */
9814 if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
9815 if (m->m_len < ETHER_CRC_LEN) {
9816 rxq->rxq_tail->m_len
9817 -= (ETHER_CRC_LEN - m->m_len);
9818 m->m_len = 0;
9819 } else
9820 m->m_len -= ETHER_CRC_LEN;
9821 len = rxq->rxq_len - ETHER_CRC_LEN;
9822 } else
9823 len = rxq->rxq_len;
9824
9825 WM_RXCHAIN_LINK(rxq, m);
9826
9827 *rxq->rxq_tailp = NULL;
9828 m = rxq->rxq_head;
9829
9830 WM_RXCHAIN_RESET(rxq);
9831
9832 DPRINTF(sc, WM_DEBUG_RX,
9833 ("%s: RX: have entire packet, len -> %d\n",
9834 device_xname(sc->sc_dev), len));
9835
9836 /* If an error occurred, update stats and drop the packet. */
9837 if (wm_rxdesc_has_errors(rxq, errors)) {
9838 m_freem(m);
9839 continue;
9840 }
9841
9842 /* No errors. Receive the packet. */
9843 m_set_rcvif(m, ifp);
9844 m->m_pkthdr.len = len;
9845		/*
9846		 * TODO
9847		 * Save the RSS hash and RSS type in this mbuf.
9848		 */
9849 DPRINTF(sc, WM_DEBUG_RX,
9850 ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
9851 device_xname(sc->sc_dev), rsstype, rsshash));
9852
9853 /*
9854 * If VLANs are enabled, VLAN packets have been unwrapped
9855 * for us. Associate the tag with the packet.
9856 */
9857 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
9858 continue;
9859
9860 /* Set up checksum info for this packet. */
9861 wm_rxdesc_ensure_checksum(rxq, status, errors, m);
9862
9863 rxq->rxq_packets++;
9864 rxq->rxq_bytes += len;
9865 /* Pass it on. */
9866 if_percpuq_enqueue(sc->sc_ipq, m);
9867
9868 if (rxq->rxq_stopping)
9869 break;
9870 }
9871 rxq->rxq_ptr = i;
9872
9873 if (count != 0)
9874 rnd_add_uint32(&sc->rnd_source, count);
9875
9876 DPRINTF(sc, WM_DEBUG_RX,
9877 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
9878
9879 return more;
9880 }
9881
9882 /*
9883 * wm_linkintr_gmii:
9884 *
9885 * Helper; handle link interrupts for GMII.
9886 */
9887 static void
9888 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
9889 {
9890 device_t dev = sc->sc_dev;
9891 uint32_t status, reg;
9892 bool link;
9893 int rv;
9894
9895 KASSERT(mutex_owned(sc->sc_core_lock));
9896
9897 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
9898 __func__));
9899
9900 if ((icr & ICR_LSC) == 0) {
9901 if (icr & ICR_RXSEQ)
9902 DPRINTF(sc, WM_DEBUG_LINK,
9903			    ("%s: LINK: Receive sequence error\n",
9904 device_xname(dev)));
9905 return;
9906 }
9907
9908 /* Link status changed */
9909 status = CSR_READ(sc, WMREG_STATUS);
9910 link = status & STATUS_LU;
9911 if (link) {
9912 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9913 device_xname(dev),
9914 (status & STATUS_FD) ? "FDX" : "HDX"));
9915 if (wm_phy_need_linkdown_discard(sc)) {
9916 DPRINTF(sc, WM_DEBUG_LINK,
9917 ("%s: linkintr: Clear linkdown discard flag\n",
9918 device_xname(dev)));
9919 wm_clear_linkdown_discard(sc);
9920 }
9921 } else {
9922 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9923 device_xname(dev)));
9924 if (wm_phy_need_linkdown_discard(sc)) {
9925 DPRINTF(sc, WM_DEBUG_LINK,
9926 ("%s: linkintr: Set linkdown discard flag\n",
9927 device_xname(dev)));
9928 wm_set_linkdown_discard(sc);
9929 }
9930 }
9931 if ((sc->sc_type == WM_T_ICH8) && (link == false))
9932 wm_gig_downshift_workaround_ich8lan(sc);
9933
9934 if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
9935 wm_kmrn_lock_loss_workaround_ich8lan(sc);
9936
9937 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
9938 device_xname(dev)));
9939 mii_pollstat(&sc->sc_mii);
9940 if (sc->sc_type == WM_T_82543) {
9941 int miistatus, active;
9942
9943			/*
9944			 * With the 82543, we need to force the MAC's
9945			 * speed and duplex to match the PHY's speed and
9946			 * duplex configuration.
9947			 */
9948 miistatus = sc->sc_mii.mii_media_status;
9949
9950 if (miistatus & IFM_ACTIVE) {
9951 active = sc->sc_mii.mii_media_active;
9952 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9953 switch (IFM_SUBTYPE(active)) {
9954 case IFM_10_T:
9955 sc->sc_ctrl |= CTRL_SPEED_10;
9956 break;
9957 case IFM_100_TX:
9958 sc->sc_ctrl |= CTRL_SPEED_100;
9959 break;
9960 case IFM_1000_T:
9961 sc->sc_ctrl |= CTRL_SPEED_1000;
9962 break;
9963 default:
9964 /*
9965 * Fiber?
9966					 * Should not enter here.
9967 */
9968 device_printf(dev, "unknown media (%x)\n",
9969 active);
9970 break;
9971 }
9972 if (active & IFM_FDX)
9973 sc->sc_ctrl |= CTRL_FD;
9974 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9975 }
9976 } else if (sc->sc_type == WM_T_PCH) {
9977 wm_k1_gig_workaround_hv(sc,
9978 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9979 }
9980
9981 /*
9982 * When connected at 10Mbps half-duplex, some parts are excessively
9983 * aggressive resulting in many collisions. To avoid this, increase
9984 * the IPG and reduce Rx latency in the PHY.
9985 */
9986 if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
9987 && link) {
9988 uint32_t tipg_reg;
9989 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
9990 bool fdx;
9991 uint16_t emi_addr, emi_val;
9992
9993 tipg_reg = CSR_READ(sc, WMREG_TIPG);
9994 tipg_reg &= ~TIPG_IPGT_MASK;
9995 fdx = status & STATUS_FD;
9996
9997 if (!fdx && (speed == STATUS_SPEED_10)) {
9998 tipg_reg |= 0xff;
9999 /* Reduce Rx latency in analog PHY */
10000 emi_val = 0;
10001 } else if ((sc->sc_type >= WM_T_PCH_SPT) &&
10002 fdx && speed != STATUS_SPEED_1000) {
10003 tipg_reg |= 0xc;
10004 emi_val = 1;
10005 } else {
10006 /* Roll back the default values */
10007 tipg_reg |= 0x08;
10008 emi_val = 1;
10009 }
10010
10011 CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
10012
10013 rv = sc->phy.acquire(sc);
10014 if (rv)
10015 return;
10016
10017 if (sc->sc_type == WM_T_PCH2)
10018 emi_addr = I82579_RX_CONFIG;
10019 else
10020 emi_addr = I217_RX_CONFIG;
10021 rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
10022
10023 if (sc->sc_type >= WM_T_PCH_LPT) {
10024 uint16_t phy_reg;
10025
10026 sc->phy.readreg_locked(dev, 2,
10027 I217_PLL_CLOCK_GATE_REG, &phy_reg);
10028 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
10029 if (speed == STATUS_SPEED_100
10030 || speed == STATUS_SPEED_10)
10031 phy_reg |= 0x3e8;
10032 else
10033 phy_reg |= 0xfa;
10034 sc->phy.writereg_locked(dev, 2,
10035 I217_PLL_CLOCK_GATE_REG, phy_reg);
10036
10037 if (speed == STATUS_SPEED_1000) {
10038 sc->phy.readreg_locked(dev, 2,
10039 HV_PM_CTRL, &phy_reg);
10040
10041 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
10042
10043 sc->phy.writereg_locked(dev, 2,
10044 HV_PM_CTRL, phy_reg);
10045 }
10046 }
10047 sc->phy.release(sc);
10048
10049 if (rv)
10050 return;
10051
10052 if (sc->sc_type >= WM_T_PCH_SPT) {
10053 uint16_t data, ptr_gap;
10054
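			/*
			 * Adjust the pointer gap field of the
			 * (undocumented, hence I82579_UNKNOWN1) register:
			 * at 1 Gb/s make sure the gap is at least 0x18,
			 * otherwise write a fixed value.
			 */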
10055 if (speed == STATUS_SPEED_1000) {
10056 rv = sc->phy.acquire(sc);
10057 if (rv)
10058 return;
10059
10060 rv = sc->phy.readreg_locked(dev, 2,
10061 I82579_UNKNOWN1, &data);
10062 if (rv) {
10063 sc->phy.release(sc);
10064 return;
10065 }
10066
10067 ptr_gap = (data & (0x3ff << 2)) >> 2;
10068 if (ptr_gap < 0x18) {
10069 data &= ~(0x3ff << 2);
10070 data |= (0x18 << 2);
10071 rv = sc->phy.writereg_locked(dev,
10072 2, I82579_UNKNOWN1, data);
10073 }
10074 sc->phy.release(sc);
10075 if (rv)
10076 return;
10077 } else {
10078 rv = sc->phy.acquire(sc);
10079 if (rv)
10080 return;
10081
10082 rv = sc->phy.writereg_locked(dev, 2,
10083 I82579_UNKNOWN1, 0xc023);
10084 sc->phy.release(sc);
10085 if (rv)
10086 return;
10087
10088 }
10089 }
10090 }
10091
10092 /*
10093	 * I217 packet loss issue:
10094	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
10095	 * on power up.
10096	 * Set the Beacon Duration for the I217 to 8 usec.
10097 */
10098 if (sc->sc_type >= WM_T_PCH_LPT) {
10099 reg = CSR_READ(sc, WMREG_FEXTNVM4);
10100 reg &= ~FEXTNVM4_BEACON_DURATION;
10101 reg |= FEXTNVM4_BEACON_DURATION_8US;
10102 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
10103 }
10104
10105 /* Work-around I218 hang issue */
10106 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
10107 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
10108 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
10109 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
10110 wm_k1_workaround_lpt_lp(sc, link);
10111
10112 if (sc->sc_type >= WM_T_PCH_LPT) {
10113 /*
10114 * Set platform power management values for Latency
10115 * Tolerance Reporting (LTR)
10116 */
10117 wm_platform_pm_pch_lpt(sc,
10118 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
10119 }
10120
10121 /* Clear link partner's EEE ability */
10122 sc->eee_lp_ability = 0;
10123
10124 /* FEXTNVM6 K1-off workaround */
10125 if (sc->sc_type == WM_T_PCH_SPT) {
10126 reg = CSR_READ(sc, WMREG_FEXTNVM6);
10127 if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
10128 reg |= FEXTNVM6_K1_OFF_ENABLE;
10129 else
10130 reg &= ~FEXTNVM6_K1_OFF_ENABLE;
10131 CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
10132 }
10133
10134 if (!link)
10135 return;
10136
10137 switch (sc->sc_type) {
10138 case WM_T_PCH2:
10139 wm_k1_workaround_lv(sc);
10140 /* FALLTHROUGH */
10141 case WM_T_PCH:
10142 if (sc->sc_phytype == WMPHY_82578)
10143 wm_link_stall_workaround_hv(sc);
10144 break;
10145 default:
10146 break;
10147 }
10148
10149 /* Enable/Disable EEE after link up */
10150 if (sc->sc_phytype > WMPHY_82579)
10151 wm_set_eee_pchlan(sc);
10152 }
10153
10154 /*
10155 * wm_linkintr_tbi:
10156 *
10157 * Helper; handle link interrupts for TBI mode.
10158 */
10159 static void
10160 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
10161 {
10162 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10163 uint32_t status;
10164
10165 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10166 __func__));
10167
10168 status = CSR_READ(sc, WMREG_STATUS);
10169 if (icr & ICR_LSC) {
10170 wm_check_for_link(sc);
10171 if (status & STATUS_LU) {
10172 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
10173 device_xname(sc->sc_dev),
10174 (status & STATUS_FD) ? "FDX" : "HDX"));
10175 /*
10176 * NOTE: CTRL will update TFCE and RFCE automatically,
10177 * so we should update sc->sc_ctrl
10178 */
10179
10180 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
10181 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10182 sc->sc_fcrtl &= ~FCRTL_XONE;
10183 if (status & STATUS_FD)
10184 sc->sc_tctl |=
10185 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10186 else
10187 sc->sc_tctl |=
10188 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10189 if (sc->sc_ctrl & CTRL_TFCE)
10190 sc->sc_fcrtl |= FCRTL_XONE;
10191 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10192 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
10193 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
10194 sc->sc_tbi_linkup = 1;
10195 if_link_state_change(ifp, LINK_STATE_UP);
10196 } else {
10197 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10198 device_xname(sc->sc_dev)));
10199 sc->sc_tbi_linkup = 0;
10200 if_link_state_change(ifp, LINK_STATE_DOWN);
10201 }
10202 /* Update LED */
10203 wm_tbi_serdes_set_linkled(sc);
10204 } else if (icr & ICR_RXSEQ)
10205 DPRINTF(sc, WM_DEBUG_LINK,
10206 ("%s: LINK: Receive sequence error\n",
10207 device_xname(sc->sc_dev)));
10208 }
10209
10210 /*
10211 * wm_linkintr_serdes:
10212 *
10213  *	Helper; handle link interrupts for SERDES mode.
10214 */
10215 static void
10216 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
10217 {
10218 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10219 struct mii_data *mii = &sc->sc_mii;
10220 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10221 uint32_t pcs_adv, pcs_lpab, reg;
10222
10223 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10224 __func__));
10225
10226 if (icr & ICR_LSC) {
10227 /* Check PCS */
10228 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10229 if ((reg & PCS_LSTS_LINKOK) != 0) {
10230 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
10231 device_xname(sc->sc_dev)));
10232 mii->mii_media_status |= IFM_ACTIVE;
10233 sc->sc_tbi_linkup = 1;
10234 if_link_state_change(ifp, LINK_STATE_UP);
10235 } else {
10236 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10237 device_xname(sc->sc_dev)));
10238 mii->mii_media_status |= IFM_NONE;
10239 sc->sc_tbi_linkup = 0;
10240 if_link_state_change(ifp, LINK_STATE_DOWN);
10241 wm_tbi_serdes_set_linkled(sc);
10242 return;
10243 }
10244 mii->mii_media_active |= IFM_1000_SX;
10245 if ((reg & PCS_LSTS_FDX) != 0)
10246 mii->mii_media_active |= IFM_FDX;
10247 else
10248 mii->mii_media_active |= IFM_HDX;
10249 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
10250 /* Check flow */
10251 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10252 if ((reg & PCS_LSTS_AN_COMP) == 0) {
10253 DPRINTF(sc, WM_DEBUG_LINK,
10254 ("XXX LINKOK but not ACOMP\n"));
10255 return;
10256 }
10257 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
10258 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
10259 DPRINTF(sc, WM_DEBUG_LINK,
10260 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
10261 if ((pcs_adv & TXCW_SYM_PAUSE)
10262 && (pcs_lpab & TXCW_SYM_PAUSE)) {
10263 mii->mii_media_active |= IFM_FLOW
10264 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
10265 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
10266 && (pcs_adv & TXCW_ASYM_PAUSE)
10267 && (pcs_lpab & TXCW_SYM_PAUSE)
10268 && (pcs_lpab & TXCW_ASYM_PAUSE))
10269 mii->mii_media_active |= IFM_FLOW
10270 | IFM_ETH_TXPAUSE;
10271 else if ((pcs_adv & TXCW_SYM_PAUSE)
10272 && (pcs_adv & TXCW_ASYM_PAUSE)
10273 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
10274 && (pcs_lpab & TXCW_ASYM_PAUSE))
10275 mii->mii_media_active |= IFM_FLOW
10276 | IFM_ETH_RXPAUSE;
10277 }
10278 /* Update LED */
10279 wm_tbi_serdes_set_linkled(sc);
10280 } else
10281 DPRINTF(sc, WM_DEBUG_LINK,
10282 ("%s: LINK: Receive sequence error\n",
10283 device_xname(sc->sc_dev)));
10284 }
10285
10286 /*
10287 * wm_linkintr:
10288 *
10289 * Helper; handle link interrupts.
10290 */
10291 static void
10292 wm_linkintr(struct wm_softc *sc, uint32_t icr)
10293 {
10294
10295 KASSERT(mutex_owned(sc->sc_core_lock));
10296
10297 if (sc->sc_flags & WM_F_HAS_MII)
10298 wm_linkintr_gmii(sc, icr);
10299 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
10300 && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
10301 wm_linkintr_serdes(sc, icr);
10302 else
10303 wm_linkintr_tbi(sc, icr);
10304 }
10305
10306
10307 static inline void
10308 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
10309 {
10310
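	/*
	 * Defer the rest of the Tx/Rx processing either to a workqueue
	 * or to a softint, depending on the selected mode.
	 */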
10311 if (wmq->wmq_txrx_use_workqueue)
10312 workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
10313 else
10314 softint_schedule(wmq->wmq_si);
10315 }
10316
10317 static inline void
10318 wm_legacy_intr_disable(struct wm_softc *sc)
10319 {
10320
10321 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
10322 }
10323
10324 static inline void
10325 wm_legacy_intr_enable(struct wm_softc *sc)
10326 {
10327
10328 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
10329 }
10330
10331 /*
10332 * wm_intr_legacy:
10333 *
10334 * Interrupt service routine for INTx and MSI.
10335 */
10336 static int
10337 wm_intr_legacy(void *arg)
10338 {
10339 struct wm_softc *sc = arg;
10340 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10341 struct wm_queue *wmq = &sc->sc_queue[0];
10342 struct wm_txqueue *txq = &wmq->wmq_txq;
10343 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10344 u_int txlimit = sc->sc_tx_intr_process_limit;
10345 u_int rxlimit = sc->sc_rx_intr_process_limit;
10346 uint32_t icr, rndval = 0;
10347 bool more = false;
10348
10349 icr = CSR_READ(sc, WMREG_ICR);
10350 if ((icr & sc->sc_icr) == 0)
10351 return 0;
10352
10353 DPRINTF(sc, WM_DEBUG_TX,
10354 ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
10355 if (rndval == 0)
10356 rndval = icr;
10357
10358 mutex_enter(txq->txq_lock);
10359
10360 if (txq->txq_stopping) {
10361 mutex_exit(txq->txq_lock);
10362 return 1;
10363 }
10364
10365 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10366 if (icr & ICR_TXDW) {
10367 DPRINTF(sc, WM_DEBUG_TX,
10368 ("%s: TX: got TXDW interrupt\n",
10369 device_xname(sc->sc_dev)));
10370 WM_Q_EVCNT_INCR(txq, txdw);
10371 }
10372 #endif
10373 if (txlimit > 0) {
10374 more |= wm_txeof(txq, txlimit);
10375 if (!IF_IS_EMPTY(&ifp->if_snd))
10376 more = true;
10377 } else
10378 more = true;
10379 mutex_exit(txq->txq_lock);
10380
10381 mutex_enter(rxq->rxq_lock);
10382
10383 if (rxq->rxq_stopping) {
10384 mutex_exit(rxq->rxq_lock);
10385 return 1;
10386 }
10387
10388 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10389 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
10390 DPRINTF(sc, WM_DEBUG_RX,
10391 ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
10392 device_xname(sc->sc_dev),
10393 icr & (ICR_RXDMT0 | ICR_RXT0)));
10394 WM_Q_EVCNT_INCR(rxq, intr);
10395 }
10396 #endif
10397 if (rxlimit > 0) {
10398 /*
10399 * wm_rxeof() does *not* call upper layer functions directly,
10400		 * since if_percpuq_enqueue() just calls softint_schedule().
10401		 * So we can call wm_rxeof() in interrupt context.
10402		 */
10403		more |= wm_rxeof(rxq, rxlimit);
10404 } else
10405 more = true;
10406
10407 mutex_exit(rxq->rxq_lock);
10408
10409 mutex_enter(sc->sc_core_lock);
10410
10411 if (sc->sc_core_stopping) {
10412 mutex_exit(sc->sc_core_lock);
10413 return 1;
10414 }
10415
10416 if (icr & (ICR_LSC | ICR_RXSEQ)) {
10417 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10418 wm_linkintr(sc, icr);
10419 }
10420 if ((icr & ICR_GPI(0)) != 0)
10421 device_printf(sc->sc_dev, "got module interrupt\n");
10422
10423 mutex_exit(sc->sc_core_lock);
10424
10425 if (icr & ICR_RXO) {
10426 #if defined(WM_DEBUG)
10427 log(LOG_WARNING, "%s: Receive overrun\n",
10428 device_xname(sc->sc_dev));
10429 #endif /* defined(WM_DEBUG) */
10430 }
10431
10432 rnd_add_uint32(&sc->rnd_source, rndval);
10433
10434 if (more) {
10435 /* Try to get more packets going. */
10436 wm_legacy_intr_disable(sc);
10437 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10438 wm_sched_handle_queue(sc, wmq);
10439 }
10440
10441 return 1;
10442 }
10443
10444 static inline void
10445 wm_txrxintr_disable(struct wm_queue *wmq)
10446 {
10447 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10448
10449 if (__predict_false(!wm_is_using_msix(sc))) {
10450 wm_legacy_intr_disable(sc);
10451 return;
10452 }
10453
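	/*
	 * 82574 uses per-queue bits in ICR/IMC, 82575 uses the
	 * EITR_*_QUEUE bits in EIMC, and later NEWQUEUE chips use one
	 * EIMC bit per MSI-X vector.
	 */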
10454 if (sc->sc_type == WM_T_82574)
10455 CSR_WRITE(sc, WMREG_IMC,
10456 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
10457 else if (sc->sc_type == WM_T_82575)
10458 CSR_WRITE(sc, WMREG_EIMC,
10459 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10460 else
10461 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
10462 }
10463
10464 static inline void
10465 wm_txrxintr_enable(struct wm_queue *wmq)
10466 {
10467 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10468
10469 wm_itrs_calculate(sc, wmq);
10470
10471 if (__predict_false(!wm_is_using_msix(sc))) {
10472 wm_legacy_intr_enable(sc);
10473 return;
10474 }
10475
10476	/*
10477	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
10478	 * here. It does not matter which of RXQ(0) and RXQ(1) enables
10479	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
10480	 * while its wm_handle_queue(wmq) is running.
10481	 */
10482 if (sc->sc_type == WM_T_82574)
10483 CSR_WRITE(sc, WMREG_IMS,
10484 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
10485 else if (sc->sc_type == WM_T_82575)
10486 CSR_WRITE(sc, WMREG_EIMS,
10487 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10488 else
10489 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
10490 }
10491
10492 static int
10493 wm_txrxintr_msix(void *arg)
10494 {
10495 struct wm_queue *wmq = arg;
10496 struct wm_txqueue *txq = &wmq->wmq_txq;
10497 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10498 struct wm_softc *sc = txq->txq_sc;
10499 u_int txlimit = sc->sc_tx_intr_process_limit;
10500 u_int rxlimit = sc->sc_rx_intr_process_limit;
10501 bool txmore;
10502 bool rxmore;
10503
10504 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
10505
10506 DPRINTF(sc, WM_DEBUG_TX,
10507 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
10508
10509 wm_txrxintr_disable(wmq);
10510
10511 mutex_enter(txq->txq_lock);
10512
10513 if (txq->txq_stopping) {
10514 mutex_exit(txq->txq_lock);
10515 return 1;
10516 }
10517
10518 WM_Q_EVCNT_INCR(txq, txdw);
10519 if (txlimit > 0) {
10520 txmore = wm_txeof(txq, txlimit);
10521 /* wm_deferred start() is done in wm_handle_queue(). */
10522 } else
10523 txmore = true;
10524 mutex_exit(txq->txq_lock);
10525
10526 DPRINTF(sc, WM_DEBUG_RX,
10527 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
10528 mutex_enter(rxq->rxq_lock);
10529
10530 if (rxq->rxq_stopping) {
10531 mutex_exit(rxq->rxq_lock);
10532 return 1;
10533 }
10534
10535 WM_Q_EVCNT_INCR(rxq, intr);
10536 if (rxlimit > 0) {
10537 rxmore = wm_rxeof(rxq, rxlimit);
10538 } else
10539 rxmore = true;
10540 mutex_exit(rxq->rxq_lock);
10541
10542 wm_itrs_writereg(sc, wmq);
10543
10544 if (txmore || rxmore) {
10545 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10546 wm_sched_handle_queue(sc, wmq);
10547 } else
10548 wm_txrxintr_enable(wmq);
10549
10550 return 1;
10551 }
10552
10553 static void
10554 wm_handle_queue(void *arg)
10555 {
10556 struct wm_queue *wmq = arg;
10557 struct wm_txqueue *txq = &wmq->wmq_txq;
10558 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10559 struct wm_softc *sc = txq->txq_sc;
10560 u_int txlimit = sc->sc_tx_process_limit;
10561 u_int rxlimit = sc->sc_rx_process_limit;
10562 bool txmore;
10563 bool rxmore;
10564
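	/*
	 * Softint/workqueue context: use the deferred-context process
	 * limits here rather than the interrupt-context limits.
	 */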
10565 mutex_enter(txq->txq_lock);
10566 if (txq->txq_stopping) {
10567 mutex_exit(txq->txq_lock);
10568 return;
10569 }
10570 txmore = wm_txeof(txq, txlimit);
10571 wm_deferred_start_locked(txq);
10572 mutex_exit(txq->txq_lock);
10573
10574 mutex_enter(rxq->rxq_lock);
10575 if (rxq->rxq_stopping) {
10576 mutex_exit(rxq->rxq_lock);
10577 return;
10578 }
10579 WM_Q_EVCNT_INCR(rxq, defer);
10580 rxmore = wm_rxeof(rxq, rxlimit);
10581 mutex_exit(rxq->rxq_lock);
10582
10583 if (txmore || rxmore) {
10584 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10585 wm_sched_handle_queue(sc, wmq);
10586 } else
10587 wm_txrxintr_enable(wmq);
10588 }
10589
10590 static void
10591 wm_handle_queue_work(struct work *wk, void *context)
10592 {
10593 struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
10594
10595 /*
10596 * "enqueued flag" is not required here.
10597 */
10598 wm_handle_queue(wmq);
10599 }
10600
10601 /*
10602 * wm_linkintr_msix:
10603 *
10604 * Interrupt service routine for link status change for MSI-X.
10605 */
10606 static int
10607 wm_linkintr_msix(void *arg)
10608 {
10609 struct wm_softc *sc = arg;
10610 uint32_t reg;
10611 bool has_rxo;
10612
10613 reg = CSR_READ(sc, WMREG_ICR);
10614 mutex_enter(sc->sc_core_lock);
10615 DPRINTF(sc, WM_DEBUG_LINK,
10616 ("%s: LINK: got link intr. ICR = %08x\n",
10617 device_xname(sc->sc_dev), reg));
10618
10619 if (sc->sc_core_stopping)
10620 goto out;
10621
10622 if ((reg & ICR_LSC) != 0) {
10623 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10624 wm_linkintr(sc, ICR_LSC);
10625 }
10626 if ((reg & ICR_GPI(0)) != 0)
10627 device_printf(sc->sc_dev, "got module interrupt\n");
10628
10629 /*
10630 * XXX 82574 MSI-X mode workaround
10631 *
10632	 * In MSI-X mode, the 82574 signals receive overruns (RXO) on the
10633	 * ICR_OTHER vector, and raises neither the ICR_RXQ(0) nor the
10634	 * ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
10635	 * interrupts by writing WMREG_ICS, to process received packets.
10636 */
10637 if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
10638 #if defined(WM_DEBUG)
10639 log(LOG_WARNING, "%s: Receive overrun\n",
10640 device_xname(sc->sc_dev));
10641 #endif /* defined(WM_DEBUG) */
10642
10643 has_rxo = true;
10644 /*
10645		 * The RXO interrupt fires at a very high rate under heavy
10646		 * receive traffic, so we handle ICR_OTHER in polling mode,
10647		 * like the Tx/Rx interrupts. ICR_OTHER is re-enabled at the
10648		 * end of wm_txrxintr_msix(), which is kicked by both the
10649		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
10650 */
10651 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
10652
10653 CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
10654 }
10655
10656
10657
10658 out:
10659 mutex_exit(sc->sc_core_lock);
10660
10661 if (sc->sc_type == WM_T_82574) {
10662 if (!has_rxo)
10663 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
10664 else
10665 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
10666 } else if (sc->sc_type == WM_T_82575)
10667 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
10668 else
10669 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
10670
10671 return 1;
10672 }
10673
10674 /*
10675 * Media related.
10676 * GMII, SGMII, TBI (and SERDES)
10677 */
10678
10679 /* Common */
10680
10681 /*
10682 * wm_tbi_serdes_set_linkled:
10683 *
10684 * Update the link LED on TBI and SERDES devices.
10685 */
10686 static void
10687 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
10688 {
10689
10690 if (sc->sc_tbi_linkup)
10691 sc->sc_ctrl |= CTRL_SWDPIN(0);
10692 else
10693 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
10694
10695 /* 82540 or newer devices are active low */
10696 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
10697
10698 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10699 }
10700
10701 /* GMII related */
10702
10703 /*
10704 * wm_gmii_reset:
10705 *
10706 * Reset the PHY.
10707 */
10708 static void
10709 wm_gmii_reset(struct wm_softc *sc)
10710 {
10711 uint32_t reg;
10712 int rv;
10713
10714 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10715 device_xname(sc->sc_dev), __func__));
10716
10717 rv = sc->phy.acquire(sc);
10718 if (rv != 0) {
10719 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10720 __func__);
10721 return;
10722 }
10723
10724 switch (sc->sc_type) {
10725 case WM_T_82542_2_0:
10726 case WM_T_82542_2_1:
10727 /* null */
10728 break;
10729 case WM_T_82543:
10730		/*
10731		 * With the 82543, we need to force the MAC's speed and
10732		 * duplex to match the PHY's speed and duplex configuration.
10733		 * In addition, we need to perform a hardware reset on the
10734		 * PHY to take it out of reset.
10735		 */
10736 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10737 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10738
10739 /* The PHY reset pin is active-low. */
10740 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10741 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
10742 CTRL_EXT_SWDPIN(4));
10743 reg |= CTRL_EXT_SWDPIO(4);
10744
10745 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10746 CSR_WRITE_FLUSH(sc);
10747 delay(10*1000);
10748
10749 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
10750 CSR_WRITE_FLUSH(sc);
10751 delay(150);
10752 #if 0
10753 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
10754 #endif
10755 delay(20*1000); /* XXX extra delay to get PHY ID? */
10756 break;
10757 case WM_T_82544: /* Reset 10000us */
10758 case WM_T_82540:
10759 case WM_T_82545:
10760 case WM_T_82545_3:
10761 case WM_T_82546:
10762 case WM_T_82546_3:
10763 case WM_T_82541:
10764 case WM_T_82541_2:
10765 case WM_T_82547:
10766 case WM_T_82547_2:
10767 case WM_T_82571: /* Reset 100us */
10768 case WM_T_82572:
10769 case WM_T_82573:
10770 case WM_T_82574:
10771 case WM_T_82575:
10772 case WM_T_82576:
10773 case WM_T_82580:
10774 case WM_T_I350:
10775 case WM_T_I354:
10776 case WM_T_I210:
10777 case WM_T_I211:
10778 case WM_T_82583:
10779 case WM_T_80003:
10780 /* Generic reset */
10781 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10782 CSR_WRITE_FLUSH(sc);
10783 delay(20000);
10784 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10785 CSR_WRITE_FLUSH(sc);
10786 delay(20000);
10787
10788 if ((sc->sc_type == WM_T_82541)
10789 || (sc->sc_type == WM_T_82541_2)
10790 || (sc->sc_type == WM_T_82547)
10791 || (sc->sc_type == WM_T_82547_2)) {
10792			/* Workarounds for IGP are done in igp_reset() */
10793 /* XXX add code to set LED after phy reset */
10794 }
10795 break;
10796 case WM_T_ICH8:
10797 case WM_T_ICH9:
10798 case WM_T_ICH10:
10799 case WM_T_PCH:
10800 case WM_T_PCH2:
10801 case WM_T_PCH_LPT:
10802 case WM_T_PCH_SPT:
10803 case WM_T_PCH_CNP:
10804 /* Generic reset */
10805 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10806 CSR_WRITE_FLUSH(sc);
10807 delay(100);
10808 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10809 CSR_WRITE_FLUSH(sc);
10810 delay(150);
10811 break;
10812 default:
10813 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
10814 __func__);
10815 break;
10816 }
10817
10818 sc->phy.release(sc);
10819
10820 /* get_cfg_done */
10821 wm_get_cfg_done(sc);
10822
10823 /* Extra setup */
10824 switch (sc->sc_type) {
10825 case WM_T_82542_2_0:
10826 case WM_T_82542_2_1:
10827 case WM_T_82543:
10828 case WM_T_82544:
10829 case WM_T_82540:
10830 case WM_T_82545:
10831 case WM_T_82545_3:
10832 case WM_T_82546:
10833 case WM_T_82546_3:
10834 case WM_T_82541_2:
10835 case WM_T_82547_2:
10836 case WM_T_82571:
10837 case WM_T_82572:
10838 case WM_T_82573:
10839 case WM_T_82574:
10840 case WM_T_82583:
10841 case WM_T_82575:
10842 case WM_T_82576:
10843 case WM_T_82580:
10844 case WM_T_I350:
10845 case WM_T_I354:
10846 case WM_T_I210:
10847 case WM_T_I211:
10848 case WM_T_80003:
10849 /* Null */
10850 break;
10851 case WM_T_82541:
10852 case WM_T_82547:
10853 /* XXX Configure actively LED after PHY reset */
10854 break;
10855 case WM_T_ICH8:
10856 case WM_T_ICH9:
10857 case WM_T_ICH10:
10858 case WM_T_PCH:
10859 case WM_T_PCH2:
10860 case WM_T_PCH_LPT:
10861 case WM_T_PCH_SPT:
10862 case WM_T_PCH_CNP:
10863 wm_phy_post_reset(sc);
10864 break;
10865 default:
10866 panic("%s: unknown type\n", __func__);
10867 break;
10868 }
10869 }
10870
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 * To identify the PHY type, the correct read/write functions must be
 * selected, and selecting them requires the PCI ID or MAC type, without
 * accessing any PHY registers.
 *
 * On the first call of this function, the PHY ID is not yet known, so
 * the PCI ID or MAC type is checked. The list of PCI IDs may not be
 * complete, so the result might be incorrect.
 *
 * On the second call, the PHY OUI and model are used to identify the
 * PHY type. The result may still be imperfect if an entry is missing
 * from the comparison tables, but it is more reliable than the first
 * call.
 *
 * If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
10889 static void
10890 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
10891 uint16_t phy_model)
10892 {
10893 device_t dev = sc->sc_dev;
10894 struct mii_data *mii = &sc->sc_mii;
10895 uint16_t new_phytype = WMPHY_UNKNOWN;
10896 uint16_t doubt_phytype = WMPHY_UNKNOWN;
10897 mii_readreg_t new_readreg;
10898 mii_writereg_t new_writereg;
10899 bool dodiag = true;
10900
10901 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10902 device_xname(sc->sc_dev), __func__));
10903
	/*
	 * A 1000BASE-T SFP uses SGMII, so the PHY type assumed on the first
	 * call is always incorrect. Don't print diagnostic output on the
	 * second call.
	 */
10908 if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
10909 dodiag = false;
10910
10911 if (mii->mii_readreg == NULL) {
10912 /*
10913 * This is the first call of this function. For ICH and PCH
10914 * variants, it's difficult to determine the PHY access method
10915 * by sc_type, so use the PCI product ID for some devices.
10916 */
10917
10918 switch (sc->sc_pcidevid) {
10919 case PCI_PRODUCT_INTEL_PCH_M_LM:
10920 case PCI_PRODUCT_INTEL_PCH_M_LC:
10921 /* 82577 */
10922 new_phytype = WMPHY_82577;
10923 break;
10924 case PCI_PRODUCT_INTEL_PCH_D_DM:
10925 case PCI_PRODUCT_INTEL_PCH_D_DC:
10926 /* 82578 */
10927 new_phytype = WMPHY_82578;
10928 break;
10929 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
10930 case PCI_PRODUCT_INTEL_PCH2_LV_V:
10931 /* 82579 */
10932 new_phytype = WMPHY_82579;
10933 break;
10934 case PCI_PRODUCT_INTEL_82801H_82567V_3:
10935 case PCI_PRODUCT_INTEL_82801I_BM:
10936 case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
10937 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
10938 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
10939 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
10940 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
10941 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
10942 /* ICH8, 9, 10 with 82567 */
10943 new_phytype = WMPHY_BM;
10944 break;
10945 default:
10946 break;
10947 }
10948 } else {
10949 /* It's not the first call. Use PHY OUI and model */
10950 switch (phy_oui) {
10951 case MII_OUI_ATTANSIC: /* atphy(4) */
10952 switch (phy_model) {
10953 case MII_MODEL_ATTANSIC_AR8021:
10954 new_phytype = WMPHY_82578;
10955 break;
10956 default:
10957 break;
10958 }
10959 break;
10960 case MII_OUI_xxMARVELL:
10961 switch (phy_model) {
10962 case MII_MODEL_xxMARVELL_I210:
10963 new_phytype = WMPHY_I210;
10964 break;
10965 case MII_MODEL_xxMARVELL_E1011:
10966 case MII_MODEL_xxMARVELL_E1000_3:
10967 case MII_MODEL_xxMARVELL_E1000_5:
10968 case MII_MODEL_xxMARVELL_E1112:
10969 new_phytype = WMPHY_M88;
10970 break;
10971 case MII_MODEL_xxMARVELL_E1149:
10972 new_phytype = WMPHY_BM;
10973 break;
10974 case MII_MODEL_xxMARVELL_E1111:
10975 case MII_MODEL_xxMARVELL_I347:
10976 case MII_MODEL_xxMARVELL_E1512:
10977 case MII_MODEL_xxMARVELL_E1340M:
10978 case MII_MODEL_xxMARVELL_E1543:
10979 new_phytype = WMPHY_M88;
10980 break;
10981 case MII_MODEL_xxMARVELL_I82563:
10982 new_phytype = WMPHY_GG82563;
10983 break;
10984 default:
10985 break;
10986 }
10987 break;
10988 case MII_OUI_INTEL:
10989 switch (phy_model) {
10990 case MII_MODEL_INTEL_I82577:
10991 new_phytype = WMPHY_82577;
10992 break;
10993 case MII_MODEL_INTEL_I82579:
10994 new_phytype = WMPHY_82579;
10995 break;
10996 case MII_MODEL_INTEL_I217:
10997 new_phytype = WMPHY_I217;
10998 break;
10999 case MII_MODEL_INTEL_I82580:
11000 new_phytype = WMPHY_82580;
11001 break;
11002 case MII_MODEL_INTEL_I350:
11003 new_phytype = WMPHY_I350;
11004 break;
11005 default:
11006 break;
11007 }
11008 break;
11009 case MII_OUI_yyINTEL:
11010 switch (phy_model) {
11011 case MII_MODEL_yyINTEL_I82562G:
11012 case MII_MODEL_yyINTEL_I82562EM:
11013 case MII_MODEL_yyINTEL_I82562ET:
11014 new_phytype = WMPHY_IFE;
11015 break;
11016 case MII_MODEL_yyINTEL_IGP01E1000:
11017 new_phytype = WMPHY_IGP;
11018 break;
11019 case MII_MODEL_yyINTEL_I82566:
11020 new_phytype = WMPHY_IGP_3;
11021 break;
11022 default:
11023 break;
11024 }
11025 break;
11026 default:
11027 break;
11028 }
11029
11030 if (dodiag) {
11031 if (new_phytype == WMPHY_UNKNOWN)
11032 aprint_verbose_dev(dev,
11033 "%s: Unknown PHY model. OUI=%06x, "
11034 "model=%04x\n", __func__, phy_oui,
11035 phy_model);
11036
11037 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11038 && (sc->sc_phytype != new_phytype)) {
				aprint_error_dev(dev, "Previously assumed PHY "
				    "type (%u) was incorrect. PHY type from "
				    "PHY ID = %u\n", sc->sc_phytype,
				    new_phytype);
11042 }
11043 }
11044 }
11045
11046 /* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
11047 if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
11048 /* SGMII */
11049 new_readreg = wm_sgmii_readreg;
11050 new_writereg = wm_sgmii_writereg;
11051 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11052 /* BM2 (phyaddr == 1) */
11053 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11054 && (new_phytype != WMPHY_BM)
11055 && (new_phytype != WMPHY_UNKNOWN))
11056 doubt_phytype = new_phytype;
11057 new_phytype = WMPHY_BM;
11058 new_readreg = wm_gmii_bm_readreg;
11059 new_writereg = wm_gmii_bm_writereg;
11060 } else if (sc->sc_type >= WM_T_PCH) {
11061 /* All PCH* use _hv_ */
11062 new_readreg = wm_gmii_hv_readreg;
11063 new_writereg = wm_gmii_hv_writereg;
11064 } else if (sc->sc_type >= WM_T_ICH8) {
11065 /* non-82567 ICH8, 9 and 10 */
11066 new_readreg = wm_gmii_i82544_readreg;
11067 new_writereg = wm_gmii_i82544_writereg;
11068 } else if (sc->sc_type >= WM_T_80003) {
11069 /* 80003 */
11070 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11071 && (new_phytype != WMPHY_GG82563)
11072 && (new_phytype != WMPHY_UNKNOWN))
11073 doubt_phytype = new_phytype;
11074 new_phytype = WMPHY_GG82563;
11075 new_readreg = wm_gmii_i80003_readreg;
11076 new_writereg = wm_gmii_i80003_writereg;
11077 } else if (sc->sc_type >= WM_T_I210) {
11078 /* I210 and I211 */
11079 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11080 && (new_phytype != WMPHY_I210)
11081 && (new_phytype != WMPHY_UNKNOWN))
11082 doubt_phytype = new_phytype;
11083 new_phytype = WMPHY_I210;
11084 new_readreg = wm_gmii_gs40g_readreg;
11085 new_writereg = wm_gmii_gs40g_writereg;
11086 } else if (sc->sc_type >= WM_T_82580) {
11087 /* 82580, I350 and I354 */
11088 new_readreg = wm_gmii_82580_readreg;
11089 new_writereg = wm_gmii_82580_writereg;
11090 } else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
11092 new_readreg = wm_gmii_i82544_readreg;
11093 new_writereg = wm_gmii_i82544_writereg;
11094 } else {
11095 new_readreg = wm_gmii_i82543_readreg;
11096 new_writereg = wm_gmii_i82543_writereg;
11097 }
11098
11099 if (new_phytype == WMPHY_BM) {
11100 /* All BM use _bm_ */
11101 new_readreg = wm_gmii_bm_readreg;
11102 new_writereg = wm_gmii_bm_writereg;
11103 }
11104 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
11105 /* All PCH* use _hv_ */
11106 new_readreg = wm_gmii_hv_readreg;
11107 new_writereg = wm_gmii_hv_writereg;
11108 }
11109
11110 /* Diag output */
11111 if (dodiag) {
11112 if (doubt_phytype != WMPHY_UNKNOWN)
11113 aprint_error_dev(dev, "Assumed new PHY type was "
11114 "incorrect. old = %u, new = %u\n", sc->sc_phytype,
11115 new_phytype);
11116 else if ((sc->sc_phytype != WMPHY_UNKNOWN)
11117 && (sc->sc_phytype != new_phytype))
			aprint_error_dev(dev, "Previously assumed PHY type "
			    "(%u) was incorrect. New PHY type = %u\n",
			    sc->sc_phytype, new_phytype);
11121
11122 if ((mii->mii_readreg != NULL) &&
11123 (new_phytype == WMPHY_UNKNOWN))
11124 aprint_error_dev(dev, "PHY type is still unknown.\n");
11125
11126 if ((mii->mii_readreg != NULL) &&
11127 (mii->mii_readreg != new_readreg))
11128 aprint_error_dev(dev, "Previously assumed PHY "
11129 "read/write function was incorrect.\n");
11130 }
11131
11132 /* Update now */
11133 sc->sc_phytype = new_phytype;
11134 mii->mii_readreg = new_readreg;
11135 mii->mii_writereg = new_writereg;
11136 if (new_readreg == wm_gmii_hv_readreg) {
11137 sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
11138 sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
11139 } else if (new_readreg == wm_sgmii_readreg) {
11140 sc->phy.readreg_locked = wm_sgmii_readreg_locked;
11141 sc->phy.writereg_locked = wm_sgmii_writereg_locked;
11142 } else if (new_readreg == wm_gmii_i82544_readreg) {
11143 sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
11144 sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
11145 }
11146 }
11147
11148 /*
11149 * wm_get_phy_id_82575:
11150 *
 *	Return the PHY ID, or -1 on failure.
11152 */
11153 static int
11154 wm_get_phy_id_82575(struct wm_softc *sc)
11155 {
11156 uint32_t reg;
11157 int phyid = -1;
11158
11159 /* XXX */
11160 if ((sc->sc_flags & WM_F_SGMII) == 0)
11161 return -1;
11162
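	/*
	 * When the transaction goes through the external MDIO interface,
	 * the PHY address comes from MDIC (82575/82576) or MDICNFG
	 * (82580 and newer).
	 */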
11163 if (wm_sgmii_uses_mdio(sc)) {
11164 switch (sc->sc_type) {
11165 case WM_T_82575:
11166 case WM_T_82576:
11167 reg = CSR_READ(sc, WMREG_MDIC);
11168 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
11169 break;
11170 case WM_T_82580:
11171 case WM_T_I350:
11172 case WM_T_I354:
11173 case WM_T_I210:
11174 case WM_T_I211:
11175 reg = CSR_READ(sc, WMREG_MDICNFG);
11176 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
11177 break;
11178 default:
11179 return -1;
11180 }
11181 }
11182
11183 return phyid;
11184 }
11185
11186 /*
11187 * wm_gmii_mediainit:
11188 *
11189 * Initialize media for use on 1000BASE-T devices.
11190 */
11191 static void
11192 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
11193 {
11194 device_t dev = sc->sc_dev;
11195 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11196 struct mii_data *mii = &sc->sc_mii;
11197
11198 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11199 device_xname(sc->sc_dev), __func__));
11200
11201 /* We have GMII. */
11202 sc->sc_flags |= WM_F_HAS_MII;
11203
11204 if (sc->sc_type == WM_T_80003)
11205 sc->sc_tipg = TIPG_1000T_80003_DFLT;
11206 else
11207 sc->sc_tipg = TIPG_1000T_DFLT;
11208
11209 /*
11210 * Let the chip set speed/duplex on its own based on
11211 * signals from the PHY.
11212 * XXXbouyer - I'm not sure this is right for the 80003,
11213 * the em driver only sets CTRL_SLU here - but it seems to work.
11214 */
11215 sc->sc_ctrl |= CTRL_SLU;
11216 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11217
11218 /* Initialize our media structures and probe the GMII. */
11219 mii->mii_ifp = ifp;
11220
11221 mii->mii_statchg = wm_gmii_statchg;
11222
	/* Get PHY control from SMBus to PCIe */
11224 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
11225 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
11226 || (sc->sc_type == WM_T_PCH_CNP))
11227 wm_init_phy_workarounds_pchlan(sc);
11228
11229 wm_gmii_reset(sc);
11230
11231 sc->sc_ethercom.ec_mii = &sc->sc_mii;
11232 ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
11233 wm_gmii_mediastatus, sc->sc_core_lock);
11234
11235 /* Setup internal SGMII PHY for SFP */
11236 wm_sgmii_sfp_preconfig(sc);
11237
11238 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
11239 || (sc->sc_type == WM_T_82580)
11240 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
11241 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
11242 if ((sc->sc_flags & WM_F_SGMII) == 0) {
11243 /* Attach only one port */
11244 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
11245 MII_OFFSET_ANY, MIIF_DOPAUSE);
11246 } else {
11247 int i, id;
11248 uint32_t ctrl_ext;
11249
11250 id = wm_get_phy_id_82575(sc);
11251 if (id != -1) {
11252 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
11253 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
11254 }
11255 if ((id == -1)
11256 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
11257 /* Power on sgmii phy if it is disabled */
11258 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11259 CSR_WRITE(sc, WMREG_CTRL_EXT,
11260 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
11261 CSR_WRITE_FLUSH(sc);
11262 delay(300*1000); /* XXX too long */
11263
				/*
				 * Scan PHY addresses 1 to 7.
				 *
				 * I2C access can fail with the I2C
				 * register's ERROR bit set, so suppress
				 * error messages while scanning.
				 */
11271 sc->phy.no_errprint = true;
11272 for (i = 1; i < 8; i++)
11273 mii_attach(sc->sc_dev, &sc->sc_mii,
11274 0xffffffff, i, MII_OFFSET_ANY,
11275 MIIF_DOPAUSE);
11276 sc->phy.no_errprint = false;
11277
11278 /* Restore previous sfp cage power state */
11279 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11280 }
11281 }
11282 } else
11283 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11284 MII_OFFSET_ANY, MIIF_DOPAUSE);
11285
11286 /*
11287 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
11288 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
11289 */
11290 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
11291 || (sc->sc_type == WM_T_PCH_SPT)
11292 || (sc->sc_type == WM_T_PCH_CNP))
11293 && (LIST_FIRST(&mii->mii_phys) == NULL)) {
11294 wm_set_mdio_slow_mode_hv(sc);
11295 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11296 MII_OFFSET_ANY, MIIF_DOPAUSE);
11297 }
11298
11299 /*
11300 * (For ICH8 variants)
11301 * If PHY detection failed, use BM's r/w function and retry.
11302 */
11303 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* If it failed, retry with the *_bm_* functions */
11305 aprint_verbose_dev(dev, "Assumed PHY access function "
11306 "(type = %d) might be incorrect. Use BM and retry.\n",
11307 sc->sc_phytype);
11308 sc->sc_phytype = WMPHY_BM;
11309 mii->mii_readreg = wm_gmii_bm_readreg;
11310 mii->mii_writereg = wm_gmii_bm_writereg;
11311
11312 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11313 MII_OFFSET_ANY, MIIF_DOPAUSE);
11314 }
11315
11316 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
11318 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
11319 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
11320 sc->sc_phytype = WMPHY_NONE;
11321 } else {
11322 struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
11323
		/*
		 * PHY found! Check the PHY type again with the second call
		 * of wm_gmii_setup_phytype.
		 */
11328 wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
11329 child->mii_mpd_model);
11330
11331 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
11332 }
11333 }
11334
11335 /*
11336 * wm_gmii_mediachange: [ifmedia interface function]
11337 *
11338 * Set hardware to newly-selected media on a 1000BASE-T device.
11339 */
11340 static int
11341 wm_gmii_mediachange(struct ifnet *ifp)
11342 {
11343 struct wm_softc *sc = ifp->if_softc;
11344 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11345 uint32_t reg;
11346 int rc;
11347
11348 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11349 device_xname(sc->sc_dev), __func__));
11350
11351 KASSERT(mutex_owned(sc->sc_core_lock));
11352
11353 if ((sc->sc_if_flags & IFF_UP) == 0)
11354 return 0;
11355
11356 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
11357 if ((sc->sc_type == WM_T_82580)
11358 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
11359 || (sc->sc_type == WM_T_I211)) {
11360 reg = CSR_READ(sc, WMREG_PHPM);
11361 reg &= ~PHPM_GO_LINK_D;
11362 CSR_WRITE(sc, WMREG_PHPM, reg);
11363 }
11364
11365 /* Disable D0 LPLU. */
11366 wm_lplu_d0_disable(sc);
11367
11368 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
11369 sc->sc_ctrl |= CTRL_SLU;
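	/*
	 * For autoselect media, or on chips newer than the 82543, clear
	 * the force bits and let the MAC take speed/duplex from the PHY.
	 * Otherwise disable auto speed detection and force the MAC's
	 * speed/duplex to match the selected media.
	 */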
11370 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11371 || (sc->sc_type > WM_T_82543)) {
11372 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
11373 } else {
11374 sc->sc_ctrl &= ~CTRL_ASDE;
11375 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11376 if (ife->ifm_media & IFM_FDX)
11377 sc->sc_ctrl |= CTRL_FD;
11378 switch (IFM_SUBTYPE(ife->ifm_media)) {
11379 case IFM_10_T:
11380 sc->sc_ctrl |= CTRL_SPEED_10;
11381 break;
11382 case IFM_100_TX:
11383 sc->sc_ctrl |= CTRL_SPEED_100;
11384 break;
11385 case IFM_1000_T:
11386 sc->sc_ctrl |= CTRL_SPEED_1000;
11387 break;
11388 case IFM_NONE:
11389 /* There is no specific setting for IFM_NONE */
11390 break;
11391 default:
11392 panic("wm_gmii_mediachange: bad media 0x%x",
11393 ife->ifm_media);
11394 }
11395 }
11396 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11397 CSR_WRITE_FLUSH(sc);
11398
11399 if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
11400 wm_serdes_mediachange(ifp);
11401
11402 if (sc->sc_type <= WM_T_82543)
11403 wm_gmii_reset(sc);
11404 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
11405 && ((sc->sc_flags & WM_F_SGMII) != 0)) {
		/* Allow time for the SFP cage to power up the PHY */
11407 delay(300 * 1000);
11408 wm_gmii_reset(sc);
11409 }
11410
11411 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
11412 return 0;
11413 return rc;
11414 }
11415
11416 /*
11417 * wm_gmii_mediastatus: [ifmedia interface function]
11418 *
11419 * Get the current interface media status on a 1000BASE-T device.
11420 */
11421 static void
11422 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11423 {
11424 struct wm_softc *sc = ifp->if_softc;
11425
11426 KASSERT(mutex_owned(sc->sc_core_lock));
11427
11428 ether_mediastatus(ifp, ifmr);
11429 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
11430 | sc->sc_flowflags;
11431 }
11432
11433 #define MDI_IO CTRL_SWDPIN(2)
11434 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
11435 #define MDI_CLK CTRL_SWDPIN(3)
11436
11437 static void
11438 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
11439 {
11440 uint32_t i, v;
11441
11442 v = CSR_READ(sc, WMREG_CTRL);
11443 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11444 v |= MDI_DIR | CTRL_SWDPIO(3);
11445
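	/*
	 * Shift the bits out MSB first. For each bit: present it on
	 * MDI_IO with the clock low, pulse MDI_CLK high, then drop the
	 * clock again, waiting ~10us per phase.
	 */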
11446 for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
11447 if (data & i)
11448 v |= MDI_IO;
11449 else
11450 v &= ~MDI_IO;
11451 CSR_WRITE(sc, WMREG_CTRL, v);
11452 CSR_WRITE_FLUSH(sc);
11453 delay(10);
11454 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11455 CSR_WRITE_FLUSH(sc);
11456 delay(10);
11457 CSR_WRITE(sc, WMREG_CTRL, v);
11458 CSR_WRITE_FLUSH(sc);
11459 delay(10);
11460 }
11461 }
11462
11463 static uint16_t
11464 wm_i82543_mii_recvbits(struct wm_softc *sc)
11465 {
11466 uint32_t v, i;
11467 uint16_t data = 0;
11468
11469 v = CSR_READ(sc, WMREG_CTRL);
11470 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11471 v |= CTRL_SWDPIO(3);
11472
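	/*
	 * Clock through the turnaround bits, then shift in the 16 data
	 * bits MSB first, sampling MDI_IO while the clock is high.
	 */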
11473 CSR_WRITE(sc, WMREG_CTRL, v);
11474 CSR_WRITE_FLUSH(sc);
11475 delay(10);
11476 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11477 CSR_WRITE_FLUSH(sc);
11478 delay(10);
11479 CSR_WRITE(sc, WMREG_CTRL, v);
11480 CSR_WRITE_FLUSH(sc);
11481 delay(10);
11482
11483 for (i = 0; i < 16; i++) {
11484 data <<= 1;
11485 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11486 CSR_WRITE_FLUSH(sc);
11487 delay(10);
11488 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
11489 data |= 1;
11490 CSR_WRITE(sc, WMREG_CTRL, v);
11491 CSR_WRITE_FLUSH(sc);
11492 delay(10);
11493 }
11494
11495 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11496 CSR_WRITE_FLUSH(sc);
11497 delay(10);
11498 CSR_WRITE(sc, WMREG_CTRL, v);
11499 CSR_WRITE_FLUSH(sc);
11500 delay(10);
11501
11502 return data;
11503 }
11504
11505 #undef MDI_IO
11506 #undef MDI_DIR
11507 #undef MDI_CLK
11508
11509 /*
11510 * wm_gmii_i82543_readreg: [mii interface function]
11511 *
11512 * Read a PHY register on the GMII (i82543 version).
11513 */
11514 static int
11515 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
11516 {
11517 struct wm_softc *sc = device_private(dev);
11518
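	/*
	 * Bit-bang an IEEE 802.3 clause 22 read frame: a 32-bit preamble
	 * of all ones, then 14 bits carrying the start code (01), the
	 * read opcode (10) and the 5-bit PHY and register addresses. The
	 * PHY then drives the turnaround and the 16 data bits, which are
	 * collected by wm_i82543_mii_recvbits().
	 */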
11519 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11520 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
11521 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
11522 *val = wm_i82543_mii_recvbits(sc) & 0xffff;
11523
11524 DPRINTF(sc, WM_DEBUG_GMII,
11525 ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
11526 device_xname(dev), phy, reg, *val));
11527
11528 return 0;
11529 }
11530
11531 /*
11532 * wm_gmii_i82543_writereg: [mii interface function]
11533 *
11534 * Write a PHY register on the GMII (i82543 version).
11535 */
11536 static int
11537 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
11538 {
11539 struct wm_softc *sc = device_private(dev);
11540
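	/*
	 * Bit-bang a clause 22 write frame: after the 32-bit preamble,
	 * a single 32-bit word carries the start code, the write opcode,
	 * the PHY and register addresses, the turnaround (ACK) bits and
	 * the 16 bits of data.
	 */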
11541 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11542 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
11543 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
11544 (MII_COMMAND_START << 30), 32);
11545
11546 return 0;
11547 }
11548
11549 /*
11550 * wm_gmii_mdic_readreg: [mii interface function]
11551 *
11552 * Read a PHY register on the GMII.
11553 */
11554 static int
11555 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
11556 {
11557 struct wm_softc *sc = device_private(dev);
11558 uint32_t mdic = 0;
11559 int i;
11560
11561 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11562 && (reg > MII_ADDRMASK)) {
11563 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11564 __func__, sc->sc_phytype, reg);
11565 reg &= MII_ADDRMASK;
11566 }
11567
11568 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
11569 MDIC_REGADD(reg));
11570
11571 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11572 delay(50);
11573 mdic = CSR_READ(sc, WMREG_MDIC);
11574 if (mdic & MDIC_READY)
11575 break;
11576 }
11577
11578 if ((mdic & MDIC_READY) == 0) {
11579 DPRINTF(sc, WM_DEBUG_GMII,
11580 ("%s: MDIC read timed out: phy %d reg %d\n",
11581 device_xname(dev), phy, reg));
11582 return ETIMEDOUT;
11583 } else if (mdic & MDIC_E) {
11584 /* This is normal if no PHY is present. */
11585 DPRINTF(sc, WM_DEBUG_GMII,
11586 ("%s: MDIC read error: phy %d reg %d\n",
11587 device_xname(sc->sc_dev), phy, reg));
11588 return -1;
11589 } else
11590 *val = MDIC_DATA(mdic);
11591
11592 /*
11593 * Allow some time after each MDIC transaction to avoid
11594 * reading duplicate data in the next MDIC transaction.
11595 */
11596 if (sc->sc_type == WM_T_PCH2)
11597 delay(100);
11598
11599 return 0;
11600 }
11601
11602 /*
11603 * wm_gmii_mdic_writereg: [mii interface function]
11604 *
11605 * Write a PHY register on the GMII.
11606 */
11607 static int
11608 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
11609 {
11610 struct wm_softc *sc = device_private(dev);
11611 uint32_t mdic = 0;
11612 int i;
11613
11614 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11615 && (reg > MII_ADDRMASK)) {
11616 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11617 __func__, sc->sc_phytype, reg);
11618 reg &= MII_ADDRMASK;
11619 }
11620
11621 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
11622 MDIC_REGADD(reg) | MDIC_DATA(val));
11623
11624 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11625 delay(50);
11626 mdic = CSR_READ(sc, WMREG_MDIC);
11627 if (mdic & MDIC_READY)
11628 break;
11629 }
11630
11631 if ((mdic & MDIC_READY) == 0) {
11632 DPRINTF(sc, WM_DEBUG_GMII,
11633 ("%s: MDIC write timed out: phy %d reg %d\n",
11634 device_xname(dev), phy, reg));
11635 return ETIMEDOUT;
11636 } else if (mdic & MDIC_E) {
11637 DPRINTF(sc, WM_DEBUG_GMII,
11638 ("%s: MDIC write error: phy %d reg %d\n",
11639 device_xname(dev), phy, reg));
11640 return -1;
11641 }
11642
11643 /*
11644 * Allow some time after each MDIC transaction to avoid
11645 * reading duplicate data in the next MDIC transaction.
11646 */
11647 if (sc->sc_type == WM_T_PCH2)
11648 delay(100);
11649
11650 return 0;
11651 }
11652
11653 /*
11654 * wm_gmii_i82544_readreg: [mii interface function]
11655 *
11656 * Read a PHY register on the GMII.
11657 */
11658 static int
11659 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
11660 {
11661 struct wm_softc *sc = device_private(dev);
11662 int rv;
11663
11664 rv = sc->phy.acquire(sc);
11665 if (rv != 0) {
11666 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11667 return rv;
11668 }
11669
11670 rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
11671
11672 sc->phy.release(sc);
11673
11674 return rv;
11675 }
11676
11677 static int
11678 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11679 {
11680 struct wm_softc *sc = device_private(dev);
11681 int rv;
11682
11683 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11684 switch (sc->sc_phytype) {
11685 case WMPHY_IGP:
11686 case WMPHY_IGP_2:
11687 case WMPHY_IGP_3:
11688 rv = wm_gmii_mdic_writereg(dev, phy,
11689 IGPHY_PAGE_SELECT, reg);
11690 if (rv != 0)
11691 return rv;
11692 break;
11693 default:
11694 #ifdef WM_DEBUG
11695 device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
11696 __func__, sc->sc_phytype, reg);
11697 #endif
11698 break;
11699 }
11700 }
11701
11702 return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11703 }
11704
11705 /*
11706 * wm_gmii_i82544_writereg: [mii interface function]
11707 *
11708 * Write a PHY register on the GMII.
11709 */
11710 static int
11711 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
11712 {
11713 struct wm_softc *sc = device_private(dev);
11714 int rv;
11715
11716 rv = sc->phy.acquire(sc);
11717 if (rv != 0) {
11718 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11719 return rv;
11720 }
11721
11722 rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
11723 sc->phy.release(sc);
11724
11725 return rv;
11726 }
11727
11728 static int
11729 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11730 {
11731 struct wm_softc *sc = device_private(dev);
11732 int rv;
11733
11734 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11735 switch (sc->sc_phytype) {
11736 case WMPHY_IGP:
11737 case WMPHY_IGP_2:
11738 case WMPHY_IGP_3:
11739 rv = wm_gmii_mdic_writereg(dev, phy,
11740 IGPHY_PAGE_SELECT, reg);
11741 if (rv != 0)
11742 return rv;
11743 break;
11744 default:
11745 #ifdef WM_DEBUG
11746 device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
11747 __func__, sc->sc_phytype, reg);
11748 #endif
11749 break;
11750 }
11751 }
11752
11753 return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11754 }
11755
11756 /*
11757 * wm_gmii_i80003_readreg: [mii interface function]
11758 *
 *	Read a PHY register on the Kumeran bus.
11760 * This could be handled by the PHY layer if we didn't have to lock the
11761 * resource ...
11762 */
11763 static int
11764 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
11765 {
11766 struct wm_softc *sc = device_private(dev);
11767 int page_select;
11768 uint16_t temp, temp2;
11769 int rv;
11770
11771 if (phy != 1) /* Only one PHY on kumeran bus */
11772 return -1;
11773
11774 rv = sc->phy.acquire(sc);
11775 if (rv != 0) {
11776 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11777 return rv;
11778 }
11779
11780 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11781 page_select = GG82563_PHY_PAGE_SELECT;
11782 else {
11783 /*
11784 * Use Alternative Page Select register to access registers
11785 * 30 and 31.
11786 */
11787 page_select = GG82563_PHY_PAGE_SELECT_ALT;
11788 }
11789 temp = reg >> GG82563_PAGE_SHIFT;
11790 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11791 goto out;
11792
11793 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
		 */
11798 delay(200);
11799 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11800 if ((rv != 0) || (temp2 != temp)) {
11801 device_printf(dev, "%s failed\n", __func__);
11802 rv = -1;
11803 goto out;
11804 }
11805 delay(200);
11806 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11807 delay(200);
11808 } else
11809 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11810
11811 out:
11812 sc->phy.release(sc);
11813 return rv;
11814 }
11815
11816 /*
11817 * wm_gmii_i80003_writereg: [mii interface function]
11818 *
 *	Write a PHY register on the Kumeran bus.
11820 * This could be handled by the PHY layer if we didn't have to lock the
11821 * resource ...
11822 */
11823 static int
11824 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
11825 {
11826 struct wm_softc *sc = device_private(dev);
11827 int page_select, rv;
11828 uint16_t temp, temp2;
11829
11830 if (phy != 1) /* Only one PHY on kumeran bus */
11831 return -1;
11832
11833 rv = sc->phy.acquire(sc);
11834 if (rv != 0) {
11835 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11836 return rv;
11837 }
11838
11839 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11840 page_select = GG82563_PHY_PAGE_SELECT;
11841 else {
11842 /*
11843 * Use Alternative Page Select register to access registers
11844 * 30 and 31.
11845 */
11846 page_select = GG82563_PHY_PAGE_SELECT_ALT;
11847 }
11848 temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
11849 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11850 goto out;
11851
11852 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
		 */
11857 delay(200);
11858 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11859 if ((rv != 0) || (temp2 != temp)) {
11860 device_printf(dev, "%s failed\n", __func__);
11861 rv = -1;
11862 goto out;
11863 }
11864 delay(200);
11865 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11866 delay(200);
11867 } else
11868 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11869
11870 out:
11871 sc->phy.release(sc);
11872 return rv;
11873 }
11874
11875 /*
11876 * wm_gmii_bm_readreg: [mii interface function]
11877 *
 *	Read a PHY register on the BM PHY.
11879 * This could be handled by the PHY layer if we didn't have to lock the
11880 * resource ...
11881 */
11882 static int
11883 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
11884 {
11885 struct wm_softc *sc = device_private(dev);
11886 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11887 int rv;
11888
11889 rv = sc->phy.acquire(sc);
11890 if (rv != 0) {
11891 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11892 return rv;
11893 }
11894
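	/*
	 * On BM PHYs, the registers on pages >= 768, page 0 register 25
	 * and register 31 are accessed through PHY address 1.
	 */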
11895 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11896 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11897 || (reg == 31)) ? 1 : phy;
11898 /* Page 800 works differently than the rest so it has its own func */
11899 if (page == BM_WUC_PAGE) {
11900 rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11901 goto release;
11902 }
11903
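	/*
	 * For registers beyond the multi-page boundary, program the page
	 * first: through IGPHY_PAGE_SELECT on PHY address 1, or through
	 * BME1000_PHY_PAGE_SELECT otherwise.
	 */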
11904 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11905 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11906 && (sc->sc_type != WM_T_82583))
11907 rv = wm_gmii_mdic_writereg(dev, phy,
11908 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11909 else
11910 rv = wm_gmii_mdic_writereg(dev, phy,
11911 BME1000_PHY_PAGE_SELECT, page);
11912 if (rv != 0)
11913 goto release;
11914 }
11915
11916 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11917
11918 release:
11919 sc->phy.release(sc);
11920 return rv;
11921 }
11922
11923 /*
11924 * wm_gmii_bm_writereg: [mii interface function]
11925 *
 *	Write a PHY register on the BM PHY.
11927 * This could be handled by the PHY layer if we didn't have to lock the
11928 * resource ...
11929 */
11930 static int
11931 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
11932 {
11933 struct wm_softc *sc = device_private(dev);
11934 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11935 int rv;
11936
11937 rv = sc->phy.acquire(sc);
11938 if (rv != 0) {
11939 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11940 return rv;
11941 }
11942
11943 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11944 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11945 || (reg == 31)) ? 1 : phy;
11946 /* Page 800 works differently than the rest so it has its own func */
11947 if (page == BM_WUC_PAGE) {
11948 rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
11949 goto release;
11950 }
11951
11952 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11953 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11954 && (sc->sc_type != WM_T_82583))
11955 rv = wm_gmii_mdic_writereg(dev, phy,
11956 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11957 else
11958 rv = wm_gmii_mdic_writereg(dev, phy,
11959 BME1000_PHY_PAGE_SELECT, page);
11960 if (rv != 0)
11961 goto release;
11962 }
11963
11964 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11965
11966 release:
11967 sc->phy.release(sc);
11968 return rv;
11969 }
11970
11971 /*
11972 * wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
 * @dev: the device
 * @phy_regp: pointer to store the original contents of BM_WUC_ENABLE_REG
 *
 * Assumes the semaphore is already acquired and phy_regp points to a valid
 * memory address to store the contents of the BM_WUC_ENABLE_REG register.
11978 */
11979 static int
11980 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11981 {
11982 #ifdef WM_DEBUG
11983 struct wm_softc *sc = device_private(dev);
11984 #endif
11985 uint16_t temp;
11986 int rv;
11987
11988 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11989 device_xname(dev), __func__));
11990
11991 if (!phy_regp)
11992 return -1;
11993
11994 /* All page select, port ctrl and wakeup registers use phy address 1 */
11995
11996 /* Select Port Control Registers page */
11997 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11998 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11999 if (rv != 0)
12000 return rv;
12001
12002 /* Read WUCE and save it */
12003 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
12004 if (rv != 0)
12005 return rv;
12006
12007 /* Enable both PHY wakeup mode and Wakeup register page writes.
12008 * Prevent a power state change by disabling ME and Host PHY wakeup.
12009 */
12010 temp = *phy_regp;
12011 temp |= BM_WUC_ENABLE_BIT;
12012 temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
12013
12014 if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
12015 return rv;
12016
	/*
	 * Select the Host Wakeup Registers page - the caller is now able
	 * to write registers on the Wakeup registers page.
	 */
12020 return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12021 BM_WUC_PAGE << IGP3_PAGE_SHIFT);
12022 }
12023
12024 /*
12025 * wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
 * @dev: the device
 * @phy_regp: pointer to the original contents of BM_WUC_ENABLE_REG
 *
 * Restore BM_WUC_ENABLE_REG to its original value.
 *
 * Assumes the semaphore is already acquired and *phy_regp holds the
 * contents of BM_WUC_ENABLE_REG from before register(s) on BM_WUC_PAGE
 * were accessed by the caller.
12034 */
12035 static int
12036 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12037 {
12038 #ifdef WM_DEBUG
12039 struct wm_softc *sc = device_private(dev);
12040 #endif
12041
12042 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12043 device_xname(dev), __func__));
12044
12045 if (!phy_regp)
12046 return -1;
12047
12048 /* Select Port Control Registers page */
12049 wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12050 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12051
12052 /* Restore 769.17 to its original value */
12053 wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
12054
12055 return 0;
12056 }
12057
12058 /*
12059 * wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
 * @dev: the device
 * @offset: register offset to be read or written
 * @val: pointer to the data to read or write
 * @rd: determines if the operation is a read or a write
 * @page_set: BM_WUC_PAGE already set and access enabled
 *
 * Read the PHY register at offset and store the retrieved information in
 * data, or write data to the PHY register at offset. Note that the
 * procedure for accessing the PHY wakeup registers differs from that for
 * the other PHY registers. It works as follows:
 * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 * 2) Set page to 800 for host (801 if we were manageability)
 * 3) Write the address using the address opcode (0x11)
 * 4) Read or write the data using the data opcode (0x12)
 * 5) Restore 769.17.2 to its original value
 *
 * Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
 * step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
 *
 * Assumes the semaphore is already acquired. When page_set==TRUE, assumes
 * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 * is responsible for calls to
 * wm_[enable|disable]_phy_wakeup_reg_access_bm()).
12082 */
12083 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
12085 bool page_set)
12086 {
12087 struct wm_softc *sc = device_private(dev);
12088 uint16_t regnum = BM_PHY_REG_NUM(offset);
12089 uint16_t page = BM_PHY_REG_PAGE(offset);
12090 uint16_t wuce;
12091 int rv = 0;
12092
12093 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12094 device_xname(dev), __func__));
12095 /* XXX Gig must be disabled for MDIO accesses to page 800 */
12096 if ((sc->sc_type == WM_T_PCH)
12097 && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
12098 device_printf(dev,
12099 "Attempting to access page %d while gig enabled.\n", page);
12100 }
12101
12102 if (!page_set) {
12103 /* Enable access to PHY wakeup registers */
12104 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
12105 if (rv != 0) {
12106 device_printf(dev,
12107 "%s: Could not enable PHY wakeup reg access\n",
12108 __func__);
12109 return rv;
12110 }
12111 }
12112 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
12113 device_xname(sc->sc_dev), __func__, page, regnum));
12114
	/*
	 * 3) and 4): Access the PHY wakeup register.
	 * See the procedure in the comment above this function.
	 */
12119
12120 /* Write the Wakeup register page offset value using opcode 0x11 */
12121 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
12122 if (rv != 0)
12123 return rv;
12124
12125 if (rd) {
12126 /* Read the Wakeup register page value using opcode 0x12 */
12127 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
12128 } else {
12129 /* Write the Wakeup register page value using opcode 0x12 */
12130 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
12131 }
12132 if (rv != 0)
12133 return rv;
12134
12135 if (!page_set)
12136 rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
12137
12138 return rv;
12139 }
12140
12141 /*
12142 * wm_gmii_hv_readreg: [mii interface function]
12143 *
 *	Read a PHY register on the HV (PCH and newer) PHY.
12145 * This could be handled by the PHY layer if we didn't have to lock the
12146 * resource ...
12147 */
12148 static int
12149 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
12150 {
12151 struct wm_softc *sc = device_private(dev);
12152 int rv;
12153
12154 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12155 device_xname(dev), __func__));
12156
12157 rv = sc->phy.acquire(sc);
12158 if (rv != 0) {
12159 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12160 return rv;
12161 }
12162
12163 rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
12164 sc->phy.release(sc);
12165 return rv;
12166 }
12167
12168 static int
12169 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12170 {
12171 uint16_t page = BM_PHY_REG_PAGE(reg);
12172 uint16_t regnum = BM_PHY_REG_NUM(reg);
12173 int rv;
12174
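	/* Registers on pages >= 768 are accessed through PHY address 1. */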
12175 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12176
12177 /* Page 800 works differently than the rest so it has its own func */
12178 if (page == BM_WUC_PAGE)
12179 return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
12180
	/*
	 * Pages lower than 768 work differently from the rest and are not
	 * handled here.
	 */
12185 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12186 device_printf(dev, "gmii_hv_readreg!!!\n");
12187 return -1;
12188 }
12189
12190 /*
12191 * XXX I21[789] documents say that the SMBus Address register is at
12192 * PHY address 01, Page 0 (not 768), Register 26.
12193 */
12194 if (page == HV_INTC_FC_PAGE_START)
12195 page = 0;
12196
12197 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12198 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12199 page << BME1000_PAGE_SHIFT);
12200 if (rv != 0)
12201 return rv;
12202 }
12203
12204 return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
12205 }
12206
12207 /*
12208 * wm_gmii_hv_writereg: [mii interface function]
12209 *
 *	Write a PHY register on the HV (PCH and newer) PHY.
12211 * This could be handled by the PHY layer if we didn't have to lock the
12212 * resource ...
12213 */
12214 static int
12215 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
12216 {
12217 struct wm_softc *sc = device_private(dev);
12218 int rv;
12219
12220 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12221 device_xname(dev), __func__));
12222
12223 rv = sc->phy.acquire(sc);
12224 if (rv != 0) {
12225 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12226 return rv;
12227 }
12228
12229 rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
12230 sc->phy.release(sc);
12231
12232 return rv;
12233 }
12234
12235 static int
12236 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12237 {
12238 struct wm_softc *sc = device_private(dev);
12239 uint16_t page = BM_PHY_REG_PAGE(reg);
12240 uint16_t regnum = BM_PHY_REG_NUM(reg);
12241 int rv;
12242
12243 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12244
12245 /* Page 800 works differently than the rest so it has its own func */
12246 if (page == BM_WUC_PAGE)
12247 return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
12248 false);
12249
	/*
	 * Pages lower than 768 work differently from the rest and are not
	 * handled here.
	 */
12254 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12255 device_printf(dev, "gmii_hv_writereg!!!\n");
12256 return -1;
12257 }
12258
12259 {
12260 /*
12261 * XXX I21[789] documents say that the SMBus Address register
12262 * is at PHY address 01, Page 0 (not 768), Register 26.
12263 */
12264 if (page == HV_INTC_FC_PAGE_START)
12265 page = 0;
12266
12267 /*
12268 * XXX Workaround MDIO accesses being disabled after entering
12269 * IEEE Power Down (whenever bit 11 of the PHY control
12270 * register is set)
12271 */
12272 if (sc->sc_phytype == WMPHY_82578) {
12273 struct mii_softc *child;
12274
12275 child = LIST_FIRST(&sc->sc_mii.mii_phys);
12276 if ((child != NULL) && (child->mii_mpd_rev >= 1)
12277 && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
12278 && ((val & (1 << 11)) != 0)) {
12279 device_printf(dev, "XXX need workaround\n");
12280 }
12281 }
12282
12283 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12284 rv = wm_gmii_mdic_writereg(dev, 1,
12285 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12286 if (rv != 0)
12287 return rv;
12288 }
12289 }
12290
12291 return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
12292 }
12293
12294 /*
12295 * wm_gmii_82580_readreg: [mii interface function]
12296 *
12297 * Read a PHY register on the 82580 and I350.
12298 * This could be handled by the PHY layer if we didn't have to lock the
12299 * resource ...
12300 */
12301 static int
12302 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
12303 {
12304 struct wm_softc *sc = device_private(dev);
12305 int rv;
12306
12307 rv = sc->phy.acquire(sc);
12308 if (rv != 0) {
12309 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12310 return rv;
12311 }
12312
12313 #ifdef DIAGNOSTIC
12314 if (reg > MII_ADDRMASK) {
12315 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12316 __func__, sc->sc_phytype, reg);
12317 reg &= MII_ADDRMASK;
12318 }
12319 #endif
12320 rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
12321
12322 sc->phy.release(sc);
12323 return rv;
12324 }
12325
12326 /*
12327 * wm_gmii_82580_writereg: [mii interface function]
12328 *
12329 * Write a PHY register on the 82580 and I350.
12330 * This could be handled by the PHY layer if we didn't have to lock the
12331 * resource ...
12332 */
12333 static int
12334 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
12335 {
12336 struct wm_softc *sc = device_private(dev);
12337 int rv;
12338
12339 rv = sc->phy.acquire(sc);
12340 if (rv != 0) {
12341 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12342 return rv;
12343 }
12344
12345 #ifdef DIAGNOSTIC
12346 if (reg > MII_ADDRMASK) {
12347 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12348 __func__, sc->sc_phytype, reg);
12349 reg &= MII_ADDRMASK;
12350 }
12351 #endif
12352 rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
12353
12354 sc->phy.release(sc);
12355 return rv;
12356 }
12357
12358 /*
12359 * wm_gmii_gs40g_readreg: [mii interface function]
12360 *
 *	Read a PHY register on the I210 and I211.
12362 * This could be handled by the PHY layer if we didn't have to lock the
12363 * resource ...
12364 */
12365 static int
12366 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
12367 {
12368 struct wm_softc *sc = device_private(dev);
12369 int page, offset;
12370 int rv;
12371
12372 /* Acquire semaphore */
12373 rv = sc->phy.acquire(sc);
12374 if (rv != 0) {
12375 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12376 return rv;
12377 }
12378
12379 /* Page select */
12380 page = reg >> GS40G_PAGE_SHIFT;
12381 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12382 if (rv != 0)
12383 goto release;
12384
12385 /* Read reg */
12386 offset = reg & GS40G_OFFSET_MASK;
12387 rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
12388
12389 release:
12390 sc->phy.release(sc);
12391 return rv;
12392 }
12393
12394 /*
12395 * wm_gmii_gs40g_writereg: [mii interface function]
12396 *
12397 * Write a PHY register on the I210 and I211.
12398 * This could be handled by the PHY layer if we didn't have to lock the
12399 * resource ...
12400 */
12401 static int
12402 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
12403 {
12404 struct wm_softc *sc = device_private(dev);
12405 uint16_t page;
12406 int offset, rv;
12407
12408 /* Acquire semaphore */
12409 rv = sc->phy.acquire(sc);
12410 if (rv != 0) {
12411 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12412 return rv;
12413 }
12414
12415 /* Page select */
12416 page = reg >> GS40G_PAGE_SHIFT;
12417 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12418 if (rv != 0)
12419 goto release;
12420
12421 /* Write reg */
12422 offset = reg & GS40G_OFFSET_MASK;
12423 rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
12424
12425 release:
12426 /* Release semaphore */
12427 sc->phy.release(sc);
12428 return rv;
12429 }
12430
12431 /*
12432 * wm_gmii_statchg: [mii interface function]
12433 *
12434 * Callback from MII layer when media changes.
12435 */
12436 static void
12437 wm_gmii_statchg(struct ifnet *ifp)
12438 {
12439 struct wm_softc *sc = ifp->if_softc;
12440 struct mii_data *mii = &sc->sc_mii;
12441
12442 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
12443 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12444 sc->sc_fcrtl &= ~FCRTL_XONE;
12445
12446 /* Get flow control negotiation result. */
12447 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
12448 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
12449 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
12450 mii->mii_media_active &= ~IFM_ETH_FMASK;
12451 }
12452
12453 if (sc->sc_flowflags & IFM_FLOW) {
12454 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
12455 sc->sc_ctrl |= CTRL_TFCE;
12456 sc->sc_fcrtl |= FCRTL_XONE;
12457 }
12458 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
12459 sc->sc_ctrl |= CTRL_RFCE;
12460 }
12461
12462 if (mii->mii_media_active & IFM_FDX) {
12463 DPRINTF(sc, WM_DEBUG_LINK,
12464 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
12465 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12466 } else {
12467 DPRINTF(sc, WM_DEBUG_LINK,
12468 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
12469 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12470 }
12471
12472 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12473 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12474 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
12475 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
12476 if (sc->sc_type == WM_T_80003) {
12477 switch (IFM_SUBTYPE(mii->mii_media_active)) {
12478 case IFM_1000_T:
12479 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12480 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
12481 sc->sc_tipg = TIPG_1000T_80003_DFLT;
12482 break;
12483 default:
12484 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12485 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
12486 sc->sc_tipg = TIPG_10_100_80003_DFLT;
12487 break;
12488 }
12489 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
12490 }
12491 }
12492
/* Kumeran related (80003, ICH* and PCH*) */
12494
12495 /*
12496 * wm_kmrn_readreg:
12497 *
12498 * Read a kumeran register
12499 */
12500 static int
12501 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
12502 {
12503 int rv;
12504
12505 if (sc->sc_type == WM_T_80003)
12506 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12507 else
12508 rv = sc->phy.acquire(sc);
12509 if (rv != 0) {
12510 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12511 __func__);
12512 return rv;
12513 }
12514
12515 rv = wm_kmrn_readreg_locked(sc, reg, val);
12516
12517 if (sc->sc_type == WM_T_80003)
12518 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12519 else
12520 sc->phy.release(sc);
12521
12522 return rv;
12523 }
12524
12525 static int
12526 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
12527 {
12528
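	/*
	 * Kumeran registers are accessed indirectly through KUMCTRLSTA:
	 * write the register offset with the read-enable (REN) bit set,
	 * wait briefly, then read the data back from the low 16 bits.
	 */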
12529 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12530 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
12531 KUMCTRLSTA_REN);
12532 CSR_WRITE_FLUSH(sc);
12533 delay(2);
12534
12535 *val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
12536
12537 return 0;
12538 }
12539
12540 /*
12541 * wm_kmrn_writereg:
12542 *
12543 * Write a kumeran register
12544 */
12545 static int
12546 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
12547 {
12548 int rv;
12549
12550 if (sc->sc_type == WM_T_80003)
12551 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12552 else
12553 rv = sc->phy.acquire(sc);
12554 if (rv != 0) {
12555 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12556 __func__);
12557 return rv;
12558 }
12559
12560 rv = wm_kmrn_writereg_locked(sc, reg, val);
12561
12562 if (sc->sc_type == WM_T_80003)
12563 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12564 else
12565 sc->phy.release(sc);
12566
12567 return rv;
12568 }
12569
12570 static int
12571 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
12572 {
12573
12574 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12575 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
12576
12577 return 0;
12578 }
12579
12580 /*
12581 * EMI register related (82579, WMPHY_I217(PCH2 and newer))
12582 * This access method is different from IEEE MMD.
12583 */
12584 static int
12585 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
12586 {
12587 struct wm_softc *sc = device_private(dev);
12588 int rv;
12589
12590 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
12591 if (rv != 0)
12592 return rv;
12593
12594 if (rd)
12595 rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
12596 else
12597 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
12598 return rv;
12599 }
12600
12601 static int
12602 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
12603 {
12604
12605 return wm_access_emi_reg_locked(dev, reg, val, true);
12606 }
12607
12608 static int
12609 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
12610 {
12611
12612 return wm_access_emi_reg_locked(dev, reg, &val, false);
12613 }
12614
12615 /* SGMII related */
12616
12617 /*
12618 * wm_sgmii_uses_mdio
12619 *
12620 * Check whether the transaction is to the internal PHY or the external
12621 * MDIO interface. Return true if it's MDIO.
12622 */
12623 static bool
12624 wm_sgmii_uses_mdio(struct wm_softc *sc)
12625 {
12626 uint32_t reg;
12627 bool ismdio = false;
12628
12629 switch (sc->sc_type) {
12630 case WM_T_82575:
12631 case WM_T_82576:
12632 reg = CSR_READ(sc, WMREG_MDIC);
12633 ismdio = ((reg & MDIC_DEST) != 0);
12634 break;
12635 case WM_T_82580:
12636 case WM_T_I350:
12637 case WM_T_I354:
12638 case WM_T_I210:
12639 case WM_T_I211:
12640 reg = CSR_READ(sc, WMREG_MDICNFG);
12641 ismdio = ((reg & MDICNFG_DEST) != 0);
12642 break;
12643 default:
12644 break;
12645 }
12646
12647 return ismdio;
12648 }
12649
12650 /* Setup internal SGMII PHY for SFP */
12651 static void
12652 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
12653 {
12654 uint16_t id1, id2, phyreg;
12655 int i, rv;
12656
12657 if (((sc->sc_flags & WM_F_SGMII) == 0)
12658 || ((sc->sc_flags & WM_F_SFP) == 0))
12659 return;
12660
12661 for (i = 0; i < MII_NPHY; i++) {
12662 sc->phy.no_errprint = true;
12663 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
12664 if (rv != 0)
12665 continue;
12666 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
12667 if (rv != 0)
12668 continue;
12669 if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
12670 continue;
12671 sc->phy.no_errprint = false;
12672
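		/*
		 * Force the Marvell PHY into SGMII (without clock) to
		 * copper mode and clear serial autoneg bypass.
		 */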
12673 sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
12674 phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
12675 phyreg |= ESSR_SGMII_WOC_COPPER;
12676 sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
12677 break;
12678 }
}
12681
12682 /*
12683 * wm_sgmii_readreg: [mii interface function]
12684 *
12685 * Read a PHY register on the SGMII
12686 * This could be handled by the PHY layer if we didn't have to lock the
12687 * resource ...
12688 */
12689 static int
12690 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
12691 {
12692 struct wm_softc *sc = device_private(dev);
12693 int rv;
12694
12695 rv = sc->phy.acquire(sc);
12696 if (rv != 0) {
12697 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12698 return rv;
12699 }
12700
12701 rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
12702
12703 sc->phy.release(sc);
12704 return rv;
12705 }
12706
12707 static int
12708 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12709 {
12710 struct wm_softc *sc = device_private(dev);
12711 uint32_t i2ccmd;
12712 int i, rv = 0;
12713
12714 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12715 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12716 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12717
12718 /* Poll the ready bit */
12719 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12720 delay(50);
12721 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12722 if (i2ccmd & I2CCMD_READY)
12723 break;
12724 }
12725 if ((i2ccmd & I2CCMD_READY) == 0) {
12726 device_printf(dev, "I2CCMD Read did not complete\n");
12727 rv = ETIMEDOUT;
12728 }
12729 if ((i2ccmd & I2CCMD_ERROR) != 0) {
12730 if (!sc->phy.no_errprint)
12731 device_printf(dev, "I2CCMD Error bit set\n");
12732 rv = EIO;
12733 }
12734
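	/* The data bytes come back swapped from the I2C interface. */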
12735 *val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
12736
12737 return rv;
12738 }
12739
12740 /*
12741 * wm_sgmii_writereg: [mii interface function]
12742 *
12743 * Write a PHY register on the SGMII.
12744 * This could be handled by the PHY layer if we didn't have to lock the
12745 * resource ...
12746 */
12747 static int
12748 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
12749 {
12750 struct wm_softc *sc = device_private(dev);
12751 int rv;
12752
12753 rv = sc->phy.acquire(sc);
12754 if (rv != 0) {
12755 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12756 return rv;
12757 }
12758
12759 rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
12760
12761 sc->phy.release(sc);
12762
12763 return rv;
12764 }
12765
12766 static int
12767 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12768 {
12769 struct wm_softc *sc = device_private(dev);
12770 uint32_t i2ccmd;
12771 uint16_t swapdata;
12772 int rv = 0;
12773 int i;
12774
12775 /* Swap the data bytes for the I2C interface */
12776 swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
12777 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12778 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
12779 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12780
12781 /* Poll the ready bit */
12782 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12783 delay(50);
12784 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12785 if (i2ccmd & I2CCMD_READY)
12786 break;
12787 }
12788 if ((i2ccmd & I2CCMD_READY) == 0) {
12789 device_printf(dev, "I2CCMD Write did not complete\n");
12790 rv = ETIMEDOUT;
12791 }
12792 if ((i2ccmd & I2CCMD_ERROR) != 0) {
12793 device_printf(dev, "I2CCMD Error bit set\n");
12794 rv = EIO;
12795 }
12796
12797 return rv;
12798 }
12799
12800 /* TBI related */
12801
12802 static bool
12803 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
12804 {
12805 bool sig;
12806
12807 sig = ctrl & CTRL_SWDPIN(1);
12808
12809 /*
12810 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
12811 * detect a signal, 1 if they don't.
12812 */
12813 if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
12814 sig = !sig;
12815
12816 return sig;
12817 }
12818
12819 /*
12820 * wm_tbi_mediainit:
12821 *
12822 * Initialize media for use on 1000BASE-X devices.
12823 */
12824 static void
12825 wm_tbi_mediainit(struct wm_softc *sc)
12826 {
12827 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12828 const char *sep = "";
12829
12830 if (sc->sc_type < WM_T_82543)
12831 sc->sc_tipg = TIPG_WM_DFLT;
12832 else
12833 sc->sc_tipg = TIPG_LG_DFLT;
12834
12835 sc->sc_tbi_serdes_anegticks = 5;
12836
12837 /* Initialize our media structures */
12838 sc->sc_mii.mii_ifp = ifp;
12839 sc->sc_ethercom.ec_mii = &sc->sc_mii;
12840
12841 ifp->if_baudrate = IF_Gbps(1);
12842 if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
12843 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12844 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12845 wm_serdes_mediachange, wm_serdes_mediastatus,
12846 sc->sc_core_lock);
12847 } else {
12848 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12849 wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
12850 }
12851
12852 /*
12853 * SWD Pins:
12854 *
12855 * 0 = Link LED (output)
12856 * 1 = Loss Of Signal (input)
12857 */
12858 sc->sc_ctrl |= CTRL_SWDPIO(0);
12859
12860 /* XXX Perhaps this is only for TBI */
12861 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12862 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
12863
12864 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
12865 sc->sc_ctrl &= ~CTRL_LRST;
12866
12867 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12868
12869 #define ADD(ss, mm, dd) \
12870 do { \
12871 aprint_normal("%s%s", sep, ss); \
12872 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
12873 sep = ", "; \
12874 } while (/*CONSTCOND*/0)
12875
12876 aprint_normal_dev(sc->sc_dev, "");
12877
12878 if (sc->sc_type == WM_T_I354) {
12879 uint32_t status;
12880
12881 status = CSR_READ(sc, WMREG_STATUS);
12882 if (((status & STATUS_2P5_SKU) != 0)
12883 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12884 ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
12885 } else
12886 ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
12887 } else if (sc->sc_type == WM_T_82545) {
12888 /* Only 82545 is LX (XXX except SFP) */
12889 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12890 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12891 } else if (sc->sc_sfptype != 0) {
12892 /* XXX wm(4) fiber/serdes don't use ifm_data */
12893 switch (sc->sc_sfptype) {
12894 default:
12895 case SFF_SFP_ETH_FLAGS_1000SX:
12896 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12897 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12898 break;
12899 case SFF_SFP_ETH_FLAGS_1000LX:
12900 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12901 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12902 break;
12903 case SFF_SFP_ETH_FLAGS_1000CX:
12904 ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
12905 ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
12906 break;
12907 case SFF_SFP_ETH_FLAGS_1000T:
12908 ADD("1000baseT", IFM_1000_T, 0);
12909 ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
12910 break;
12911 case SFF_SFP_ETH_FLAGS_100FX:
12912 ADD("100baseFX", IFM_100_FX, ANAR_TX);
12913 ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
12914 break;
12915 }
12916 } else {
12917 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12918 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12919 }
12920 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
12921 aprint_normal("\n");
12922
12923 #undef ADD
12924
12925 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
12926 }
12927
12928 /*
12929 * wm_tbi_mediachange: [ifmedia interface function]
12930 *
12931 * Set hardware to newly-selected media on a 1000BASE-X device.
12932 */
12933 static int
12934 wm_tbi_mediachange(struct ifnet *ifp)
12935 {
12936 struct wm_softc *sc = ifp->if_softc;
12937 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12938 uint32_t status, ctrl;
12939 bool signal;
12940 int i;
12941
12942 KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
12943 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12944 /* XXX need some work for >= 82571 and < 82575 */
12945 if (sc->sc_type < WM_T_82575)
12946 return 0;
12947 }
12948
12949 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12950 || (sc->sc_type >= WM_T_82575))
12951 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12952
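/* Take the link out of reset and build the autonegotiation advertisement in TXCW */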
12953 sc->sc_ctrl &= ~CTRL_LRST;
12954 sc->sc_txcw = TXCW_ANE;
12955 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12956 sc->sc_txcw |= TXCW_FD | TXCW_HD;
12957 else if (ife->ifm_media & IFM_FDX)
12958 sc->sc_txcw |= TXCW_FD;
12959 else
12960 sc->sc_txcw |= TXCW_HD;
12961
12962 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
12963 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
12964
12965 DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
12966 device_xname(sc->sc_dev), sc->sc_txcw));
12967 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12968 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12969 CSR_WRITE_FLUSH(sc);
12970 delay(1000);
12971
12972 ctrl = CSR_READ(sc, WMREG_CTRL);
12973 signal = wm_tbi_havesignal(sc, ctrl);
12974
12975 DPRINTF(sc, WM_DEBUG_LINK,
12976 ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
12977
12978 if (signal) {
12979 /* Have signal; wait for the link to come up. */
12980 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
12981 delay(10000);
12982 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
12983 break;
12984 }
12985
12986 DPRINTF(sc, WM_DEBUG_LINK,
12987 ("%s: i = %d after waiting for link\n",
12988 device_xname(sc->sc_dev), i));
12989
12990 status = CSR_READ(sc, WMREG_STATUS);
12991 DPRINTF(sc, WM_DEBUG_LINK,
12992 ("%s: status after final read = 0x%x, STATUS_LU = %#"
12993 __PRIxBIT "\n",
12994 device_xname(sc->sc_dev), status, STATUS_LU));
12995 if (status & STATUS_LU) {
12996 /* Link is up. */
12997 DPRINTF(sc, WM_DEBUG_LINK,
12998 ("%s: LINK: set media -> link up %s\n",
12999 device_xname(sc->sc_dev),
13000 (status & STATUS_FD) ? "FDX" : "HDX"));
13001
13002 /*
13003 * NOTE: The hardware updates TFCE and RFCE in CTRL automatically,
13004 * so refresh our cached sc->sc_ctrl.
13005 */
13006 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
13007 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
13008 sc->sc_fcrtl &= ~FCRTL_XONE;
13009 if (status & STATUS_FD)
13010 sc->sc_tctl |=
13011 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
13012 else
13013 sc->sc_tctl |=
13014 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
13015 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
13016 sc->sc_fcrtl |= FCRTL_XONE;
13017 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
13018 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
13019 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
13020 sc->sc_tbi_linkup = 1;
13021 } else {
13022 if (i == WM_LINKUP_TIMEOUT)
13023 wm_check_for_link(sc);
13024 /* Link is down. */
13025 DPRINTF(sc, WM_DEBUG_LINK,
13026 ("%s: LINK: set media -> link down\n",
13027 device_xname(sc->sc_dev)));
13028 sc->sc_tbi_linkup = 0;
13029 }
13030 } else {
13031 DPRINTF(sc, WM_DEBUG_LINK,
13032 ("%s: LINK: set media -> no signal\n",
13033 device_xname(sc->sc_dev)));
13034 sc->sc_tbi_linkup = 0;
13035 }
13036
13037 wm_tbi_serdes_set_linkled(sc);
13038
13039 return 0;
13040 }
13041
13042 /*
13043 * wm_tbi_mediastatus: [ifmedia interface function]
13044 *
13045 * Get the current interface media status on a 1000BASE-X device.
13046 */
13047 static void
13048 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13049 {
13050 struct wm_softc *sc = ifp->if_softc;
13051 uint32_t ctrl, status;
13052
13053 ifmr->ifm_status = IFM_AVALID;
13054 ifmr->ifm_active = IFM_ETHER;
13055
13056 status = CSR_READ(sc, WMREG_STATUS);
13057 if ((status & STATUS_LU) == 0) {
13058 ifmr->ifm_active |= IFM_NONE;
13059 return;
13060 }
13061
13062 ifmr->ifm_status |= IFM_ACTIVE;
13063 /* Only 82545 is LX */
13064 if (sc->sc_type == WM_T_82545)
13065 ifmr->ifm_active |= IFM_1000_LX;
13066 else
13067 ifmr->ifm_active |= IFM_1000_SX;
13068 if (status & STATUS_FD)
13069 ifmr->ifm_active |= IFM_FDX;
13070 else
13071 ifmr->ifm_active |= IFM_HDX;
13072 ctrl = CSR_READ(sc, WMREG_CTRL);
13073 if (ctrl & CTRL_RFCE)
13074 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
13075 if (ctrl & CTRL_TFCE)
13076 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
13077 }
13078
13079 /* XXX TBI only */
13080 static int
13081 wm_check_for_link(struct wm_softc *sc)
13082 {
13083 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
13084 uint32_t rxcw;
13085 uint32_t ctrl;
13086 uint32_t status;
13087 bool signal;
13088
13089 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
13090 device_xname(sc->sc_dev), __func__));
13091
13092 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
13093 /* XXX need some work for >= 82571 */
13094 if (sc->sc_type >= WM_T_82571) {
13095 sc->sc_tbi_linkup = 1;
13096 return 0;
13097 }
13098 }
13099
13100 rxcw = CSR_READ(sc, WMREG_RXCW);
13101 ctrl = CSR_READ(sc, WMREG_CTRL);
13102 status = CSR_READ(sc, WMREG_STATUS);
13103 signal = wm_tbi_havesignal(sc, ctrl);
13104
13105 DPRINTF(sc, WM_DEBUG_LINK,
13106 ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
13107 device_xname(sc->sc_dev), __func__, signal,
13108 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
13109
13110 /*
13111 * SWDPIN LU RXCW
13112 * 0 0 0
13113 * 0 0 1 (should not happen)
13114 * 0 1 0 (should not happen)
13115 * 0 1 1 (should not happen)
13116 * 1 0 0 Disable autonego and force linkup
13117 * 1 0 1 got /C/ but not linkup yet
13118 * 1 1 0 (linkup)
13119 * 1 1 1 If IFM_AUTO, back to autonego
13120 *
13121 */
13122 if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
13123 DPRINTF(sc, WM_DEBUG_LINK,
13124 ("%s: %s: force linkup and fullduplex\n",
13125 device_xname(sc->sc_dev), __func__));
13126 sc->sc_tbi_linkup = 0;
13127 /* Disable auto-negotiation in the TXCW register */
13128 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
13129
13130 /*
13131 * Force link-up and also force full-duplex.
13132 *
13133 * NOTE: The hardware has updated TFCE and RFCE in CTRL
13134 * automatically, so refresh our cached sc->sc_ctrl.
13135 */
13136 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
13137 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13138 } else if (((status & STATUS_LU) != 0)
13139 && ((rxcw & RXCW_C) != 0)
13140 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
13141 sc->sc_tbi_linkup = 1;
13142 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
13143 device_xname(sc->sc_dev), __func__));
13144 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13145 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
13146 } else if (signal && ((rxcw & RXCW_C) != 0)) {
13147 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
13148 device_xname(sc->sc_dev), __func__));
13149 } else {
13150 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
13151 device_xname(sc->sc_dev), __func__, rxcw, ctrl,
13152 status));
13153 }
13154
13155 return 0;
13156 }
13157
13158 /*
13159 * wm_tbi_tick:
13160 *
13161 * Check the link on TBI devices.
13162 * This function acts as mii_tick().
13163 */
13164 static void
13165 wm_tbi_tick(struct wm_softc *sc)
13166 {
13167 struct mii_data *mii = &sc->sc_mii;
13168 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13169 uint32_t status;
13170
13171 KASSERT(mutex_owned(sc->sc_core_lock));
13172
13173 status = CSR_READ(sc, WMREG_STATUS);
13174
13175 /* XXX is this needed? */
13176 (void)CSR_READ(sc, WMREG_RXCW);
13177 (void)CSR_READ(sc, WMREG_CTRL);
13178
13179 /* set link status */
13180 if ((status & STATUS_LU) == 0) {
13181 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
13182 device_xname(sc->sc_dev)));
13183 sc->sc_tbi_linkup = 0;
13184 } else if (sc->sc_tbi_linkup == 0) {
13185 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
13186 device_xname(sc->sc_dev),
13187 (status & STATUS_FD) ? "FDX" : "HDX"));
13188 sc->sc_tbi_linkup = 1;
13189 sc->sc_tbi_serdes_ticks = 0;
13190 }
13191
13192 if ((sc->sc_if_flags & IFF_UP) == 0)
13193 goto setled;
13194
13195 if ((status & STATUS_LU) == 0) {
13196 sc->sc_tbi_linkup = 0;
13197 /* If the timer expired, retry autonegotiation */
13198 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13199 && (++sc->sc_tbi_serdes_ticks
13200 >= sc->sc_tbi_serdes_anegticks)) {
13201 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13202 device_xname(sc->sc_dev), __func__));
13203 sc->sc_tbi_serdes_ticks = 0;
13204 /*
13205 * Reset the link, and let autonegotiation do
13206 * its thing
13207 */
13208 sc->sc_ctrl |= CTRL_LRST;
13209 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13210 CSR_WRITE_FLUSH(sc);
13211 delay(1000);
13212 sc->sc_ctrl &= ~CTRL_LRST;
13213 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13214 CSR_WRITE_FLUSH(sc);
13215 delay(1000);
13216 CSR_WRITE(sc, WMREG_TXCW,
13217 sc->sc_txcw & ~TXCW_ANE);
13218 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13219 }
13220 }
13221
13222 setled:
13223 wm_tbi_serdes_set_linkled(sc);
13224 }
13225
13226 /* SERDES related */
13227 static void
13228 wm_serdes_power_up_link_82575(struct wm_softc *sc)
13229 {
13230 uint32_t reg;
13231
13232 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13233 && ((sc->sc_flags & WM_F_SGMII) == 0))
13234 return;
13235
13236 /* Enable PCS to turn on link */
13237 reg = CSR_READ(sc, WMREG_PCS_CFG);
13238 reg |= PCS_CFG_PCS_EN;
13239 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
13240
13241 /* Power up the laser */
13242 reg = CSR_READ(sc, WMREG_CTRL_EXT);
13243 reg &= ~CTRL_EXT_SWDPIN(3);
13244 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13245
13246 /* Flush the write to verify completion */
13247 CSR_WRITE_FLUSH(sc);
13248 delay(1000);
13249 }
13250
13251 static int
13252 wm_serdes_mediachange(struct ifnet *ifp)
13253 {
13254 struct wm_softc *sc = ifp->if_softc;
13255 bool pcs_autoneg = true; /* XXX */
13256 uint32_t ctrl_ext, pcs_lctl, reg;
13257
13258 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13259 && ((sc->sc_flags & WM_F_SGMII) == 0))
13260 return 0;
13261
13262 /* XXX Currently, this function is not called on 8257[12] */
13263 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
13264 || (sc->sc_type >= WM_T_82575))
13265 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
13266
13267 /* Power on the SFP cage if present */
13268 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13269 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13270 ctrl_ext |= CTRL_EXT_I2C_ENA;
13271 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13272
13273 sc->sc_ctrl |= CTRL_SLU;
13274
13275 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
13276 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
13277
13278 reg = CSR_READ(sc, WMREG_CONNSW);
13279 reg |= CONNSW_ENRGSRC;
13280 CSR_WRITE(sc, WMREG_CONNSW, reg);
13281 }
13282
13283 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
13284 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
13285 case CTRL_EXT_LINK_MODE_SGMII:
13286 /* SGMII mode lets the PHY handle forcing speed/duplex */
13287 pcs_autoneg = true;
13288 /* Autoneg timeout should be disabled for SGMII mode */
13289 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
13290 break;
13291 case CTRL_EXT_LINK_MODE_1000KX:
13292 pcs_autoneg = false;
13293 /* FALLTHROUGH */
13294 default:
13295 if ((sc->sc_type == WM_T_82575)
13296 || (sc->sc_type == WM_T_82576)) {
13297 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
13298 pcs_autoneg = false;
13299 }
13300 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
13301 | CTRL_FRCFDX;
13302
13303 /* Set speed of 1000/Full if speed/duplex is forced */
13304 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
13305 }
13306 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13307
13308 pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
13309 PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
13310
13311 if (pcs_autoneg) {
13312 /* Set PCS register for autoneg */
13313 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
13314
13315 /* Disable force flow control for autoneg */
13316 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
13317
13318 /* Configure flow control advertisement for autoneg */
13319 reg = CSR_READ(sc, WMREG_PCS_ANADV);
13320 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
13321 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
13322 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
13323 } else
13324 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
13325
13326 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
13327
13328 return 0;
13329 }
13330
13331 static void
13332 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13333 {
13334 struct wm_softc *sc = ifp->if_softc;
13335 struct mii_data *mii = &sc->sc_mii;
13336 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13337 uint32_t pcs_adv, pcs_lpab, reg;
13338
13339 ifmr->ifm_status = IFM_AVALID;
13340 ifmr->ifm_active = IFM_ETHER;
13341
13342 /* Check PCS */
13343 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13344 if ((reg & PCS_LSTS_LINKOK) == 0) {
13345 ifmr->ifm_active |= IFM_NONE;
13346 sc->sc_tbi_linkup = 0;
13347 goto setled;
13348 }
13349
13350 sc->sc_tbi_linkup = 1;
13351 ifmr->ifm_status |= IFM_ACTIVE;
13352 if (sc->sc_type == WM_T_I354) {
13353 uint32_t status;
13354
13355 status = CSR_READ(sc, WMREG_STATUS);
13356 if (((status & STATUS_2P5_SKU) != 0)
13357 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
13358 ifmr->ifm_active |= IFM_2500_KX;
13359 } else
13360 ifmr->ifm_active |= IFM_1000_KX;
13361 } else {
13362 switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
13363 case PCS_LSTS_SPEED_10:
13364 ifmr->ifm_active |= IFM_10_T; /* XXX */
13365 break;
13366 case PCS_LSTS_SPEED_100:
13367 ifmr->ifm_active |= IFM_100_FX; /* XXX */
13368 break;
13369 case PCS_LSTS_SPEED_1000:
13370 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13371 break;
13372 default:
13373 device_printf(sc->sc_dev, "Unknown speed\n");
13374 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13375 break;
13376 }
13377 }
13378 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
13379 if ((reg & PCS_LSTS_FDX) != 0)
13380 ifmr->ifm_active |= IFM_FDX;
13381 else
13382 ifmr->ifm_active |= IFM_HDX;
13383 mii->mii_media_active &= ~IFM_ETH_FMASK;
13384 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
13385 /* Check flow */
13386 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13387 if ((reg & PCS_LSTS_AN_COMP) == 0) {
13388 DPRINTF(sc, WM_DEBUG_LINK,
13389 ("XXX LINKOK but not ACOMP\n"));
13390 goto setled;
13391 }
13392 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
13393 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
13394 DPRINTF(sc, WM_DEBUG_LINK,
13395 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
13396 if ((pcs_adv & TXCW_SYM_PAUSE)
13397 && (pcs_lpab & TXCW_SYM_PAUSE)) {
13398 mii->mii_media_active |= IFM_FLOW
13399 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
13400 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
13401 && (pcs_adv & TXCW_ASYM_PAUSE)
13402 && (pcs_lpab & TXCW_SYM_PAUSE)
13403 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13404 mii->mii_media_active |= IFM_FLOW
13405 | IFM_ETH_TXPAUSE;
13406 } else if ((pcs_adv & TXCW_SYM_PAUSE)
13407 && (pcs_adv & TXCW_ASYM_PAUSE)
13408 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
13409 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13410 mii->mii_media_active |= IFM_FLOW
13411 | IFM_ETH_RXPAUSE;
13412 }
13413 }
13414 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
13415 | (mii->mii_media_active & IFM_ETH_FMASK);
13416 setled:
13417 wm_tbi_serdes_set_linkled(sc);
13418 }
13419
13420 /*
13421 * wm_serdes_tick:
13422 *
13423 * Check the link on serdes devices.
13424 */
13425 static void
13426 wm_serdes_tick(struct wm_softc *sc)
13427 {
13428 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
13429 struct mii_data *mii = &sc->sc_mii;
13430 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13431 uint32_t reg;
13432
13433 KASSERT(mutex_owned(sc->sc_core_lock));
13434
13435 mii->mii_media_status = IFM_AVALID;
13436 mii->mii_media_active = IFM_ETHER;
13437
13438 /* Check PCS */
13439 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13440 if ((reg & PCS_LSTS_LINKOK) != 0) {
13441 mii->mii_media_status |= IFM_ACTIVE;
13442 sc->sc_tbi_linkup = 1;
13443 sc->sc_tbi_serdes_ticks = 0;
13444 mii->mii_media_active |= IFM_1000_SX; /* XXX */
13445 if ((reg & PCS_LSTS_FDX) != 0)
13446 mii->mii_media_active |= IFM_FDX;
13447 else
13448 mii->mii_media_active |= IFM_HDX;
13449 } else {
13450 mii->mii_media_active |= IFM_NONE;
13451 sc->sc_tbi_linkup = 0;
13452 /* If the timer expired, retry autonegotiation */
13453 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13454 && (++sc->sc_tbi_serdes_ticks
13455 >= sc->sc_tbi_serdes_anegticks)) {
13456 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13457 device_xname(sc->sc_dev), __func__));
13458 sc->sc_tbi_serdes_ticks = 0;
13459 /* XXX */
13460 wm_serdes_mediachange(ifp);
13461 }
13462 }
13463
13464 wm_tbi_serdes_set_linkled(sc);
13465 }
13466
13467 /* SFP related */
13468
13469 static int
13470 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
13471 {
13472 uint32_t i2ccmd;
13473 int i;
13474
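/* Issue an I2C read of one byte of the SFP module's EEPROM at the given offset */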
13475 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
13476 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13477
13478 /* Poll the ready bit */
13479 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13480 delay(50);
13481 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13482 if (i2ccmd & I2CCMD_READY)
13483 break;
13484 }
13485 if ((i2ccmd & I2CCMD_READY) == 0)
13486 return -1;
13487 if ((i2ccmd & I2CCMD_ERROR) != 0)
13488 return -1;
13489
13490 *data = i2ccmd & 0x00ff;
13491
13492 return 0;
13493 }
13494
13495 static uint32_t
13496 wm_sfp_get_media_type(struct wm_softc *sc)
13497 {
13498 uint32_t ctrl_ext;
13499 uint8_t val = 0;
13500 int timeout = 3;
13501 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
13502 int rv = -1;
13503
13504 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13505 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13506 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
13507 CSR_WRITE_FLUSH(sc);
13508
13509 /* Read SFP module data */
13510 while (timeout) {
13511 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
13512 if (rv == 0)
13513 break;
13514 delay(100*1000); /* XXX too big */
13515 timeout--;
13516 }
13517 if (rv != 0)
13518 goto out;
13519
13520 switch (val) {
13521 case SFF_SFP_ID_SFF:
13522 aprint_normal_dev(sc->sc_dev,
13523 "Module/Connector soldered to board\n");
13524 break;
13525 case SFF_SFP_ID_SFP:
13526 sc->sc_flags |= WM_F_SFP;
13527 break;
13528 case SFF_SFP_ID_UNKNOWN:
13529 goto out;
13530 default:
13531 break;
13532 }
13533
13534 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
13535 if (rv != 0)
13536 goto out;
13537
13538 sc->sc_sfptype = val;
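/* SX/LX modules attach as SERDES; 1000BASE-T uses SGMII copper; 100FX uses SGMII in SERDES mode */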
13539 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
13540 mediatype = WM_MEDIATYPE_SERDES;
13541 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
13542 sc->sc_flags |= WM_F_SGMII;
13543 mediatype = WM_MEDIATYPE_COPPER;
13544 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
13545 sc->sc_flags |= WM_F_SGMII;
13546 mediatype = WM_MEDIATYPE_SERDES;
13547 } else {
13548 device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
13549 __func__, sc->sc_sfptype);
13550 sc->sc_sfptype = 0; /* XXX unknown */
13551 }
13552
13553 out:
13554 /* Restore I2C interface setting */
13555 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13556
13557 return mediatype;
13558 }
13559
13560 /*
13561 * NVM related.
13562 * Microwire, SPI (w/wo EERD) and Flash.
13563 */
13564
13565 /* Both SPI and uwire */
13566
13567 /*
13568 * wm_eeprom_sendbits:
13569 *
13570 * Send a series of bits to the EEPROM.
13571 */
13572 static void
13573 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
13574 {
13575 uint32_t reg;
13576 int x;
13577
13578 reg = CSR_READ(sc, WMREG_EECD);
13579
13580 for (x = nbits; x > 0; x--) {
13581 if (bits & (1U << (x - 1)))
13582 reg |= EECD_DI;
13583 else
13584 reg &= ~EECD_DI;
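/* Drive DI with the current bit, then pulse SK high and low to clock it into the EEPROM */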
13585 CSR_WRITE(sc, WMREG_EECD, reg);
13586 CSR_WRITE_FLUSH(sc);
13587 delay(2);
13588 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13589 CSR_WRITE_FLUSH(sc);
13590 delay(2);
13591 CSR_WRITE(sc, WMREG_EECD, reg);
13592 CSR_WRITE_FLUSH(sc);
13593 delay(2);
13594 }
13595 }
13596
13597 /*
13598 * wm_eeprom_recvbits:
13599 *
13600 * Receive a series of bits from the EEPROM.
13601 */
13602 static void
13603 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
13604 {
13605 uint32_t reg, val;
13606 int x;
13607
13608 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
13609
13610 val = 0;
13611 for (x = nbits; x > 0; x--) {
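/* For each bit, MSB first: raise SK, sample DO, then lower SK again */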
13612 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13613 CSR_WRITE_FLUSH(sc);
13614 delay(2);
13615 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
13616 val |= (1U << (x - 1));
13617 CSR_WRITE(sc, WMREG_EECD, reg);
13618 CSR_WRITE_FLUSH(sc);
13619 delay(2);
13620 }
13621 *valp = val;
13622 }
13623
13624 /* Microwire */
13625
13626 /*
13627 * wm_nvm_read_uwire:
13628 *
13629 * Read a word from the EEPROM using the MicroWire protocol.
13630 */
13631 static int
13632 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13633 {
13634 uint32_t reg, val;
13635 int i, rv;
13636
13637 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13638 device_xname(sc->sc_dev), __func__));
13639
13640 rv = sc->nvm.acquire(sc);
13641 if (rv != 0)
13642 return rv;
13643
13644 for (i = 0; i < wordcnt; i++) {
13645 /* Clear SK and DI. */
13646 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
13647 CSR_WRITE(sc, WMREG_EECD, reg);
13648
13649 /*
13650 * XXX: workaround for a bug in qemu-0.12.x and prior
13651 * and Xen.
13652 *
13653 * We use this workaround only for 82540 because qemu's
13654 * e1000 act as 82540.
13655 */
13656 if (sc->sc_type == WM_T_82540) {
13657 reg |= EECD_SK;
13658 CSR_WRITE(sc, WMREG_EECD, reg);
13659 reg &= ~EECD_SK;
13660 CSR_WRITE(sc, WMREG_EECD, reg);
13661 CSR_WRITE_FLUSH(sc);
13662 delay(2);
13663 }
13664 /* XXX: end of workaround */
13665
13666 /* Set CHIP SELECT. */
13667 reg |= EECD_CS;
13668 CSR_WRITE(sc, WMREG_EECD, reg);
13669 CSR_WRITE_FLUSH(sc);
13670 delay(2);
13671
13672 /* Shift in the READ command. */
13673 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
13674
13675 /* Shift in address. */
13676 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
13677
13678 /* Shift out the data. */
13679 wm_eeprom_recvbits(sc, &val, 16);
13680 data[i] = val & 0xffff;
13681
13682 /* Clear CHIP SELECT. */
13683 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
13684 CSR_WRITE(sc, WMREG_EECD, reg);
13685 CSR_WRITE_FLUSH(sc);
13686 delay(2);
13687 }
13688
13689 sc->nvm.release(sc);
13690 return 0;
13691 }
13692
13693 /* SPI */
13694
13695 /*
13696 * Set SPI and FLASH related information from the EECD register.
13697 * For 82541 and 82547, the word size is taken from EEPROM.
13698 */
13699 static int
13700 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
13701 {
13702 int size;
13703 uint32_t reg;
13704 uint16_t data;
13705
13706 reg = CSR_READ(sc, WMREG_EECD);
13707 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
13708
13709 /* Read the size of NVM from EECD by default */
13710 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13711 switch (sc->sc_type) {
13712 case WM_T_82541:
13713 case WM_T_82541_2:
13714 case WM_T_82547:
13715 case WM_T_82547_2:
13716 /* Set dummy value to access EEPROM */
13717 sc->sc_nvm_wordsize = 64;
13718 if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
13719 aprint_error_dev(sc->sc_dev,
13720 "%s: failed to read EEPROM size\n", __func__);
13721 }
13722 reg = data;
13723 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13724 if (size == 0)
13725 size = 6; /* 64 word size */
13726 else
13727 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
13728 break;
13729 case WM_T_80003:
13730 case WM_T_82571:
13731 case WM_T_82572:
13732 case WM_T_82573: /* SPI case */
13733 case WM_T_82574: /* SPI case */
13734 case WM_T_82583: /* SPI case */
13735 size += NVM_WORD_SIZE_BASE_SHIFT;
13736 if (size > 14)
13737 size = 14;
13738 break;
13739 case WM_T_82575:
13740 case WM_T_82576:
13741 case WM_T_82580:
13742 case WM_T_I350:
13743 case WM_T_I354:
13744 case WM_T_I210:
13745 case WM_T_I211:
13746 size += NVM_WORD_SIZE_BASE_SHIFT;
13747 if (size > 15)
13748 size = 15;
13749 break;
13750 default:
13751 aprint_error_dev(sc->sc_dev,
13752 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
13753 return -1;
13755 }
13756
13757 sc->sc_nvm_wordsize = 1 << size;
13758
13759 return 0;
13760 }
13761
13762 /*
13763 * wm_nvm_ready_spi:
13764 *
13765 * Wait for a SPI EEPROM to be ready for commands.
13766 */
13767 static int
13768 wm_nvm_ready_spi(struct wm_softc *sc)
13769 {
13770 uint32_t val;
13771 int usec;
13772
13773 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13774 device_xname(sc->sc_dev), __func__));
13775
13776 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
13777 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
13778 wm_eeprom_recvbits(sc, &val, 8);
13779 if ((val & SPI_SR_RDY) == 0)
13780 break;
13781 }
13782 if (usec >= SPI_MAX_RETRIES) {
13783 aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
13784 return -1;
13785 }
13786 return 0;
13787 }
13788
13789 /*
13790 * wm_nvm_read_spi:
13791 *
13792 * Read a word from the EEPROM using the SPI protocol.
13793 */
13794 static int
13795 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13796 {
13797 uint32_t reg, val;
13798 int i;
13799 uint8_t opc;
13800 int rv;
13801
13802 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13803 device_xname(sc->sc_dev), __func__));
13804
13805 rv = sc->nvm.acquire(sc);
13806 if (rv != 0)
13807 return rv;
13808
13809 /* Clear SK and CS. */
13810 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
13811 CSR_WRITE(sc, WMREG_EECD, reg);
13812 CSR_WRITE_FLUSH(sc);
13813 delay(2);
13814
13815 if ((rv = wm_nvm_ready_spi(sc)) != 0)
13816 goto out;
13817
13818 /* Toggle CS to flush commands. */
13819 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
13820 CSR_WRITE_FLUSH(sc);
13821 delay(2);
13822 CSR_WRITE(sc, WMREG_EECD, reg);
13823 CSR_WRITE_FLUSH(sc);
13824 delay(2);
13825
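/* Parts with 8 address bits use the A8 opcode bit as address bit 8 for words >= 128 */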
13826 opc = SPI_OPC_READ;
13827 if (sc->sc_nvm_addrbits == 8 && word >= 128)
13828 opc |= SPI_OPC_A8;
13829
13830 wm_eeprom_sendbits(sc, opc, 8);
13831 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
13832
13833 for (i = 0; i < wordcnt; i++) {
13834 wm_eeprom_recvbits(sc, &val, 16);
13835 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
13836 }
13837
13838 /* Raise CS and clear SK. */
13839 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
13840 CSR_WRITE(sc, WMREG_EECD, reg);
13841 CSR_WRITE_FLUSH(sc);
13842 delay(2);
13843
13844 out:
13845 sc->nvm.release(sc);
13846 return rv;
13847 }
13848
13849 /* Using with EERD */
13850
13851 static int
13852 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
13853 {
13854 uint32_t attempts = 100000;
13855 uint32_t i, reg = 0;
13856 int32_t done = -1;
13857
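/* Poll the DONE bit for up to 500ms (100000 attempts * 5us) */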
13858 for (i = 0; i < attempts; i++) {
13859 reg = CSR_READ(sc, rw);
13860
13861 if (reg & EERD_DONE) {
13862 done = 0;
13863 break;
13864 }
13865 delay(5);
13866 }
13867
13868 return done;
13869 }
13870
13871 static int
13872 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
13873 {
13874 int i, eerd = 0;
13875 int rv;
13876
13877 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13878 device_xname(sc->sc_dev), __func__));
13879
13880 rv = sc->nvm.acquire(sc);
13881 if (rv != 0)
13882 return rv;
13883
13884 for (i = 0; i < wordcnt; i++) {
13885 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
13886 CSR_WRITE(sc, WMREG_EERD, eerd);
13887 rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
13888 if (rv != 0) {
13889 aprint_error_dev(sc->sc_dev, "EERD polling failed: "
13890 "offset=%d. wordcnt=%d\n", offset, wordcnt);
13891 break;
13892 }
13893 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
13894 }
13895
13896 sc->nvm.release(sc);
13897 return rv;
13898 }
13899
13900 /* Flash */
13901
13902 static int
13903 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
13904 {
13905 uint32_t eecd;
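/* Byte offset of the high byte of the NVM signature word */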
13906 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
13907 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
13908 uint32_t nvm_dword = 0;
13909 uint8_t sig_byte = 0;
13910 int rv;
13911
13912 switch (sc->sc_type) {
13913 case WM_T_PCH_SPT:
13914 case WM_T_PCH_CNP:
13915 bank1_offset = sc->sc_ich8_flash_bank_size * 2;
13916 act_offset = ICH_NVM_SIG_WORD * 2;
13917
13918 /* Set bank to 0 in case flash read fails. */
13919 *bank = 0;
13920
13921 /* Check bank 0 */
13922 rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
13923 if (rv != 0)
13924 return rv;
13925 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13926 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13927 *bank = 0;
13928 return 0;
13929 }
13930
13931 /* Check bank 1 */
13932 rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
13933 &nvm_dword);
if (rv != 0)
return rv;
13934 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13935 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13936 *bank = 1;
13937 return 0;
13938 }
13939 aprint_error_dev(sc->sc_dev,
13940 "%s: no valid NVM bank present (%u)\n", __func__, *bank);
13941 return -1;
13942 case WM_T_ICH8:
13943 case WM_T_ICH9:
13944 eecd = CSR_READ(sc, WMREG_EECD);
13945 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
13946 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
13947 return 0;
13948 }
13949 /* FALLTHROUGH */
13950 default:
13951 /* Default to 0 */
13952 *bank = 0;
13953
13954 /* Check bank 0 */
13955 wm_read_ich8_byte(sc, act_offset, &sig_byte);
13956 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13957 *bank = 0;
13958 return 0;
13959 }
13960
13961 /* Check bank 1 */
13962 wm_read_ich8_byte(sc, act_offset + bank1_offset,
13963 &sig_byte);
13964 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13965 *bank = 1;
13966 return 0;
13967 }
13968 }
13969
13970 DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
13971 device_xname(sc->sc_dev)));
13972 return -1;
13973 }
13974
13975 /******************************************************************************
13976 * This function does initial flash setup so that a new read/write/erase cycle
13977 * can be started.
13978 *
13979 * sc - The pointer to the hw structure
13980 ****************************************************************************/
13981 static int32_t
13982 wm_ich8_cycle_init(struct wm_softc *sc)
13983 {
13984 uint16_t hsfsts;
13985 int32_t error = 1;
13986 int32_t i = 0;
13987
13988 if (sc->sc_type >= WM_T_PCH_SPT)
13989 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
13990 else
13991 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13992
13993 /* Check the Flash Descriptor Valid bit in HW status */
13994 if ((hsfsts & HSFSTS_FLDVAL) == 0)
13995 return error;
13996
13997 /* Clear FCERR in Hw status by writing 1 */
13998 /* Clear DAEL in Hw status by writing a 1 */
13999 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
14000
14001 if (sc->sc_type >= WM_T_PCH_SPT)
14002 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
14003 else
14004 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14005
14006 /*
14007 * Either we should have a hardware SPI cycle-in-progress bit to
14008 * check against in order to start a new cycle, or the FDONE bit
14009 * should be changed in the hardware so that it is 1 after a
14010 * hardware reset, which could then indicate whether a cycle is in
14011 * progress or has been completed. We should also have some software
14012 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
14013 * so that accesses to those bits by two threads are serialized and
14014 * two threads don't start a cycle at the same time.
14015 */
14016
14017 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14018 /*
14019 * There is no cycle running at present, so we can start a
14020 * cycle
14021 */
14022
14023 /* Begin by setting Flash Cycle Done. */
14024 hsfsts |= HSFSTS_DONE;
14025 if (sc->sc_type >= WM_T_PCH_SPT)
14026 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14027 hsfsts & 0xffffUL);
14028 else
14029 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14030 error = 0;
14031 } else {
14032 /*
14033 * Otherwise poll for some time so the current cycle has a
14034 * chance to end before giving up.
14035 */
14036 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
14037 if (sc->sc_type >= WM_T_PCH_SPT)
14038 hsfsts = ICH8_FLASH_READ32(sc,
14039 ICH_FLASH_HSFSTS) & 0xffffUL;
14040 else
14041 hsfsts = ICH8_FLASH_READ16(sc,
14042 ICH_FLASH_HSFSTS);
14043 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14044 error = 0;
14045 break;
14046 }
14047 delay(1);
14048 }
14049 if (error == 0) {
14050 /*
14051 * Successful in waiting for previous cycle to timeout,
14052 * now set the Flash Cycle Done.
14053 */
14054 hsfsts |= HSFSTS_DONE;
14055 if (sc->sc_type >= WM_T_PCH_SPT)
14056 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14057 hsfsts & 0xffffUL);
14058 else
14059 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
14060 hsfsts);
14061 }
14062 }
14063 return error;
14064 }
14065
14066 /******************************************************************************
14067 * This function starts a flash cycle and waits for its completion
14068 *
14069 * sc - The pointer to the hw structure
14070 ****************************************************************************/
14071 static int32_t
14072 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
14073 {
14074 uint16_t hsflctl;
14075 uint16_t hsfsts;
14076 int32_t error = 1;
14077 uint32_t i = 0;
14078
14079 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
14080 if (sc->sc_type >= WM_T_PCH_SPT)
14081 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
14082 else
14083 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14084 hsflctl |= HSFCTL_GO;
14085 if (sc->sc_type >= WM_T_PCH_SPT)
14086 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14087 (uint32_t)hsflctl << 16);
14088 else
14089 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14090
14091 /* Wait till FDONE bit is set to 1 */
14092 do {
14093 if (sc->sc_type >= WM_T_PCH_SPT)
14094 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14095 & 0xffffUL;
14096 else
14097 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14098 if (hsfsts & HSFSTS_DONE)
14099 break;
14100 delay(1);
14101 i++;
14102 } while (i < timeout);
14103 if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
14104 error = 0;
14105
14106 return error;
14107 }
14108
14109 /******************************************************************************
14110 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
14111 *
14112 * sc - The pointer to the hw structure
14113 * index - The index of the byte or word to read.
14114 * size - Size of data to read, 1=byte 2=word, 4=dword
14115 * data - Pointer to the word to store the value read.
14116 *****************************************************************************/
14117 static int32_t
14118 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
14119 uint32_t size, uint32_t *data)
14120 {
14121 uint16_t hsfsts;
14122 uint16_t hsflctl;
14123 uint32_t flash_linear_address;
14124 uint32_t flash_data = 0;
14125 int32_t error = 1;
14126 int32_t count = 0;
14127
14128 if (size < 1 || size > 4 || data == NULL ||
14129 index > ICH_FLASH_LINEAR_ADDR_MASK)
14130 return error;
14131
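/* Compute the absolute flash address of the requested data within the NVM region */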
14132 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
14133 sc->sc_ich8_flash_base;
14134
14135 do {
14136 delay(1);
14137 /* Steps */
14138 error = wm_ich8_cycle_init(sc);
14139 if (error)
14140 break;
14141
14142 if (sc->sc_type >= WM_T_PCH_SPT)
14143 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14144 >> 16;
14145 else
14146 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14147 /* BCOUNT holds size - 1: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes. */
14148 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
14149 & HSFCTL_BCOUNT_MASK;
14150 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
14151 if (sc->sc_type >= WM_T_PCH_SPT) {
14152 /*
14153 * In SPT, this register is in LAN memory space, not
14154 * flash. Therefore only 32-bit access is supported.
14155 */
14156 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14157 (uint32_t)hsflctl << 16);
14158 } else
14159 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14160
14161 /*
14162 * Write the last 24 bits of index into Flash Linear address
14163 * field in Flash Address
14164 */
14165 /* TODO: TBD maybe check the index against the size of flash */
14166
14167 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
14168
14169 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
14170
14171 /*
14172 * Check if FCERR is set to 1. If so, clear it and retry
14173 * the whole sequence a few more times; otherwise read in
14174 * (shift in) the Flash Data0 register, least significant
14175 * byte first.
14176 */
14177 if (error == 0) {
14178 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
14179 if (size == 1)
14180 *data = (uint8_t)(flash_data & 0x000000FF);
14181 else if (size == 2)
14182 *data = (uint16_t)(flash_data & 0x0000FFFF);
14183 else if (size == 4)
14184 *data = (uint32_t)flash_data;
14185 break;
14186 } else {
14187 /*
14188 * If we've gotten here, then things are probably
14189 * completely hosed, but if the error condition is
14190 * detected, it won't hurt to give it another try...
14191 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
14192 */
14193 if (sc->sc_type >= WM_T_PCH_SPT)
14194 hsfsts = ICH8_FLASH_READ32(sc,
14195 ICH_FLASH_HSFSTS) & 0xffffUL;
14196 else
14197 hsfsts = ICH8_FLASH_READ16(sc,
14198 ICH_FLASH_HSFSTS);
14199
14200 if (hsfsts & HSFSTS_ERR) {
14201 /* Repeat for some time before giving up. */
14202 continue;
14203 } else if ((hsfsts & HSFSTS_DONE) == 0)
14204 break;
14205 }
14206 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
14207
14208 return error;
14209 }
14210
14211 /******************************************************************************
14212 * Reads a single byte from the NVM using the ICH8 flash access registers.
14213 *
14214 * sc - pointer to wm_hw structure
14215 * index - The index of the byte to read.
14216 * data - Pointer to a byte to store the value read.
14217 *****************************************************************************/
14218 static int32_t
14219 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
14220 {
14221 int32_t status;
14222 uint32_t word = 0;
14223
14224 status = wm_read_ich8_data(sc, index, 1, &word);
14225 if (status == 0)
14226 *data = (uint8_t)word;
14227 else
14228 *data = 0;
14229
14230 return status;
14231 }
14232
14233 /******************************************************************************
14234 * Reads a word from the NVM using the ICH8 flash access registers.
14235 *
14236 * sc - pointer to wm_hw structure
14237 * index - The starting byte index of the word to read.
14238 * data - Pointer to a word to store the value read.
14239 *****************************************************************************/
14240 static int32_t
14241 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
14242 {
14243 int32_t status;
14244 uint32_t word = 0;
14245
14246 status = wm_read_ich8_data(sc, index, 2, &word);
14247 if (status == 0)
14248 *data = (uint16_t)word;
14249 else
14250 *data = 0;
14251
14252 return status;
14253 }
14254
14255 /******************************************************************************
14256 * Reads a dword from the NVM using the ICH8 flash access registers.
14257 *
14258 * sc - pointer to wm_hw structure
14259 * index - The starting byte index of the word to read.
14260 * data - Pointer to a word to store the value read.
14261 *****************************************************************************/
14262 static int32_t
14263 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
14264 {
14265 int32_t status;
14266
14267 status = wm_read_ich8_data(sc, index, 4, data);
14268 return status;
14269 }
14270
14271 /******************************************************************************
14272 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
14273 * register.
14274 *
14275 * sc - Struct containing variables accessed by shared code
14276 * offset - offset of word in the EEPROM to read
14277 * data - word read from the EEPROM
14278 * words - number of words to read
14279 *****************************************************************************/
14280 static int
14281 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
14282 {
14283 int rv;
14284 uint32_t flash_bank = 0;
14285 uint32_t act_offset = 0;
14286 uint32_t bank_offset = 0;
14287 uint16_t word = 0;
14288 uint16_t i = 0;
14289
14290 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14291 device_xname(sc->sc_dev), __func__));
14292
14293 rv = sc->nvm.acquire(sc);
14294 if (rv != 0)
14295 return rv;
14296
14297 /*
14298 * We need to know which is the valid flash bank. In the event
14299 * that we didn't allocate eeprom_shadow_ram, we may not be
14300 * managing flash_bank. So it cannot be trusted and needs
14301 * to be updated with each read.
14302 */
14303 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14304 if (rv) {
14305 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14306 device_xname(sc->sc_dev)));
14307 flash_bank = 0;
14308 }
14309
14310 /*
14311 * Adjust offset appropriately if we're on bank 1 - adjust for word
14312 * size
14313 */
14314 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14315
14316 for (i = 0; i < words; i++) {
14317 /* The NVM part needs a byte offset, hence * 2 */
14318 act_offset = bank_offset + ((offset + i) * 2);
14319 rv = wm_read_ich8_word(sc, act_offset, &word);
14320 if (rv) {
14321 aprint_error_dev(sc->sc_dev,
14322 "%s: failed to read NVM\n", __func__);
14323 break;
14324 }
14325 data[i] = word;
14326 }
14327
14328 sc->nvm.release(sc);
14329 return rv;
14330 }
14331
14332 /******************************************************************************
14333 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
14334 * register.
14335 *
14336 * sc - Struct containing variables accessed by shared code
14337 * offset - offset of word in the EEPROM to read
14338 * data - word read from the EEPROM
14339 * words - number of words to read
14340 *****************************************************************************/
14341 static int
14342 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
14343 {
14344 int rv;
14345 uint32_t flash_bank = 0;
14346 uint32_t act_offset = 0;
14347 uint32_t bank_offset = 0;
14348 uint32_t dword = 0;
14349 uint16_t i = 0;
14350
14351 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14352 device_xname(sc->sc_dev), __func__));
14353
14354 rv = sc->nvm.acquire(sc);
14355 if (rv != 0)
14356 return rv;
14357
14358 /*
14359 * We need to know which is the valid flash bank. In the event
14360 * that we didn't allocate eeprom_shadow_ram, we may not be
14361 * managing flash_bank. So it cannot be trusted and needs
14362 * to be updated with each read.
14363 */
14364 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14365 if (rv) {
14366 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14367 device_xname(sc->sc_dev)));
14368 flash_bank = 0;
14369 }
14370
14371 /*
14372 * Adjust offset appropriately if we're on bank 1 - adjust for word
14373 * size
14374 */
14375 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14376
14377 for (i = 0; i < words; i++) {
14378 /* The NVM part needs a byte offset, hence * 2 */
14379 act_offset = bank_offset + ((offset + i) * 2);
14380 /* but we must read dword aligned, so mask ... */
14381 rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
14382 if (rv) {
14383 aprint_error_dev(sc->sc_dev,
14384 "%s: failed to read NVM\n", __func__);
14385 break;
14386 }
14387 /* ... and pick out low or high word */
14388 if ((act_offset & 0x2) == 0)
14389 data[i] = (uint16_t)(dword & 0xFFFF);
14390 else
14391 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
14392 }
14393
14394 sc->nvm.release(sc);
14395 return rv;
14396 }
14397
14398 /* iNVM */
14399
14400 static int
14401 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
14402 {
14403 int32_t rv = -1; /* Return an error if the word is not found */
14404 uint32_t invm_dword;
14405 uint16_t i;
14406 uint8_t record_type, word_address;
14407
14408 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14409 device_xname(sc->sc_dev), __func__));
14410
14411 for (i = 0; i < INVM_SIZE; i++) {
14412 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
14413 /* Get record type */
14414 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
14415 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
14416 break;
14417 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
14418 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
14419 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
14420 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
14421 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
14422 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
14423 if (word_address == address) {
14424 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
14425 rv = 0;
14426 break;
14427 }
14428 }
14429 }
14430
14431 return rv;
14432 }
14433
14434 static int
14435 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
14436 {
14437 int i, rv;
14438
14439 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14440 device_xname(sc->sc_dev), __func__));
14441
14442 rv = sc->nvm.acquire(sc);
14443 if (rv != 0)
14444 return rv;
14445
14446 for (i = 0; i < words; i++) {
14447 switch (offset + i) {
14448 case NVM_OFF_MACADDR:
14449 case NVM_OFF_MACADDR1:
14450 case NVM_OFF_MACADDR2:
14451 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14452 if (rv != 0) {
14453 data[i] = 0xffff;
14454 rv = -1;
14455 }
14456 break;
14457 case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
14458 rv = wm_nvm_read_word_invm(sc, offset, data);
14459 if (rv != 0) {
14460 *data = INVM_DEFAULT_AL;
14461 rv = 0;
14462 }
14463 break;
14464 case NVM_OFF_CFG2:
14465 rv = wm_nvm_read_word_invm(sc, offset, data);
14466 if (rv != 0) {
14467 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
14468 rv = 0;
14469 }
14470 break;
14471 case NVM_OFF_CFG4:
14472 rv = wm_nvm_read_word_invm(sc, offset, data);
14473 if (rv != 0) {
14474 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
14475 rv = 0;
14476 }
14477 break;
14478 case NVM_OFF_LED_1_CFG:
14479 rv = wm_nvm_read_word_invm(sc, offset, data);
14480 if (rv != 0) {
14481 *data = NVM_LED_1_CFG_DEFAULT_I211;
14482 rv = 0;
14483 }
14484 break;
14485 case NVM_OFF_LED_0_2_CFG:
14486 rv = wm_nvm_read_word_invm(sc, offset, data);
14487 if (rv != 0) {
14488 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
14489 rv = 0;
14490 }
14491 break;
14492 case NVM_OFF_ID_LED_SETTINGS:
14493 rv = wm_nvm_read_word_invm(sc, offset, data);
14494 if (rv != 0) {
14495 *data = ID_LED_RESERVED_FFFF;
14496 rv = 0;
14497 }
14498 break;
14499 default:
14500 DPRINTF(sc, WM_DEBUG_NVM,
14501 ("NVM word 0x%02x is not mapped.\n", offset));
14502 *data = NVM_RESERVED_WORD;
14503 break;
14504 }
14505 }
14506
14507 sc->nvm.release(sc);
14508 return rv;
14509 }
14510
14511 /* Lock, detecting NVM type, validate checksum, version and read */
14512
14513 static int
14514 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
14515 {
14516 uint32_t eecd = 0;
14517
14518 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
14519 || sc->sc_type == WM_T_82583) {
14520 eecd = CSR_READ(sc, WMREG_EECD);
14521
14522 /* Isolate bits 15 & 16 */
14523 eecd = ((eecd >> 15) & 0x03);
14524
14525 /* If both bits are set, device is Flash type */
14526 if (eecd == 0x03)
14527 return 0;
14528 }
14529 return 1;
14530 }
14531
14532 static int
14533 wm_nvm_flash_presence_i210(struct wm_softc *sc)
14534 {
14535 uint32_t eec;
14536
14537 eec = CSR_READ(sc, WMREG_EEC);
14538 if ((eec & EEC_FLASH_DETECTED) != 0)
14539 return 1;
14540
14541 return 0;
14542 }
14543
14544 /*
14545 * wm_nvm_validate_checksum
14546 *
14547 * The checksum is defined as the sum of the first 64 (16 bit) words.
14548 */
14549 static int
14550 wm_nvm_validate_checksum(struct wm_softc *sc)
14551 {
14552 uint16_t checksum;
14553 uint16_t eeprom_data;
14554 #ifdef WM_DEBUG
14555 uint16_t csum_wordaddr, valid_checksum;
14556 #endif
14557 int i;
14558
14559 checksum = 0;
14560
14561 /* Don't check for I211 */
14562 if (sc->sc_type == WM_T_I211)
14563 return 0;
14564
14565 #ifdef WM_DEBUG
14566 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
14567 || (sc->sc_type == WM_T_PCH_CNP)) {
14568 csum_wordaddr = NVM_OFF_COMPAT;
14569 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
14570 } else {
14571 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
14572 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
14573 }
14574
14575 /* Dump EEPROM image for debug */
14576 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
14577 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
14578 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
14579 /* XXX PCH_SPT? */
14580 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
14581 if ((eeprom_data & valid_checksum) == 0)
14582 DPRINTF(sc, WM_DEBUG_NVM,
14583 ("%s: NVM need to be updated (%04x != %04x)\n",
14584 device_xname(sc->sc_dev), eeprom_data,
14585 valid_checksum));
14586 }
14587
14588 if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
14589 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
14590 for (i = 0; i < NVM_SIZE; i++) {
14591 if (wm_nvm_read(sc, i, 1, &eeprom_data))
14592 printf("XXXX ");
14593 else
14594 printf("%04hx ", eeprom_data);
14595 if (i % 8 == 7)
14596 printf("\n");
14597 }
14598 }
14599
14600 #endif /* WM_DEBUG */
14601
14602 for (i = 0; i < NVM_SIZE; i++) {
14603 if (wm_nvm_read(sc, i, 1, &eeprom_data))
14604 return -1;
14605 checksum += eeprom_data;
14606 }
14607
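/* A checksum mismatch is only reported (under WM_DEBUG); it is not treated as fatal */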
14608 if (checksum != (uint16_t) NVM_CHECKSUM) {
14609 #ifdef WM_DEBUG
14610 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
14611 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
14612 #endif
14613 }
14614
14615 return 0;
14616 }
14617
14618 static void
14619 wm_nvm_version_invm(struct wm_softc *sc)
14620 {
14621 uint32_t dword;
14622
14623 /*
14624 * Linux's code to decode version is very strange, so we don't
14625 * obey that algorithm and just use word 61 as the document describes.
14626 * Perhaps it's not perfect though...
14627 *
14628 * Example:
14629 *
14630 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
14631 */
14632 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
14633 dword = __SHIFTOUT(dword, INVM_VER_1);
14634 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
14635 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
14636 }
14637
14638 static void
14639 wm_nvm_version(struct wm_softc *sc)
14640 {
14641 uint16_t major, minor, build, patch;
14642 uint16_t uid0, uid1;
14643 uint16_t nvm_data;
14644 uint16_t off;
14645 bool check_version = false;
14646 bool check_optionrom = false;
14647 bool have_build = false;
14648 bool have_uid = true;
14649
14650 /*
14651 * Version format:
14652 *
14653 * XYYZ
14654 * X0YZ
14655 * X0YY
14656 *
14657 * Example:
14658 *
14659 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
14660 * 82571 0x50a6 5.10.6?
14661 * 82572 0x506a 5.6.10?
14662 * 82572EI 0x5069 5.6.9?
14663 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
14664 * 0x2013 2.1.3?
14665 * 82583 0x10a0 1.10.0? (document says it's the default value)
14666 * ICH8+82567 0x0040 0.4.0?
14667 * ICH9+82566 0x1040 1.4.0?
14668 * ICH10+82567 0x0043 0.4.3?
14669 * PCH+82577 0x00c1 0.12.1?
14670 * PCH2+82579 0x00d3 0.13.3?
14671 * 0x00d4 0.13.4?
14672 * LPT+I218 0x0023 0.2.3?
14673 * SPT+I219 0x0084 0.8.4?
14674 * CNP+I219 0x0054 0.5.4?
14675 */
14676
14677 /*
14678 * XXX
14679 * QEMU's e1000e emulation (82574L) has an SPI ROM of only 64 words.
14680 * I've never seen real 82574 hardware with such a small SPI ROM.
14681 */
14682 if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
14683 || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
14684 have_uid = false;
14685
14686 switch (sc->sc_type) {
14687 case WM_T_82571:
14688 case WM_T_82572:
14689 case WM_T_82574:
14690 case WM_T_82583:
14691 check_version = true;
14692 check_optionrom = true;
14693 have_build = true;
14694 break;
14695 case WM_T_ICH8:
14696 case WM_T_ICH9:
14697 case WM_T_ICH10:
14698 case WM_T_PCH:
14699 case WM_T_PCH2:
14700 case WM_T_PCH_LPT:
14701 case WM_T_PCH_SPT:
14702 case WM_T_PCH_CNP:
14703 check_version = true;
14704 have_build = true;
14705 have_uid = false;
14706 break;
14707 case WM_T_82575:
14708 case WM_T_82576:
14709 case WM_T_82580:
14710 if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
14711 check_version = true;
14712 break;
14713 case WM_T_I211:
14714 wm_nvm_version_invm(sc);
14715 have_uid = false;
14716 goto printver;
14717 case WM_T_I210:
14718 if (!wm_nvm_flash_presence_i210(sc)) {
14719 wm_nvm_version_invm(sc);
14720 have_uid = false;
14721 goto printver;
14722 }
14723 /* FALLTHROUGH */
14724 case WM_T_I350:
14725 case WM_T_I354:
14726 check_version = true;
14727 check_optionrom = true;
14728 break;
14729 default:
14730 return;
14731 }
14732 if (check_version
14733 && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
14734 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
14735 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
14736 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
14737 build = nvm_data & NVM_BUILD_MASK;
14738 have_build = true;
14739 } else
14740 minor = nvm_data & 0x00ff;
14741
14742 /* Convert the BCD-encoded minor number to decimal */
14743 minor = (minor / 16) * 10 + (minor % 16);
14744 sc->sc_nvm_ver_major = major;
14745 sc->sc_nvm_ver_minor = minor;
14746
14747 printver:
14748 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
14749 sc->sc_nvm_ver_minor);
14750 if (have_build) {
14751 sc->sc_nvm_ver_build = build;
14752 aprint_verbose(".%d", build);
14753 }
14754 }
14755
14756 /* Assume the Option ROM area is above NVM_SIZE */
14757 if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
14758 && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
14759 /* Option ROM Version */
14760 if ((off != 0x0000) && (off != 0xffff)) {
14761 int rv;
14762
14763 off += NVM_COMBO_VER_OFF;
14764 rv = wm_nvm_read(sc, off + 1, 1, &uid1);
14765 rv |= wm_nvm_read(sc, off, 1, &uid0);
14766 if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
14767 && (uid1 != 0) && (uid1 != 0xffff)) {
14768 /* 16bits */
14769 major = uid0 >> 8;
14770 build = (uid0 << 8) | (uid1 >> 8);
14771 patch = uid1 & 0x00ff;
14772 aprint_verbose(", option ROM Version %d.%d.%d",
14773 major, build, patch);
14774 }
14775 }
14776 }
14777
14778 if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
14779 aprint_verbose(", Image Unique ID %08x",
14780 ((uint32_t)uid1 << 16) | uid0);
14781 }
14782
14783 /*
14784 * wm_nvm_read:
14785 *
14786 * Read data from the serial EEPROM.
14787 */
14788 static int
14789 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14790 {
14791 int rv;
14792
14793 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14794 device_xname(sc->sc_dev), __func__));
14795
14796 if (sc->sc_flags & WM_F_EEPROM_INVALID)
14797 return -1;
14798
14799 rv = sc->nvm.read(sc, word, wordcnt, data);
14800
14801 return rv;
14802 }
14803
14804 /*
14805 * Hardware semaphores.
14806 * Very complex...
14807 */
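/*
 * All of the get/put pairs below follow the same pattern, and callers
 * normally go through the function pointers, e.g.:
 *
 *	if (sc->phy.acquire(sc) != 0)
 *		return;
 *	... access PHY or NVM registers ...
 *	sc->phy.release(sc);
 */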
14808
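/* The null functions are for chips that need no locking at all. */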
14809 static int
14810 wm_get_null(struct wm_softc *sc)
14811 {
14812
14813 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14814 device_xname(sc->sc_dev), __func__));
14815 return 0;
14816 }
14817
14818 static void
14819 wm_put_null(struct wm_softc *sc)
14820 {
14821
14822 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14823 device_xname(sc->sc_dev), __func__));
14824 return;
14825 }
14826
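/*
 * wm_get_eecd:
 *
 *	Request direct EEPROM access through the EECD register and wait
 *	for the grant bit (EECD_EE_GNT).
 */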
14827 static int
14828 wm_get_eecd(struct wm_softc *sc)
14829 {
14830 uint32_t reg;
14831 int x;
14832
14833 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14834 device_xname(sc->sc_dev), __func__));
14835
14836 reg = CSR_READ(sc, WMREG_EECD);
14837
14838 /* Request EEPROM access. */
14839 reg |= EECD_EE_REQ;
14840 CSR_WRITE(sc, WMREG_EECD, reg);
14841
14842 /* ... and wait for it to be granted. */
14843 for (x = 0; x < 1000; x++) {
14844 reg = CSR_READ(sc, WMREG_EECD);
14845 if (reg & EECD_EE_GNT)
14846 break;
14847 delay(5);
14848 }
14849 if ((reg & EECD_EE_GNT) == 0) {
14850 aprint_error_dev(sc->sc_dev,
14851 "could not acquire EEPROM GNT\n");
14852 reg &= ~EECD_EE_REQ;
14853 CSR_WRITE(sc, WMREG_EECD, reg);
14854 return -1;
14855 }
14856
14857 return 0;
14858 }
14859
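/*
 * Raise/lower the EEPROM serial clock (EECD_SK): SPI parts use a 1us
 * delay, Microwire parts 50us.
 */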
14860 static void
14861 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
14862 {
14863
14864 *eecd |= EECD_SK;
14865 CSR_WRITE(sc, WMREG_EECD, *eecd);
14866 CSR_WRITE_FLUSH(sc);
14867 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14868 delay(1);
14869 else
14870 delay(50);
14871 }
14872
14873 static void
14874 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
14875 {
14876
14877 *eecd &= ~EECD_SK;
14878 CSR_WRITE(sc, WMREG_EECD, *eecd);
14879 CSR_WRITE_FLUSH(sc);
14880 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14881 delay(1);
14882 else
14883 delay(50);
14884 }
14885
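/*
 * wm_put_eecd:
 *
 *	Stop the NVM (deassert chip select) and drop the EEPROM access
 *	request bit.
 */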
14886 static void
14887 wm_put_eecd(struct wm_softc *sc)
14888 {
14889 uint32_t reg;
14890
14891 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14892 device_xname(sc->sc_dev), __func__));
14893
14894 /* Stop nvm */
14895 reg = CSR_READ(sc, WMREG_EECD);
14896 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
14897 /* Pull CS high */
14898 reg |= EECD_CS;
14899 wm_nvm_eec_clock_lower(sc, &reg);
14900 } else {
14901 /* CS on Microwire is active-high */
14902 reg &= ~(EECD_CS | EECD_DI);
14903 CSR_WRITE(sc, WMREG_EECD, reg);
14904 wm_nvm_eec_clock_raise(sc, &reg);
14905 wm_nvm_eec_clock_lower(sc, &reg);
14906 }
14907
14908 reg = CSR_READ(sc, WMREG_EECD);
14909 reg &= ~EECD_EE_REQ;
14910 CSR_WRITE(sc, WMREG_EECD, reg);
14911
14912 return;
14913 }
14914
14915 /*
14916 * Get hardware semaphore.
14917 * Same as e1000_get_hw_semaphore_generic()
14918 */
14919 static int
14920 wm_get_swsm_semaphore(struct wm_softc *sc)
14921 {
14922 int32_t timeout;
14923 uint32_t swsm;
14924
14925 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14926 device_xname(sc->sc_dev), __func__));
14927 KASSERT(sc->sc_nvm_wordsize > 0);
14928
14929 retry:
14930 /* Get the SW semaphore. */
14931 timeout = sc->sc_nvm_wordsize + 1;
14932 while (timeout) {
14933 swsm = CSR_READ(sc, WMREG_SWSM);
14934
14935 if ((swsm & SWSM_SMBI) == 0)
14936 break;
14937
14938 delay(50);
14939 timeout--;
14940 }
14941
14942 if (timeout == 0) {
14943 if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
14944 /*
14945 * In rare circumstances, the SW semaphore may already
14946 * be held unintentionally. Clear the semaphore once
14947 * before giving up.
14948 */
14949 sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
14950 wm_put_swsm_semaphore(sc);
14951 goto retry;
14952 }
14953 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
14954 return -1;
14955 }
14956
14957 /* Get the FW semaphore. */
14958 timeout = sc->sc_nvm_wordsize + 1;
14959 while (timeout) {
14960 swsm = CSR_READ(sc, WMREG_SWSM);
14961 swsm |= SWSM_SWESMBI;
14962 CSR_WRITE(sc, WMREG_SWSM, swsm);
14963 /* If we managed to set the bit we got the semaphore. */
14964 swsm = CSR_READ(sc, WMREG_SWSM);
14965 if (swsm & SWSM_SWESMBI)
14966 break;
14967
14968 delay(50);
14969 timeout--;
14970 }
14971
14972 if (timeout == 0) {
14973 aprint_error_dev(sc->sc_dev,
14974 "could not acquire SWSM SWESMBI\n");
14975 /* Release semaphores */
14976 wm_put_swsm_semaphore(sc);
14977 return -1;
14978 }
14979 return 0;
14980 }
14981
14982 /*
14983 * Put hardware semaphore.
14984 * Same as e1000_put_hw_semaphore_generic()
14985 */
14986 static void
14987 wm_put_swsm_semaphore(struct wm_softc *sc)
14988 {
14989 uint32_t swsm;
14990
14991 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14992 device_xname(sc->sc_dev), __func__));
14993
14994 swsm = CSR_READ(sc, WMREG_SWSM);
14995 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
14996 CSR_WRITE(sc, WMREG_SWSM, swsm);
14997 }
14998
14999 /*
15000 * Get SW/FW semaphore.
15001 * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
15002 */
15003 static int
15004 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15005 {
15006 uint32_t swfw_sync;
15007 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
15008 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
15009 int timeout;
15010
15011 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15012 device_xname(sc->sc_dev), __func__));
15013
15014 if (sc->sc_type == WM_T_80003)
15015 timeout = 50;
15016 else
15017 timeout = 200;
15018
15019 while (timeout) {
15020 if (wm_get_swsm_semaphore(sc)) {
15021 aprint_error_dev(sc->sc_dev,
15022 "%s: failed to get semaphore\n",
15023 __func__);
15024 return -1;
15025 }
15026 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15027 if ((swfw_sync & (swmask | fwmask)) == 0) {
15028 swfw_sync |= swmask;
15029 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15030 wm_put_swsm_semaphore(sc);
15031 return 0;
15032 }
15033 wm_put_swsm_semaphore(sc);
15034 delay(5000);
15035 timeout--;
15036 }
15037 device_printf(sc->sc_dev,
15038 "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
15039 mask, swfw_sync);
15040 return -1;
15041 }
15042
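/*
 * Put SW/FW semaphore.
 * Counterpart of wm_get_swfw_semaphore() above.
 */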
15043 static void
15044 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15045 {
15046 uint32_t swfw_sync;
15047
15048 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15049 device_xname(sc->sc_dev), __func__));
15050
15051 while (wm_get_swsm_semaphore(sc) != 0)
15052 continue;
15053
15054 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15055 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
15056 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15057
15058 wm_put_swsm_semaphore(sc);
15059 }
15060
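/*
 * Get the NVM on 80003: the SW/FW EEPROM semaphore plus, when
 * WM_F_LOCK_EECD is set, direct EECD access.
 */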
15061 static int
15062 wm_get_nvm_80003(struct wm_softc *sc)
15063 {
15064 int rv;
15065
15066 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
15067 device_xname(sc->sc_dev), __func__));
15068
15069 if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
15070 aprint_error_dev(sc->sc_dev,
15071 "%s: failed to get semaphore(SWFW)\n", __func__);
15072 return rv;
15073 }
15074
15075 if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15076 && (rv = wm_get_eecd(sc)) != 0) {
15077 aprint_error_dev(sc->sc_dev,
15078 "%s: failed to get semaphore(EECD)\n", __func__);
15079 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15080 return rv;
15081 }
15082
15083 return 0;
15084 }
15085
15086 static void
15087 wm_put_nvm_80003(struct wm_softc *sc)
15088 {
15089
15090 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15091 device_xname(sc->sc_dev), __func__));
15092
15093 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15094 wm_put_eecd(sc);
15095 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15096 }
15097
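/*
 * Get the NVM on the 82571 family: the SWSM semaphore plus, except on
 * 82573, direct EECD access when WM_F_LOCK_EECD is set.
 */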
15098 static int
15099 wm_get_nvm_82571(struct wm_softc *sc)
15100 {
15101 int rv;
15102
15103 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15104 device_xname(sc->sc_dev), __func__));
15105
15106 if ((rv = wm_get_swsm_semaphore(sc)) != 0)
15107 return rv;
15108
15109 switch (sc->sc_type) {
15110 case WM_T_82573:
15111 break;
15112 default:
15113 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15114 rv = wm_get_eecd(sc);
15115 break;
15116 }
15117
15118 if (rv != 0) {
15119 aprint_error_dev(sc->sc_dev,
15120 "%s: failed to get semaphore\n",
15121 __func__);
15122 wm_put_swsm_semaphore(sc);
15123 }
15124
15125 return rv;
15126 }
15127
15128 static void
15129 wm_put_nvm_82571(struct wm_softc *sc)
15130 {
15131
15132 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15133 device_xname(sc->sc_dev), __func__));
15134
15135 switch (sc->sc_type) {
15136 case WM_T_82573:
15137 break;
15138 default:
15139 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15140 wm_put_eecd(sc);
15141 break;
15142 }
15143
15144 wm_put_swsm_semaphore(sc);
15145 }
15146
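/* Get/put the per-function PHY semaphore on 82575 and newer. */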
15147 static int
15148 wm_get_phy_82575(struct wm_softc *sc)
15149 {
15150
15151 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15152 device_xname(sc->sc_dev), __func__));
15153 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15154 }
15155
15156 static void
15157 wm_put_phy_82575(struct wm_softc *sc)
15158 {
15159
15160 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15161 device_xname(sc->sc_dev), __func__));
15162 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15163 }
15164
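/*
 * Get the SW/FW/HW semaphore by claiming MDIO ownership in EXTCNFCTR.
 * The ICH PHY mutex is taken here and released in
 * wm_put_swfwhw_semaphore().
 */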
15165 static int
15166 wm_get_swfwhw_semaphore(struct wm_softc *sc)
15167 {
15168 uint32_t ext_ctrl;
15169 int timeout;
15170
15171 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15172 device_xname(sc->sc_dev), __func__));
15173
15174 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15175 for (timeout = 0; timeout < 200; timeout++) {
15176 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15177 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15178 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15179
15180 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15181 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15182 return 0;
15183 delay(5000);
15184 }
15185 device_printf(sc->sc_dev,
15186 "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
15187 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15188 return -1;
15189 }
15190
15191 static void
15192 wm_put_swfwhw_semaphore(struct wm_softc *sc)
15193 {
15194 uint32_t ext_ctrl;
15195
15196 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15197 device_xname(sc->sc_dev), __func__));
15198
15199 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15200 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15201 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15202
15203 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15204 }
15205
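/*
 * Get the SW flag on ICH8 and newer: wait for any other owner to
 * release MDIO ownership in EXTCNFCTR, then claim it and verify that
 * the bit stuck.
 */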
15206 static int
15207 wm_get_swflag_ich8lan(struct wm_softc *sc)
15208 {
15209 uint32_t ext_ctrl;
15210 int timeout;
15211
15212 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15213 device_xname(sc->sc_dev), __func__));
15214 mutex_enter(sc->sc_ich_phymtx);
15215 for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
15216 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15217 if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
15218 break;
15219 delay(1000);
15220 }
15221 if (timeout >= WM_PHY_CFG_TIMEOUT) {
15222 device_printf(sc->sc_dev,
15223 "SW has already locked the resource\n");
15224 goto out;
15225 }
15226
15227 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15228 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15229 for (timeout = 0; timeout < 1000; timeout++) {
15230 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15231 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15232 break;
15233 delay(1000);
15234 }
15235 if (timeout >= 1000) {
15236 device_printf(sc->sc_dev, "failed to acquire semaphore\n");
15237 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15238 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15239 goto out;
15240 }
15241 return 0;
15242
15243 out:
15244 mutex_exit(sc->sc_ich_phymtx);
15245 return -1;
15246 }
15247
15248 static void
15249 wm_put_swflag_ich8lan(struct wm_softc *sc)
15250 {
15251 uint32_t ext_ctrl;
15252
15253 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15254 device_xname(sc->sc_dev), __func__));
15255 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15256 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
15257 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15258 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15259 } else
15260 device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
15261
15262 mutex_exit(sc->sc_ich_phymtx);
15263 }
15264
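/* NVM access on ICH8 and newer is serialized by a simple mutex. */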
15265 static int
15266 wm_get_nvm_ich8lan(struct wm_softc *sc)
15267 {
15268
15269 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15270 device_xname(sc->sc_dev), __func__));
15271 mutex_enter(sc->sc_ich_nvmmtx);
15272
15273 return 0;
15274 }
15275
15276 static void
15277 wm_put_nvm_ich8lan(struct wm_softc *sc)
15278 {
15279
15280 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15281 device_xname(sc->sc_dev), __func__));
15282 mutex_exit(sc->sc_ich_nvmmtx);
15283 }
15284
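/*
 * Get the HW semaphore on 82573 by claiming MDIO ownership in
 * EXTCNFCTR, retrying up to WM_MDIO_OWNERSHIP_TIMEOUT times.
 */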
15285 static int
15286 wm_get_hw_semaphore_82573(struct wm_softc *sc)
15287 {
15288 int i = 0;
15289 uint32_t reg;
15290
15291 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15292 device_xname(sc->sc_dev), __func__));
15293
15294 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15295 do {
15296 CSR_WRITE(sc, WMREG_EXTCNFCTR,
15297 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
15298 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15299 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
15300 break;
15301 delay(2*1000);
15302 i++;
15303 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
15304
15305 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
15306 wm_put_hw_semaphore_82573(sc);
15307 log(LOG_ERR, "%s: Driver can't access the PHY\n",
15308 device_xname(sc->sc_dev));
15309 return -1;
15310 }
15311
15312 return 0;
15313 }
15314
15315 static void
15316 wm_put_hw_semaphore_82573(struct wm_softc *sc)
15317 {
15318 uint32_t reg;
15319
15320 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15321 device_xname(sc->sc_dev), __func__));
15322
15323 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15324 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15325 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15326 }
15327
15328 /*
15329 * Management mode and power management related subroutines.
15330 * BMC, AMT, suspend/resume and EEE.
15331 */
15332
15333 #ifdef WM_WOL
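/* Return nonzero if a manageability firmware mode is active. */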
15334 static int
15335 wm_check_mng_mode(struct wm_softc *sc)
15336 {
15337 int rv;
15338
15339 switch (sc->sc_type) {
15340 case WM_T_ICH8:
15341 case WM_T_ICH9:
15342 case WM_T_ICH10:
15343 case WM_T_PCH:
15344 case WM_T_PCH2:
15345 case WM_T_PCH_LPT:
15346 case WM_T_PCH_SPT:
15347 case WM_T_PCH_CNP:
15348 rv = wm_check_mng_mode_ich8lan(sc);
15349 break;
15350 case WM_T_82574:
15351 case WM_T_82583:
15352 rv = wm_check_mng_mode_82574(sc);
15353 break;
15354 case WM_T_82571:
15355 case WM_T_82572:
15356 case WM_T_82573:
15357 case WM_T_80003:
15358 rv = wm_check_mng_mode_generic(sc);
15359 break;
15360 default:
15361 /* Nothing to do */
15362 rv = 0;
15363 break;
15364 }
15365
15366 return rv;
15367 }
15368
15369 static int
15370 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
15371 {
15372 uint32_t fwsm;
15373
15374 fwsm = CSR_READ(sc, WMREG_FWSM);
15375
15376 if (((fwsm & FWSM_FW_VALID) != 0)
15377 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15378 return 1;
15379
15380 return 0;
15381 }
15382
15383 static int
15384 wm_check_mng_mode_82574(struct wm_softc *sc)
15385 {
15386 uint16_t data;
15387
15388 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15389
15390 if ((data & NVM_CFG2_MNGM_MASK) != 0)
15391 return 1;
15392
15393 return 0;
15394 }
15395
15396 static int
15397 wm_check_mng_mode_generic(struct wm_softc *sc)
15398 {
15399 uint32_t fwsm;
15400
15401 fwsm = CSR_READ(sc, WMREG_FWSM);
15402
15403 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
15404 return 1;
15405
15406 return 0;
15407 }
15408 #endif /* WM_WOL */
15409
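/*
 * Return 1 if firmware management pass-through (reception of TCO
 * management packets by the firmware) is enabled for this chip.
 */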
15410 static int
15411 wm_enable_mng_pass_thru(struct wm_softc *sc)
15412 {
15413 uint32_t manc, fwsm, factps;
15414
15415 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
15416 return 0;
15417
15418 manc = CSR_READ(sc, WMREG_MANC);
15419
15420 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
15421 device_xname(sc->sc_dev), manc));
15422 if ((manc & MANC_RECV_TCO_EN) == 0)
15423 return 0;
15424
15425 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
15426 fwsm = CSR_READ(sc, WMREG_FWSM);
15427 factps = CSR_READ(sc, WMREG_FACTPS);
15428 if (((factps & FACTPS_MNGCG) == 0)
15429 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15430 return 1;
15431 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
15432 uint16_t data;
15433
15434 factps = CSR_READ(sc, WMREG_FACTPS);
15435 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15436 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
15437 device_xname(sc->sc_dev), factps, data));
15438 if (((factps & FACTPS_MNGCG) == 0)
15439 && ((data & NVM_CFG2_MNGM_MASK)
15440 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
15441 return 1;
15442 } else if (((manc & MANC_SMBUS_EN) != 0)
15443 && ((manc & MANC_ASF_EN) == 0))
15444 return 1;
15445
15446 return 0;
15447 }
15448
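/*
 * Return true if the firmware currently blocks PHY resets (FWSM on
 * ICH/PCH parts, MANC on the others).
 */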
15449 static bool
15450 wm_phy_resetisblocked(struct wm_softc *sc)
15451 {
15452 bool blocked = false;
15453 uint32_t reg;
15454 int i = 0;
15455
15456 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15457 device_xname(sc->sc_dev), __func__));
15458
15459 switch (sc->sc_type) {
15460 case WM_T_ICH8:
15461 case WM_T_ICH9:
15462 case WM_T_ICH10:
15463 case WM_T_PCH:
15464 case WM_T_PCH2:
15465 case WM_T_PCH_LPT:
15466 case WM_T_PCH_SPT:
15467 case WM_T_PCH_CNP:
15468 do {
15469 reg = CSR_READ(sc, WMREG_FWSM);
15470 if ((reg & FWSM_RSPCIPHY) == 0) {
15471 blocked = true;
15472 delay(10*1000);
15473 continue;
15474 }
15475 blocked = false;
15476 } while (blocked && (i++ < 30));
15477 return blocked;
15478 break;
15479 case WM_T_82571:
15480 case WM_T_82572:
15481 case WM_T_82573:
15482 case WM_T_82574:
15483 case WM_T_82583:
15484 case WM_T_80003:
15485 reg = CSR_READ(sc, WMREG_MANC);
15486 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
15487 return true;
15488 else
15489 return false;
15490 break;
15491 default:
15492 /* No problem */
15493 break;
15494 }
15495
15496 return false;
15497 }
15498
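/*
 * Tell the firmware that the driver has loaded by setting the
 * DRV_LOAD bit; wm_release_hw_control() below clears it again.
 */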
15499 static void
15500 wm_get_hw_control(struct wm_softc *sc)
15501 {
15502 uint32_t reg;
15503
15504 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15505 device_xname(sc->sc_dev), __func__));
15506
15507 if (sc->sc_type == WM_T_82573) {
15508 reg = CSR_READ(sc, WMREG_SWSM);
15509 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
15510 } else if (sc->sc_type >= WM_T_82571) {
15511 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15512 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
15513 }
15514 }
15515
15516 static void
15517 wm_release_hw_control(struct wm_softc *sc)
15518 {
15519 uint32_t reg;
15520
15521 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15522 device_xname(sc->sc_dev), __func__));
15523
15524 if (sc->sc_type == WM_T_82573) {
15525 reg = CSR_READ(sc, WMREG_SWSM);
15526 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
15527 } else if (sc->sc_type >= WM_T_82571) {
15528 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15529 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
15530 }
15531 }
15532
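/* Gate or ungate automatic PHY configuration by hardware (PCH2+). */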
15533 static void
15534 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
15535 {
15536 uint32_t reg;
15537
15538 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15539 device_xname(sc->sc_dev), __func__));
15540
15541 if (sc->sc_type < WM_T_PCH2)
15542 return;
15543
15544 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15545
15546 if (gate)
15547 reg |= EXTCNFCTR_GATE_PHY_CFG;
15548 else
15549 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
15550
15551 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15552 }
15553
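/*
 * Bring the PHY into an accessible state on PCH and newer: disable
 * ULP, force the MAC-PHY interconnect out of SMBus mode if necessary,
 * toggle LANPHYPC and finally reset the PHY.
 */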
15554 static int
15555 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
15556 {
15557 uint32_t fwsm, reg;
15558 int rv;
15559
15560 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15561 device_xname(sc->sc_dev), __func__));
15562
15563 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
15564 wm_gate_hw_phy_config_ich8lan(sc, true);
15565
15566 /* Disable ULP */
15567 wm_ulp_disable(sc);
15568
15569 /* Acquire PHY semaphore */
15570 rv = sc->phy.acquire(sc);
15571 if (rv != 0) {
15572 DPRINTF(sc, WM_DEBUG_INIT,
15573 ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
15574 return rv;
15575 }
15576
15577 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
15578 * inaccessible and resetting the PHY is not blocked, toggle the
15579 * LANPHYPC Value bit to force the interconnect to PCIe mode.
15580 */
15581 fwsm = CSR_READ(sc, WMREG_FWSM);
15582 switch (sc->sc_type) {
15583 case WM_T_PCH_LPT:
15584 case WM_T_PCH_SPT:
15585 case WM_T_PCH_CNP:
15586 if (wm_phy_is_accessible_pchlan(sc))
15587 break;
15588
15589 /* Before toggling LANPHYPC, see if PHY is accessible by
15590 * forcing MAC to SMBus mode first.
15591 */
15592 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15593 reg |= CTRL_EXT_FORCE_SMBUS;
15594 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15595 #if 0
15596 /* XXX Isn't this required??? */
15597 CSR_WRITE_FLUSH(sc);
15598 #endif
15599 /* Wait 50 milliseconds for MAC to finish any retries
15600 * that it might be trying to perform from previous
15601 * attempts to acknowledge any phy read requests.
15602 */
15603 delay(50 * 1000);
15604 /* FALLTHROUGH */
15605 case WM_T_PCH2:
15606 if (wm_phy_is_accessible_pchlan(sc) == true)
15607 break;
15608 /* FALLTHROUGH */
15609 case WM_T_PCH:
15610 if (sc->sc_type == WM_T_PCH)
15611 if ((fwsm & FWSM_FW_VALID) != 0)
15612 break;
15613
15614 if (wm_phy_resetisblocked(sc) == true) {
15615 device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
15616 break;
15617 }
15618
15619 /* Toggle LANPHYPC Value bit */
15620 wm_toggle_lanphypc_pch_lpt(sc);
15621
15622 if (sc->sc_type >= WM_T_PCH_LPT) {
15623 if (wm_phy_is_accessible_pchlan(sc) == true)
15624 break;
15625
15626 /* Toggling LANPHYPC brings the PHY out of SMBus mode
15627 * so ensure that the MAC is also out of SMBus mode
15628 */
15629 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15630 reg &= ~CTRL_EXT_FORCE_SMBUS;
15631 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15632
15633 if (wm_phy_is_accessible_pchlan(sc) == true)
15634 break;
15635 rv = -1;
15636 }
15637 break;
15638 default:
15639 break;
15640 }
15641
15642 /* Release semaphore */
15643 sc->phy.release(sc);
15644
15645 if (rv == 0) {
15646 /* Check to see if able to reset PHY. Print error if not */
15647 if (wm_phy_resetisblocked(sc)) {
15648 device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
15649 goto out;
15650 }
15651
15652 /* Reset the PHY before any access to it. Doing so, ensures
15653 * that the PHY is in a known good state before we read/write
15654 * PHY registers. The generic reset is sufficient here,
15655 * because we haven't determined the PHY type yet.
15656 */
15657 if (wm_reset_phy(sc) != 0)
15658 goto out;
15659
15660 /* On a successful reset, possibly need to wait for the PHY
15661 * to quiesce to an accessible state before returning control
15662 * to the calling function. If the PHY does not quiesce, then
15663 * return E1000E_BLK_PHY_RESET, as this is the condition that
15664 * the PHY is in.
15665 */
15666 if (wm_phy_resetisblocked(sc))
15667 device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
15668 }
15669
15670 out:
15671 /* Ungate automatic PHY configuration on non-managed 82579 */
15672 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
15673 delay(10*1000);
15674 wm_gate_hw_phy_config_ich8lan(sc, false);
15675 }
15676
15677 return rv;
15678 }
15679
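/*
 * Let management traffic reach the host while the driver is running:
 * disable hardware ARP interception and pass packets for ports
 * 623/624 up to the host. wm_release_manageability() below undoes
 * this.
 */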
15680 static void
15681 wm_init_manageability(struct wm_softc *sc)
15682 {
15683
15684 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15685 device_xname(sc->sc_dev), __func__));
15686 KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
15687
15688 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15689 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
15690 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15691
15692 /* Disable hardware interception of ARP */
15693 manc &= ~MANC_ARP_EN;
15694
15695 /* Enable receiving management packets to the host */
15696 if (sc->sc_type >= WM_T_82571) {
15697 manc |= MANC_EN_MNG2HOST;
15698 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
15699 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
15700 }
15701
15702 CSR_WRITE(sc, WMREG_MANC, manc);
15703 }
15704 }
15705
15706 static void
15707 wm_release_manageability(struct wm_softc *sc)
15708 {
15709
15710 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15711 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15712
15713 manc |= MANC_ARP_EN;
15714 if (sc->sc_type >= WM_T_82571)
15715 manc &= ~MANC_EN_MNG2HOST;
15716
15717 CSR_WRITE(sc, WMREG_MANC, manc);
15718 }
15719 }
15720
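/*
 * Set the AMT, ARC subsystem, ASF firmware and manageability flags
 * according to the chip type and the FWSM contents.
 */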
15721 static void
15722 wm_get_wakeup(struct wm_softc *sc)
15723 {
15724
15725 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
15726 switch (sc->sc_type) {
15727 case WM_T_82573:
15728 case WM_T_82583:
15729 sc->sc_flags |= WM_F_HAS_AMT;
15730 /* FALLTHROUGH */
15731 case WM_T_80003:
15732 case WM_T_82575:
15733 case WM_T_82576:
15734 case WM_T_82580:
15735 case WM_T_I350:
15736 case WM_T_I354:
15737 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
15738 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
15739 /* FALLTHROUGH */
15740 case WM_T_82541:
15741 case WM_T_82541_2:
15742 case WM_T_82547:
15743 case WM_T_82547_2:
15744 case WM_T_82571:
15745 case WM_T_82572:
15746 case WM_T_82574:
15747 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15748 break;
15749 case WM_T_ICH8:
15750 case WM_T_ICH9:
15751 case WM_T_ICH10:
15752 case WM_T_PCH:
15753 case WM_T_PCH2:
15754 case WM_T_PCH_LPT:
15755 case WM_T_PCH_SPT:
15756 case WM_T_PCH_CNP:
15757 sc->sc_flags |= WM_F_HAS_AMT;
15758 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15759 break;
15760 default:
15761 break;
15762 }
15763
15764 /* 1: HAS_MANAGE */
15765 if (wm_enable_mng_pass_thru(sc) != 0)
15766 sc->sc_flags |= WM_F_HAS_MANAGE;
15767
15768 /*
15769 * Note that the WOL flag is set after the EEPROM stuff has been
15770 * reset.
15771 */
15772 }
15773
15774 /*
15775 * Unconfigure Ultra Low Power mode.
15776 * Only for I217 and newer (see below).
15777 */
15778 static int
15779 wm_ulp_disable(struct wm_softc *sc)
15780 {
15781 uint32_t reg;
15782 uint16_t phyreg;
15783 int i = 0, rv;
15784
15785 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15786 device_xname(sc->sc_dev), __func__));
15787 /* Exclude devices that do not support ULP */
15788 if ((sc->sc_type < WM_T_PCH_LPT)
15789 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
15790 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
15791 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
15792 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
15793 return 0;
15794
15795 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
15796 /* Request ME un-configure ULP mode in the PHY */
15797 reg = CSR_READ(sc, WMREG_H2ME);
15798 reg &= ~H2ME_ULP;
15799 reg |= H2ME_ENFORCE_SETTINGS;
15800 CSR_WRITE(sc, WMREG_H2ME, reg);
15801
15802 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
15803 while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
15804 if (i++ == 30) {
15805 device_printf(sc->sc_dev, "%s timed out\n",
15806 __func__);
15807 return -1;
15808 }
15809 delay(10 * 1000);
15810 }
15811 reg = CSR_READ(sc, WMREG_H2ME);
15812 reg &= ~H2ME_ENFORCE_SETTINGS;
15813 CSR_WRITE(sc, WMREG_H2ME, reg);
15814
15815 return 0;
15816 }
15817
15818 /* Acquire semaphore */
15819 rv = sc->phy.acquire(sc);
15820 if (rv != 0) {
15821 DPRINTF(sc, WM_DEBUG_INIT,
15822 ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
15823 return rv;
15824 }
15825
15826 /* Toggle LANPHYPC */
15827 wm_toggle_lanphypc_pch_lpt(sc);
15828
15829 /* Unforce SMBus mode in PHY */
15830 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
15831 if (rv != 0) {
15832 uint32_t reg2;
15833
15834 aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
15835 __func__);
15836 reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
15837 reg2 |= CTRL_EXT_FORCE_SMBUS;
15838 CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
15839 delay(50 * 1000);
15840
15841 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
15842 &phyreg);
15843 if (rv != 0)
15844 goto release;
15845 }
15846 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
15847 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
15848
15849 /* Unforce SMBus mode in MAC */
15850 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15851 reg &= ~CTRL_EXT_FORCE_SMBUS;
15852 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15853
15854 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
15855 if (rv != 0)
15856 goto release;
15857 phyreg |= HV_PM_CTRL_K1_ENA;
15858 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
15859
15860 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
15861 &phyreg);
15862 if (rv != 0)
15863 goto release;
15864 phyreg &= ~(I218_ULP_CONFIG1_IND
15865 | I218_ULP_CONFIG1_STICKY_ULP
15866 | I218_ULP_CONFIG1_RESET_TO_SMBUS
15867 | I218_ULP_CONFIG1_WOL_HOST
15868 | I218_ULP_CONFIG1_INBAND_EXIT
15869 | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
15870 | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
15871 | I218_ULP_CONFIG1_DIS_SMB_PERST);
15872 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15873 phyreg |= I218_ULP_CONFIG1_START;
15874 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15875
15876 reg = CSR_READ(sc, WMREG_FEXTNVM7);
15877 reg &= ~FEXTNVM7_DIS_SMB_PERST;
15878 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
15879
15880 release:
15881 /* Release semaphore */
15882 sc->phy.release(sc);
15883 wm_gmii_reset(sc);
15884 delay(50 * 1000);
15885
15886 return rv;
15887 }
15888
15889 /* WOL in the newer chipset interfaces (pchlan) */
15890 static int
15891 wm_enable_phy_wakeup(struct wm_softc *sc)
15892 {
15893 device_t dev = sc->sc_dev;
15894 uint32_t mreg, moff;
15895 uint16_t wuce, wuc, wufc, preg;
15896 int i, rv;
15897
15898 KASSERT(sc->sc_type >= WM_T_PCH);
15899
15900 /* Copy MAC RARs to PHY RARs */
15901 wm_copy_rx_addrs_to_phy_ich8lan(sc);
15902
15903 /* Activate PHY wakeup */
15904 rv = sc->phy.acquire(sc);
15905 if (rv != 0) {
15906 device_printf(dev, "%s: failed to acquire semaphore\n",
15907 __func__);
15908 return rv;
15909 }
15910
15911 /*
15912 * Enable access to PHY wakeup registers.
15913 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
15914 */
15915 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
15916 if (rv != 0) {
15917 device_printf(dev,
15918 "%s: Could not enable PHY wakeup reg access\n", __func__);
15919 goto release;
15920 }
15921
15922 /* Copy MAC MTA to PHY MTA */
15923 for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
15924 uint16_t lo, hi;
15925
15926 mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
15927 lo = (uint16_t)(mreg & 0xffff);
15928 hi = (uint16_t)((mreg >> 16) & 0xffff);
15929 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
15930 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
15931 }
15932
15933 /* Configure PHY Rx Control register */
15934 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
15935 mreg = CSR_READ(sc, WMREG_RCTL);
15936 if (mreg & RCTL_UPE)
15937 preg |= BM_RCTL_UPE;
15938 if (mreg & RCTL_MPE)
15939 preg |= BM_RCTL_MPE;
15940 preg &= ~(BM_RCTL_MO_MASK);
15941 moff = __SHIFTOUT(mreg, RCTL_MO);
15942 if (moff != 0)
15943 preg |= moff << BM_RCTL_MO_SHIFT;
15944 if (mreg & RCTL_BAM)
15945 preg |= BM_RCTL_BAM;
15946 if (mreg & RCTL_PMCF)
15947 preg |= BM_RCTL_PMCF;
15948 mreg = CSR_READ(sc, WMREG_CTRL);
15949 if (mreg & CTRL_RFCE)
15950 preg |= BM_RCTL_RFCE;
15951 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
15952
15953 wuc = WUC_APME | WUC_PME_EN;
15954 wufc = WUFC_MAG;
15955 /* Enable PHY wakeup in MAC register */
15956 CSR_WRITE(sc, WMREG_WUC,
15957 WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
15958 CSR_WRITE(sc, WMREG_WUFC, wufc);
15959
15960 /* Configure and enable PHY wakeup in PHY registers */
15961 wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
15962 wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
15963
15964 wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
15965 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15966
15967 release:
15968 sc->phy.release(sc);
15969
15970 return rv;
15971 }
15972
15973 /* Power down workaround on D3 */
15974 static void
15975 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
15976 {
15977 uint32_t reg;
15978 uint16_t phyreg;
15979 int i;
15980
15981 for (i = 0; i < 2; i++) {
15982 /* Disable link */
15983 reg = CSR_READ(sc, WMREG_PHY_CTRL);
15984 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15985 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15986
15987 /*
15988 * Call gig speed drop workaround on Gig disable before
15989 * accessing any PHY registers
15990 */
15991 if (sc->sc_type == WM_T_ICH8)
15992 wm_gig_downshift_workaround_ich8lan(sc);
15993
15994 /* Write VR power-down enable */
15995 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15996 phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15997 phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
15998 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
15999
16000 /* Read it back and test */
16001 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16002 phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16003 if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
16004 break;
16005
16006 /* Issue PHY reset and repeat at most one more time */
16007 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
16008 }
16009 }
16010
16011 /*
16012 * wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
16013 * @sc: pointer to the HW structure
16014 *
16015 * During S0 to Sx transition, it is possible the link remains at gig
16016 * instead of negotiating to a lower speed. Before going to Sx, set
16017 * 'Gig Disable' to force link speed negotiation to a lower speed based on
16018 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
16019 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
16020 * needs to be written.
16021 * Parts that support (and are linked to a partner which support) EEE in
16022 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
16023 * than 10Mbps w/o EEE.
16024 */
16025 static void
16026 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
16027 {
16028 device_t dev = sc->sc_dev;
16029 struct ethercom *ec = &sc->sc_ethercom;
16030 uint32_t phy_ctrl;
16031 int rv;
16032
16033 phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
16034 phy_ctrl |= PHY_CTRL_GBE_DIS;
16035
16036 KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
16037
16038 if (sc->sc_phytype == WMPHY_I217) {
16039 uint16_t devid = sc->sc_pcidevid;
16040
16041 if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
16042 (devid == PCI_PRODUCT_INTEL_I218_V) ||
16043 (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
16044 (devid == PCI_PRODUCT_INTEL_I218_V3) ||
16045 (sc->sc_type >= WM_T_PCH_SPT))
16046 CSR_WRITE(sc, WMREG_FEXTNVM6,
16047 CSR_READ(sc, WMREG_FEXTNVM6)
16048 & ~FEXTNVM6_REQ_PLL_CLK);
16049
16050 if (sc->phy.acquire(sc) != 0)
16051 goto out;
16052
16053 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16054 uint16_t eee_advert;
16055
16056 rv = wm_read_emi_reg_locked(dev,
16057 I217_EEE_ADVERTISEMENT, &eee_advert);
16058 if (rv)
16059 goto release;
16060
16061 /*
16062 * Disable LPLU if both link partners support 100BaseT
16063 * EEE and 100Full is advertised on both ends of the
16064 * link, and enable Auto Enable LPI since there will
16065 * be no driver to enable LPI while in Sx.
16066 */
16067 if ((eee_advert & AN_EEEADVERT_100_TX) &&
16068 (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
16069 uint16_t anar, phy_reg;
16070
16071 sc->phy.readreg_locked(dev, 2, MII_ANAR,
16072 &anar);
16073 if (anar & ANAR_TX_FD) {
16074 phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
16075 PHY_CTRL_NOND0A_LPLU);
16076
16077 /* Set Auto Enable LPI after link up */
16078 sc->phy.readreg_locked(dev, 2,
16079 I217_LPI_GPIO_CTRL, &phy_reg);
16080 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16081 sc->phy.writereg_locked(dev, 2,
16082 I217_LPI_GPIO_CTRL, phy_reg);
16083 }
16084 }
16085 }
16086
16087 /*
16088 * For i217 Intel Rapid Start Technology support,
16089 * when the system is going into Sx and no manageability engine
16090 * is present, the driver must configure proxy to reset only on
16091 * power good. LPI (Low Power Idle) state must also reset only
16092 * on power good, as well as the MTA (Multicast table array).
16093 * The SMBus release must also be disabled on LCD reset.
16094 */
16095
16096 /*
16097 * Enable MTA to reset for Intel Rapid Start Technology
16098 * Support
16099 */
16100
16101 release:
16102 sc->phy.release(sc);
16103 }
16104 out:
16105 CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
16106
16107 if (sc->sc_type == WM_T_ICH8)
16108 wm_gig_downshift_workaround_ich8lan(sc);
16109
16110 if (sc->sc_type >= WM_T_PCH) {
16111 wm_oem_bits_config_ich8lan(sc, false);
16112
16113 /* Reset PHY to activate OEM bits on 82577/8 */
16114 if (sc->sc_type == WM_T_PCH)
16115 wm_reset_phy(sc);
16116
16117 if (sc->phy.acquire(sc) != 0)
16118 return;
16119 wm_write_smbus_addr(sc);
16120 sc->phy.release(sc);
16121 }
16122 }
16123
16124 /*
16125 * wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
16126 * @sc: pointer to the HW structure
16127 *
16128 * During Sx to S0 transitions on non-managed devices or managed devices
16129 * on which PHY resets are not blocked, if the PHY registers cannot be
16130 * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
16131 * the PHY.
16132 * On i217, setup Intel Rapid Start Technology.
16133 */
16134 static int
16135 wm_resume_workarounds_pchlan(struct wm_softc *sc)
16136 {
16137 device_t dev = sc->sc_dev;
16138 int rv;
16139
16140 if (sc->sc_type < WM_T_PCH2)
16141 return 0;
16142
16143 rv = wm_init_phy_workarounds_pchlan(sc);
16144 if (rv != 0)
16145 return rv;
16146
16147 /* For i217 Intel Rapid Start Technology support when the system
16148 * is transitioning from Sx and no manageability engine is present,
16149 * configure SMBus to restore on reset, disable proxy, and enable
16150 * the reset on MTA (Multicast table array).
16151 */
16152 if (sc->sc_phytype == WMPHY_I217) {
16153 uint16_t phy_reg;
16154
16155 rv = sc->phy.acquire(sc);
16156 if (rv != 0)
16157 return rv;
16158
16159 /* Clear Auto Enable LPI after link up */
16160 sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
16161 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16162 sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
16163
16164 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16165 /* Restore clear on SMB if no manageability engine
16166 * is present
16167 */
16168 rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
16169 &phy_reg);
16170 if (rv != 0)
16171 goto release;
16172 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
16173 sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
16174
16175 /* Disable Proxy */
16176 sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
16177 }
16178 /* Enable reset on MTA */
16179 rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
16180 if (rv != 0)
16181 goto release;
16182 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
16183 sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
16184
16185 release:
16186 sc->phy.release(sc);
16187 return rv;
16188 }
16189
16190 return 0;
16191 }
16192
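/*
 * Arm the device for wake-on-LAN before suspend: run the suspend
 * workarounds, program WUC/WUFC (or their PHY equivalents on PCH and
 * newer) and set PME_EN in the PCI power management registers.
 */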
16193 static void
16194 wm_enable_wakeup(struct wm_softc *sc)
16195 {
16196 uint32_t reg, pmreg;
16197 pcireg_t pmode;
16198 int rv = 0;
16199
16200 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16201 device_xname(sc->sc_dev), __func__));
16202
16203 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16204 &pmreg, NULL) == 0)
16205 return;
16206
16207 if ((sc->sc_flags & WM_F_WOL) == 0)
16208 goto pme;
16209
16210 /* Advertise the wakeup capability */
16211 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
16212 | CTRL_SWDPIN(3));
16213
16214 /* Keep the laser running on fiber adapters */
16215 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
16216 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
16217 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16218 reg |= CTRL_EXT_SWDPIN(3);
16219 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16220 }
16221
16222 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
16223 (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
16224 (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
16225 (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
16226 wm_suspend_workarounds_ich8lan(sc);
16227
16228 #if 0 /* For the multicast packet */
16229 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
16230 reg |= WUFC_MC;
16231 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
16232 #endif
16233
16234 if (sc->sc_type >= WM_T_PCH) {
16235 rv = wm_enable_phy_wakeup(sc);
16236 if (rv != 0)
16237 goto pme;
16238 } else {
16239 /* Enable wakeup by the MAC */
16240 CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
16241 CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
16242 }
16243
16244 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
16245 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
16246 || (sc->sc_type == WM_T_PCH2))
16247 && (sc->sc_phytype == WMPHY_IGP_3))
16248 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
16249
16250 pme:
16251 /* Request PME */
16252 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
16253 pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
16254 if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
16255 /* For WOL */
16256 pmode |= PCI_PMCSR_PME_EN;
16257 } else {
16258 /* Disable WOL */
16259 pmode &= ~PCI_PMCSR_PME_EN;
16260 }
16261 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
16262 }
16263
16264 /* Disable ASPM L0s and/or L1 for workaround */
16265 static void
16266 wm_disable_aspm(struct wm_softc *sc)
16267 {
16268 pcireg_t reg, mask = 0;
16269 const char *str = "";
16270
16271 /*
16272 * Only for PCIe device which has PCIe capability in the PCI config
16273 * space.
16274 */
16275 if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
16276 return;
16277
16278 switch (sc->sc_type) {
16279 case WM_T_82571:
16280 case WM_T_82572:
16281 /*
16282 * 8257[12] Errata 13: Device Does Not Support PCIe Active
16283 * State Power management L1 State (ASPM L1).
16284 */
16285 mask = PCIE_LCSR_ASPM_L1;
16286 str = "L1 is";
16287 break;
16288 case WM_T_82573:
16289 case WM_T_82574:
16290 case WM_T_82583:
16291 /*
16292 * The 82573 disappears when PCIe ASPM L0s is enabled.
16293 *
16294 * The 82574 and 82583 do not support PCIe ASPM L0s with
16295 * some chipsets. The 82574 and 82583 documents say that
16296 * disabling L0s with those specific chipsets is sufficient,
16297 * but we follow what the Intel em driver does.
16298 *
16299 * References:
16300 * Errata 8 of the Specification Update of i82573.
16301 * Errata 20 of the Specification Update of i82574.
16302 * Errata 9 of the Specification Update of i82583.
16303 */
16304 mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
16305 str = "L0s and L1 are";
16306 break;
16307 default:
16308 return;
16309 }
16310
16311 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16312 sc->sc_pcixe_capoff + PCIE_LCSR);
16313 reg &= ~mask;
16314 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16315 sc->sc_pcixe_capoff + PCIE_LCSR, reg);
16316
16317 /* Print only in wm_attach() */
16318 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
16319 aprint_verbose_dev(sc->sc_dev,
16320 "ASPM %s disabled to workaround the errata.\n", str);
16321 }
16322
16323 /* LPLU */
16324
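/* Disable D0 Low Power Link Up; the relevant register varies by chip. */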
16325 static void
16326 wm_lplu_d0_disable(struct wm_softc *sc)
16327 {
16328 struct mii_data *mii = &sc->sc_mii;
16329 uint32_t reg;
16330 uint16_t phyval;
16331
16332 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16333 device_xname(sc->sc_dev), __func__));
16334
16335 if (sc->sc_phytype == WMPHY_IFE)
16336 return;
16337
16338 switch (sc->sc_type) {
16339 case WM_T_82571:
16340 case WM_T_82572:
16341 case WM_T_82573:
16342 case WM_T_82575:
16343 case WM_T_82576:
16344 mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
16345 phyval &= ~PMR_D0_LPLU;
16346 mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
16347 break;
16348 case WM_T_82580:
16349 case WM_T_I350:
16350 case WM_T_I210:
16351 case WM_T_I211:
16352 reg = CSR_READ(sc, WMREG_PHPM);
16353 reg &= ~PHPM_D0A_LPLU;
16354 CSR_WRITE(sc, WMREG_PHPM, reg);
16355 break;
16356 case WM_T_82574:
16357 case WM_T_82583:
16358 case WM_T_ICH8:
16359 case WM_T_ICH9:
16360 case WM_T_ICH10:
16361 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16362 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
16363 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16364 CSR_WRITE_FLUSH(sc);
16365 break;
16366 case WM_T_PCH:
16367 case WM_T_PCH2:
16368 case WM_T_PCH_LPT:
16369 case WM_T_PCH_SPT:
16370 case WM_T_PCH_CNP:
16371 wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
16372 phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
16373 if (wm_phy_resetisblocked(sc) == false)
16374 phyval |= HV_OEM_BITS_ANEGNOW;
16375 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
16376 break;
16377 default:
16378 break;
16379 }
16380 }
16381
16382 /* EEE */
16383
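/*
 * Enable or disable Energy Efficient Ethernet on I350-class chips
 * according to the ETHERCAP_EEE setting.
 */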
16384 static int
16385 wm_set_eee_i350(struct wm_softc *sc)
16386 {
16387 struct ethercom *ec = &sc->sc_ethercom;
16388 uint32_t ipcnfg, eeer;
16389 uint32_t ipcnfg_mask
16390 = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
16391 uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
16392
16393 KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
16394
16395 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
16396 eeer = CSR_READ(sc, WMREG_EEER);
16397
16398 /* Enable or disable per user setting */
16399 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16400 ipcnfg |= ipcnfg_mask;
16401 eeer |= eeer_mask;
16402 } else {
16403 ipcnfg &= ~ipcnfg_mask;
16404 eeer &= ~eeer_mask;
16405 }
16406
16407 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
16408 CSR_WRITE(sc, WMREG_EEER, eeer);
16409 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
16410 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
16411
16412 return 0;
16413 }
16414
16415 static int
16416 wm_set_eee_pchlan(struct wm_softc *sc)
16417 {
16418 device_t dev = sc->sc_dev;
16419 struct ethercom *ec = &sc->sc_ethercom;
16420 uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
16421 int rv;
16422
16423 switch (sc->sc_phytype) {
16424 case WMPHY_82579:
16425 lpa = I82579_EEE_LP_ABILITY;
16426 pcs_status = I82579_EEE_PCS_STATUS;
16427 adv_addr = I82579_EEE_ADVERTISEMENT;
16428 break;
16429 case WMPHY_I217:
16430 lpa = I217_EEE_LP_ABILITY;
16431 pcs_status = I217_EEE_PCS_STATUS;
16432 adv_addr = I217_EEE_ADVERTISEMENT;
16433 break;
16434 default:
16435 return 0;
16436 }
16437
16438 rv = sc->phy.acquire(sc);
16439 if (rv != 0) {
16440 device_printf(dev, "%s: failed to get semaphore\n", __func__);
16441 return rv;
16442 }
16443
16444 rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
16445 if (rv != 0)
16446 goto release;
16447
16448 /* Clear bits that enable EEE in various speeds */
16449 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
16450
16451 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16452 /* Save off link partner's EEE ability */
16453 rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
16454 if (rv != 0)
16455 goto release;
16456
16457 /* Read EEE advertisement */
16458 if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
16459 goto release;
16460
16461 /*
16462 * Enable EEE only for speeds in which the link partner is
16463 * EEE capable and for which we advertise EEE.
16464 */
16465 if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
16466 lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
16467 if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
16468 sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
16469 if ((data & ANLPAR_TX_FD) != 0)
16470 lpi_ctrl |= I82579_LPI_CTRL_EN_100;
16471 else {
16472 /*
16473 * EEE is not supported in 100Half, so ignore
16474 * partner's EEE in 100 ability if full-duplex
16475 * is not advertised.
16476 */
16477 sc->eee_lp_ability
16478 &= ~AN_EEEADVERT_100_TX;
16479 }
16480 }
16481 }
16482
16483 if (sc->sc_phytype == WMPHY_82579) {
16484 rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
16485 if (rv != 0)
16486 goto release;
16487
16488 data &= ~I82579_LPI_PLL_SHUT_100;
16489 rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
16490 }
16491
16492 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
16493 if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
16494 goto release;
16495
16496 rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
16497 release:
16498 sc->phy.release(sc);
16499
16500 return rv;
16501 }
16502
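/* Dispatch EEE configuration to the appropriate chip-specific routine. */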
16503 static int
16504 wm_set_eee(struct wm_softc *sc)
16505 {
16506 struct ethercom *ec = &sc->sc_ethercom;
16507
16508 if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
16509 return 0;
16510
16511 if (sc->sc_type == WM_T_I354) {
16512 /* I354 uses an external PHY */
16513 return 0; /* not yet */
16514 } else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
16515 return wm_set_eee_i350(sc);
16516 else if (sc->sc_type >= WM_T_PCH2)
16517 return wm_set_eee_pchlan(sc);
16518
16519 return 0;
16520 }
16521
16522 /*
16523 * Workarounds (mainly PHY related).
16524 * Basically, PHY's workarounds are in the PHY drivers.
16525 */
16526
16527 /* Workaround for 82566 Kumeran PCS lock loss */
16528 static int
16529 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
16530 {
16531 struct mii_data *mii = &sc->sc_mii;
16532 uint32_t status = CSR_READ(sc, WMREG_STATUS);
16533 int i, reg, rv;
16534 uint16_t phyreg;
16535
16536 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16537 device_xname(sc->sc_dev), __func__));
16538
16539 /* If the link is not up, do nothing */
16540 if ((status & STATUS_LU) == 0)
16541 return 0;
16542
16543 /* Nothing to do if the link is other than 1Gbps */
16544 if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
16545 return 0;
16546
16547 for (i = 0; i < 10; i++) {
16548 /* read twice */
16549 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16550 if (rv != 0)
16551 return rv;
16552 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16553 if (rv != 0)
16554 return rv;
16555
16556 if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
16557 goto out; /* GOOD! */
16558
16559 /* Reset the PHY */
16560 wm_reset_phy(sc);
16561 delay(5*1000);
16562 }
16563
16564 /* Disable GigE link negotiation */
16565 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16566 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16567 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16568
16569 /*
16570 * Call gig speed drop workaround on Gig disable before accessing
16571 * any PHY registers.
16572 */
16573 wm_gig_downshift_workaround_ich8lan(sc);
16574
16575 out:
16576 return 0;
16577 }
16578
16579 /*
16580 * wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
16581 * @sc: pointer to the HW structure
16582 *
16583 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
16584 * LPLU, Gig disable, MDIC PHY reset):
16585 * 1) Set Kumeran Near-end loopback
16586 * 2) Clear Kumeran Near-end loopback
16587 * Should only be called for ICH8[m] devices with any 1G Phy.
16588 */
16589 static void
16590 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
16591 {
16592 uint16_t kmreg;
16593
16594 /* Only for igp3 */
16595 if (sc->sc_phytype == WMPHY_IGP_3) {
16596 if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
16597 return;
16598 kmreg |= KUMCTRLSTA_DIAG_NELPBK;
16599 if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
16600 return;
16601 kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
16602 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
16603 }
16604 }
16605
16606 /*
16607 * Workaround for pch's PHYs
16608 * XXX should be moved to new PHY driver?
16609 */
16610 static int
16611 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
16612 {
16613 device_t dev = sc->sc_dev;
16614 struct mii_data *mii = &sc->sc_mii;
16615 struct mii_softc *child;
16616 uint16_t phy_data, phyrev = 0;
16617 int phytype = sc->sc_phytype;
16618 int rv;
16619
16620 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16621 device_xname(dev), __func__));
16622 KASSERT(sc->sc_type == WM_T_PCH);
16623
16624 /* Set MDIO slow mode before any other MDIO access */
16625 if (phytype == WMPHY_82577)
16626 if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
16627 return rv;
16628
16629 child = LIST_FIRST(&mii->mii_phys);
16630 if (child != NULL)
16631 phyrev = child->mii_mpd_rev;
16632
16633 /* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
	if ((child != NULL) &&
	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
	    ((phytype == WMPHY_82578) && (phyrev == 1)))) {
		/* Disable generation of early preamble (0x4431) */
		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
		    &phy_data);
		if (rv != 0)
			return rv;
		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
		    phy_data);
		if (rv != 0)
			return rv;

		/* Preamble tuning for SSC */
		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
		if (rv != 0)
			return rv;
	}

	/* 82578 */
	if (phytype == WMPHY_82578) {
		/*
		 * Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register
		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
		 */
		if ((child != NULL) && (phyrev < 2)) {
			PHY_RESET(child);
			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
			if (rv != 0)
				return rv;
		}
	}

	/* Select page 0 */
	if ((rv = sc->phy.acquire(sc)) != 0)
		return rv;
	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
	sc->phy.release(sc);
	if (rv != 0)
		return rv;

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
		return rv;

	/* Workaround for link disconnects on a busy hub in half duplex */
	rv = sc->phy.acquire(sc);
	if (rv)
		return rv;
	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
	if (rv)
		goto release;
	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
	    phy_data & 0x00ff);
	if (rv)
		goto release;

	/* Set MSE higher to enable link to stay up when noise is high */
	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
release:
	sc->phy.release(sc);

	return rv;
}

/*
 * wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
 * @sc: pointer to the HW structure
 */
static void
wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
{

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	if (sc->phy.acquire(sc) != 0)
		return;

	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);

	sc->phy.release(sc);
}

static void
wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
{
	device_t dev = sc->sc_dev;
	uint32_t mac_reg;
	uint16_t i, wuce;
	int count;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(dev), __func__));

	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
		return;

	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
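	/*
	 * Each 32-bit MAC register is split into two 16-bit PHY wakeup
	 * registers in the loop below: RAL's low and high halves go to
	 * BM_RAR_L and BM_RAR_M, RAH's low half goes to BM_RAR_H, and
	 * RAH's address-valid bit (RAL_AV) is carried in BM_RAR_CTRL.
	 */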
	count = wm_rar_count(sc);
	for (i = 0; i < count; i++) {
		uint16_t lo, hi;
		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
		lo = (uint16_t)(mac_reg & 0xffff);
		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);

		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
		lo = (uint16_t)(mac_reg & 0xffff);
		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
	}

	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
}

/*
 * wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
 * with 82579 PHY
 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
 */
static int
wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
{
	device_t dev = sc->sc_dev;
	int rar_count;
	int rv;
	uint32_t mac_reg;
	uint16_t dft_ctrl, data;
	uint16_t i;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(dev), __func__));

	if (sc->sc_type < WM_T_PCH2)
		return 0;

	/* Acquire PHY semaphore */
	rv = sc->phy.acquire(sc);
	if (rv != 0)
		return rv;

	/* Disable Rx path while enabling/disabling workaround */
	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
	if (rv != 0)
		goto out;
	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
	    dft_ctrl | (1 << 14));
	if (rv != 0)
		goto out;

	if (enable) {
		/* Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC
		 */
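		/*
		 * The value written to WMREG_PCH_RAICC(i) below is the
		 * complement of the little-endian CRC32 over the 6-byte
		 * station address; RAICC presumably stands for "Receive
		 * Address Initial CRC Calculation", i.e. it seeds the
		 * hardware's CRC computation for that address.
		 */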
		rar_count = wm_rar_count(sc);
		for (i = 0; i < rar_count; i++) {
			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
			uint32_t addr_high, addr_low;

			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
			if (!(addr_high & RAL_AV))
				continue;
			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
			mac_addr[0] = (addr_low & 0xFF);
			mac_addr[1] = ((addr_low >> 8) & 0xFF);
			mac_addr[2] = ((addr_low >> 16) & 0xFF);
			mac_addr[3] = ((addr_low >> 24) & 0xFF);
			mac_addr[4] = (addr_high & 0xFF);
			mac_addr[5] = ((addr_high >> 8) & 0xFF);

			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
		}

		/* Write Rx addresses to the PHY */
		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
	}

	/*
	 * If enable ==
	 * true: Enable jumbo frame workaround in the MAC.
	 * false: Write MAC register values back to h/w defaults.
	 */
	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
	if (enable) {
		mac_reg &= ~(1 << 14);
		mac_reg |= (7 << 15);
	} else
		mac_reg &= ~(0xf << 14);
	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);

	mac_reg = CSR_READ(sc, WMREG_RCTL);
	if (enable) {
		mac_reg |= RCTL_SECRC;
		sc->sc_rctl |= RCTL_SECRC;
		sc->sc_flags |= WM_F_CRC_STRIP;
	} else {
		mac_reg &= ~RCTL_SECRC;
		sc->sc_rctl &= ~RCTL_SECRC;
		sc->sc_flags &= ~WM_F_CRC_STRIP;
	}
	CSR_WRITE(sc, WMREG_RCTL, mac_reg);

	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
	if (rv != 0)
		goto out;
	if (enable)
		data |= 1 << 0;
	else
		data &= ~(1 << 0);
	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
	if (rv != 0)
		goto out;

	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
	if (rv != 0)
		goto out;
	/*
	 * XXX FreeBSD and Linux do the same thing here: they write the same
	 * value in both the enable case and the disable case. Is that
	 * correct?
	 */
	data &= ~(0xf << 8);
	data |= (0xb << 8);
	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
	if (rv != 0)
		goto out;

	/*
	 * If enable ==
	 * true: Enable jumbo frame workaround in the PHY.
	 * false: Write PHY register values back to h/w defaults.
	 */
	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
	if (rv != 0)
		goto out;
	data &= ~(0x7F << 5);
	if (enable)
		data |= (0x37 << 5);
	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
	if (rv != 0)
		goto out;

	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
	if (rv != 0)
		goto out;
	if (enable)
		data &= ~(1 << 13);
	else
		data |= (1 << 13);
	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
	if (rv != 0)
		goto out;

	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
	if (rv != 0)
		goto out;
	data &= ~(0x3FF << 2);
	if (enable)
		data |= (I82579_TX_PTR_GAP << 2);
	else
		data |= (0x8 << 2);
	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
	if (rv != 0)
		goto out;

	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
	    enable ? 0xf100 : 0x7e00);
	if (rv != 0)
		goto out;

	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
	if (rv != 0)
		goto out;
	if (enable)
		data |= 1 << 10;
	else
		data &= ~(1 << 10);
	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
	if (rv != 0)
		goto out;

	/* Re-enable Rx path after enabling/disabling workaround */
	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
	    dft_ctrl & ~(1 << 14));

out:
	sc->phy.release(sc);

	return rv;
}

/*
 * wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 * done after every PHY reset.
 */
static int
wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
{
	device_t dev = sc->sc_dev;
	int rv;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH2);

	/* Set MDIO slow mode before any other MDIO access */
	rv = wm_set_mdio_slow_mode_hv(sc);
	if (rv != 0)
		return rv;

	rv = sc->phy.acquire(sc);
	if (rv != 0)
		return rv;
	/* Set MSE higher to enable link to stay up when noise is high */
	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
	if (rv != 0)
		goto release;
	/* Drop the link after the MSE threshold has been reached 5 times */
	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
release:
	sc->phy.release(sc);

	return rv;
}

/**
 * wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 * @link: link up bool flag
 *
 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
 * indications, preventing further DMA write requests. Work around the
 * issue by disabling the de-assertion of the clock request when in
 * 1Gbps mode.
 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 * speeds in order to avoid Tx hangs.
 **/
static int
wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
{
	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
	uint32_t status = CSR_READ(sc, WMREG_STATUS);
	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
	uint16_t phyreg;

	if (link && (speed == STATUS_SPEED_1000)) {
		int rv;

		rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
		if (rv != 0)
			goto release;
		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
		if (rv != 0)
			goto release;
		delay(20);
		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);

		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
release:
		sc->phy.release(sc);
		return rv;
	}

	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;

	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (((child != NULL) && (child->mii_mpd_rev > 5))
	    || !link
	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
		goto update_fextnvm6;

	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);

	/* Clear link status transmit timeout */
	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
	if (speed == STATUS_SPEED_100) {
		/* Set inband Tx timeout to 5x10us for 100Half */
		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

		/* Do not extend the K1 entry latency for 100Half */
		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
	} else {
		/* Set inband Tx timeout to 50x10us for 10Full/Half */
		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

		/* Extend the K1 entry latency for 10 Mbps */
		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
	}

	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);

update_fextnvm6:
	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
	return 0;
}

/*
 * wm_k1_gig_workaround_hv - K1 Si workaround
 * @sc: pointer to the HW structure
 * @link: link up bool flag
 *
 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
 * If link is down, the function will restore the default K1 setting located
 * in the NVM.
 */
static int
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;
	int rv;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	rv = sc->phy.acquire(sc);
	if (rv != 0)
		return rv;

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);
	sc->phy.release(sc);

	return 0;
}

/*
 * wm_k1_workaround_lv - K1 Si workaround
 * @sc: pointer to the HW structure
 *
 * Workaround to set the K1 beacon duration for 82579 parts running at
 * 10Mbps. Disable K1 for 1000 and 100 speeds.
 */
static int
wm_k1_workaround_lv(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t phyreg;
	int rv;

	if (sc->sc_type != WM_T_PCH2)
		return 0;

	/* Set K1 beacon duration based on 10Mbps speed */
	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
	if (rv != 0)
		return rv;

	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
		if (phyreg &
		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
			    &phyreg);
			if (rv != 0)
				return rv;
			phyreg &= ~HV_PM_CTRL_K1_ENA;
			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
			    phyreg);
			if (rv != 0)
				return rv;
		} else {
			/* For 10Mbps */
			reg = CSR_READ(sc, WMREG_FEXTNVM4);
			reg &= ~FEXTNVM4_BEACON_DURATION;
			reg |= FEXTNVM4_BEACON_DURATION_16US;
			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
		}
	}

	return 0;
}

/*
 * wm_link_stall_workaround_hv - Si workaround
 * @sc: pointer to the HW structure
 *
 * This function works around a Si bug where the link partner can get
 * a link up indication before the PHY does. If small packets are sent
 * by the link partner, they can be placed in the packet buffer without
 * being properly accounted for by the PHY and will stall, preventing
 * further packets from being received. The workaround is to clear the
 * packet buffer after the PHY detects link up.
 */
static int
wm_link_stall_workaround_hv(struct wm_softc *sc)
{
	uint16_t phyreg;

	if (sc->sc_phytype != WMPHY_82578)
		return 0;

	/* Do not apply the workaround if in PHY loopback (BMCR bit 14 set) */
	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
	if ((phyreg & BMCR_LOOP) != 0)
		return 0;

	/* Check if link is up and at 1Gbps */
	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
	    | BM_CS_STATUS_SPEED_MASK;
	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
	    | BM_CS_STATUS_SPEED_1000))
		return 0;

	delay(200 * 1000);	/* XXX too big */

	/* Flush the packets in the fifo buffer */
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
	    HV_MUX_DATA_CTRL_GEN_TO_MAC);

	return 0;
}

static int
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	int rv;

	rv = sc->phy.acquire(sc);
	if (rv != 0) {
		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return rv;
	}

	rv = wm_set_mdio_slow_mode_hv_locked(sc);

	sc->phy.release(sc);

	return rv;
}

static int
wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
{
	int rv;
	uint16_t reg;

	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    &reg);
	if (rv != 0)
		return rv;

	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

/*
 * wm_configure_k1_ich8lan - Configure K1 power state
 * @sc: pointer to the HW structure
 * @enable: K1 state to configure
 *
 * Configure the K1 power state based on the provided parameter.
 * Assumes semaphore already acquired.
 */
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmreg;
	int rv;

	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
	if (rv != 0)
		return;

	if (k1_enable)
		kmreg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmreg &= ~KUMCTRLSTA_K1_ENABLE;

	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
	if (rv != 0)
		return;

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	return;
}

/* special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */
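	/*
	 * In each wm_82575_write_8bit_ctlr_reg() call below, the second
	 * argument is presumably the sub-register offset within the
	 * addressed block and the third is the 8-bit value to program;
	 * see that helper for the exact register access protocol.
	 */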

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if (sc->sc_type != WM_T_82580)
		return;
	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

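/* An MII PHY ID of all zeros or all ones means an absent or unresponsive PHY. */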
#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t id1, id2;
	int i, rv;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
		    &id1);
		if ((rv != 0) || MII_INVALIDID(id1))
			continue;
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
		    &id2);
		if ((rv != 0) || MII_INVALIDID(id2))
			continue;
		break;
	}
	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	/*
	 * In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	rv = 0;
	if (sc->sc_type < WM_T_PCH_LPT) {
		wm_set_mdio_slow_mode_hv_locked(sc);
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
		    &id1);
		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
		    &id2);
	}
	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev, "XXX return with false\n");
		return false;
	}
out:
	if (sc->sc_type >= WM_T_PCH_LPT) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			uint16_t phyreg;

			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, phyreg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
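		/*
		 * lat_ns above is the time, in ns, needed to drain the Rx
		 * packet buffer less two maximum-sized frames at the current
		 * link speed (bytes * 8 bits * 1000 / speed-in-Mb/s == ns).
		 * Worked example of the encoding (assuming LTRV_VALUE is the
		 * 10-bit value mask, 0x3ff): for lat_ns = 500000 the loop
		 * above runs twice (500000 -> 15625 -> 489), leaving
		 * scale = 2 and value = 489, so lat_enc represents
		 * 489 * 2^(5*2) = 500736 ns, the smallest encodable latency
		 * that is >= the requested one.
		 */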

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d "
			    "(rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 *
 * Note that this function is called in both the FLASH and iNVM cases
 * on NetBSD.
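 *
 * The recovery sequence in the loop below: while the internal PHY's PLL
 * reports itself unconfigured, reset the PHY, write an Initialization
 * Control Word with INVM_PLL_WO_VAL set to WMREG_EEARBC_I210, bounce the
 * function through D3hot and back via the PCI PMCSR, restore the original
 * word, and retry (up to WM_MAX_PLL_TRIES times).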
 */
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	uint16_t phyval;
	bool wa_done = false;
	int i, rv = 0;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return -1;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
		/*
		 * The default value of the Initialization Control Word 1
		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
		 */
		nvmword = INVM_DEFAULT_AL;
	}
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break;	/* OK */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
	    device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}

/* Sysctl functions */
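/*
 * The handlers below follow the usual NetBSD read-only sysctl pattern:
 * copy the live hardware register into a local variable, point
 * node.sysctl_data at it, and let sysctl_lookup(9) copy the value out
 * to userland.
 */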
static int
wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

#ifdef WM_DEBUG
static int
wm_sysctl_debug(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
	uint32_t dflags;
	int error;

	dflags = sc->sc_debug;
	node.sysctl_data = &dflags;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error || newp == NULL)
		return error;

	sc->sc_debug = dflags;
	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));

	return 0;
}
#endif
