/*	$NetBSD: if_wm.c,v 1.790 2023/10/11 15:05:26 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.790 2023/10/11 15:05:26 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define WM_DEBUG_LINK		__BIT(0)
#define WM_DEBUG_TX		__BIT(1)
#define WM_DEBUG_RX		__BIT(2)
#define WM_DEBUG_GMII		__BIT(3)
#define WM_DEBUG_MANAGE		__BIT(4)
#define WM_DEBUG_NVM		__BIT(5)
#define WM_DEBUG_INIT		__BIT(6)
#define WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define DPRINTF(sc, x, y)			\
	do {					\
		if ((sc)->sc_debug & (x))	\
			printf y;		\
	} while (0)
#else
#define DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
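
/*
 * DPRINTF()'s "y" argument must carry its own parentheses, since the
 * macro body expands it as "printf y".  A usage sketch (the message
 * text is hypothetical):
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link is up\n", device_xname(sc->sc_dev)));
 */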

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts that this device driver can use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define WM_NTXSEGS		64
#define WM_IFQUEUELEN		256
#define WM_TXQUEUELEN_MAX	64
#define WM_TXQUEUELEN_MAX_82547	16
#define WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define WM_NTXDESC_82542	256
#define WM_NTXDESC_82544	4096
#define WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
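
/*
 * Both ring sizes are powers of two, so WM_NEXTTX()/WM_NEXTTXS() can wrap
 * with a mask instead of a modulo.  For example, with WM_NTXDESC(txq) == 256
 * (mask 0xff):
 *
 *	WM_NEXTTX(txq, 254) == 255
 *	WM_NEXTTX(txq, 255) == 0	(wraps to the start of the ring)
 */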

#define WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets; a full-sized jumbo packet consumes 5 Rx buffers.
 * We allocate 256 receive descriptors, each with a 2k buffer
 * (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC		256U
#define WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
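
/*
 * A sketch of the buffer arithmetic behind "room for 50 jumbo packets"
 * above, assuming a 9018-byte jumbo frame (header included; the frame
 * size is an assumption for illustration, not taken from this driver):
 *
 *	howmany(9018, MCLBYTES) == howmany(9018, 2048) == 5 buffers/packet
 *	WM_NRXDESC / 5 == 256 / 5 == 51, i.e. roughly 50 packets
 */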

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we
 * chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS	1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)			\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
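
/*
 * As an illustration of the token pasting above, WM_Q_EVCNT_DEFINE(txq,
 * ipsum) expands to
 *
 *	char txq_ipsum_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_ipsum;
 *
 * (the string literal inside sizeof() is not macro-expanded, so every name
 * buffer is the same 18 bytes), and WM_Q_EVCNT_ATTACH() then snprintf()s a
 * name such as "txq00ipsum" into that buffer before passing it to
 * evcnt_attach_dynamic().
 */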

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define txq_descs	txq_descs_u->sctxu_txdescs
#define txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define WM_TXQ_NO_SPACE		0x1
#define WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t txq_last_hw_cmd;
	uint8_t txq_last_hw_fields;
	uint16_t txq_last_hw_ipcs;
	uint16_t txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define rxq_descs	rxq_descs_u->sctxu_rxdescs
#define rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* name for sysctl */

	bool wmq_txrx_use_workqueue;
	bool wmq_wq_enqueued;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define WM_MEDIATYPE_UNKNOWN		0x00
#define WM_MEDIATYPE_FIBER		0x01
#define WM_MEDIATYPE_COPPER		0x02
#define WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	u_int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to
					 * sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_cbtmpc;	/* Circuit Breaker Tx Mng. Packet */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_sec;		/* Sequence Error */

	/* Old */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	/* New */
	struct evcnt sc_ev_htdpmc;	/* Host Tx Discarded Pkts by MAC */

	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_cbrdpc;	/* Circuit Breaker Rx Dropped Packet */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */

	/* Old */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	/* New */
	struct evcnt sc_ev_cbrmpc;	/* Circuit Breaker Rx Mng. Packet */

	struct evcnt sc_ev_iac;		/* Interrupt Assertion */

	/* Old */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxatc;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	/*
	 * sc_ev_rxdmtc is shared by both the "Intr. Cause" and the
	 * non-"Intr. Cause" registers.
	 */
	struct evcnt sc_ev_rxdmtc;	/* (Intr. Cause) Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	/* New */
	struct evcnt sc_ev_rpthc;	/* Rx Packets To Host */
	struct evcnt sc_ev_debug1;	/* Debug Counter 1 */
	struct evcnt sc_ev_debug2;	/* Debug Counter 2 */
	struct evcnt sc_ev_debug3;	/* Debug Counter 3 */
	struct evcnt sc_ev_hgptc;	/* Host Good Packets TX */
	struct evcnt sc_ev_debug4;	/* Debug Counter 4 */
	struct evcnt sc_ev_htcbdpc;	/* Host Tx Circuit Breaker Drp. Pkts */
	struct evcnt sc_ev_hgorc;	/* Host Good Octets Rx */
	struct evcnt sc_ev_hgotc;	/* Host Good Octets Tx */
	struct evcnt sc_ev_lenerrs;	/* Length Error */
	struct evcnt sc_ev_tlpic;	/* EEE Tx LPI */
	struct evcnt sc_ev_rlpic;	/* EEE Rx LPI */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
	struct evcnt sc_ev_scvpc;	/* SerDes/SGMII Code Violation Pkt. */
	struct evcnt sc_ev_hrmpc;	/* Header Redirection Missed Packet */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */
	struct timeval sc_linkup_delay_time; /* delay LINK_STATE_UP */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define WM_RXCHAIN_RESET(rxq)						\
	do {								\
		(rxq)->rxq_tailp = &(rxq)->rxq_head;			\
		*(rxq)->rxq_tailp = NULL;				\
		(rxq)->rxq_len = 0;					\
	} while (/*CONSTCOND*/0)

#define WM_RXCHAIN_LINK(rxq, m)						\
	do {								\
		*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);		\
		(rxq)->rxq_tailp = &(m)->m_next;			\
	} while (/*CONSTCOND*/0)
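
/*
 * These two macros implement the classic tail-pointer append: rxq_tailp
 * points either at rxq_head (empty chain) or at the last mbuf's m_next,
 * so WM_RXCHAIN_LINK() needs no empty-chain special case.  A sketch of
 * how they compose while a multi-buffer packet is reassembled:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head = NULL
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head = m1, tailp = &m1->m_next
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next = m2, tailp = &m2->m_next
 */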

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define WM_EVCNT_STORE(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count), (val))
#define WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define WM_EVCNT_STORE(ev, val)						\
	((ev)->ev_count = (val))
#define WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)					\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_STORE(qname, evname, val)				\
	WM_EVCNT_STORE(&(qname)->qname##_ev_##evname, (val))
#define WM_Q_EVCNT_ADD(qname, evname, val)				\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define WM_EVCNT_INCR(ev)		__nothing
#define WM_EVCNT_STORE(ev, val)		__nothing
#define WM_EVCNT_ADD(ev, val)		__nothing

#define WM_Q_EVCNT_INCR(qname, evname)		__nothing
#define WM_Q_EVCNT_STORE(qname, evname, val)	__nothing
#define WM_Q_EVCNT_ADD(qname, evname, val)	__nothing
#endif /* !WM_EVENT_COUNTERS */

#define CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
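
/*
 * CSR_WRITE_FLUSH() forces posted writes out to the chip by issuing a
 * harmless read of the status register.  A typical sketch (the reset
 * sequence shown is illustrative, not a quote of this driver):
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10000);
 */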

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	    (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	    (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
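
/*
 * The _LO/_HI halves above are what gets programmed into the 64-bit
 * descriptor base address register pairs.  An illustrative sketch
 * (register names are from if_wmreg.h; the exact sequence here is an
 * example, not a quote of this driver):
 *
 *	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
 *	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
 */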

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static bool	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_update_stats(struct wm_softc *);
static void	wm_clear_evcnt(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
static void	wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (with/without EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and reading */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
1476 "82801I mobile (V) LAN Controller",
1477 WM_T_ICH9, WMP_F_COPPER },
1478 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1479 "82801I mobile (AMT) LAN Controller",
1480 WM_T_ICH9, WMP_F_COPPER },
1481 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
1482 "82567LM-4 LAN Controller",
1483 WM_T_ICH9, WMP_F_COPPER },
1484 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1485 "82567LM-2 LAN Controller",
1486 WM_T_ICH10, WMP_F_COPPER },
1487 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1488 "82567LF-2 LAN Controller",
1489 WM_T_ICH10, WMP_F_COPPER },
1490 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1491 "82567LM-3 LAN Controller",
1492 WM_T_ICH10, WMP_F_COPPER },
1493 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1494 "82567LF-3 LAN Controller",
1495 WM_T_ICH10, WMP_F_COPPER },
1496 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
1497 "82567V-2 LAN Controller",
1498 WM_T_ICH10, WMP_F_COPPER },
1499 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V,
1500 "82567V-3? LAN Controller",
1501 WM_T_ICH10, WMP_F_COPPER },
1502 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE,
1503 "HANKSVILLE LAN Controller",
1504 WM_T_ICH10, WMP_F_COPPER },
1505 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
1506 "PCH LAN (82577LM) Controller",
1507 WM_T_PCH, WMP_F_COPPER },
1508 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
1509 "PCH LAN (82577LC) Controller",
1510 WM_T_PCH, WMP_F_COPPER },
1511 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
1512 "PCH LAN (82578DM) Controller",
1513 WM_T_PCH, WMP_F_COPPER },
1514 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
1515 "PCH LAN (82578DC) Controller",
1516 WM_T_PCH, WMP_F_COPPER },
1517 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
1518 "PCH2 LAN (82579LM) Controller",
1519 WM_T_PCH2, WMP_F_COPPER },
1520 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
1521 "PCH2 LAN (82579V) Controller",
1522 WM_T_PCH2, WMP_F_COPPER },
1523 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
1524 "82575EB dual-1000baseT Ethernet",
1525 WM_T_82575, WMP_F_COPPER },
1526 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1527 "82575EB dual-1000baseX Ethernet (SERDES)",
1528 WM_T_82575, WMP_F_SERDES },
1529 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1530 "82575GB quad-1000baseT Ethernet",
1531 WM_T_82575, WMP_F_COPPER },
1532 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1533 "82575GB quad-1000baseT Ethernet (PM)",
1534 WM_T_82575, WMP_F_COPPER },
1535 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
1536 "82576 1000BaseT Ethernet",
1537 WM_T_82576, WMP_F_COPPER },
1538 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
1539 "82576 1000BaseX Ethernet",
1540 WM_T_82576, WMP_F_FIBER },
1541
1542 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
1543 "82576 gigabit Ethernet (SERDES)",
1544 WM_T_82576, WMP_F_SERDES },
1545
1546 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1547 "82576 quad-1000BaseT Ethernet",
1548 WM_T_82576, WMP_F_COPPER },
1549
1550 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1551 "82576 Gigabit ET2 Quad Port Server Adapter",
1552 WM_T_82576, WMP_F_COPPER },
1553
1554 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
1555 "82576 gigabit Ethernet",
1556 WM_T_82576, WMP_F_COPPER },
1557
1558 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
1559 "82576 gigabit Ethernet (SERDES)",
1560 WM_T_82576, WMP_F_SERDES },
1561 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1562 "82576 quad-gigabit Ethernet (SERDES)",
1563 WM_T_82576, WMP_F_SERDES },
1564
1565 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
1566 "82580 1000BaseT Ethernet",
1567 WM_T_82580, WMP_F_COPPER },
1568 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
1569 "82580 1000BaseX Ethernet",
1570 WM_T_82580, WMP_F_FIBER },
1571
1572 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
1573 "82580 1000BaseT Ethernet (SERDES)",
1574 WM_T_82580, WMP_F_SERDES },
1575
1576 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
1577 "82580 gigabit Ethernet (SGMII)",
1578 WM_T_82580, WMP_F_COPPER },
1579 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1580 "82580 dual-1000BaseT Ethernet",
1581 WM_T_82580, WMP_F_COPPER },
1582
1583 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1584 "82580 quad-1000BaseX Ethernet",
1585 WM_T_82580, WMP_F_FIBER },
1586
1587 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1588 "DH89XXCC Gigabit Ethernet (SGMII)",
1589 WM_T_82580, WMP_F_COPPER },
1590
1591 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1592 "DH89XXCC Gigabit Ethernet (SERDES)",
1593 WM_T_82580, WMP_F_SERDES },
1594
1595 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1596 "DH89XXCC 1000BASE-KX Ethernet",
1597 WM_T_82580, WMP_F_SERDES },
1598
1599 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1600 "DH89XXCC Gigabit Ethernet (SFP)",
1601 WM_T_82580, WMP_F_SERDES },
1602
1603 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1604 "I350 Gigabit Network Connection",
1605 WM_T_I350, WMP_F_COPPER },
1606
1607 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1608 "I350 Gigabit Fiber Network Connection",
1609 WM_T_I350, WMP_F_FIBER },
1610
1611 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1612 "I350 Gigabit Backplane Connection",
1613 WM_T_I350, WMP_F_SERDES },
1614
1615 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
1616 "I350 Quad Port Gigabit Ethernet",
1617 WM_T_I350, WMP_F_SERDES },
1618
1619 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1620 "I350 Gigabit Connection",
1621 WM_T_I350, WMP_F_COPPER },
1622
1623 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
1624 "I354 Gigabit Ethernet (KX)",
1625 WM_T_I354, WMP_F_SERDES },
1626
1627 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1628 "I354 Gigabit Ethernet (SGMII)",
1629 WM_T_I354, WMP_F_COPPER },
1630
1631 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
1632 "I354 Gigabit Ethernet (2.5G)",
1633 WM_T_I354, WMP_F_COPPER },
1634
1635 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1636 "I210-T1 Ethernet Server Adapter",
1637 WM_T_I210, WMP_F_COPPER },
1638
1639 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1640 "I210 Ethernet (Copper OEM)",
1641 WM_T_I210, WMP_F_COPPER },
1642
1643 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1644 "I210 Ethernet (Copper IT)",
1645 WM_T_I210, WMP_F_COPPER },
1646
1647 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1648 "I210 Ethernet (Copper, FLASH less)",
1649 WM_T_I210, WMP_F_COPPER },
1650
1651 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1652 "I210 Gigabit Ethernet (Fiber)",
1653 WM_T_I210, WMP_F_FIBER },
1654
1655 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1656 "I210 Gigabit Ethernet (SERDES)",
1657 WM_T_I210, WMP_F_SERDES },
1658
1659 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1660 "I210 Gigabit Ethernet (SERDES, FLASH less)",
1661 WM_T_I210, WMP_F_SERDES },
1662
1663 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1664 "I210 Gigabit Ethernet (SGMII)",
1665 WM_T_I210, WMP_F_COPPER },
1666
1667 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1668 "I210 Gigabit Ethernet (SGMII, FLASH less)",
1669 WM_T_I210, WMP_F_COPPER },
1670
1671 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1672 "I211 Ethernet (COPPER)",
1673 WM_T_I211, WMP_F_COPPER },
1674 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1675 "I217 V Ethernet Connection",
1676 WM_T_PCH_LPT, WMP_F_COPPER },
1677 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1678 "I217 LM Ethernet Connection",
1679 WM_T_PCH_LPT, WMP_F_COPPER },
1680 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1681 "I218 V Ethernet Connection",
1682 WM_T_PCH_LPT, WMP_F_COPPER },
1683 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
1684 "I218 V Ethernet Connection",
1685 WM_T_PCH_LPT, WMP_F_COPPER },
1686 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
1687 "I218 V Ethernet Connection",
1688 WM_T_PCH_LPT, WMP_F_COPPER },
1689 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1690 "I218 LM Ethernet Connection",
1691 WM_T_PCH_LPT, WMP_F_COPPER },
1692 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
1693 "I218 LM Ethernet Connection",
1694 WM_T_PCH_LPT, WMP_F_COPPER },
1695 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
1696 "I218 LM Ethernet Connection",
1697 WM_T_PCH_LPT, WMP_F_COPPER },
1698 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM,
1699 "I219 LM Ethernet Connection",
1700 WM_T_PCH_SPT, WMP_F_COPPER },
1701 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2,
1702 "I219 LM (2) Ethernet Connection",
1703 WM_T_PCH_SPT, WMP_F_COPPER },
1704 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3,
1705 "I219 LM (3) Ethernet Connection",
1706 WM_T_PCH_SPT, WMP_F_COPPER },
1707 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4,
1708 "I219 LM (4) Ethernet Connection",
1709 WM_T_PCH_SPT, WMP_F_COPPER },
1710 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5,
1711 "I219 LM (5) Ethernet Connection",
1712 WM_T_PCH_SPT, WMP_F_COPPER },
1713 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM6,
1714 "I219 LM (6) Ethernet Connection",
1715 WM_T_PCH_CNP, WMP_F_COPPER },
1716 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM7,
1717 "I219 LM (7) Ethernet Connection",
1718 WM_T_PCH_CNP, WMP_F_COPPER },
1719 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM8,
1720 "I219 LM (8) Ethernet Connection",
1721 WM_T_PCH_CNP, WMP_F_COPPER },
1722 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM9,
1723 "I219 LM (9) Ethernet Connection",
1724 WM_T_PCH_CNP, WMP_F_COPPER },
1725 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM10,
1726 "I219 LM (10) Ethernet Connection",
1727 WM_T_PCH_CNP, WMP_F_COPPER },
1728 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM11,
1729 "I219 LM (11) Ethernet Connection",
1730 WM_T_PCH_CNP, WMP_F_COPPER },
1731 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM12,
1732 "I219 LM (12) Ethernet Connection",
1733 WM_T_PCH_SPT, WMP_F_COPPER },
1734 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM13,
1735 "I219 LM (13) Ethernet Connection",
1736 WM_T_PCH_TGP, WMP_F_COPPER },
1737 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM14,
1738 "I219 LM (14) Ethernet Connection",
1739 WM_T_PCH_TGP, WMP_F_COPPER },
1740 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM15,
1741 "I219 LM (15) Ethernet Connection",
1742 WM_T_PCH_TGP, WMP_F_COPPER },
1743 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM16,
1744 "I219 LM (16) Ethernet Connection",
1745 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP */
1746 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM17,
1747 "I219 LM (17) Ethernet Connection",
1748 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP */
1749 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM18,
1750 "I219 LM (18) Ethernet Connection",
1751 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */
1752 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM19,
1753 "I219 LM (19) Ethernet Connection",
1754 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */
1755 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V,
1756 "I219 V Ethernet Connection",
1757 WM_T_PCH_SPT, WMP_F_COPPER },
1758 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2,
1759 "I219 V (2) Ethernet Connection",
1760 WM_T_PCH_SPT, WMP_F_COPPER },
1761 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4,
1762 "I219 V (4) Ethernet Connection",
1763 WM_T_PCH_SPT, WMP_F_COPPER },
1764 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5,
1765 "I219 V (5) Ethernet Connection",
1766 WM_T_PCH_SPT, WMP_F_COPPER },
1767 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6,
1768 "I219 V (6) Ethernet Connection",
1769 WM_T_PCH_CNP, WMP_F_COPPER },
1770 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7,
1771 "I219 V (7) Ethernet Connection",
1772 WM_T_PCH_CNP, WMP_F_COPPER },
1773 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V8,
1774 "I219 V (8) Ethernet Connection",
1775 WM_T_PCH_CNP, WMP_F_COPPER },
1776 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V9,
1777 "I219 V (9) Ethernet Connection",
1778 WM_T_PCH_CNP, WMP_F_COPPER },
1779 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V10,
1780 "I219 V (10) Ethernet Connection",
1781 WM_T_PCH_CNP, WMP_F_COPPER },
1782 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V11,
1783 "I219 V (11) Ethernet Connection",
1784 WM_T_PCH_CNP, WMP_F_COPPER },
1785 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V12,
1786 "I219 V (12) Ethernet Connection",
1787 WM_T_PCH_SPT, WMP_F_COPPER },
1788 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V13,
1789 "I219 V (13) Ethernet Connection",
1790 WM_T_PCH_TGP, WMP_F_COPPER },
1791 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V14,
1792 "I219 V (14) Ethernet Connection",
1793 WM_T_PCH_TGP, WMP_F_COPPER },
1794 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V15,
1795 "I219 V (15) Ethernet Connection",
1796 WM_T_PCH_TGP, WMP_F_COPPER },
1797 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V16,
1798 "I219 V (16) Ethernet Connection",
1799 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP */
1800 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V17,
1801 "I219 V (17) Ethernet Connection",
1802 WM_T_PCH_TGP, WMP_F_COPPER }, /* ADP */
1803 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V18,
1804 "I219 V (18) Ethernet Connection",
1805 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */
1806 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V19,
1807 "I219 V (19) Ethernet Connection",
1808 WM_T_PCH_TGP, WMP_F_COPPER }, /* MTP */
1809 { 0, 0,
1810 NULL,
1811 0, 0 },
1812 };
1813
1814 /*
1815 * Register read/write functions.
1816 * Other than CSR_{READ|WRITE}().
1817 */
1818
1819 #if 0 /* Not currently used */
1820 static inline uint32_t
1821 wm_io_read(struct wm_softc *sc, int reg)
1822 {
1823
1824 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1825 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1826 }
1827 #endif
1828
1829 static inline void
1830 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1831 {
1832
1833 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1834 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1835 }
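/*
 * Illustrative sketch, not part of the driver: the pair of accessors
 * above implements the i8254x I/O-mapped indirect window, where BAR
 * offset 0 selects a register and offset 4 carries its data. A
 * hypothetical read-modify-write wrapper built only from these two
 * helpers could look like this; it is kept under #if 0, like
 * wm_io_read() above.
 */
#if 0 /* Hedged example only */
static inline void
wm_io_setbits(struct wm_softc *sc, int reg, uint32_t bits)
{

	wm_io_write(sc, reg, wm_io_read(sc, reg) | bits);
}
#endif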
1836
1837 static inline void
1838 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1839 uint32_t data)
1840 {
1841 uint32_t regval;
1842 int i;
1843
1844 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1845
1846 CSR_WRITE(sc, reg, regval);
1847
1848 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1849 delay(5);
1850 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1851 break;
1852 }
1853 if (i == SCTL_CTL_POLL_TIMEOUT) {
1854 aprint_error("%s: WARNING:"
1855 " i82575 reg 0x%08x setup did not indicate ready\n",
1856 device_xname(sc->sc_dev), reg);
1857 }
1858 }
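/*
 * Usage sketch (illustrative assumption, not a call taken from this
 * file; WMREG_SCTL and the offset/data values are placeholders): a
 * caller passes the controlling CSR, the 8-bit register offset within
 * that controller, and the data byte, e.g.
 *
 *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x8c);
 *
 * The helper then busy-waits up to SCTL_CTL_POLL_TIMEOUT polls of
 * 5us each for SCTL_CTL_READY and warns if the device never reports
 * ready.
 */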
1859
1860 static inline void
1861 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1862 {
1863 wa->wa_low = htole32(BUS_ADDR_LO32(v));
1864 wa->wa_high = htole32(BUS_ADDR_HI32(v));
1865 }
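/*
 * Usage note (illustrative): callers hand this the bus address of a
 * DMA segment and the helper splits the (possibly 64-bit) value into
 * the two little-endian words of a wiseman address. wm_init_rxdesc()
 * below does exactly this for legacy receive descriptors:
 *
 *	wm_set_dma_addr(&rxd->wrx_addr,
 *	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
 */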
1866
1867 /*
1868 * Descriptor sync/init functions.
1869 */
1870 static inline void
1871 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1872 {
1873 struct wm_softc *sc = txq->txq_sc;
1874
1875 /* If it will wrap around, sync to the end of the ring. */
1876 if ((start + num) > WM_NTXDESC(txq)) {
1877 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1878 WM_CDTXOFF(txq, start), txq->txq_descsize *
1879 (WM_NTXDESC(txq) - start), ops);
1880 num -= (WM_NTXDESC(txq) - start);
1881 start = 0;
1882 }
1883
1884 /* Now sync whatever is left. */
1885 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1886 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1887 }
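/*
 * Worked example (illustrative): on a 256-descriptor ring, syncing
 * num = 8 descriptors starting at start = 252 wraps past the end,
 * so the code above issues two bus_dmamap_sync() calls: one for
 * descriptors 252..255 and one for descriptors 0..3.
 */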
1888
1889 static inline void
1890 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1891 {
1892 struct wm_softc *sc = rxq->rxq_sc;
1893
1894 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1895 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1896 }
1897
1898 static inline void
1899 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1900 {
1901 struct wm_softc *sc = rxq->rxq_sc;
1902 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1903 struct mbuf *m = rxs->rxs_mbuf;
1904
1905 /*
1906 * Note: We scoot the packet forward 2 bytes in the buffer
1907 * so that the payload after the Ethernet header is aligned
1908 * to a 4-byte boundary (the 14-byte Ethernet header then ends
1909 * at offset 16, so the IP header that follows is 4-byte aligned).
 *
1910 * XXX BRAINDAMAGE ALERT!
1911 * The stupid chip uses the same size for every buffer, which
1912 * is set in the Receive Control register. We are using the 2K
1913 * size option, but what we REALLY want is (2K - 2)! For this
1914 * reason, we can't "scoot" packets longer than the standard
1915 * Ethernet MTU. On strict-alignment platforms, if the total
1916 * size exceeds (2K - 2) we set align_tweak to 0 and let
1917 * the upper layer copy the headers.
1918 */
1919 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1920
1921 if (sc->sc_type == WM_T_82574) {
1922 ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1923 rxd->erx_data.erxd_addr =
1924 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1925 rxd->erx_data.erxd_dd = 0;
1926 } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1927 nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1928
1929 rxd->nqrx_data.nrxd_paddr =
1930 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1931 /* Currently, split header is not supported. */
1932 rxd->nqrx_data.nrxd_haddr = 0;
1933 } else {
1934 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1935
1936 wm_set_dma_addr(&rxd->wrx_addr,
1937 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1938 rxd->wrx_len = 0;
1939 rxd->wrx_cksum = 0;
1940 rxd->wrx_status = 0;
1941 rxd->wrx_errors = 0;
1942 rxd->wrx_special = 0;
1943 }
1944 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1945
1946 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1947 }
1948
1949 /*
1950 * Device driver interface functions and commonly used functions.
1951 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1952 */
1953
1954 /* Lookup supported device table */
1955 static const struct wm_product *
1956 wm_lookup(const struct pci_attach_args *pa)
1957 {
1958 const struct wm_product *wmp;
1959
1960 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1961 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1962 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1963 return wmp;
1964 }
1965 return NULL;
1966 }
1967
1968 /* The match function (ca_match) */
1969 static int
1970 wm_match(device_t parent, cfdata_t cf, void *aux)
1971 {
1972 struct pci_attach_args *pa = aux;
1973
1974 if (wm_lookup(pa) != NULL)
1975 return 1;
1976
1977 return 0;
1978 }
1979
1980 /* The attach function (ca_attach) */
1981 static void
1982 wm_attach(device_t parent, device_t self, void *aux)
1983 {
1984 struct wm_softc *sc = device_private(self);
1985 struct pci_attach_args *pa = aux;
1986 prop_dictionary_t dict;
1987 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1988 pci_chipset_tag_t pc = pa->pa_pc;
1989 int counts[PCI_INTR_TYPE_SIZE];
1990 pci_intr_type_t max_type;
1991 const char *eetype, *xname;
1992 bus_space_tag_t memt;
1993 bus_space_handle_t memh;
1994 bus_size_t memsize;
1995 int memh_valid;
1996 int i, error;
1997 const struct wm_product *wmp;
1998 prop_data_t ea;
1999 prop_number_t pn;
2000 uint8_t enaddr[ETHER_ADDR_LEN];
2001 char buf[256];
2002 char wqname[MAXCOMLEN];
2003 uint16_t cfg1, cfg2, swdpin, nvmword;
2004 pcireg_t preg, memtype;
2005 uint16_t eeprom_data, apme_mask;
2006 bool force_clear_smbi;
2007 uint32_t link_mode;
2008 uint32_t reg;
2009
2010 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
2011 sc->sc_debug = WM_DEBUG_DEFAULT;
2012 #endif
2013 sc->sc_dev = self;
2014 callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
2015 callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
2016 sc->sc_core_stopping = false;
2017
2018 wmp = wm_lookup(pa);
2019 #ifdef DIAGNOSTIC
2020 if (wmp == NULL) {
2021 printf("\n");
2022 panic("wm_attach: impossible");
2023 }
2024 #endif
2025 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
2026
2027 sc->sc_pc = pa->pa_pc;
2028 sc->sc_pcitag = pa->pa_tag;
2029
2030 if (pci_dma64_available(pa)) {
2031 aprint_verbose(", 64-bit DMA");
2032 sc->sc_dmat = pa->pa_dmat64;
2033 } else {
2034 aprint_verbose(", 32-bit DMA");
2035 sc->sc_dmat = pa->pa_dmat;
2036 }
2037
2038 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
2039 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
2040 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
2041
2042 sc->sc_type = wmp->wmp_type;
2043
2044 /* Set default function pointers */
2045 sc->phy.acquire = sc->nvm.acquire = wm_get_null;
2046 sc->phy.release = sc->nvm.release = wm_put_null;
2047 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
2048
2049 if (sc->sc_type < WM_T_82543) {
2050 if (sc->sc_rev < 2) {
2051 aprint_error_dev(sc->sc_dev,
2052 "i82542 must be at least rev. 2\n");
2053 return;
2054 }
2055 if (sc->sc_rev < 3)
2056 sc->sc_type = WM_T_82542_2_0;
2057 }
2058
2059 /*
2060 * Disable MSI for Errata:
2061 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
2062 *
2063 * 82544: Errata 25
2064 * 82540: Errata 6 (easy to reproduce device timeout)
2065 * 82545: Errata 4 (easy to reproduce device timeout)
2066 * 82546: Errata 26 (easy to reproduce device timeout)
2067 * 82541: Errata 7 (easy to reproduce device timeout)
2068 *
2069 * "Byte Enables 2 and 3 are not set on MSI writes"
2070 *
2071 * 82571 & 82572: Errata 63
2072 */
2073 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
2074 || (sc->sc_type == WM_T_82572))
2075 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
2076
2077 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2078 || (sc->sc_type == WM_T_82580)
2079 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2080 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2081 sc->sc_flags |= WM_F_NEWQUEUE;
2082
2083 /* Set device properties (mactype) */
2084 dict = device_properties(sc->sc_dev);
2085 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
2086
2087 /*
2088 * Map the device. All devices support memory-mapped access,
2089 * and it is really required for normal operation.
2090 */
2091 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
2092 switch (memtype) {
2093 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2094 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2095 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
2096 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
2097 break;
2098 default:
2099 memh_valid = 0;
2100 break;
2101 }
2102
2103 if (memh_valid) {
2104 sc->sc_st = memt;
2105 sc->sc_sh = memh;
2106 sc->sc_ss = memsize;
2107 } else {
2108 aprint_error_dev(sc->sc_dev,
2109 "unable to map device registers\n");
2110 return;
2111 }
2112
2113 /*
2114 * In addition, i82544 and later support I/O mapped indirect
2115 * register access. It is not desirable (nor supported in
2116 * this driver) to use it for normal operation, though it is
2117 * required to work around bugs in some chip versions.
2118 */
2119 switch (sc->sc_type) {
2120 case WM_T_82544:
2121 case WM_T_82541:
2122 case WM_T_82541_2:
2123 case WM_T_82547:
2124 case WM_T_82547_2:
2125 /* First we have to find the I/O BAR. */
2126 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
2127 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
2128 if (memtype == PCI_MAPREG_TYPE_IO)
2129 break;
2130 if (PCI_MAPREG_MEM_TYPE(memtype) ==
2131 PCI_MAPREG_MEM_TYPE_64BIT)
2132 i += 4; /* skip high bits, too */
2133 }
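/*
 * Worked example (illustrative): if the BAR at PCI_MAPREG_START
 * is a 64-bit memory BAR, the loop above advances past both of
 * its dwords and tests the next BAR for PCI_MAPREG_TYPE_IO.
 */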
2134 if (i < PCI_MAPREG_END) {
2135 /*
2136 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
2137 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
2138 * That's not a problem, because those newer chips
2139 * don't have this bug in the first place.
2140 *
2141 * The i8254x apparently doesn't respond when the
2142 * I/O BAR is 0, which makes it look as if it has
2143 * not been configured.
2144 */
2145 preg = pci_conf_read(pc, pa->pa_tag, i);
2146 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
2147 aprint_error_dev(sc->sc_dev,
2148 "WARNING: I/O BAR at zero.\n");
2149 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
2150 0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
2151 == 0) {
2152 sc->sc_flags |= WM_F_IOH_VALID;
2153 } else
2154 aprint_error_dev(sc->sc_dev,
2155 "WARNING: unable to map I/O space\n");
2156 }
2157 break;
2158 default:
2159 break;
2160 }
2161
2162 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
2163 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2164 preg |= PCI_COMMAND_MASTER_ENABLE;
2165 if (sc->sc_type < WM_T_82542_2_1)
2166 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
2167 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
2168
2169 /* Power up chip */
2170 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
2171 && error != EOPNOTSUPP) {
2172 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
2173 return;
2174 }
2175
2176 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2177 /*
2178 * To save interrupt resources, don't use MSI-X when only one
2179 * queue can be used.
2180 */
2181 if (sc->sc_nqueues > 1) {
2182 max_type = PCI_INTR_TYPE_MSIX;
2183 /*
2184 * The 82583 advertises an MSI-X capability in the PCI
2185 * configuration space but doesn't actually support it. At least
2186 * the documentation says nothing about MSI-X.
2187 */
2188 counts[PCI_INTR_TYPE_MSIX]
2189 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2190 } else {
2191 max_type = PCI_INTR_TYPE_MSI;
2192 counts[PCI_INTR_TYPE_MSIX] = 0;
2193 }
2194
2195 /* Allocation settings */
2196 counts[PCI_INTR_TYPE_MSI] = 1;
2197 counts[PCI_INTR_TYPE_INTX] = 1;
2198 /* overridden by disable flags */
2199 if (wm_disable_msi != 0) {
2200 counts[PCI_INTR_TYPE_MSI] = 0;
2201 if (wm_disable_msix != 0) {
2202 max_type = PCI_INTR_TYPE_INTX;
2203 counts[PCI_INTR_TYPE_MSIX] = 0;
2204 }
2205 } else if (wm_disable_msix != 0) {
2206 max_type = PCI_INTR_TYPE_MSI;
2207 counts[PCI_INTR_TYPE_MSIX] = 0;
2208 }
2209
2210 alloc_retry:
2211 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2212 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2213 return;
2214 }
2215
2216 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2217 error = wm_setup_msix(sc);
2218 if (error) {
2219 pci_intr_release(pc, sc->sc_intrs,
2220 counts[PCI_INTR_TYPE_MSIX]);
2221
2222 /* Setup for MSI: Disable MSI-X */
2223 max_type = PCI_INTR_TYPE_MSI;
2224 counts[PCI_INTR_TYPE_MSI] = 1;
2225 counts[PCI_INTR_TYPE_INTX] = 1;
2226 goto alloc_retry;
2227 }
2228 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2229 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2230 error = wm_setup_legacy(sc);
2231 if (error) {
2232 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2233 counts[PCI_INTR_TYPE_MSI]);
2234
2235 /* The next try is for INTx: Disable MSI */
2236 max_type = PCI_INTR_TYPE_INTX;
2237 counts[PCI_INTR_TYPE_INTX] = 1;
2238 goto alloc_retry;
2239 }
2240 } else {
2241 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2242 error = wm_setup_legacy(sc);
2243 if (error) {
2244 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2245 counts[PCI_INTR_TYPE_INTX]);
2246 return;
2247 }
2248 }
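/*
 * Note on the retry ladder above (descriptive summary): MSI-X is
 * tried first when multiqueue is possible; if wm_setup_msix() fails,
 * the vectors are released and allocation is retried with MSI, and
 * if MSI setup fails in turn, it is retried once more with INTx.
 */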
2249
2250 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2251 error = workqueue_create(&sc->sc_queue_wq, wqname,
2252 wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2253 WQ_PERCPU | WQ_MPSAFE);
2254 if (error) {
2255 aprint_error_dev(sc->sc_dev,
2256 "unable to create TxRx workqueue\n");
2257 goto out;
2258 }
2259
2260 snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
2261 error = workqueue_create(&sc->sc_reset_wq, wqname,
2262 wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
2263 WQ_MPSAFE);
2264 if (error) {
2265 workqueue_destroy(sc->sc_queue_wq);
2266 aprint_error_dev(sc->sc_dev,
2267 "unable to create reset workqueue\n");
2268 goto out;
2269 }
2270
2271 /*
2272 * Check the function ID (unit number of the chip).
2273 */
2274 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2275 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2276 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2277 || (sc->sc_type == WM_T_82580)
2278 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2279 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2280 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2281 else
2282 sc->sc_funcid = 0;
2283
2284 /*
2285 * Determine a few things about the bus we're connected to.
2286 */
2287 if (sc->sc_type < WM_T_82543) {
2288 /* We don't really know the bus characteristics here. */
2289 sc->sc_bus_speed = 33;
2290 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2291 /*
2292 * CSA (Communication Streaming Architecture) is about as fast
2293 * as a 32-bit 66MHz PCI bus.
2294 */
2295 sc->sc_flags |= WM_F_CSA;
2296 sc->sc_bus_speed = 66;
2297 aprint_verbose_dev(sc->sc_dev,
2298 "Communication Streaming Architecture\n");
2299 if (sc->sc_type == WM_T_82547) {
2300 callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
2301 callout_setfunc(&sc->sc_txfifo_ch,
2302 wm_82547_txfifo_stall, sc);
2303 aprint_verbose_dev(sc->sc_dev,
2304 "using 82547 Tx FIFO stall work-around\n");
2305 }
2306 } else if (sc->sc_type >= WM_T_82571) {
2307 sc->sc_flags |= WM_F_PCIE;
2308 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2309 && (sc->sc_type != WM_T_ICH10)
2310 && (sc->sc_type != WM_T_PCH)
2311 && (sc->sc_type != WM_T_PCH2)
2312 && (sc->sc_type != WM_T_PCH_LPT)
2313 && (sc->sc_type != WM_T_PCH_SPT)
2314 && (sc->sc_type != WM_T_PCH_CNP)
2315 && (sc->sc_type != WM_T_PCH_TGP)) {
2316 /* ICH* and PCH* have no PCIe capability registers */
2317 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2318 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2319 NULL) == 0)
2320 aprint_error_dev(sc->sc_dev,
2321 "unable to find PCIe capability\n");
2322 }
2323 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2324 } else {
2325 reg = CSR_READ(sc, WMREG_STATUS);
2326 if (reg & STATUS_BUS64)
2327 sc->sc_flags |= WM_F_BUS64;
2328 if ((reg & STATUS_PCIX_MODE) != 0) {
2329 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2330
2331 sc->sc_flags |= WM_F_PCIX;
2332 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2333 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2334 aprint_error_dev(sc->sc_dev,
2335 "unable to find PCIX capability\n");
2336 else if (sc->sc_type != WM_T_82545_3 &&
2337 sc->sc_type != WM_T_82546_3) {
2338 /*
2339 * Work around a problem caused by the BIOS
2340 * setting the max memory read byte count
2341 * incorrectly.
2342 */
2343 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2344 sc->sc_pcixe_capoff + PCIX_CMD);
2345 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2346 sc->sc_pcixe_capoff + PCIX_STATUS);
2347
2348 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2349 PCIX_CMD_BYTECNT_SHIFT;
2350 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2351 PCIX_STATUS_MAXB_SHIFT;
2352 if (bytecnt > maxb) {
2353 aprint_verbose_dev(sc->sc_dev,
2354 "resetting PCI-X MMRBC: %d -> %d\n",
2355 512 << bytecnt, 512 << maxb);
2356 pcix_cmd = (pcix_cmd &
2357 ~PCIX_CMD_BYTECNT_MASK) |
2358 (maxb << PCIX_CMD_BYTECNT_SHIFT);
2359 pci_conf_write(pa->pa_pc, pa->pa_tag,
2360 sc->sc_pcixe_capoff + PCIX_CMD,
2361 pcix_cmd);
2362 }
2363 }
2364 }
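/*
 * Worked example (illustrative): the MMRBC fields encode a byte
 * count as 512 << n, so bytecnt = 3 requests 4096-byte reads. If
 * PCIX_STATUS caps maxb at 2 (2048 bytes), the command register
 * is rewritten above so the device never requests more than the
 * bus allows.
 */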
2365 /*
2366 * The quad port adapter is special; it has a PCIX-PCIX
2367 * bridge on the board, and can run the secondary bus at
2368 * a higher speed.
2369 */
2370 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2371 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2372 : 66;
2373 } else if (sc->sc_flags & WM_F_PCIX) {
2374 switch (reg & STATUS_PCIXSPD_MASK) {
2375 case STATUS_PCIXSPD_50_66:
2376 sc->sc_bus_speed = 66;
2377 break;
2378 case STATUS_PCIXSPD_66_100:
2379 sc->sc_bus_speed = 100;
2380 break;
2381 case STATUS_PCIXSPD_100_133:
2382 sc->sc_bus_speed = 133;
2383 break;
2384 default:
2385 aprint_error_dev(sc->sc_dev,
2386 "unknown PCIXSPD %d; assuming 66MHz\n",
2387 reg & STATUS_PCIXSPD_MASK);
2388 sc->sc_bus_speed = 66;
2389 break;
2390 }
2391 } else
2392 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2393 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2394 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2395 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2396 }
2397
2398 /* clear interesting stat counters */
2399 CSR_READ(sc, WMREG_COLC);
2400 CSR_READ(sc, WMREG_RXERRC);
2401
2402 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2403 || (sc->sc_type >= WM_T_ICH8))
2404 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2405 if (sc->sc_type >= WM_T_ICH8)
2406 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2407
2408 /* Set PHY, NVM mutex related stuff */
2409 switch (sc->sc_type) {
2410 case WM_T_82542_2_0:
2411 case WM_T_82542_2_1:
2412 case WM_T_82543:
2413 case WM_T_82544:
2414 /* Microwire */
2415 sc->nvm.read = wm_nvm_read_uwire;
2416 sc->sc_nvm_wordsize = 64;
2417 sc->sc_nvm_addrbits = 6;
2418 break;
2419 case WM_T_82540:
2420 case WM_T_82545:
2421 case WM_T_82545_3:
2422 case WM_T_82546:
2423 case WM_T_82546_3:
2424 /* Microwire */
2425 sc->nvm.read = wm_nvm_read_uwire;
2426 reg = CSR_READ(sc, WMREG_EECD);
2427 if (reg & EECD_EE_SIZE) {
2428 sc->sc_nvm_wordsize = 256;
2429 sc->sc_nvm_addrbits = 8;
2430 } else {
2431 sc->sc_nvm_wordsize = 64;
2432 sc->sc_nvm_addrbits = 6;
2433 }
2434 sc->sc_flags |= WM_F_LOCK_EECD;
2435 sc->nvm.acquire = wm_get_eecd;
2436 sc->nvm.release = wm_put_eecd;
2437 break;
2438 case WM_T_82541:
2439 case WM_T_82541_2:
2440 case WM_T_82547:
2441 case WM_T_82547_2:
2442 reg = CSR_READ(sc, WMREG_EECD);
2443 /*
2444 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
2445 * the 8254[17], so set the flags and functions before calling it.
2446 */
2447 sc->sc_flags |= WM_F_LOCK_EECD;
2448 sc->nvm.acquire = wm_get_eecd;
2449 sc->nvm.release = wm_put_eecd;
2450 if (reg & EECD_EE_TYPE) {
2451 /* SPI */
2452 sc->nvm.read = wm_nvm_read_spi;
2453 sc->sc_flags |= WM_F_EEPROM_SPI;
2454 wm_nvm_set_addrbits_size_eecd(sc);
2455 } else {
2456 /* Microwire */
2457 sc->nvm.read = wm_nvm_read_uwire;
2458 if ((reg & EECD_EE_ABITS) != 0) {
2459 sc->sc_nvm_wordsize = 256;
2460 sc->sc_nvm_addrbits = 8;
2461 } else {
2462 sc->sc_nvm_wordsize = 64;
2463 sc->sc_nvm_addrbits = 6;
2464 }
2465 }
2466 break;
2467 case WM_T_82571:
2468 case WM_T_82572:
2469 /* SPI */
2470 sc->nvm.read = wm_nvm_read_eerd;
2471 /* Don't use WM_F_LOCK_EECD because we use EERD */
2472 sc->sc_flags |= WM_F_EEPROM_SPI;
2473 wm_nvm_set_addrbits_size_eecd(sc);
2474 sc->phy.acquire = wm_get_swsm_semaphore;
2475 sc->phy.release = wm_put_swsm_semaphore;
2476 sc->nvm.acquire = wm_get_nvm_82571;
2477 sc->nvm.release = wm_put_nvm_82571;
2478 break;
2479 case WM_T_82573:
2480 case WM_T_82574:
2481 case WM_T_82583:
2482 sc->nvm.read = wm_nvm_read_eerd;
2483 /* Don't use WM_F_LOCK_EECD because we use EERD */
2484 if (sc->sc_type == WM_T_82573) {
2485 sc->phy.acquire = wm_get_swsm_semaphore;
2486 sc->phy.release = wm_put_swsm_semaphore;
2487 sc->nvm.acquire = wm_get_nvm_82571;
2488 sc->nvm.release = wm_put_nvm_82571;
2489 } else {
2490 /* Both PHY and NVM use the same semaphore. */
2491 sc->phy.acquire = sc->nvm.acquire
2492 = wm_get_swfwhw_semaphore;
2493 sc->phy.release = sc->nvm.release
2494 = wm_put_swfwhw_semaphore;
2495 }
2496 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2497 sc->sc_flags |= WM_F_EEPROM_FLASH;
2498 sc->sc_nvm_wordsize = 2048;
2499 } else {
2500 /* SPI */
2501 sc->sc_flags |= WM_F_EEPROM_SPI;
2502 wm_nvm_set_addrbits_size_eecd(sc);
2503 }
2504 break;
2505 case WM_T_82575:
2506 case WM_T_82576:
2507 case WM_T_82580:
2508 case WM_T_I350:
2509 case WM_T_I354:
2510 case WM_T_80003:
2511 /* SPI */
2512 sc->sc_flags |= WM_F_EEPROM_SPI;
2513 wm_nvm_set_addrbits_size_eecd(sc);
2514 if ((sc->sc_type == WM_T_80003)
2515 || (sc->sc_nvm_wordsize < (1 << 15))) {
2516 sc->nvm.read = wm_nvm_read_eerd;
2517 /* Don't use WM_F_LOCK_EECD because we use EERD */
2518 } else {
2519 sc->nvm.read = wm_nvm_read_spi;
2520 sc->sc_flags |= WM_F_LOCK_EECD;
2521 }
2522 sc->phy.acquire = wm_get_phy_82575;
2523 sc->phy.release = wm_put_phy_82575;
2524 sc->nvm.acquire = wm_get_nvm_80003;
2525 sc->nvm.release = wm_put_nvm_80003;
2526 break;
2527 case WM_T_ICH8:
2528 case WM_T_ICH9:
2529 case WM_T_ICH10:
2530 case WM_T_PCH:
2531 case WM_T_PCH2:
2532 case WM_T_PCH_LPT:
2533 sc->nvm.read = wm_nvm_read_ich8;
2534 /* FLASH */
2535 sc->sc_flags |= WM_F_EEPROM_FLASH;
2536 sc->sc_nvm_wordsize = 2048;
2537 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2538 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2539 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2540 aprint_error_dev(sc->sc_dev,
2541 "can't map FLASH registers\n");
2542 goto out;
2543 }
2544 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2545 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2546 ICH_FLASH_SECTOR_SIZE;
2547 sc->sc_ich8_flash_bank_size =
2548 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2549 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2550 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2551 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
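/*
 * Worked example (illustrative): if GFPREG reads 0x001f0001, the
 * flash base is 1 * ICH_FLASH_SECTOR_SIZE and the region spans
 * 0x1f sectors; dividing the byte count by 2 * sizeof(uint16_t)
 * converts bytes to 16-bit words and, presumably, splits the
 * region across its two flash banks.
 */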
2552 sc->sc_flashreg_offset = 0;
2553 sc->phy.acquire = wm_get_swflag_ich8lan;
2554 sc->phy.release = wm_put_swflag_ich8lan;
2555 sc->nvm.acquire = wm_get_nvm_ich8lan;
2556 sc->nvm.release = wm_put_nvm_ich8lan;
2557 break;
2558 case WM_T_PCH_SPT:
2559 case WM_T_PCH_CNP:
2560 case WM_T_PCH_TGP:
2561 sc->nvm.read = wm_nvm_read_spt;
2562 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2563 sc->sc_flags |= WM_F_EEPROM_FLASH;
2564 sc->sc_flasht = sc->sc_st;
2565 sc->sc_flashh = sc->sc_sh;
2566 sc->sc_ich8_flash_base = 0;
2567 sc->sc_nvm_wordsize =
2568 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2569 * NVM_SIZE_MULTIPLIER;
2570 /* That is the size in bytes; we want words */
2571 sc->sc_nvm_wordsize /= 2;
2572 /* Assume 2 banks */
2573 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
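/*
 * Worked example (illustrative, assuming NVM_SIZE_MULTIPLIER is
 * 4KB): a strap field value of 7 gives (7 + 1) * 4096 = 32768
 * bytes, i.e. 16384 words after the division above, so each of
 * the two assumed banks holds 8192 words.
 */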
2574 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2575 sc->phy.acquire = wm_get_swflag_ich8lan;
2576 sc->phy.release = wm_put_swflag_ich8lan;
2577 sc->nvm.acquire = wm_get_nvm_ich8lan;
2578 sc->nvm.release = wm_put_nvm_ich8lan;
2579 break;
2580 case WM_T_I210:
2581 case WM_T_I211:
2582 /* Allow a single clear of the SW semaphore on I210 and newer */
2583 sc->sc_flags |= WM_F_WA_I210_CLSEM;
2584 if (wm_nvm_flash_presence_i210(sc)) {
2585 sc->nvm.read = wm_nvm_read_eerd;
2586 /* Don't use WM_F_LOCK_EECD because we use EERD */
2587 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2588 wm_nvm_set_addrbits_size_eecd(sc);
2589 } else {
2590 sc->nvm.read = wm_nvm_read_invm;
2591 sc->sc_flags |= WM_F_EEPROM_INVM;
2592 sc->sc_nvm_wordsize = INVM_SIZE;
2593 }
2594 sc->phy.acquire = wm_get_phy_82575;
2595 sc->phy.release = wm_put_phy_82575;
2596 sc->nvm.acquire = wm_get_nvm_80003;
2597 sc->nvm.release = wm_put_nvm_80003;
2598 break;
2599 default:
2600 break;
2601 }
2602
2603 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2604 switch (sc->sc_type) {
2605 case WM_T_82571:
2606 case WM_T_82572:
2607 reg = CSR_READ(sc, WMREG_SWSM2);
2608 if ((reg & SWSM2_LOCK) == 0) {
2609 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2610 force_clear_smbi = true;
2611 } else
2612 force_clear_smbi = false;
2613 break;
2614 case WM_T_82573:
2615 case WM_T_82574:
2616 case WM_T_82583:
2617 force_clear_smbi = true;
2618 break;
2619 default:
2620 force_clear_smbi = false;
2621 break;
2622 }
2623 if (force_clear_smbi) {
2624 reg = CSR_READ(sc, WMREG_SWSM);
2625 if ((reg & SWSM_SMBI) != 0)
2626 aprint_error_dev(sc->sc_dev,
2627 "Please update the Bootagent\n");
2628 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2629 }
2630
2631 /*
2632 * Defer printing the EEPROM type until after verifying the
2633 * checksum. This allows the EEPROM type to be printed correctly
2634 * in the case that no EEPROM is attached.
2635 */
2636 /*
2637 * Validate the EEPROM checksum. If the checksum fails, flag
2638 * this for later, so we can fail future reads from the EEPROM.
2639 */
2640 if (wm_nvm_validate_checksum(sc)) {
2641 /*
2642 * Validate the checksum again, because some PCI-e parts fail
2643 * the first check due to the link being in a sleep state.
2644 */
2645 if (wm_nvm_validate_checksum(sc))
2646 sc->sc_flags |= WM_F_EEPROM_INVALID;
2647 }
2648
2649 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2650 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2651 else {
2652 aprint_verbose_dev(sc->sc_dev, "%u words ",
2653 sc->sc_nvm_wordsize);
2654 if (sc->sc_flags & WM_F_EEPROM_INVM)
2655 aprint_verbose("iNVM");
2656 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2657 aprint_verbose("FLASH(HW)");
2658 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2659 aprint_verbose("FLASH");
2660 else {
2661 if (sc->sc_flags & WM_F_EEPROM_SPI)
2662 eetype = "SPI";
2663 else
2664 eetype = "MicroWire";
2665 aprint_verbose("(%d address bits) %s EEPROM",
2666 sc->sc_nvm_addrbits, eetype);
2667 }
2668 }
2669 wm_nvm_version(sc);
2670 aprint_verbose("\n");
2671
2672 /*
2673 * XXX The first call of wm_gmii_setup_phytype. The result might be
2674 * incorrect.
2675 */
2676 wm_gmii_setup_phytype(sc, 0, 0);
2677
2678 /* Check for WM_F_WOL on some chips before wm_reset() */
2679 switch (sc->sc_type) {
2680 case WM_T_ICH8:
2681 case WM_T_ICH9:
2682 case WM_T_ICH10:
2683 case WM_T_PCH:
2684 case WM_T_PCH2:
2685 case WM_T_PCH_LPT:
2686 case WM_T_PCH_SPT:
2687 case WM_T_PCH_CNP:
2688 case WM_T_PCH_TGP:
2689 apme_mask = WUC_APME;
2690 eeprom_data = CSR_READ(sc, WMREG_WUC);
2691 if ((eeprom_data & apme_mask) != 0)
2692 sc->sc_flags |= WM_F_WOL;
2693 break;
2694 default:
2695 break;
2696 }
2697
2698 /* Reset the chip to a known state. */
2699 wm_reset(sc);
2700
2701 /*
2702 * Check for I21[01] PLL workaround.
2703 *
2704 * Three cases:
2705 * a) Chip is I211.
2706 * b) Chip is I210 and it uses INVM (not FLASH).
2707 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2708 */
2709 if (sc->sc_type == WM_T_I211)
2710 sc->sc_flags |= WM_F_PLL_WA_I210;
2711 if (sc->sc_type == WM_T_I210) {
2712 if (!wm_nvm_flash_presence_i210(sc))
2713 sc->sc_flags |= WM_F_PLL_WA_I210;
2714 else if ((sc->sc_nvm_ver_major < 3)
2715 || ((sc->sc_nvm_ver_major == 3)
2716 && (sc->sc_nvm_ver_minor < 25))) {
2717 aprint_verbose_dev(sc->sc_dev,
2718 "ROM image version %d.%d is older than 3.25\n",
2719 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2720 sc->sc_flags |= WM_F_PLL_WA_I210;
2721 }
2722 }
2723 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2724 wm_pll_workaround_i210(sc);
2725
2726 wm_get_wakeup(sc);
2727
2728 /* Non-AMT based hardware can now take control from firmware */
2729 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2730 wm_get_hw_control(sc);
2731
2732 /*
2733 * Read the Ethernet address from the EEPROM, if not first found
2734 * in device properties.
2735 */
2736 ea = prop_dictionary_get(dict, "mac-address");
2737 if (ea != NULL) {
2738 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2739 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2740 memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
2741 } else {
2742 if (wm_read_mac_addr(sc, enaddr) != 0) {
2743 aprint_error_dev(sc->sc_dev,
2744 "unable to read Ethernet address\n");
2745 goto out;
2746 }
2747 }
2748
2749 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2750 ether_sprintf(enaddr));
2751
2752 /*
2753 * Read the config info from the EEPROM, and set up various
2754 * bits in the control registers based on their contents.
2755 */
2756 pn = prop_dictionary_get(dict, "i82543-cfg1");
2757 if (pn != NULL) {
2758 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2759 cfg1 = (uint16_t) prop_number_signed_value(pn);
2760 } else {
2761 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2762 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2763 goto out;
2764 }
2765 }
2766
2767 pn = prop_dictionary_get(dict, "i82543-cfg2");
2768 if (pn != NULL) {
2769 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2770 cfg2 = (uint16_t) prop_number_signed_value(pn);
2771 } else {
2772 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2773 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2774 goto out;
2775 }
2776 }
2777
2778 /* check for WM_F_WOL */
2779 switch (sc->sc_type) {
2780 case WM_T_82542_2_0:
2781 case WM_T_82542_2_1:
2782 case WM_T_82543:
2783 /* dummy? */
2784 eeprom_data = 0;
2785 apme_mask = NVM_CFG3_APME;
2786 break;
2787 case WM_T_82544:
2788 apme_mask = NVM_CFG2_82544_APM_EN;
2789 eeprom_data = cfg2;
2790 break;
2791 case WM_T_82546:
2792 case WM_T_82546_3:
2793 case WM_T_82571:
2794 case WM_T_82572:
2795 case WM_T_82573:
2796 case WM_T_82574:
2797 case WM_T_82583:
2798 case WM_T_80003:
2799 case WM_T_82575:
2800 case WM_T_82576:
2801 apme_mask = NVM_CFG3_APME;
2802 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2803 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2804 break;
2805 case WM_T_82580:
2806 case WM_T_I350:
2807 case WM_T_I354:
2808 case WM_T_I210:
2809 case WM_T_I211:
2810 apme_mask = NVM_CFG3_APME;
2811 wm_nvm_read(sc,
2812 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2813 1, &eeprom_data);
2814 break;
2815 case WM_T_ICH8:
2816 case WM_T_ICH9:
2817 case WM_T_ICH10:
2818 case WM_T_PCH:
2819 case WM_T_PCH2:
2820 case WM_T_PCH_LPT:
2821 case WM_T_PCH_SPT:
2822 case WM_T_PCH_CNP:
2823 case WM_T_PCH_TGP:
2824 /* Already checked before wm_reset() */
2825 apme_mask = eeprom_data = 0;
2826 break;
2827 default: /* XXX 82540 */
2828 apme_mask = NVM_CFG3_APME;
2829 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2830 break;
2831 }
2832 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2833 if ((eeprom_data & apme_mask) != 0)
2834 sc->sc_flags |= WM_F_WOL;
2835
2836 /*
2837 * We have the EEPROM settings; now apply the special cases
2838 * where the EEPROM may be wrong or the board doesn't support
2839 * wake-on-LAN on a particular port.
2840 */
2841 switch (sc->sc_pcidevid) {
2842 case PCI_PRODUCT_INTEL_82546GB_PCIE:
2843 sc->sc_flags &= ~WM_F_WOL;
2844 break;
2845 case PCI_PRODUCT_INTEL_82546EB_FIBER:
2846 case PCI_PRODUCT_INTEL_82546GB_FIBER:
2847 /* Wake events only supported on port A for dual fiber
2848 * regardless of eeprom setting */
2849 if (sc->sc_funcid == 1)
2850 sc->sc_flags &= ~WM_F_WOL;
2851 break;
2852 case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2853 /* If quad port adapter, disable WoL on all but port A */
2854 if (sc->sc_funcid != 0)
2855 sc->sc_flags &= ~WM_F_WOL;
2856 break;
2857 case PCI_PRODUCT_INTEL_82571EB_FIBER:
2858 /* Wake events only supported on port A for dual fiber
2859 * regardless of eeprom setting */
2860 if (sc->sc_funcid == 1)
2861 sc->sc_flags &= ~WM_F_WOL;
2862 break;
2863 case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2864 case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2865 case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2866 /* If quad port adapter, disable WoL on all but port A */
2867 if (sc->sc_funcid != 0)
2868 sc->sc_flags &= ~WM_F_WOL;
2869 break;
2870 }
2871
2872 if (sc->sc_type >= WM_T_82575) {
2873 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2874 aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2875 nvmword);
2876 if ((sc->sc_type == WM_T_82575) ||
2877 (sc->sc_type == WM_T_82576)) {
2878 /* Check NVM for autonegotiation */
2879 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2880 != 0)
2881 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2882 }
2883 if ((sc->sc_type == WM_T_82575) ||
2884 (sc->sc_type == WM_T_I350)) {
2885 if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2886 sc->sc_flags |= WM_F_MAS;
2887 }
2888 }
2889 }
2890
2891 /*
2892 * XXX need special handling for some multiple-port cards
2893 * to disable a particular port.
2894 */
2895
2896 if (sc->sc_type >= WM_T_82544) {
2897 pn = prop_dictionary_get(dict, "i82543-swdpin");
2898 if (pn != NULL) {
2899 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2900 swdpin = (uint16_t) prop_number_signed_value(pn);
2901 } else {
2902 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2903 aprint_error_dev(sc->sc_dev,
2904 "unable to read SWDPIN\n");
2905 goto out;
2906 }
2907 }
2908 }
2909
2910 if (cfg1 & NVM_CFG1_ILOS)
2911 sc->sc_ctrl |= CTRL_ILOS;
2912
2913 /*
2914 * XXX
2915 * This code isn't correct because pins 2 and 3 are located
2916 * in different positions on newer chips. Check all datasheets.
2917 *
2918 * Until this is resolved, apply it only to chips up to the 82580.
2919 */
2920 if (sc->sc_type <= WM_T_82580) {
2921 if (sc->sc_type >= WM_T_82544) {
2922 sc->sc_ctrl |=
2923 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2924 CTRL_SWDPIO_SHIFT;
2925 sc->sc_ctrl |=
2926 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2927 CTRL_SWDPINS_SHIFT;
2928 } else {
2929 sc->sc_ctrl |=
2930 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2931 CTRL_SWDPIO_SHIFT;
2932 }
2933 }
2934
2935 if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2936 wm_nvm_read(sc,
2937 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2938 1, &nvmword);
2939 if (nvmword & NVM_CFG3_ILOS)
2940 sc->sc_ctrl |= CTRL_ILOS;
2941 }
2942
2943 #if 0
2944 if (sc->sc_type >= WM_T_82544) {
2945 if (cfg1 & NVM_CFG1_IPS0)
2946 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2947 if (cfg1 & NVM_CFG1_IPS1)
2948 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2949 sc->sc_ctrl_ext |=
2950 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2951 CTRL_EXT_SWDPIO_SHIFT;
2952 sc->sc_ctrl_ext |=
2953 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2954 CTRL_EXT_SWDPINS_SHIFT;
2955 } else {
2956 sc->sc_ctrl_ext |=
2957 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2958 CTRL_EXT_SWDPIO_SHIFT;
2959 }
2960 #endif
2961
2962 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2963 #if 0
2964 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2965 #endif
2966
2967 if (sc->sc_type == WM_T_PCH) {
2968 uint16_t val;
2969
2970 /* Save the NVM K1 bit setting */
2971 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2972
2973 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2974 sc->sc_nvm_k1_enabled = 1;
2975 else
2976 sc->sc_nvm_k1_enabled = 0;
2977 }
2978
2979 /* Determine if we're GMII, TBI, SERDES or SGMII mode */
2980 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2981 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2982 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2983 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2984 || sc->sc_type == WM_T_PCH_TGP
2985 || sc->sc_type == WM_T_82573
2986 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2987 /* Copper only */
2988 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2989 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2990 || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2991 || (sc->sc_type == WM_T_I211)) {
2992 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2993 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2994 switch (link_mode) {
2995 case CTRL_EXT_LINK_MODE_1000KX:
2996 aprint_normal_dev(sc->sc_dev, "1000KX\n");
2997 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2998 break;
2999 case CTRL_EXT_LINK_MODE_SGMII:
3000 if (wm_sgmii_uses_mdio(sc)) {
3001 aprint_normal_dev(sc->sc_dev,
3002 "SGMII(MDIO)\n");
3003 sc->sc_flags |= WM_F_SGMII;
3004 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3005 break;
3006 }
3007 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
3008 /*FALLTHROUGH*/
3009 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
3010 sc->sc_mediatype = wm_sfp_get_media_type(sc);
3011 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
3012 if (link_mode
3013 == CTRL_EXT_LINK_MODE_SGMII) {
3014 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3015 sc->sc_flags |= WM_F_SGMII;
3016 aprint_verbose_dev(sc->sc_dev,
3017 "SGMII\n");
3018 } else {
3019 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
3020 aprint_verbose_dev(sc->sc_dev,
3021 "SERDES\n");
3022 }
3023 break;
3024 }
3025 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
3026 aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
3027 else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3028 aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
3029 sc->sc_flags |= WM_F_SGMII;
3030 }
3031 /* Do not change link mode for 100BaseFX */
3032 if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
3033 break;
3034
3035 /* Change current link mode setting */
3036 reg &= ~CTRL_EXT_LINK_MODE_MASK;
3037 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3038 reg |= CTRL_EXT_LINK_MODE_SGMII;
3039 else
3040 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
3041 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3042 break;
3043 case CTRL_EXT_LINK_MODE_GMII:
3044 default:
3045 aprint_normal_dev(sc->sc_dev, "Copper\n");
3046 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3047 break;
3048 }
3049
3050 reg &= ~CTRL_EXT_I2C_ENA;
3051 if ((sc->sc_flags & WM_F_SGMII) != 0)
3052 reg |= CTRL_EXT_I2C_ENA;
3053 else
3054 reg &= ~CTRL_EXT_I2C_ENA;
3055 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3056 if ((sc->sc_flags & WM_F_SGMII) != 0) {
3057 if (!wm_sgmii_uses_mdio(sc))
3058 wm_gmii_setup_phytype(sc, 0, 0);
3059 wm_reset_mdicnfg_82580(sc);
3060 }
3061 } else if (sc->sc_type < WM_T_82543 ||
3062 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
3063 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3064 aprint_error_dev(sc->sc_dev,
3065 "WARNING: TBIMODE set on 1000BASE-T product!\n");
3066 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
3067 }
3068 } else {
3069 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
3070 aprint_error_dev(sc->sc_dev,
3071 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
3072 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3073 }
3074 }
3075
3076 if (sc->sc_type >= WM_T_PCH2)
3077 sc->sc_flags |= WM_F_EEE;
3078 else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
3079 && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
3080 /* XXX: Need special handling for I354. (not yet) */
3081 if (sc->sc_type != WM_T_I354)
3082 sc->sc_flags |= WM_F_EEE;
3083 }
3084
3085 /*
3086 * The I350 has a bug where it always strips the CRC whether
3087 * asked to or not, so ask for stripped CRC here and cope in rxeof.
3088 */
3089 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3090 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3091 sc->sc_flags |= WM_F_CRC_STRIP;
3092
3093 /*
3094 * Workaround for some chips to delay sending LINK_STATE_UP.
3095 	 * Some systems can't send packets right after link-up. See also
3096 * wm_linkintr_gmii(), wm_tick() and wm_gmii_mediastatus().
3097 */
3098 switch (sc->sc_type) {
3099 case WM_T_I350:
3100 case WM_T_I354:
3101 case WM_T_I210:
3102 case WM_T_I211:
3103 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3104 sc->sc_flags |= WM_F_DELAY_LINKUP;
3105 break;
3106 default:
3107 break;
3108 }
3109
3110 /* Set device properties (macflags) */
3111 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
3112
3113 if (sc->sc_flags != 0) {
3114 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
3115 aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
3116 }
3117
3118 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
3119
3120 /* Initialize the media structures accordingly. */
3121 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3122 wm_gmii_mediainit(sc, wmp->wmp_product);
3123 else
3124 wm_tbi_mediainit(sc); /* All others */
3125
3126 ifp = &sc->sc_ethercom.ec_if;
3127 xname = device_xname(sc->sc_dev);
3128 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
3129 ifp->if_softc = sc;
3130 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3131 ifp->if_extflags = IFEF_MPSAFE;
3132 ifp->if_ioctl = wm_ioctl;
3133 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3134 ifp->if_start = wm_nq_start;
3135 /*
3136 		 * When there is only one CPU and the controller can use
3137 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
3138 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
3139 		 * the other for link status changes.
3140 		 * In this situation, wm_nq_transmit() is disadvantageous
3141 		 * because of the wm_select_txqueue() and pcq(9) overhead.
3142 */
3143 if (wm_is_using_multiqueue(sc))
3144 ifp->if_transmit = wm_nq_transmit;
3145 } else {
3146 ifp->if_start = wm_start;
3147 /*
3148 * wm_transmit() has the same disadvantages as wm_nq_transmit()
3149 * described above.
3150 */
3151 if (wm_is_using_multiqueue(sc))
3152 ifp->if_transmit = wm_transmit;
3153 }
3154 	/* wm(4) does not use ifp->if_watchdog; wm_tick() acts as the watchdog. */
3155 ifp->if_init = wm_init;
3156 ifp->if_stop = wm_stop;
3157 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
3158 IFQ_SET_READY(&ifp->if_snd);
3159
3160 /* Check for jumbo frame */
3161 switch (sc->sc_type) {
3162 case WM_T_82573:
3163 /* XXX limited to 9234 if ASPM is disabled */
3164 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
3165 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
3166 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3167 break;
3168 case WM_T_82571:
3169 case WM_T_82572:
3170 case WM_T_82574:
3171 case WM_T_82583:
3172 case WM_T_82575:
3173 case WM_T_82576:
3174 case WM_T_82580:
3175 case WM_T_I350:
3176 case WM_T_I354:
3177 case WM_T_I210:
3178 case WM_T_I211:
3179 case WM_T_80003:
3180 case WM_T_ICH9:
3181 case WM_T_ICH10:
3182 case WM_T_PCH2: /* PCH2 supports 9K frame size */
3183 case WM_T_PCH_LPT:
3184 case WM_T_PCH_SPT:
3185 case WM_T_PCH_CNP:
3186 case WM_T_PCH_TGP:
3187 /* XXX limited to 9234 */
3188 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3189 break;
3190 case WM_T_PCH:
3191 /* XXX limited to 4096 */
3192 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3193 break;
3194 case WM_T_82542_2_0:
3195 case WM_T_82542_2_1:
3196 case WM_T_ICH8:
3197 /* No support for jumbo frame */
3198 break;
3199 default:
3200 /* ETHER_MAX_LEN_JUMBO */
3201 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3202 break;
3203 }
3204
3205 	/* If we're an i82543 or greater, we can support VLANs. */
3206 if (sc->sc_type >= WM_T_82543) {
3207 sc->sc_ethercom.ec_capabilities |=
3208 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3209 sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3210 }
3211
3212 if ((sc->sc_flags & WM_F_EEE) != 0)
3213 sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
3214
3215 /*
3216 	 * We can offload TCPv4 and UDPv4 checksums in hardware, but
3217 	 * only on i82543 and later.
3218 */
3219 if (sc->sc_type >= WM_T_82543) {
3220 ifp->if_capabilities |=
3221 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3222 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3223 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3224 IFCAP_CSUM_TCPv6_Tx |
3225 IFCAP_CSUM_UDPv6_Tx;
3226 }
3227
3228 /*
3229 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
3230 *
3231 * 82541GI (8086:1076) ... no
3232 * 82572EI (8086:10b9) ... yes
3233 */
3234 if (sc->sc_type >= WM_T_82571) {
3235 ifp->if_capabilities |=
3236 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3237 }
3238
3239 /*
3240 	 * If we're an i82544 or greater (except i82547), we can do
3241 * TCP segmentation offload.
3242 */
3243 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
3244 ifp->if_capabilities |= IFCAP_TSOv4;
3245
3246 if (sc->sc_type >= WM_T_82571)
3247 ifp->if_capabilities |= IFCAP_TSOv6;
3248
3249 sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3250 sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3251 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3252 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3253
3254 /* Attach the interface. */
3255 if_initialize(ifp);
3256 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3257 ether_ifattach(ifp, enaddr);
3258 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3259 if_register(ifp);
3260 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3261 RND_FLAG_DEFAULT);
3262
3263 #ifdef WM_EVENT_COUNTERS
3264 /* Attach event counters. */
3265 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3266 NULL, xname, "linkintr");
3267
3268 evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
3269 NULL, xname, "CRC Error");
3270 evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
3271 NULL, xname, "Symbol Error");
3272 evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
3273 NULL, xname, "Missed Packets");
3274 evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
3275 NULL, xname, "Collision");
3276 evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
3277 NULL, xname, "Sequence Error");
3278 evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
3279 NULL, xname, "Receive Length Error");
3280
3281 if (sc->sc_type >= WM_T_82543) {
3282 evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
3283 NULL, xname, "Alignment Error");
3284 evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
3285 NULL, xname, "Receive Error");
3286 /* XXX Does 82575 have HTDPMC? */
3287 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3288 evcnt_attach_dynamic(&sc->sc_ev_cexterr,
3289 EVCNT_TYPE_MISC, NULL, xname,
3290 "Carrier Extension Error");
3291 else
3292 evcnt_attach_dynamic(&sc->sc_ev_htdpmc,
3293 EVCNT_TYPE_MISC, NULL, xname,
3294 "Host Transmit Discarded Packets by MAC");
3295
3296 evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
3297 NULL, xname, "Tx with No CRS");
3298 evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
3299 NULL, xname, "TCP Segmentation Context Tx");
3300 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3301 evcnt_attach_dynamic(&sc->sc_ev_tsctfc,
3302 EVCNT_TYPE_MISC, NULL, xname,
3303 "TCP Segmentation Context Tx Fail");
3304 else {
3305 /* XXX Is the circuit breaker only for 82576? */
3306 evcnt_attach_dynamic(&sc->sc_ev_cbrdpc,
3307 EVCNT_TYPE_MISC, NULL, xname,
3308 "Circuit Breaker Rx Dropped Packet");
3309 evcnt_attach_dynamic(&sc->sc_ev_cbrmpc,
3310 EVCNT_TYPE_MISC, NULL, xname,
3311 "Circuit Breaker Rx Manageability Packet");
3312 }
3313 }
3314
3315 if (sc->sc_type >= WM_T_82542_2_1) {
3316 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3317 NULL, xname, "XOFF Transmitted");
3318 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3319 NULL, xname, "XON Transmitted");
3320 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3321 NULL, xname, "XOFF Received");
3322 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3323 NULL, xname, "XON Received");
3324 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3325 NULL, xname, "FC Received Unsupported");
3326 }
3327
3328 evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
3329 NULL, xname, "Single Collision");
3330 evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
3331 NULL, xname, "Excessive Collisions");
3332 evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
3333 NULL, xname, "Multiple Collision");
3334 evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
3335 NULL, xname, "Late Collisions");
3336
3337 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
3338 evcnt_attach_dynamic(&sc->sc_ev_cbtmpc, EVCNT_TYPE_MISC,
3339 NULL, xname, "Circuit Breaker Tx Manageability Packet");
3340
3341 evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
3342 NULL, xname, "Defer");
3343 evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
3344 NULL, xname, "Packets Rx (64 bytes)");
3345 evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
3346 NULL, xname, "Packets Rx (65-127 bytes)");
3347 evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
3348 NULL, xname, "Packets Rx (128-255 bytes)");
3349 evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
3350 NULL, xname, "Packets Rx (256-511 bytes)");
3351 evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
3352 NULL, xname, "Packets Rx (512-1023 bytes)");
3353 evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
3354 NULL, xname, "Packets Rx (1024-1522 bytes)");
3355 evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
3356 NULL, xname, "Good Packets Rx");
3357 evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
3358 NULL, xname, "Broadcast Packets Rx");
3359 evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
3360 NULL, xname, "Multicast Packets Rx");
3361 evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
3362 NULL, xname, "Good Packets Tx");
3363 evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
3364 NULL, xname, "Good Octets Rx");
3365 evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
3366 NULL, xname, "Good Octets Tx");
3367 evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
3368 NULL, xname, "Rx No Buffers");
3369 evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
3370 NULL, xname, "Rx Undersize (valid CRC)");
3371 evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
3372 NULL, xname, "Rx Fragment (bad CRC)");
3373 evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
3374 NULL, xname, "Rx Oversize (valid CRC)");
3375 evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
3376 NULL, xname, "Rx Jabber (bad CRC)");
3377 if (sc->sc_type >= WM_T_82540) {
3378 evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
3379 NULL, xname, "Management Packets RX");
3380 evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
3381 NULL, xname, "Management Packets Dropped");
3382 evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
3383 NULL, xname, "Management Packets TX");
3384 }
3385 evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
3386 NULL, xname, "Total Octets Rx");
3387 evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
3388 NULL, xname, "Total Octets Tx");
3389 evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
3390 NULL, xname, "Total Packets Rx");
3391 evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
3392 NULL, xname, "Total Packets Tx");
3393 evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
3394 NULL, xname, "Packets Tx (64 bytes)");
3395 evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
3396 NULL, xname, "Packets Tx (65-127 bytes)");
3397 evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
3398 NULL, xname, "Packets Tx (128-255 bytes)");
3399 evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
3400 NULL, xname, "Packets Tx (256-511 bytes)");
3401 evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
3402 NULL, xname, "Packets Tx (512-1023 bytes)");
3403 evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
3404 NULL, xname, "Packets Tx (1024-1522 Bytes)");
3405 evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
3406 NULL, xname, "Multicast Packets Tx");
3407 evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
3408 NULL, xname, "Broadcast Packets Tx");
3409 if (sc->sc_type >= WM_T_82571) /* PCIe, 80003 and ICH/PCHs */
3410 evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
3411 NULL, xname, "Interrupt Assertion");
3412 if (sc->sc_type < WM_T_82575) {
3413 evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
3414 NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
3415 evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
3416 NULL, xname, "Intr. Cause Rx Abs Timer Expire");
3417 evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
3418 NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
3419 evcnt_attach_dynamic(&sc->sc_ev_ictxatc, EVCNT_TYPE_MISC,
3420 NULL, xname, "Intr. Cause Tx Abs Timer Expire");
3421 evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
3422 NULL, xname, "Intr. Cause Tx Queue Empty");
3423 evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
3424 NULL, xname, "Intr. Cause Tx Queue Min Thresh");
3425 evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
3426 NULL, xname, "Intr. Cause Rx Desc Min Thresh");
3427
3428 /* XXX 82575 document says it has ICRXOC. Is that right? */
3429 evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
3430 NULL, xname, "Interrupt Cause Receiver Overrun");
3431 } else if (!WM_IS_ICHPCH(sc)) {
3432 /*
3433 * For 82575 and newer.
3434 *
3435 * On 80003, ICHs and PCHs, it seems all of the following
3436 * registers are zero.
3437 */
3438 evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC,
3439 NULL, xname, "Rx Packets To Host");
3440 evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC,
3441 NULL, xname, "Debug Counter 1");
3442 evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC,
3443 NULL, xname, "Debug Counter 2");
3444 evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC,
3445 NULL, xname, "Debug Counter 3");
3446
3447 /*
3448 		 * The 82575 datasheet says 0x4118 is for TXQEC (Tx Queue
3449 		 * Empty), but I think that's wrong: the count I observed
3450 		 * matches GPTC (Good Packets Tx) and TPT (Total Packets
3451 		 * Tx), so it's really HGPTC (Host Good Packets Tx), as
3452 		 * described in the 82576 datasheet.
3453 */
3454 evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC,
3455 NULL, xname, "Host Good Packets TX");
3456
3457 evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC,
3458 NULL, xname, "Debug Counter 4");
3459 evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
3460 NULL, xname, "Rx Desc Min Thresh");
3461 /* XXX Is the circuit breaker only for 82576? */
3462 evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC,
3463 NULL, xname, "Host Tx Circuit Breaker Dropped Packets");
3464
3465 evcnt_attach_dynamic(&sc->sc_ev_hgorc, EVCNT_TYPE_MISC,
3466 NULL, xname, "Host Good Octets Rx");
3467 evcnt_attach_dynamic(&sc->sc_ev_hgotc, EVCNT_TYPE_MISC,
3468 NULL, xname, "Host Good Octets Tx");
3469 evcnt_attach_dynamic(&sc->sc_ev_lenerrs, EVCNT_TYPE_MISC,
3470 NULL, xname, "Length Errors (length/type <= 1500)");
3471 evcnt_attach_dynamic(&sc->sc_ev_scvpc, EVCNT_TYPE_MISC,
3472 NULL, xname, "SerDes/SGMII Code Violation Packet");
3473 evcnt_attach_dynamic(&sc->sc_ev_hrmpc, EVCNT_TYPE_MISC,
3474 NULL, xname, "Header Redirection Missed Packet");
3475 }
3476 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
3477 evcnt_attach_dynamic(&sc->sc_ev_tlpic, EVCNT_TYPE_MISC,
3478 NULL, xname, "EEE Tx LPI");
3479 evcnt_attach_dynamic(&sc->sc_ev_rlpic, EVCNT_TYPE_MISC,
3480 NULL, xname, "EEE Rx LPI");
3481 evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
3482 NULL, xname, "BMC2OS Packets received by host");
3483 evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
3484 NULL, xname, "OS2BMC Packets transmitted by host");
3485 evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
3486 NULL, xname, "BMC2OS Packets sent by BMC");
3487 evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
3488 NULL, xname, "OS2BMC Packets received by BMC");
3489 }
3490 #endif /* WM_EVENT_COUNTERS */
3491
3492 sc->sc_txrx_use_workqueue = false;
3493
3494 if (wm_phy_need_linkdown_discard(sc)) {
3495 DPRINTF(sc, WM_DEBUG_LINK,
3496 ("%s: %s: Set linkdown discard flag\n",
3497 device_xname(sc->sc_dev), __func__));
3498 wm_set_linkdown_discard(sc);
3499 }
3500
3501 wm_init_sysctls(sc);
3502
3503 if (pmf_device_register(self, wm_suspend, wm_resume))
3504 pmf_class_network_register(self, ifp);
3505 else
3506 aprint_error_dev(self, "couldn't establish power handler\n");
3507
3508 sc->sc_flags |= WM_F_ATTACHED;
3509 out:
3510 return;
3511 }
3512
3513 /* The detach function (ca_detach) */
3514 static int
3515 wm_detach(device_t self, int flags __unused)
3516 {
3517 struct wm_softc *sc = device_private(self);
3518 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3519 int i;
3520
3521 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3522 return 0;
3523
3524 /* Stop the interface. Callouts are stopped in it. */
3525 IFNET_LOCK(ifp);
3526 sc->sc_dying = true;
3527 wm_stop(ifp, 1);
3528 IFNET_UNLOCK(ifp);
3529
3530 pmf_device_deregister(self);
3531
3532 sysctl_teardown(&sc->sc_sysctllog);
3533
3534 #ifdef WM_EVENT_COUNTERS
3535 evcnt_detach(&sc->sc_ev_linkintr);
3536
3537 evcnt_detach(&sc->sc_ev_crcerrs);
3538 evcnt_detach(&sc->sc_ev_symerrc);
3539 evcnt_detach(&sc->sc_ev_mpc);
3540 evcnt_detach(&sc->sc_ev_colc);
3541 evcnt_detach(&sc->sc_ev_sec);
3542 evcnt_detach(&sc->sc_ev_rlec);
3543
3544 if (sc->sc_type >= WM_T_82543) {
3545 evcnt_detach(&sc->sc_ev_algnerrc);
3546 evcnt_detach(&sc->sc_ev_rxerrc);
3547 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3548 evcnt_detach(&sc->sc_ev_cexterr);
3549 else
3550 evcnt_detach(&sc->sc_ev_htdpmc);
3551
3552 evcnt_detach(&sc->sc_ev_tncrs);
3553 evcnt_detach(&sc->sc_ev_tsctc);
3554 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3555 evcnt_detach(&sc->sc_ev_tsctfc);
3556 else {
3557 evcnt_detach(&sc->sc_ev_cbrdpc);
3558 evcnt_detach(&sc->sc_ev_cbrmpc);
3559 }
3560 }
3561
3562 if (sc->sc_type >= WM_T_82542_2_1) {
3563 evcnt_detach(&sc->sc_ev_tx_xoff);
3564 evcnt_detach(&sc->sc_ev_tx_xon);
3565 evcnt_detach(&sc->sc_ev_rx_xoff);
3566 evcnt_detach(&sc->sc_ev_rx_xon);
3567 evcnt_detach(&sc->sc_ev_rx_macctl);
3568 }
3569
3570 evcnt_detach(&sc->sc_ev_scc);
3571 evcnt_detach(&sc->sc_ev_ecol);
3572 evcnt_detach(&sc->sc_ev_mcc);
3573 evcnt_detach(&sc->sc_ev_latecol);
3574
3575 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
3576 evcnt_detach(&sc->sc_ev_cbtmpc);
3577
3578 evcnt_detach(&sc->sc_ev_dc);
3579 evcnt_detach(&sc->sc_ev_prc64);
3580 evcnt_detach(&sc->sc_ev_prc127);
3581 evcnt_detach(&sc->sc_ev_prc255);
3582 evcnt_detach(&sc->sc_ev_prc511);
3583 evcnt_detach(&sc->sc_ev_prc1023);
3584 evcnt_detach(&sc->sc_ev_prc1522);
3585 evcnt_detach(&sc->sc_ev_gprc);
3586 evcnt_detach(&sc->sc_ev_bprc);
3587 evcnt_detach(&sc->sc_ev_mprc);
3588 evcnt_detach(&sc->sc_ev_gptc);
3589 evcnt_detach(&sc->sc_ev_gorc);
3590 evcnt_detach(&sc->sc_ev_gotc);
3591 evcnt_detach(&sc->sc_ev_rnbc);
3592 evcnt_detach(&sc->sc_ev_ruc);
3593 evcnt_detach(&sc->sc_ev_rfc);
3594 evcnt_detach(&sc->sc_ev_roc);
3595 evcnt_detach(&sc->sc_ev_rjc);
3596 if (sc->sc_type >= WM_T_82540) {
3597 evcnt_detach(&sc->sc_ev_mgtprc);
3598 evcnt_detach(&sc->sc_ev_mgtpdc);
3599 evcnt_detach(&sc->sc_ev_mgtptc);
3600 }
3601 evcnt_detach(&sc->sc_ev_tor);
3602 evcnt_detach(&sc->sc_ev_tot);
3603 evcnt_detach(&sc->sc_ev_tpr);
3604 evcnt_detach(&sc->sc_ev_tpt);
3605 evcnt_detach(&sc->sc_ev_ptc64);
3606 evcnt_detach(&sc->sc_ev_ptc127);
3607 evcnt_detach(&sc->sc_ev_ptc255);
3608 evcnt_detach(&sc->sc_ev_ptc511);
3609 evcnt_detach(&sc->sc_ev_ptc1023);
3610 evcnt_detach(&sc->sc_ev_ptc1522);
3611 evcnt_detach(&sc->sc_ev_mptc);
3612 evcnt_detach(&sc->sc_ev_bptc);
3613 if (sc->sc_type >= WM_T_82571)
3614 evcnt_detach(&sc->sc_ev_iac);
3615 if (sc->sc_type < WM_T_82575) {
3616 evcnt_detach(&sc->sc_ev_icrxptc);
3617 evcnt_detach(&sc->sc_ev_icrxatc);
3618 evcnt_detach(&sc->sc_ev_ictxptc);
3619 evcnt_detach(&sc->sc_ev_ictxatc);
3620 evcnt_detach(&sc->sc_ev_ictxqec);
3621 evcnt_detach(&sc->sc_ev_ictxqmtc);
3622 evcnt_detach(&sc->sc_ev_rxdmtc);
3623 evcnt_detach(&sc->sc_ev_icrxoc);
3624 } else if (!WM_IS_ICHPCH(sc)) {
3625 evcnt_detach(&sc->sc_ev_rpthc);
3626 evcnt_detach(&sc->sc_ev_debug1);
3627 evcnt_detach(&sc->sc_ev_debug2);
3628 evcnt_detach(&sc->sc_ev_debug3);
3629 evcnt_detach(&sc->sc_ev_hgptc);
3630 evcnt_detach(&sc->sc_ev_debug4);
3631 evcnt_detach(&sc->sc_ev_rxdmtc);
3632 evcnt_detach(&sc->sc_ev_htcbdpc);
3633
3634 evcnt_detach(&sc->sc_ev_hgorc);
3635 evcnt_detach(&sc->sc_ev_hgotc);
3636 evcnt_detach(&sc->sc_ev_lenerrs);
3637 evcnt_detach(&sc->sc_ev_scvpc);
3638 evcnt_detach(&sc->sc_ev_hrmpc);
3639 }
3640 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
3641 evcnt_detach(&sc->sc_ev_tlpic);
3642 evcnt_detach(&sc->sc_ev_rlpic);
3643 evcnt_detach(&sc->sc_ev_b2ogprc);
3644 evcnt_detach(&sc->sc_ev_o2bspc);
3645 evcnt_detach(&sc->sc_ev_b2ospc);
3646 evcnt_detach(&sc->sc_ev_o2bgptc);
3647 }
3648 #endif /* WM_EVENT_COUNTERS */
3649
3650 rnd_detach_source(&sc->rnd_source);
3651
3652 /* Tell the firmware about the release */
3653 mutex_enter(sc->sc_core_lock);
3654 wm_release_manageability(sc);
3655 wm_release_hw_control(sc);
3656 wm_enable_wakeup(sc);
3657 mutex_exit(sc->sc_core_lock);
3658
3659 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3660
3661 ether_ifdetach(ifp);
3662 if_detach(ifp);
3663 if_percpuq_destroy(sc->sc_ipq);
3664
3665 /* Delete all remaining media. */
3666 ifmedia_fini(&sc->sc_mii.mii_media);
3667
3668 /* Unload RX dmamaps and free mbufs */
3669 for (i = 0; i < sc->sc_nqueues; i++) {
3670 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3671 mutex_enter(rxq->rxq_lock);
3672 wm_rxdrain(rxq);
3673 mutex_exit(rxq->rxq_lock);
3674 }
3675 /* Must unlock here */
3676
3677 /* Disestablish the interrupt handler */
3678 for (i = 0; i < sc->sc_nintrs; i++) {
3679 if (sc->sc_ihs[i] != NULL) {
3680 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3681 sc->sc_ihs[i] = NULL;
3682 }
3683 }
3684 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3685
3686 /* wm_stop() ensured that the workqueues are stopped. */
3687 workqueue_destroy(sc->sc_queue_wq);
3688 workqueue_destroy(sc->sc_reset_wq);
3689
3690 for (i = 0; i < sc->sc_nqueues; i++)
3691 softint_disestablish(sc->sc_queue[i].wmq_si);
3692
3693 wm_free_txrx_queues(sc);
3694
3695 /* Unmap the registers */
3696 if (sc->sc_ss) {
3697 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3698 sc->sc_ss = 0;
3699 }
3700 if (sc->sc_ios) {
3701 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3702 sc->sc_ios = 0;
3703 }
3704 if (sc->sc_flashs) {
3705 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3706 sc->sc_flashs = 0;
3707 }
3708
3709 if (sc->sc_core_lock)
3710 mutex_obj_free(sc->sc_core_lock);
3711 if (sc->sc_ich_phymtx)
3712 mutex_obj_free(sc->sc_ich_phymtx);
3713 if (sc->sc_ich_nvmmtx)
3714 mutex_obj_free(sc->sc_ich_nvmmtx);
3715
3716 return 0;
3717 }
3718
3719 static bool
3720 wm_suspend(device_t self, const pmf_qual_t *qual)
3721 {
3722 struct wm_softc *sc = device_private(self);
3723
3724 wm_release_manageability(sc);
3725 wm_release_hw_control(sc);
3726 wm_enable_wakeup(sc);
3727
3728 return true;
3729 }
3730
3731 static bool
3732 wm_resume(device_t self, const pmf_qual_t *qual)
3733 {
3734 struct wm_softc *sc = device_private(self);
3735 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3736 pcireg_t reg;
3737 char buf[256];
3738
3739 reg = CSR_READ(sc, WMREG_WUS);
3740 if (reg != 0) {
3741 snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3742 device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3743 CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3744 }
3745
3746 if (sc->sc_type >= WM_T_PCH2)
3747 wm_resume_workarounds_pchlan(sc);
3748 IFNET_LOCK(ifp);
3749 if ((ifp->if_flags & IFF_UP) == 0) {
3750 /* >= PCH_SPT hardware workaround before reset. */
3751 if (sc->sc_type >= WM_T_PCH_SPT)
3752 wm_flush_desc_rings(sc);
3753
3754 wm_reset(sc);
3755 /* Non-AMT based hardware can now take control from firmware */
3756 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3757 wm_get_hw_control(sc);
3758 wm_init_manageability(sc);
3759 } else {
3760 /*
3761 * We called pmf_class_network_register(), so if_init() is
3762 * automatically called when IFF_UP. wm_reset(),
3763 * wm_get_hw_control() and wm_init_manageability() are called
3764 * via wm_init().
3765 */
3766 }
3767 IFNET_UNLOCK(ifp);
3768
3769 return true;
3770 }
3771
3772 /*
3773 * wm_watchdog:
3774 *
3775 * Watchdog checker.
3776 */
3777 static bool
3778 wm_watchdog(struct ifnet *ifp)
3779 {
3780 int qid;
3781 struct wm_softc *sc = ifp->if_softc;
3782 	uint16_t hang_queue = 0; /* Bitmap; 16 bits suffice as 82576 has at most 16 queues. */
3783
3784 for (qid = 0; qid < sc->sc_nqueues; qid++) {
3785 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3786
3787 wm_watchdog_txq(ifp, txq, &hang_queue);
3788 }
3789
3790 #ifdef WM_DEBUG
3791 if (sc->sc_trigger_reset) {
3792 /* debug operation, no need for atomicity or reliability */
3793 sc->sc_trigger_reset = 0;
3794 hang_queue++;
3795 }
3796 #endif
3797
3798 if (hang_queue == 0)
3799 return true;
3800
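	/*
	 * atomic_swap_uint() returns the previous value, so only the
	 * first caller to see 0 enqueues the reset work; the flag is
	 * cleared again at the end of wm_handle_reset_work().
	 */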
3801 if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
3802 workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
3803
3804 return false;
3805 }
3806
3807 /*
3808 * Perform an interface watchdog reset.
3809 */
3810 static void
3811 wm_handle_reset_work(struct work *work, void *arg)
3812 {
3813 struct wm_softc * const sc = arg;
3814 struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
3815
3816 /* Don't want ioctl operations to happen */
3817 IFNET_LOCK(ifp);
3818
3819 	/* Reset the interface. */
3820 wm_init(ifp);
3821
3822 IFNET_UNLOCK(ifp);
3823
3824 	/*
3825 	 * Some upper-layer code paths still call ifp->if_start()
3826 	 * directly, e.g. ALTQ or single-CPU systems.
3827 	 */
3828 /* Try to get more packets going. */
3829 ifp->if_start(ifp);
3830
3831 atomic_store_relaxed(&sc->sc_reset_pending, 0);
3832 }
3833
3835 static void
3836 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3837 {
3838
3839 mutex_enter(txq->txq_lock);
3840 if (txq->txq_sending &&
3841 time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3842 wm_watchdog_txq_locked(ifp, txq, hang);
3843
3844 mutex_exit(txq->txq_lock);
3845 }
3846
3847 static void
3848 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3849 uint16_t *hang)
3850 {
3851 struct wm_softc *sc = ifp->if_softc;
3852 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3853
3854 KASSERT(mutex_owned(txq->txq_lock));
3855
3856 /*
3857 * Since we're using delayed interrupts, sweep up
3858 * before we report an error.
3859 */
3860 wm_txeof(txq, UINT_MAX);
3861
3862 if (txq->txq_sending)
3863 *hang |= __BIT(wmq->wmq_id);
3864
3865 if (txq->txq_free == WM_NTXDESC(txq)) {
3866 log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3867 device_xname(sc->sc_dev));
3868 } else {
3869 #ifdef WM_DEBUG
3870 int i, j;
3871 struct wm_txsoft *txs;
3872 #endif
3873 log(LOG_ERR,
3874 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3875 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3876 txq->txq_next);
3877 if_statinc(ifp, if_oerrors);
3878 #ifdef WM_DEBUG
3879 for (i = txq->txq_sdirty; i != txq->txq_snext;
3880 i = WM_NEXTTXS(txq, i)) {
3881 txs = &txq->txq_soft[i];
3882 printf("txs %d tx %d -> %d\n",
3883 i, txs->txs_firstdesc, txs->txs_lastdesc);
3884 for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3885 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3886 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3887 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3888 printf("\t %#08x%08x\n",
3889 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3890 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3891 } else {
3892 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3893 (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3894 txq->txq_descs[j].wtx_addr.wa_low);
3895 printf("\t %#04x%02x%02x%08x\n",
3896 txq->txq_descs[j].wtx_fields.wtxu_vlan,
3897 txq->txq_descs[j].wtx_fields.wtxu_options,
3898 txq->txq_descs[j].wtx_fields.wtxu_status,
3899 txq->txq_descs[j].wtx_cmdlen);
3900 }
3901 if (j == txs->txs_lastdesc)
3902 break;
3903 }
3904 }
3905 #endif
3906 }
3907 }
3908
3909 /*
3910 * wm_tick:
3911 *
3912 * One second timer, used to check link status, sweep up
3913 * completed transmit jobs, etc.
3914 */
3915 static void
3916 wm_tick(void *arg)
3917 {
3918 struct wm_softc *sc = arg;
3919 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3920
3921 mutex_enter(sc->sc_core_lock);
3922
3923 if (sc->sc_core_stopping) {
3924 mutex_exit(sc->sc_core_lock);
3925 return;
3926 }
3927
3928 wm_update_stats(sc);
3929
3930 if (sc->sc_flags & WM_F_HAS_MII) {
3931 bool dotick = true;
3932
3933 /*
3934 * Workaround for some chips to delay sending LINK_STATE_UP.
3935 * See also wm_linkintr_gmii() and wm_gmii_mediastatus().
3936 */
3937 if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
3938 struct timeval now;
3939
3940 getmicrotime(&now);
3941 if (timercmp(&now, &sc->sc_linkup_delay_time, <))
3942 dotick = false;
3943 else if (sc->sc_linkup_delay_time.tv_sec != 0) {
3944 /* Simplify by checking tv_sec only. */
3945
3946 sc->sc_linkup_delay_time.tv_sec = 0;
3947 sc->sc_linkup_delay_time.tv_usec = 0;
3948 }
3949 }
3950 if (dotick)
3951 mii_tick(&sc->sc_mii);
3952 } else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3953 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3954 wm_serdes_tick(sc);
3955 else
3956 wm_tbi_tick(sc);
3957
3958 mutex_exit(sc->sc_core_lock);
3959
3960 if (wm_watchdog(ifp))
3961 callout_schedule(&sc->sc_tick_ch, hz);
3962 }
3963
3964 static int
3965 wm_ifflags_cb(struct ethercom *ec)
3966 {
3967 struct ifnet *ifp = &ec->ec_if;
3968 struct wm_softc *sc = ifp->if_softc;
3969 u_short iffchange;
3970 int ecchange;
3971 bool needreset = false;
3972 int rc = 0;
3973
3974 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3975 device_xname(sc->sc_dev), __func__));
3976
3977 KASSERT(IFNET_LOCKED(ifp));
3978
3979 mutex_enter(sc->sc_core_lock);
3980
3981 /*
3982 	 * Check for if_flags changes.
3983 	 * The main use is to prevent link-down when opening bpf.
3984 */
3985 iffchange = ifp->if_flags ^ sc->sc_if_flags;
3986 sc->sc_if_flags = ifp->if_flags;
3987 if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3988 needreset = true;
3989 goto ec;
3990 }
3991
3992 /* iff related updates */
3993 if ((iffchange & IFF_PROMISC) != 0)
3994 wm_set_filter(sc);
3995
3996 wm_set_vlan(sc);
3997
3998 ec:
3999 /* Check for ec_capenable. */
4000 ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
4001 sc->sc_ec_capenable = ec->ec_capenable;
4002 if ((ecchange & ~ETHERCAP_EEE) != 0) {
4003 needreset = true;
4004 goto out;
4005 }
4006
4007 /* ec related updates */
4008 wm_set_eee(sc);
4009
4010 out:
4011 if (needreset)
4012 rc = ENETRESET;
4013 mutex_exit(sc->sc_core_lock);
4014
4015 return rc;
4016 }
4017
4018 static bool
4019 wm_phy_need_linkdown_discard(struct wm_softc *sc)
4020 {
4021
4022 switch (sc->sc_phytype) {
4023 case WMPHY_82577: /* ihphy */
4024 case WMPHY_82578: /* atphy */
4025 case WMPHY_82579: /* ihphy */
4026 case WMPHY_I217: /* ihphy */
4027 case WMPHY_82580: /* ihphy */
4028 case WMPHY_I350: /* ihphy */
4029 return true;
4030 default:
4031 return false;
4032 }
4033 }
4034
4035 static void
4036 wm_set_linkdown_discard(struct wm_softc *sc)
4037 {
4038
4039 for (int i = 0; i < sc->sc_nqueues; i++) {
4040 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4041
4042 mutex_enter(txq->txq_lock);
4043 txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
4044 mutex_exit(txq->txq_lock);
4045 }
4046 }
4047
4048 static void
4049 wm_clear_linkdown_discard(struct wm_softc *sc)
4050 {
4051
4052 for (int i = 0; i < sc->sc_nqueues; i++) {
4053 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4054
4055 mutex_enter(txq->txq_lock);
4056 txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
4057 mutex_exit(txq->txq_lock);
4058 }
4059 }
4060
4061 /*
4062 * wm_ioctl: [ifnet interface function]
4063 *
4064 * Handle control requests from the operator.
4065 */
4066 static int
4067 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
4068 {
4069 struct wm_softc *sc = ifp->if_softc;
4070 struct ifreq *ifr = (struct ifreq *)data;
4071 struct ifaddr *ifa = (struct ifaddr *)data;
4072 struct sockaddr_dl *sdl;
4073 int error;
4074
4075 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4076 device_xname(sc->sc_dev), __func__));
4077
4078 switch (cmd) {
4079 case SIOCADDMULTI:
4080 case SIOCDELMULTI:
4081 break;
4082 default:
4083 KASSERT(IFNET_LOCKED(ifp));
4084 }
4085
4086 if (cmd == SIOCZIFDATA) {
4087 /*
4088 * Special handling for SIOCZIFDATA.
4089 * Copying and clearing the if_data structure is done with
4090 * ether_ioctl() below.
4091 */
4092 mutex_enter(sc->sc_core_lock);
4093 wm_update_stats(sc);
4094 wm_clear_evcnt(sc);
4095 mutex_exit(sc->sc_core_lock);
4096 }
4097
4098 switch (cmd) {
4099 case SIOCSIFMEDIA:
4100 mutex_enter(sc->sc_core_lock);
4101 /* Flow control requires full-duplex mode. */
4102 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4103 (ifr->ifr_media & IFM_FDX) == 0)
4104 ifr->ifr_media &= ~IFM_ETH_FMASK;
4105 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4106 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4107 /* We can do both TXPAUSE and RXPAUSE. */
4108 ifr->ifr_media |=
4109 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4110 }
4111 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4112 }
4113 mutex_exit(sc->sc_core_lock);
4114 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
4115 if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
4116 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
4117 DPRINTF(sc, WM_DEBUG_LINK,
4118 ("%s: %s: Set linkdown discard flag\n",
4119 device_xname(sc->sc_dev), __func__));
4120 wm_set_linkdown_discard(sc);
4121 }
4122 }
4123 break;
4124 case SIOCINITIFADDR:
4125 mutex_enter(sc->sc_core_lock);
4126 if (ifa->ifa_addr->sa_family == AF_LINK) {
4127 sdl = satosdl(ifp->if_dl->ifa_addr);
4128 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
4129 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
4130 /* Unicast address is the first multicast entry */
4131 wm_set_filter(sc);
4132 error = 0;
4133 mutex_exit(sc->sc_core_lock);
4134 break;
4135 }
4136 mutex_exit(sc->sc_core_lock);
4137 /*FALLTHROUGH*/
4138 default:
4139 if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
4140 if (((ifp->if_flags & IFF_UP) != 0) &&
4141 ((ifr->ifr_flags & IFF_UP) == 0)) {
4142 DPRINTF(sc, WM_DEBUG_LINK,
4143 ("%s: %s: Set linkdown discard flag\n",
4144 device_xname(sc->sc_dev), __func__));
4145 wm_set_linkdown_discard(sc);
4146 }
4147 }
4148 const int s = splnet();
4149 /* It may call wm_start, so unlock here */
4150 error = ether_ioctl(ifp, cmd, data);
4151 splx(s);
4152 if (error != ENETRESET)
4153 break;
4154
4155 error = 0;
4156
4157 if (cmd == SIOCSIFCAP)
4158 error = if_init(ifp);
4159 else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
4160 mutex_enter(sc->sc_core_lock);
4161 if (sc->sc_if_flags & IFF_RUNNING) {
4162 /*
4163 * Multicast list has changed; set the
4164 * hardware filter accordingly.
4165 */
4166 wm_set_filter(sc);
4167 }
4168 mutex_exit(sc->sc_core_lock);
4169 }
4170 break;
4171 }
4172
4173 return error;
4174 }
4175
4176 /* MAC address related */
4177
4178 /*
4179  * Get the offset of the MAC address and return it.
4180  * On error, return 0 so the default offset is used.
4181 */
4182 static uint16_t
4183 wm_check_alt_mac_addr(struct wm_softc *sc)
4184 {
4185 uint16_t myea[ETHER_ADDR_LEN / 2];
4186 uint16_t offset = NVM_OFF_MACADDR;
4187
4188 /* Try to read alternative MAC address pointer */
4189 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
4190 return 0;
4191
4192 	/* Check whether the pointer is valid. */
4193 if ((offset == 0x0000) || (offset == 0xffff))
4194 return 0;
4195
4196 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
4197 	/*
4198 	 * Check whether the alternative MAC address is valid.
4199 	 * Some cards have a non-0xffff pointer but don't actually
4200 	 * use an alternative MAC address.
4201 	 *
4202 	 * A valid unicast address must have the multicast bit clear.
4203 	 */
4204 if (wm_nvm_read(sc, offset, 1, myea) == 0)
4205 if (((myea[0] & 0xff) & 0x01) == 0)
4206 return offset; /* Found */
4207
4208 /* Not found */
4209 return 0;
4210 }
4211
4212 static int
4213 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4214 {
4215 uint16_t myea[ETHER_ADDR_LEN / 2];
4216 uint16_t offset = NVM_OFF_MACADDR;
4217 int do_invert = 0;
4218
4219 switch (sc->sc_type) {
4220 case WM_T_82580:
4221 case WM_T_I350:
4222 case WM_T_I354:
4223 /* EEPROM Top Level Partitioning */
4224 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
4225 break;
4226 case WM_T_82571:
4227 case WM_T_82575:
4228 case WM_T_82576:
4229 case WM_T_80003:
4230 case WM_T_I210:
4231 case WM_T_I211:
4232 offset = wm_check_alt_mac_addr(sc);
4233 if (offset == 0)
4234 if ((sc->sc_funcid & 0x01) == 1)
4235 do_invert = 1;
4236 break;
4237 default:
4238 if ((sc->sc_funcid & 0x01) == 1)
4239 do_invert = 1;
4240 break;
4241 }
4242
4243 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
4244 goto bad;
4245
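	/*
	 * The NVM stores the address as three little-endian 16-bit
	 * words; e.g. myea[0] == 0x2211 yields enaddr[0] = 0x11 and
	 * enaddr[1] = 0x22.
	 */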
4246 enaddr[0] = myea[0] & 0xff;
4247 enaddr[1] = myea[0] >> 8;
4248 enaddr[2] = myea[1] & 0xff;
4249 enaddr[3] = myea[1] >> 8;
4250 enaddr[4] = myea[2] & 0xff;
4251 enaddr[5] = myea[2] >> 8;
4252
4253 /*
4254 * Toggle the LSB of the MAC address on the second port
4255 * of some dual port cards.
4256 */
4257 if (do_invert != 0)
4258 enaddr[5] ^= 1;
4259
4260 return 0;
4261
4262 bad:
4263 return -1;
4264 }
4265
4266 /*
4267 * wm_set_ral:
4268 *
4269  * Set an entry in the receive address list.
4270 */
4271 static void
4272 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4273 {
4274 uint32_t ral_lo, ral_hi, addrl, addrh;
4275 uint32_t wlock_mac;
4276 int rv;
4277
4278 if (enaddr != NULL) {
4279 ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
4280 ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
4281 ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
4282 ral_hi |= RAL_AV;
4283 } else {
4284 ral_lo = 0;
4285 ral_hi = 0;
4286 }
4287
4288 switch (sc->sc_type) {
4289 case WM_T_82542_2_0:
4290 case WM_T_82542_2_1:
4291 case WM_T_82543:
4292 CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
4293 CSR_WRITE_FLUSH(sc);
4294 CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
4295 CSR_WRITE_FLUSH(sc);
4296 break;
4297 case WM_T_PCH2:
4298 case WM_T_PCH_LPT:
4299 case WM_T_PCH_SPT:
4300 case WM_T_PCH_CNP:
4301 case WM_T_PCH_TGP:
4302 if (idx == 0) {
4303 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4304 CSR_WRITE_FLUSH(sc);
4305 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4306 CSR_WRITE_FLUSH(sc);
4307 return;
4308 }
4309 if (sc->sc_type != WM_T_PCH2) {
4310 wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
4311 FWSM_WLOCK_MAC);
4312 addrl = WMREG_SHRAL(idx - 1);
4313 addrh = WMREG_SHRAH(idx - 1);
4314 } else {
4315 wlock_mac = 0;
4316 addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
4317 addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
4318 }
4319
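		/*
		 * Judging from wm_set_filter(), FWSM_WLOCK_MAC is the
		 * number of SHRA entries firmware lets us modify: 0
		 * means all of them, otherwise only entries below that
		 * count (RAR[0] was handled above).
		 */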
4320 if ((wlock_mac == 0) || (idx <= wlock_mac)) {
4321 rv = wm_get_swflag_ich8lan(sc);
4322 if (rv != 0)
4323 return;
4324 CSR_WRITE(sc, addrl, ral_lo);
4325 CSR_WRITE_FLUSH(sc);
4326 CSR_WRITE(sc, addrh, ral_hi);
4327 CSR_WRITE_FLUSH(sc);
4328 wm_put_swflag_ich8lan(sc);
4329 }
4330
4331 break;
4332 default:
4333 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4334 CSR_WRITE_FLUSH(sc);
4335 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4336 CSR_WRITE_FLUSH(sc);
4337 break;
4338 }
4339 }
4340
4341 /*
4342 * wm_mchash:
4343 *
4344 * Compute the hash of the multicast address for the 4096-bit
4345 * multicast filter.
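 *
 * For example, with sc_mchash_type 0 on non-ICH/PCH chips this is
 * hash = ((enaddr[4] >> 4) | (enaddr[5] << 4)) & 0xfff;
 * wm_set_filter() then uses bits [11:5] to select one of the 128
 * 32-bit MTA registers and bits [4:0] as the bit within it
 * (128 * 32 = 4096).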
4346 */
4347 static uint32_t
4348 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4349 {
4350 static const int lo_shift[4] = { 4, 3, 2, 0 };
4351 static const int hi_shift[4] = { 4, 5, 6, 8 };
4352 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4353 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4354 uint32_t hash;
4355
4356 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4357 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4358 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4359 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
4360 || (sc->sc_type == WM_T_PCH_TGP)) {
4361 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4362 (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4363 return (hash & 0x3ff);
4364 }
4365 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4366 (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4367
4368 return (hash & 0xfff);
4369 }
4370
4371 /*
4372  * wm_rar_count:
4373  *	Return the number of receive address (RAL/SHRA) entries.
4374  */
4375 static int
4376 wm_rar_count(struct wm_softc *sc)
4377 {
4378 int size;
4379
4380 switch (sc->sc_type) {
4381 case WM_T_ICH8:
4382 		size = WM_RAL_TABSIZE_ICH8 - 1;
4383 break;
4384 case WM_T_ICH9:
4385 case WM_T_ICH10:
4386 case WM_T_PCH:
4387 size = WM_RAL_TABSIZE_ICH8;
4388 break;
4389 case WM_T_PCH2:
4390 size = WM_RAL_TABSIZE_PCH2;
4391 break;
4392 case WM_T_PCH_LPT:
4393 case WM_T_PCH_SPT:
4394 case WM_T_PCH_CNP:
4395 case WM_T_PCH_TGP:
4396 size = WM_RAL_TABSIZE_PCH_LPT;
4397 break;
4398 case WM_T_82575:
4399 case WM_T_I210:
4400 case WM_T_I211:
4401 size = WM_RAL_TABSIZE_82575;
4402 break;
4403 case WM_T_82576:
4404 case WM_T_82580:
4405 size = WM_RAL_TABSIZE_82576;
4406 break;
4407 case WM_T_I350:
4408 case WM_T_I354:
4409 size = WM_RAL_TABSIZE_I350;
4410 break;
4411 default:
4412 size = WM_RAL_TABSIZE;
4413 }
4414
4415 return size;
4416 }
4417
4418 /*
4419 * wm_set_filter:
4420 *
4421 * Set up the receive filter.
4422 */
4423 static void
4424 wm_set_filter(struct wm_softc *sc)
4425 {
4426 struct ethercom *ec = &sc->sc_ethercom;
4427 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4428 struct ether_multi *enm;
4429 struct ether_multistep step;
4430 bus_addr_t mta_reg;
4431 uint32_t hash, reg, bit;
4432 int i, size, ralmax, rv;
4433
4434 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4435 device_xname(sc->sc_dev), __func__));
4436 KASSERT(mutex_owned(sc->sc_core_lock));
4437
4438 if (sc->sc_type >= WM_T_82544)
4439 mta_reg = WMREG_CORDOVA_MTA;
4440 else
4441 mta_reg = WMREG_MTA;
4442
4443 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4444
4445 if (sc->sc_if_flags & IFF_BROADCAST)
4446 sc->sc_rctl |= RCTL_BAM;
4447 if (sc->sc_if_flags & IFF_PROMISC) {
4448 sc->sc_rctl |= RCTL_UPE;
4449 ETHER_LOCK(ec);
4450 ec->ec_flags |= ETHER_F_ALLMULTI;
4451 ETHER_UNLOCK(ec);
4452 goto allmulti;
4453 }
4454
4455 /*
4456 * Set the station address in the first RAL slot, and
4457 * clear the remaining slots.
4458 */
4459 size = wm_rar_count(sc);
4460 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4461
4462 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) ||
4463 (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) {
4464 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
4465 switch (i) {
4466 case 0:
4467 /* We can use all entries */
4468 ralmax = size;
4469 break;
4470 case 1:
4471 /* Only RAR[0] */
4472 ralmax = 1;
4473 break;
4474 default:
4475 /* Available SHRA + RAR[0] */
4476 ralmax = i + 1;
4477 }
4478 } else
4479 ralmax = size;
4480 for (i = 1; i < size; i++) {
4481 if (i < ralmax)
4482 wm_set_ral(sc, NULL, i);
4483 }
4484
4485 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4486 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4487 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4488 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
4489 || (sc->sc_type == WM_T_PCH_TGP))
4490 size = WM_ICH8_MC_TABSIZE;
4491 else
4492 size = WM_MC_TABSIZE;
4493 /* Clear out the multicast table. */
4494 for (i = 0; i < size; i++) {
4495 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4496 CSR_WRITE_FLUSH(sc);
4497 }
4498
4499 ETHER_LOCK(ec);
4500 ETHER_FIRST_MULTI(step, ec, enm);
4501 while (enm != NULL) {
4502 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4503 ec->ec_flags |= ETHER_F_ALLMULTI;
4504 ETHER_UNLOCK(ec);
4505 /*
4506 * We must listen to a range of multicast addresses.
4507 * For now, just accept all multicasts, rather than
4508 * trying to set only those filter bits needed to match
4509 * the range. (At this time, the only use of address
4510 * ranges is for IP multicast routing, for which the
4511 * range is big enough to require all bits set.)
4512 */
4513 goto allmulti;
4514 }
4515
4516 hash = wm_mchash(sc, enm->enm_addrlo);
4517
4518 reg = (hash >> 5);
4519 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4520 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4521 || (sc->sc_type == WM_T_PCH2)
4522 || (sc->sc_type == WM_T_PCH_LPT)
4523 || (sc->sc_type == WM_T_PCH_SPT)
4524 || (sc->sc_type == WM_T_PCH_CNP)
4525 || (sc->sc_type == WM_T_PCH_TGP))
4526 reg &= 0x1f;
4527 else
4528 reg &= 0x7f;
4529 bit = hash & 0x1f;
4530
4531 hash = CSR_READ(sc, mta_reg + (reg << 2));
4532 hash |= 1U << bit;
4533
4534 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
4535 /*
4536 			 * 82544 Errata 9: Certain registers cannot be written
4537 * with particular alignments in PCI-X bus operation
4538 * (FCAH, MTA and VFTA).
4539 */
4540 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4541 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4542 CSR_WRITE_FLUSH(sc);
4543 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4544 CSR_WRITE_FLUSH(sc);
4545 } else {
4546 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4547 CSR_WRITE_FLUSH(sc);
4548 }
4549
4550 ETHER_NEXT_MULTI(step, enm);
4551 }
4552 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4553 ETHER_UNLOCK(ec);
4554
4555 goto setit;
4556
4557 allmulti:
4558 sc->sc_rctl |= RCTL_MPE;
4559
4560 setit:
4561 if (sc->sc_type >= WM_T_PCH2) {
4562 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4563 && (ifp->if_mtu > ETHERMTU))
4564 rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
4565 else
4566 rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
4567 if (rv != 0)
4568 device_printf(sc->sc_dev,
4569 "Failed to do workaround for jumbo frame.\n");
4570 }
4571
4572 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4573 }
4574
4575 /* Reset and init related */
4576
4577 static void
4578 wm_set_vlan(struct wm_softc *sc)
4579 {
4580
4581 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4582 device_xname(sc->sc_dev), __func__));
4583
4584 /* Deal with VLAN enables. */
4585 if (VLAN_ATTACHED(&sc->sc_ethercom))
4586 sc->sc_ctrl |= CTRL_VME;
4587 else
4588 sc->sc_ctrl &= ~CTRL_VME;
4589
4590 /* Write the control registers. */
4591 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4592 }
4593
4594 static void
4595 wm_set_pcie_completion_timeout(struct wm_softc *sc)
4596 {
4597 uint32_t gcr;
4598 pcireg_t ctrl2;
4599
4600 gcr = CSR_READ(sc, WMREG_GCR);
4601
4602 /* Only take action if timeout value is defaulted to 0 */
4603 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
4604 goto out;
4605
4606 if ((gcr & GCR_CAP_VER2) == 0) {
4607 gcr |= GCR_CMPL_TMOUT_10MS;
4608 goto out;
4609 }
4610
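	/*
	 * Devices advertising PCIe capability version 2 are programmed
	 * through the standard Device Control 2 register instead;
	 * WM_PCIE_DCSR2_16MS selects the 16ms completion timeout range
	 * (per the macro's name).
	 */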
4611 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4612 sc->sc_pcixe_capoff + PCIE_DCSR2);
4613 ctrl2 |= WM_PCIE_DCSR2_16MS;
4614 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4615 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
4616
4617 out:
4618 /* Disable completion timeout resend */
4619 gcr &= ~GCR_CMPL_TMOUT_RESEND;
4620
4621 CSR_WRITE(sc, WMREG_GCR, gcr);
4622 }
4623
4624 void
4625 wm_get_auto_rd_done(struct wm_softc *sc)
4626 {
4627 int i;
4628
4629 	/* Wait for eeprom to reload */
4630 switch (sc->sc_type) {
4631 case WM_T_82571:
4632 case WM_T_82572:
4633 case WM_T_82573:
4634 case WM_T_82574:
4635 case WM_T_82583:
4636 case WM_T_82575:
4637 case WM_T_82576:
4638 case WM_T_82580:
4639 case WM_T_I350:
4640 case WM_T_I354:
4641 case WM_T_I210:
4642 case WM_T_I211:
4643 case WM_T_80003:
4644 case WM_T_ICH8:
4645 case WM_T_ICH9:
4646 for (i = 0; i < 10; i++) {
4647 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4648 break;
4649 delay(1000);
4650 }
4651 if (i == 10) {
4652 log(LOG_ERR, "%s: auto read from eeprom failed to "
4653 "complete\n", device_xname(sc->sc_dev));
4654 }
4655 break;
4656 default:
4657 break;
4658 }
4659 }
4660
4661 void
4662 wm_lan_init_done(struct wm_softc *sc)
4663 {
4664 uint32_t reg = 0;
4665 int i;
4666
4667 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4668 device_xname(sc->sc_dev), __func__));
4669
4670 /* Wait for eeprom to reload */
4671 switch (sc->sc_type) {
4672 case WM_T_ICH10:
4673 case WM_T_PCH:
4674 case WM_T_PCH2:
4675 case WM_T_PCH_LPT:
4676 case WM_T_PCH_SPT:
4677 case WM_T_PCH_CNP:
4678 case WM_T_PCH_TGP:
4679 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4680 reg = CSR_READ(sc, WMREG_STATUS);
4681 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4682 break;
4683 delay(100);
4684 }
4685 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4686 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4687 "complete\n", device_xname(sc->sc_dev), __func__);
4688 }
4689 break;
4690 default:
4691 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4692 __func__);
4693 break;
4694 }
4695
4696 reg &= ~STATUS_LAN_INIT_DONE;
4697 CSR_WRITE(sc, WMREG_STATUS, reg);
4698 }
4699
4700 void
4701 wm_get_cfg_done(struct wm_softc *sc)
4702 {
4703 int mask;
4704 uint32_t reg;
4705 int i;
4706
4707 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4708 device_xname(sc->sc_dev), __func__));
4709
4710 /* Wait for eeprom to reload */
4711 switch (sc->sc_type) {
4712 case WM_T_82542_2_0:
4713 case WM_T_82542_2_1:
4714 /* null */
4715 break;
4716 case WM_T_82543:
4717 case WM_T_82544:
4718 case WM_T_82540:
4719 case WM_T_82545:
4720 case WM_T_82545_3:
4721 case WM_T_82546:
4722 case WM_T_82546_3:
4723 case WM_T_82541:
4724 case WM_T_82541_2:
4725 case WM_T_82547:
4726 case WM_T_82547_2:
4727 case WM_T_82573:
4728 case WM_T_82574:
4729 case WM_T_82583:
4730 /* generic */
4731 delay(10*1000);
4732 break;
4733 case WM_T_80003:
4734 case WM_T_82571:
4735 case WM_T_82572:
4736 case WM_T_82575:
4737 case WM_T_82576:
4738 case WM_T_82580:
4739 case WM_T_I350:
4740 case WM_T_I354:
4741 case WM_T_I210:
4742 case WM_T_I211:
4743 if (sc->sc_type == WM_T_82571) {
4744 			/* Only on 82571 do all functions share the port 0 bit */
4745 mask = EEMNGCTL_CFGDONE_0;
4746 } else
4747 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4748 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4749 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4750 break;
4751 delay(1000);
4752 }
4753 if (i >= WM_PHY_CFG_TIMEOUT)
4754 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
4755 device_xname(sc->sc_dev), __func__));
4756 break;
4757 case WM_T_ICH8:
4758 case WM_T_ICH9:
4759 case WM_T_ICH10:
4760 case WM_T_PCH:
4761 case WM_T_PCH2:
4762 case WM_T_PCH_LPT:
4763 case WM_T_PCH_SPT:
4764 case WM_T_PCH_CNP:
4765 case WM_T_PCH_TGP:
4766 delay(10*1000);
4767 if (sc->sc_type >= WM_T_ICH10)
4768 wm_lan_init_done(sc);
4769 else
4770 wm_get_auto_rd_done(sc);
4771
4772 /* Clear PHY Reset Asserted bit */
4773 reg = CSR_READ(sc, WMREG_STATUS);
4774 if ((reg & STATUS_PHYRA) != 0)
4775 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4776 break;
4777 default:
4778 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4779 __func__);
4780 break;
4781 }
4782 }
4783
4784 int
4785 wm_phy_post_reset(struct wm_softc *sc)
4786 {
4787 device_t dev = sc->sc_dev;
4788 uint16_t reg;
4789 int rv = 0;
4790
4791 /* This function is only for ICH8 and newer. */
4792 if (sc->sc_type < WM_T_ICH8)
4793 return 0;
4794
4795 if (wm_phy_resetisblocked(sc)) {
4796 /* XXX */
4797 device_printf(dev, "PHY is blocked\n");
4798 return -1;
4799 }
4800
4801 /* Allow time for h/w to get to quiescent state after reset */
4802 delay(10*1000);
4803
4804 /* Perform any necessary post-reset workarounds */
4805 if (sc->sc_type == WM_T_PCH)
4806 rv = wm_hv_phy_workarounds_ich8lan(sc);
4807 else if (sc->sc_type == WM_T_PCH2)
4808 rv = wm_lv_phy_workarounds_ich8lan(sc);
4809 if (rv != 0)
4810 return rv;
4811
4812 /* Clear the host wakeup bit after lcd reset */
4813 if (sc->sc_type >= WM_T_PCH) {
4814 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4815 reg &= ~BM_WUC_HOST_WU_BIT;
4816 wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4817 }
4818
4819 /* Configure the LCD with the extended configuration region in NVM */
4820 if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4821 return rv;
4822
4823 /* Configure the LCD with the OEM bits in NVM */
4824 rv = wm_oem_bits_config_ich8lan(sc, true);
4825
4826 if (sc->sc_type == WM_T_PCH2) {
4827 /* Ungate automatic PHY configuration on non-managed 82579 */
4828 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4829 delay(10 * 1000);
4830 wm_gate_hw_phy_config_ich8lan(sc, false);
4831 }
4832 /* Set EEE LPI Update Timer to 200usec */
4833 rv = sc->phy.acquire(sc);
4834 if (rv)
4835 return rv;
4836 rv = wm_write_emi_reg_locked(dev,
4837 I82579_LPI_UPDATE_TIMER, 0x1387);
4838 sc->phy.release(sc);
4839 }
4840
4841 return rv;
4842 }
4843
4844 /* Only for PCH and newer */
4845 static int
4846 wm_write_smbus_addr(struct wm_softc *sc)
4847 {
4848 uint32_t strap, freq;
4849 uint16_t phy_data;
4850 int rv;
4851
4852 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4853 device_xname(sc->sc_dev), __func__));
4854 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4855
4856 strap = CSR_READ(sc, WMREG_STRAP);
4857 freq = __SHIFTOUT(strap, STRAP_FREQ);
4858
4859 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4860 if (rv != 0)
4861 return rv;
4862
4863 phy_data &= ~HV_SMB_ADDR_ADDR;
4864 phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4865 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4866
4867 if (sc->sc_phytype == WMPHY_I217) {
4868 /* Restore SMBus frequency */
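		/*
		 * The strap encodes the frequency as a 1-based value;
		 * 0 means it is not specified.  "freq--" both tests for
		 * that and converts the value to the 0-based two-bit
		 * code split across FREQ_LOW/FREQ_HIGH below.
		 */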
4869 		if (freq--) {
4870 phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4871 | HV_SMB_ADDR_FREQ_HIGH);
4872 phy_data |= __SHIFTIN((freq & 0x01) != 0,
4873 HV_SMB_ADDR_FREQ_LOW);
4874 phy_data |= __SHIFTIN((freq & 0x02) != 0,
4875 HV_SMB_ADDR_FREQ_HIGH);
4876 } else
4877 DPRINTF(sc, WM_DEBUG_INIT,
4878 ("%s: %s Unsupported SMB frequency in PHY\n",
4879 device_xname(sc->sc_dev), __func__));
4880 }
4881
4882 return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4883 phy_data);
4884 }
4885
4886 static int
4887 wm_init_lcd_from_nvm(struct wm_softc *sc)
4888 {
4889 uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4890 uint16_t phy_page = 0;
4891 int rv = 0;
4892
4893 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4894 device_xname(sc->sc_dev), __func__));
4895
4896 switch (sc->sc_type) {
4897 case WM_T_ICH8:
4898 if ((sc->sc_phytype == WMPHY_UNKNOWN)
4899 || (sc->sc_phytype != WMPHY_IGP_3))
4900 return 0;
4901
4902 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4903 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4904 sw_cfg_mask = FEXTNVM_SW_CONFIG;
4905 break;
4906 }
4907 /* FALLTHROUGH */
4908 case WM_T_PCH:
4909 case WM_T_PCH2:
4910 case WM_T_PCH_LPT:
4911 case WM_T_PCH_SPT:
4912 case WM_T_PCH_CNP:
4913 case WM_T_PCH_TGP:
4914 sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4915 break;
4916 default:
4917 return 0;
4918 }
4919
4920 if ((rv = sc->phy.acquire(sc)) != 0)
4921 return rv;
4922
4923 reg = CSR_READ(sc, WMREG_FEXTNVM);
4924 if ((reg & sw_cfg_mask) == 0)
4925 goto release;
4926
4927 /*
4928 * Make sure HW does not configure LCD from PHY extended configuration
4929 * before SW configuration
4930 */
4931 extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4932 if ((sc->sc_type < WM_T_PCH2)
4933 && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4934 goto release;
4935
4936 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4937 device_xname(sc->sc_dev), __func__));
4938 	/* The NVM pointer is in dwords; << 1 converts it to word units. */
4939 word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4940
4941 reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4942 cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4943 if (cnf_size == 0)
4944 goto release;
4945
4946 if (((sc->sc_type == WM_T_PCH)
4947 && ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4948 || (sc->sc_type > WM_T_PCH)) {
4949 /*
4950 * HW configures the SMBus address and LEDs when the OEM and
4951 * LCD Write Enable bits are set in the NVM. When both NVM bits
4952 * are cleared, SW will configure them instead.
4953 */
4954 DPRINTF(sc, WM_DEBUG_INIT,
4955 ("%s: %s: Configure SMBus and LED\n",
4956 device_xname(sc->sc_dev), __func__));
4957 if ((rv = wm_write_smbus_addr(sc)) != 0)
4958 goto release;
4959
4960 reg = CSR_READ(sc, WMREG_LEDCTL);
4961 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4962 (uint16_t)reg);
4963 if (rv != 0)
4964 goto release;
4965 }
4966
4967 /* Configure LCD from extended configuration region. */
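	/*
	 * Each entry in the region is a pair of NVM words: the register
	 * data followed by the register address.  A write to
	 * IGPHY_PAGE_SELECT switches the page used for subsequent
	 * register addresses.
	 */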
4968 for (i = 0; i < cnf_size; i++) {
4969 uint16_t reg_data, reg_addr;
4970
4971 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
4972 goto release;
4973
4974 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
4975 goto release;
4976
4977 if (reg_addr == IGPHY_PAGE_SELECT)
4978 phy_page = reg_data;
4979
4980 reg_addr &= IGPHY_MAXREGADDR;
4981 reg_addr |= phy_page;
4982
4983 KASSERT(sc->phy.writereg_locked != NULL);
4984 rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4985 reg_data);
4986 }
4987
4988 release:
4989 sc->phy.release(sc);
4990 return rv;
4991 }
4992
4993 /*
4994 * wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4995 * @sc: pointer to the HW structure
4996 * @d0_state: true if entering D0, false if entering D3
4997 *
4998 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
4999 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
5000 * in the NVM determine whether HW should configure LPLU and Gbe Disable.
5001 */
5002 int
5003 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
5004 {
5005 uint32_t mac_reg;
5006 uint16_t oem_reg;
5007 int rv;
5008
5009 if (sc->sc_type < WM_T_PCH)
5010 return 0;
5011
5012 rv = sc->phy.acquire(sc);
5013 if (rv != 0)
5014 return rv;
5015
5016 if (sc->sc_type == WM_T_PCH) {
5017 mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
5018 if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
5019 goto release;
5020 }
5021
5022 mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
5023 if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
5024 goto release;
5025
5026 mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
5027
5028 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
5029 if (rv != 0)
5030 goto release;
5031 oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
5032
5033 if (d0_state) {
5034 if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
5035 oem_reg |= HV_OEM_BITS_A1KDIS;
5036 if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
5037 oem_reg |= HV_OEM_BITS_LPLU;
5038 } else {
5039 if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
5040 != 0)
5041 oem_reg |= HV_OEM_BITS_A1KDIS;
5042 if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
5043 != 0)
5044 oem_reg |= HV_OEM_BITS_LPLU;
5045 }
5046
5047 /* Set Restart auto-neg to activate the bits */
5048 if ((d0_state || (sc->sc_type != WM_T_PCH))
5049 && (wm_phy_resetisblocked(sc) == false))
5050 oem_reg |= HV_OEM_BITS_ANEGNOW;
5051
5052 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
5053
5054 release:
5055 sc->phy.release(sc);
5056
5057 return rv;
5058 }
5059
5060 /* Init hardware bits */
5061 void
5062 wm_initialize_hardware_bits(struct wm_softc *sc)
5063 {
5064 uint32_t tarc0, tarc1, reg;
5065
5066 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5067 device_xname(sc->sc_dev), __func__));
5068
5069 /* For 82571 variant, 80003 and ICHs */
5070 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
5071 || WM_IS_ICHPCH(sc)) {
5072
5073 /* Transmit Descriptor Control 0 */
5074 reg = CSR_READ(sc, WMREG_TXDCTL(0));
5075 reg |= TXDCTL_COUNT_DESC;
5076 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
5077
5078 /* Transmit Descriptor Control 1 */
5079 reg = CSR_READ(sc, WMREG_TXDCTL(1));
5080 reg |= TXDCTL_COUNT_DESC;
5081 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
5082
5083 /* TARC0 */
5084 tarc0 = CSR_READ(sc, WMREG_TARC0);
5085 switch (sc->sc_type) {
5086 case WM_T_82571:
5087 case WM_T_82572:
5088 case WM_T_82573:
5089 case WM_T_82574:
5090 case WM_T_82583:
5091 case WM_T_80003:
5092 /* Clear bits 30..27 */
5093 tarc0 &= ~__BITS(30, 27);
5094 break;
5095 default:
5096 break;
5097 }
5098
5099 switch (sc->sc_type) {
5100 case WM_T_82571:
5101 case WM_T_82572:
5102 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
5103
5104 tarc1 = CSR_READ(sc, WMREG_TARC1);
5105 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
5106 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
5107 /* 8257[12] Errata No.7 */
5108 tarc1 |= __BIT(22); /* TARC1 bit 22 */
5109
5110 /* TARC1 bit 28 */
5111 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5112 tarc1 &= ~__BIT(28);
5113 else
5114 tarc1 |= __BIT(28);
5115 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5116
5117 /*
5118 * 8257[12] Errata No.13
5119 * Disable Dynamic Clock Gating.
5120 */
5121 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5122 reg &= ~CTRL_EXT_DMA_DYN_CLK;
5123 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5124 break;
5125 case WM_T_82573:
5126 case WM_T_82574:
5127 case WM_T_82583:
5128 if ((sc->sc_type == WM_T_82574)
5129 || (sc->sc_type == WM_T_82583))
5130 tarc0 |= __BIT(26); /* TARC0 bit 26 */
5131
5132 /* Extended Device Control */
5133 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5134 reg &= ~__BIT(23); /* Clear bit 23 */
5135 reg |= __BIT(22); /* Set bit 22 */
5136 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5137
5138 /* Device Control */
5139 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
5140 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5141
5142 /* PCIe Control Register */
5143 /*
5144 * 82573 Errata (unknown).
5145 *
5146 * 82574 Errata 25 and 82583 Errata 12
5147 * "Dropped Rx Packets":
5148 * NVM Image Version 2.1.4 and newer does not have this bug.
5149 */
5150 reg = CSR_READ(sc, WMREG_GCR);
5151 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
5152 CSR_WRITE(sc, WMREG_GCR, reg);
5153
5154 if ((sc->sc_type == WM_T_82574)
5155 || (sc->sc_type == WM_T_82583)) {
5156 /*
5157 * Document says this bit must be set for
5158 * proper operation.
5159 */
5160 reg = CSR_READ(sc, WMREG_GCR);
5161 reg |= __BIT(22);
5162 CSR_WRITE(sc, WMREG_GCR, reg);
5163
5164 /*
5165 * Apply a workaround for the hardware erratum
5166 * documented in the errata docs. It fixes an
5167 * issue where error-prone or unreliable PCIe
5168 * completions occur, particularly with ASPM
5169 * enabled. Without the fix, the issue can
5170 * cause Tx timeouts.
5171 */
5172 reg = CSR_READ(sc, WMREG_GCR2);
5173 reg |= __BIT(0);
5174 CSR_WRITE(sc, WMREG_GCR2, reg);
5175 }
5176 break;
5177 case WM_T_80003:
5178 /* TARC0 */
5179 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
5180 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
5181 tarc0 &= ~__BIT(20); /* Clear bit 20 */
5182
5183 /* TARC1 bit 28 */
5184 tarc1 = CSR_READ(sc, WMREG_TARC1);
5185 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5186 tarc1 &= ~__BIT(28);
5187 else
5188 tarc1 |= __BIT(28);
5189 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5190 break;
5191 case WM_T_ICH8:
5192 case WM_T_ICH9:
5193 case WM_T_ICH10:
5194 case WM_T_PCH:
5195 case WM_T_PCH2:
5196 case WM_T_PCH_LPT:
5197 case WM_T_PCH_SPT:
5198 case WM_T_PCH_CNP:
5199 case WM_T_PCH_TGP:
5200 /* TARC0 */
5201 if (sc->sc_type == WM_T_ICH8) {
5202 /* Set TARC0 bits 29 and 28 */
5203 tarc0 |= __BITS(29, 28);
5204 } else if (sc->sc_type == WM_T_PCH_SPT) {
5205 tarc0 |= __BIT(29);
5206 /*
5207 * Drop bit 28. From Linux.
5208 * See I218/I219 spec update
5209 * "5. Buffer Overrun While the I219 is
5210 * Processing DMA Transactions"
5211 */
5212 tarc0 &= ~__BIT(28);
5213 }
5214 /* Set TARC0 bits 23,24,26,27 */
5215 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
5216
5217 /* CTRL_EXT */
5218 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5219 reg |= __BIT(22); /* Set bit 22 */
5220 /*
5221 * Enable PHY low-power state when MAC is at D3
5222 * w/o WoL
5223 */
5224 if (sc->sc_type >= WM_T_PCH)
5225 reg |= CTRL_EXT_PHYPDEN;
5226 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5227
5228 /* TARC1 */
5229 tarc1 = CSR_READ(sc, WMREG_TARC1);
5230 /* bit 28 */
5231 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5232 tarc1 &= ~__BIT(28);
5233 else
5234 tarc1 |= __BIT(28);
5235 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
5236 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5237
5238 /* Device Status */
5239 if (sc->sc_type == WM_T_ICH8) {
5240 reg = CSR_READ(sc, WMREG_STATUS);
5241 reg &= ~__BIT(31);
5242 CSR_WRITE(sc, WMREG_STATUS, reg);
5243
5244 }
5245
5246 /* IOSFPC */
5247 if (sc->sc_type == WM_T_PCH_SPT) {
5248 reg = CSR_READ(sc, WMREG_IOSFPC);
5249 reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
5250 CSR_WRITE(sc, WMREG_IOSFPC, reg);
5251 }
5252 /*
5253 * To work around a descriptor data corruption issue
5254 * during NFS v2 UDP traffic, just disable the NFS
5255 * filtering capability.
5256 */
5257 reg = CSR_READ(sc, WMREG_RFCTL);
5258 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
5259 CSR_WRITE(sc, WMREG_RFCTL, reg);
5260 break;
5261 default:
5262 break;
5263 }
5264 CSR_WRITE(sc, WMREG_TARC0, tarc0);
5265
5266 switch (sc->sc_type) {
5267 case WM_T_82571:
5268 case WM_T_82572:
5269 case WM_T_82573:
5270 case WM_T_80003:
5271 case WM_T_ICH8:
5272 /*
5273 * Apply 8257[12] Errata No.52, 82573 Errata No.43 and
5274 * some others to avoid the RSS Hash Value bug.
5275 */
5276 reg = CSR_READ(sc, WMREG_RFCTL);
5277 reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
5278 CSR_WRITE(sc, WMREG_RFCTL, reg);
5279 break;
5280 case WM_T_82574:
5281 /* Use extended Rx descriptors. */
5282 reg = CSR_READ(sc, WMREG_RFCTL);
5283 reg |= WMREG_RFCTL_EXSTEN;
5284 CSR_WRITE(sc, WMREG_RFCTL, reg);
5285 break;
5286 default:
5287 break;
5288 }
5289 } else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
5290 /*
5291 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
5292 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
5293 * "Certain Malformed IPv6 Extension Headers are Not Processed
5294 * Correctly by the Device"
5295 *
5296 * I354(C2000) Errata AVR53:
5297 * "Malformed IPv6 Extension Headers May Result in LAN Device
5298 * Hang"
5299 */
5300 reg = CSR_READ(sc, WMREG_RFCTL);
5301 reg |= WMREG_RFCTL_IPV6EXDIS;
5302 CSR_WRITE(sc, WMREG_RFCTL, reg);
5303 }
5304 }
5305
5306 static uint32_t
5307 wm_rxpbs_adjust_82580(uint32_t val)
5308 {
5309 uint32_t rv = 0;
5310
5311 if (val < __arraycount(wm_82580_rxpbs_table))
5312 rv = wm_82580_rxpbs_table[val];
5313
5314 return rv;
5315 }
5316
5317 /*
5318 * wm_reset_phy:
5319 *
5320 * Generic PHY reset function.
5321 * Same as e1000_phy_hw_reset_generic()
5322 */
5323 static int
5324 wm_reset_phy(struct wm_softc *sc)
5325 {
5326 uint32_t reg;
5327 int rv;
5328
5329 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5330 device_xname(sc->sc_dev), __func__));
5331 if (wm_phy_resetisblocked(sc))
5332 return -1;
5333
5334 rv = sc->phy.acquire(sc);
5335 if (rv) {
5336 device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
5337 __func__, rv);
5338 return rv;
5339 }
5340
5341 reg = CSR_READ(sc, WMREG_CTRL);
5342 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
5343 CSR_WRITE_FLUSH(sc);
5344
5345 delay(sc->phy.reset_delay_us);
5346
5347 CSR_WRITE(sc, WMREG_CTRL, reg);
5348 CSR_WRITE_FLUSH(sc);
5349
5350 delay(150);
5351
5352 sc->phy.release(sc);
5353
5354 wm_get_cfg_done(sc);
5355 wm_phy_post_reset(sc);
5356
5357 return 0;
5358 }
5359
5360 /*
5361 * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
5362 *
5363 * On the I219, the descriptor rings must be emptied before resetting the HW
5364 * or before changing the device state to D3 at runtime (runtime PM).
5365 *
5366 * Failure to do this will cause the HW to enter a unit hang state which can
5367 * only be released by a PCI reset of the device.
5368 *
5369 * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
5370 */
5371 static void
5372 wm_flush_desc_rings(struct wm_softc *sc)
5373 {
5374 pcireg_t preg;
5375 uint32_t reg;
5376 struct wm_txqueue *txq;
5377 wiseman_txdesc_t *txd;
5378 int nexttx;
5379 uint32_t rctl;
5380
5381 KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
5382
5383 /* First, disable MULR fix in FEXTNVM11 */
5384 reg = CSR_READ(sc, WMREG_FEXTNVM11);
5385 reg |= FEXTNVM11_DIS_MULRFIX;
5386 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5387
5388 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5389 reg = CSR_READ(sc, WMREG_TDLEN(0));
5390 if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
5391 return;
5392
5393 /*
5394 * Remove all descriptors from the tx_ring.
5395 *
5396 * We want to clear all pending descriptors from the TX ring. Zeroing
5397 * happens when the HW reads the regs. We assign the ring itself as
5398 * the data of the next descriptor. We don't care about the data;
5399 * we are about to reset the HW.
5400 */
5401 #ifdef WM_DEBUG
5402 device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
5403 #endif
5404 reg = CSR_READ(sc, WMREG_TCTL);
5405 CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
5406
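/*
 * The dummy descriptor below only needs to be well formed: WTX_CMD_IFCS
 * plus an (apparently arbitrary) 512-byte length, pointing at the
 * ring's own DMA address since the payload contents are irrelevant.
 */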
5407 txq = &sc->sc_queue[0].wmq_txq;
5408 nexttx = txq->txq_next;
5409 txd = &txq->txq_descs[nexttx];
5410 wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
5411 txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
5412 txd->wtx_fields.wtxu_status = 0;
5413 txd->wtx_fields.wtxu_options = 0;
5414 txd->wtx_fields.wtxu_vlan = 0;
5415
5416 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5417 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5418
5419 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
5420 CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
5421 CSR_WRITE_FLUSH(sc);
5422 delay(250);
5423
5424 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5425 if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
5426 return;
5427
5428 /*
5429 * Mark all descriptors in the RX ring as consumed and disable the
5430 * rx ring.
5431 */
5432 #ifdef WM_DEBUG
5433 device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
5434 #endif
5435 rctl = CSR_READ(sc, WMREG_RCTL);
5436 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5437 CSR_WRITE_FLUSH(sc);
5438 delay(150);
5439
5440 reg = CSR_READ(sc, WMREG_RXDCTL(0));
5441 /* Zero the lower 14 bits (prefetch and host thresholds) */
5442 reg &= 0xffffc000;
5443 /*
5444 * Update thresholds: prefetch threshold to 31, host threshold
5445 * to 1 and make sure the granularity is "descriptors" and not
5446 * "cache lines"
5447 */
5448 reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
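/* (0x1f -> prefetch threshold 31; 1 << 8 -> host threshold 1) */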
5449 CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
5450
5451 /* Momentarily enable the RX ring for the changes to take effect */
5452 CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
5453 CSR_WRITE_FLUSH(sc);
5454 delay(150);
5455 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5456 }
5457
5458 /*
5459 * wm_reset:
5460 *
5461 * Reset the i82542 chip.
5462 */
5463 static void
5464 wm_reset(struct wm_softc *sc)
5465 {
5466 int phy_reset = 0;
5467 int i, error = 0;
5468 uint32_t reg;
5469 uint16_t kmreg;
5470 int rv;
5471
5472 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5473 device_xname(sc->sc_dev), __func__));
5474 KASSERT(sc->sc_type != 0);
5475
5476 /*
5477 * Allocate on-chip memory according to the MTU size.
5478 * The Packet Buffer Allocation register must be written
5479 * before the chip is reset.
5480 */
5481 switch (sc->sc_type) {
5482 case WM_T_82547:
5483 case WM_T_82547_2:
5484 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5485 PBA_22K : PBA_30K;
5486 for (i = 0; i < sc->sc_nqueues; i++) {
5487 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5488 txq->txq_fifo_head = 0;
5489 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
5490 txq->txq_fifo_size =
5491 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
5492 txq->txq_fifo_stall = 0;
5493 }
5494 break;
5495 case WM_T_82571:
5496 case WM_T_82572:
5497 case WM_T_82575: /* XXX need special handling for jumbo frames */
5498 case WM_T_80003:
5499 sc->sc_pba = PBA_32K;
5500 break;
5501 case WM_T_82573:
5502 sc->sc_pba = PBA_12K;
5503 break;
5504 case WM_T_82574:
5505 case WM_T_82583:
5506 sc->sc_pba = PBA_20K;
5507 break;
5508 case WM_T_82576:
5509 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
5510 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
5511 break;
5512 case WM_T_82580:
5513 case WM_T_I350:
5514 case WM_T_I354:
5515 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
5516 break;
5517 case WM_T_I210:
5518 case WM_T_I211:
5519 sc->sc_pba = PBA_34K;
5520 break;
5521 case WM_T_ICH8:
5522 /* Workaround for a bit corruption issue in FIFO memory */
5523 sc->sc_pba = PBA_8K;
5524 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
5525 break;
5526 case WM_T_ICH9:
5527 case WM_T_ICH10:
5528 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
5529 PBA_14K : PBA_10K;
5530 break;
5531 case WM_T_PCH:
5532 case WM_T_PCH2: /* XXX 14K? */
5533 case WM_T_PCH_LPT:
5534 case WM_T_PCH_SPT:
5535 case WM_T_PCH_CNP:
5536 case WM_T_PCH_TGP:
5537 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
5538 PBA_12K : PBA_26K;
5539 break;
5540 default:
5541 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5542 PBA_40K : PBA_48K;
5543 break;
5544 }
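/*
 * sc_pba at this point is the RX packet buffer allocation in KB; the
 * TX FIFO gets whatever remains of the on-chip packet buffer (see the
 * 82547 FIFO setup above for an explicit example).
 */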
5545 /*
5546 * Only old or non-multiqueue devices have the PBA register.
5547 * XXX Need special handling for 82575.
5548 */
5549 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5550 || (sc->sc_type == WM_T_82575))
5551 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
5552
5553 /* Prevent the PCI-E bus from sticking */
5554 if (sc->sc_flags & WM_F_PCIE) {
5555 int timeout = 800;
5556
5557 sc->sc_ctrl |= CTRL_GIO_M_DIS;
5558 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5559
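/* Poll up to 80 ms (800 * 100 us) for GIO master requests to stop. */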
5560 while (timeout--) {
5561 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
5562 == 0)
5563 break;
5564 delay(100);
5565 }
5566 if (timeout == 0)
5567 device_printf(sc->sc_dev,
5568 "failed to disable bus mastering\n");
5569 }
5570
5571 /* Set the completion timeout for the interface */
5572 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5573 || (sc->sc_type == WM_T_82580)
5574 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5575 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
5576 wm_set_pcie_completion_timeout(sc);
5577
5578 /* Clear interrupt */
5579 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5580 if (wm_is_using_msix(sc)) {
5581 if (sc->sc_type != WM_T_82574) {
5582 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5583 CSR_WRITE(sc, WMREG_EIAC, 0);
5584 } else
5585 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5586 }
5587
5588 /* Stop the transmit and receive processes. */
5589 CSR_WRITE(sc, WMREG_RCTL, 0);
5590 sc->sc_rctl &= ~RCTL_EN;
5591 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
5592 CSR_WRITE_FLUSH(sc);
5593
5594 /* XXX set_tbi_sbp_82543() */
5595
5596 delay(10*1000);
5597
5598 /* Must acquire the MDIO ownership before MAC reset */
5599 switch (sc->sc_type) {
5600 case WM_T_82573:
5601 case WM_T_82574:
5602 case WM_T_82583:
5603 error = wm_get_hw_semaphore_82573(sc);
5604 break;
5605 default:
5606 break;
5607 }
5608
5609 /*
5610 * 82541 Errata 29? & 82547 Errata 28?
5611 * See also the description of the PHY_RST bit in the CTRL register
5612 * in 8254x_GBe_SDM.pdf.
5613 */
5614 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
5615 CSR_WRITE(sc, WMREG_CTRL,
5616 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
5617 CSR_WRITE_FLUSH(sc);
5618 delay(5000);
5619 }
5620
5621 switch (sc->sc_type) {
5622 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
5623 case WM_T_82541:
5624 case WM_T_82541_2:
5625 case WM_T_82547:
5626 case WM_T_82547_2:
5627 /*
5628 * On some chipsets, a reset through a memory-mapped write
5629 * cycle can cause the chip to reset before completing the
5630 * write cycle. This causes a major headache that can be avoided
5631 * by issuing the reset via indirect register writes through
5632 * I/O space.
5633 *
5634 * So, if we successfully mapped the I/O BAR at attach time,
5635 * use that. Otherwise, try our luck with a memory-mapped
5636 * reset.
5637 */
5638 if (sc->sc_flags & WM_F_IOH_VALID)
5639 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
5640 else
5641 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
5642 break;
5643 case WM_T_82545_3:
5644 case WM_T_82546_3:
5645 /* Use the shadow control register on these chips. */
5646 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
5647 break;
5648 case WM_T_80003:
5649 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5650 if (sc->phy.acquire(sc) != 0)
5651 break;
5652 CSR_WRITE(sc, WMREG_CTRL, reg);
5653 sc->phy.release(sc);
5654 break;
5655 case WM_T_ICH8:
5656 case WM_T_ICH9:
5657 case WM_T_ICH10:
5658 case WM_T_PCH:
5659 case WM_T_PCH2:
5660 case WM_T_PCH_LPT:
5661 case WM_T_PCH_SPT:
5662 case WM_T_PCH_CNP:
5663 case WM_T_PCH_TGP:
5664 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5665 if (wm_phy_resetisblocked(sc) == false) {
5666 /*
5667 * Gate automatic PHY configuration by hardware on
5668 * non-managed 82579
5669 */
5670 if ((sc->sc_type == WM_T_PCH2)
5671 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5672 == 0))
5673 wm_gate_hw_phy_config_ich8lan(sc, true);
5674
5675 reg |= CTRL_PHY_RESET;
5676 phy_reset = 1;
5677 } else
5678 device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5679 if (sc->phy.acquire(sc) != 0)
5680 break;
5681 CSR_WRITE(sc, WMREG_CTRL, reg);
5682 /* Don't insert a completion barrier during reset */
5683 delay(20*1000);
5684 /*
5685 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
5686 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
5687 * only. See also wm_get_swflag_ich8lan().
5688 */
5689 mutex_exit(sc->sc_ich_phymtx);
5690 break;
5691 case WM_T_82580:
5692 case WM_T_I350:
5693 case WM_T_I354:
5694 case WM_T_I210:
5695 case WM_T_I211:
5696 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5697 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5698 CSR_WRITE_FLUSH(sc);
5699 delay(5000);
5700 break;
5701 case WM_T_82542_2_0:
5702 case WM_T_82542_2_1:
5703 case WM_T_82543:
5704 case WM_T_82540:
5705 case WM_T_82545:
5706 case WM_T_82546:
5707 case WM_T_82571:
5708 case WM_T_82572:
5709 case WM_T_82573:
5710 case WM_T_82574:
5711 case WM_T_82575:
5712 case WM_T_82576:
5713 case WM_T_82583:
5714 default:
5715 /* Everything else can safely use the documented method. */
5716 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5717 break;
5718 }
5719
5720 /* Must release the MDIO ownership after MAC reset */
5721 switch (sc->sc_type) {
5722 case WM_T_82573:
5723 case WM_T_82574:
5724 case WM_T_82583:
5725 if (error == 0)
5726 wm_put_hw_semaphore_82573(sc);
5727 break;
5728 default:
5729 break;
5730 }
5731
5732 /* Set Phy Config Counter to 50msec */
5733 if (sc->sc_type == WM_T_PCH2) {
5734 reg = CSR_READ(sc, WMREG_FEXTNVM3);
5735 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5736 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5737 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5738 }
5739
5740 if (phy_reset != 0)
5741 wm_get_cfg_done(sc);
5742
5743 /* Reload EEPROM */
5744 switch (sc->sc_type) {
5745 case WM_T_82542_2_0:
5746 case WM_T_82542_2_1:
5747 case WM_T_82543:
5748 case WM_T_82544:
5749 delay(10);
5750 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5751 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5752 CSR_WRITE_FLUSH(sc);
5753 delay(2000);
5754 break;
5755 case WM_T_82540:
5756 case WM_T_82545:
5757 case WM_T_82545_3:
5758 case WM_T_82546:
5759 case WM_T_82546_3:
5760 delay(5*1000);
5761 /* XXX Disable HW ARPs on ASF enabled adapters */
5762 break;
5763 case WM_T_82541:
5764 case WM_T_82541_2:
5765 case WM_T_82547:
5766 case WM_T_82547_2:
5767 delay(20000);
5768 /* XXX Disable HW ARPs on ASF enabled adapters */
5769 break;
5770 case WM_T_82571:
5771 case WM_T_82572:
5772 case WM_T_82573:
5773 case WM_T_82574:
5774 case WM_T_82583:
5775 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5776 delay(10);
5777 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5778 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5779 CSR_WRITE_FLUSH(sc);
5780 }
5781 /* check EECD_EE_AUTORD */
5782 wm_get_auto_rd_done(sc);
5783 /*
5784 * PHY configuration from the NVM starts just after EECD_AUTO_RD
5785 * is set.
5786 */
5787 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5788 || (sc->sc_type == WM_T_82583))
5789 delay(25*1000);
5790 break;
5791 case WM_T_82575:
5792 case WM_T_82576:
5793 case WM_T_82580:
5794 case WM_T_I350:
5795 case WM_T_I354:
5796 case WM_T_I210:
5797 case WM_T_I211:
5798 case WM_T_80003:
5799 /* check EECD_EE_AUTORD */
5800 wm_get_auto_rd_done(sc);
5801 break;
5802 case WM_T_ICH8:
5803 case WM_T_ICH9:
5804 case WM_T_ICH10:
5805 case WM_T_PCH:
5806 case WM_T_PCH2:
5807 case WM_T_PCH_LPT:
5808 case WM_T_PCH_SPT:
5809 case WM_T_PCH_CNP:
5810 case WM_T_PCH_TGP:
5811 break;
5812 default:
5813 panic("%s: unknown type\n", __func__);
5814 }
5815
5816 /* Check whether EEPROM is present or not */
5817 switch (sc->sc_type) {
5818 case WM_T_82575:
5819 case WM_T_82576:
5820 case WM_T_82580:
5821 case WM_T_I350:
5822 case WM_T_I354:
5823 case WM_T_ICH8:
5824 case WM_T_ICH9:
5825 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5826 /* Not found */
5827 sc->sc_flags |= WM_F_EEPROM_INVALID;
5828 if (sc->sc_type == WM_T_82575)
5829 wm_reset_init_script_82575(sc);
5830 }
5831 break;
5832 default:
5833 break;
5834 }
5835
5836 if (phy_reset != 0)
5837 wm_phy_post_reset(sc);
5838
5839 if ((sc->sc_type == WM_T_82580)
5840 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5841 /* Clear global device reset status bit */
5842 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5843 }
5844
5845 /* Clear any pending interrupt events. */
5846 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5847 reg = CSR_READ(sc, WMREG_ICR);
5848 if (wm_is_using_msix(sc)) {
5849 if (sc->sc_type != WM_T_82574) {
5850 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5851 CSR_WRITE(sc, WMREG_EIAC, 0);
5852 } else
5853 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5854 }
5855
5856 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5857 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5858 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5859 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
5860 || (sc->sc_type == WM_T_PCH_TGP)) {
5861 reg = CSR_READ(sc, WMREG_KABGTXD);
5862 reg |= KABGTXD_BGSQLBIAS;
5863 CSR_WRITE(sc, WMREG_KABGTXD, reg);
5864 }
5865
5866 /* Reload sc_ctrl */
5867 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5868
5869 wm_set_eee(sc);
5870
5871 /*
5872 * For PCH, this write will make sure that any noise will be detected
5873 * as a CRC error and be dropped rather than show up as a bad packet
5874 * to the DMA engine
5875 */
5876 if (sc->sc_type == WM_T_PCH)
5877 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5878
5879 if (sc->sc_type >= WM_T_82544)
5880 CSR_WRITE(sc, WMREG_WUC, 0);
5881
5882 if (sc->sc_type < WM_T_82575)
5883 wm_disable_aspm(sc); /* Workaround for some chips */
5884
5885 wm_reset_mdicnfg_82580(sc);
5886
5887 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5888 wm_pll_workaround_i210(sc);
5889
5890 if (sc->sc_type == WM_T_80003) {
5891 /* Default to TRUE to enable the MDIC W/A */
5892 sc->sc_flags |= WM_F_80003_MDIC_WA;
5893
5894 rv = wm_kmrn_readreg(sc,
5895 KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5896 if (rv == 0) {
5897 if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5898 == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5899 sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5900 else
5901 sc->sc_flags |= WM_F_80003_MDIC_WA;
5902 }
5903 }
5904 }
5905
5906 /*
5907 * wm_add_rxbuf:
5908 *
5909 * Add a receive buffer to the indicated descriptor.
5910 */
5911 static int
5912 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5913 {
5914 struct wm_softc *sc = rxq->rxq_sc;
5915 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5916 struct mbuf *m;
5917 int error;
5918
5919 KASSERT(mutex_owned(rxq->rxq_lock));
5920
5921 MGETHDR(m, M_DONTWAIT, MT_DATA);
5922 if (m == NULL)
5923 return ENOBUFS;
5924
5925 MCLGET(m, M_DONTWAIT);
5926 if ((m->m_flags & M_EXT) == 0) {
5927 m_freem(m);
5928 return ENOBUFS;
5929 }
5930
5931 if (rxs->rxs_mbuf != NULL)
5932 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5933
5934 rxs->rxs_mbuf = m;
5935
5936 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5937 /*
5938 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5939 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5940 */
5941 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5942 m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5943 if (error) {
5944 /* XXX XXX XXX */
5945 aprint_error_dev(sc->sc_dev,
5946 "unable to load rx DMA map %d, error = %d\n", idx, error);
5947 panic("wm_add_rxbuf");
5948 }
5949
5950 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5951 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5952
5953 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5954 if ((sc->sc_rctl & RCTL_EN) != 0)
5955 wm_init_rxdesc(rxq, idx);
5956 } else
5957 wm_init_rxdesc(rxq, idx);
5958
5959 return 0;
5960 }
5961
5962 /*
5963 * wm_rxdrain:
5964 *
5965 * Drain the receive queue.
5966 */
5967 static void
5968 wm_rxdrain(struct wm_rxqueue *rxq)
5969 {
5970 struct wm_softc *sc = rxq->rxq_sc;
5971 struct wm_rxsoft *rxs;
5972 int i;
5973
5974 KASSERT(mutex_owned(rxq->rxq_lock));
5975
5976 for (i = 0; i < WM_NRXDESC; i++) {
5977 rxs = &rxq->rxq_soft[i];
5978 if (rxs->rxs_mbuf != NULL) {
5979 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5980 m_freem(rxs->rxs_mbuf);
5981 rxs->rxs_mbuf = NULL;
5982 }
5983 }
5984 }
5985
5986 /*
5987 * Setup registers for RSS.
5988 *
5989 * XXX VMDq is not yet supported.
5990 */
5991 static void
5992 wm_init_rss(struct wm_softc *sc)
5993 {
5994 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5995 int i;
5996
5997 CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5998
5999 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
6000 unsigned int qid, reta_ent;
6001
6002 qid = i % sc->sc_nqueues;
6003 switch (sc->sc_type) {
6004 case WM_T_82574:
6005 reta_ent = __SHIFTIN(qid,
6006 RETA_ENT_QINDEX_MASK_82574);
6007 break;
6008 case WM_T_82575:
6009 reta_ent = __SHIFTIN(qid,
6010 RETA_ENT_QINDEX1_MASK_82575);
6011 break;
6012 default:
6013 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
6014 break;
6015 }
6016
6017 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
6018 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
6019 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
6020 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
6021 }
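/*
 * At this point the redirection table maps hash values to queues
 * round-robin: entry i selects queue (i % sc_nqueues), e.g. with
 * four queues the entries cycle 0, 1, 2, 3, 0, 1, ...
 */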
6022
6023 rss_getkey((uint8_t *)rss_key);
6024 for (i = 0; i < RSSRK_NUM_REGS; i++)
6025 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
6026
6027 if (sc->sc_type == WM_T_82574)
6028 mrqc = MRQC_ENABLE_RSS_MQ_82574;
6029 else
6030 mrqc = MRQC_ENABLE_RSS_MQ;
6031
6032 /*
6033 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
6034 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
6035 */
6036 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
6037 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
6038 #if 0
6039 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
6040 mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
6041 #endif
6042 mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
6043
6044 CSR_WRITE(sc, WMREG_MRQC, mrqc);
6045 }
6046
6047 /*
6048 * Adjust the TX and RX queue numbers which the system actually uses.
6049 *
6050 * The numbers are affected by the parameters below:
6051 * - The number of hardware queues
6052 * - The number of MSI-X vectors (= "nvectors" argument)
6053 * - ncpu
6054 */
6055 static void
6056 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
6057 {
6058 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
6059
6060 if (nvectors < 2) {
6061 sc->sc_nqueues = 1;
6062 return;
6063 }
6064
6065 switch (sc->sc_type) {
6066 case WM_T_82572:
6067 hw_ntxqueues = 2;
6068 hw_nrxqueues = 2;
6069 break;
6070 case WM_T_82574:
6071 hw_ntxqueues = 2;
6072 hw_nrxqueues = 2;
6073 break;
6074 case WM_T_82575:
6075 hw_ntxqueues = 4;
6076 hw_nrxqueues = 4;
6077 break;
6078 case WM_T_82576:
6079 hw_ntxqueues = 16;
6080 hw_nrxqueues = 16;
6081 break;
6082 case WM_T_82580:
6083 case WM_T_I350:
6084 case WM_T_I354:
6085 hw_ntxqueues = 8;
6086 hw_nrxqueues = 8;
6087 break;
6088 case WM_T_I210:
6089 hw_ntxqueues = 4;
6090 hw_nrxqueues = 4;
6091 break;
6092 case WM_T_I211:
6093 hw_ntxqueues = 2;
6094 hw_nrxqueues = 2;
6095 break;
6096 /*
6097 * The Ethernet controllers below do not support MSI-X;
6098 * this driver doesn't let them use multiqueue.
6099 * - WM_T_80003
6100 * - WM_T_ICH8
6101 * - WM_T_ICH9
6102 * - WM_T_ICH10
6103 * - WM_T_PCH
6104 * - WM_T_PCH2
6105 * - WM_T_PCH_LPT
6106 */
6107 default:
6108 hw_ntxqueues = 1;
6109 hw_nrxqueues = 1;
6110 break;
6111 }
6112
6113 hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
6114
6115 /*
6116 * Since using more queues than MSI-X vectors cannot improve scaling,
6117 * we limit the number of queues actually used.
6118 */
6119 if (nvectors < hw_nqueues + 1)
6120 sc->sc_nqueues = nvectors - 1;
6121 else
6122 sc->sc_nqueues = hw_nqueues;
6123
6124 /*
6125 * Since using more queues than CPUs cannot improve scaling,
6126 * we limit the number of queues actually used.
6127 */
6128 if (ncpu < sc->sc_nqueues)
6129 sc->sc_nqueues = ncpu;
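/*
 * Example: an 82576 (16 HW queues) with 5 MSI-X vectors on a 4-CPU
 * system ends up with sc_nqueues = min(16, 5 - 1, 4) = 4.
 */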
6130 }
6131
6132 static inline bool
6133 wm_is_using_msix(struct wm_softc *sc)
6134 {
6135
6136 return (sc->sc_nintrs > 1);
6137 }
6138
6139 static inline bool
6140 wm_is_using_multiqueue(struct wm_softc *sc)
6141 {
6142
6143 return (sc->sc_nqueues > 1);
6144 }
6145
6146 static int
6147 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
6148 {
6149 struct wm_queue *wmq = &sc->sc_queue[qidx];
6150
6151 wmq->wmq_id = qidx;
6152 wmq->wmq_intr_idx = intr_idx;
6153 wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
6154 wm_handle_queue, wmq);
6155 if (wmq->wmq_si != NULL)
6156 return 0;
6157
6158 aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
6159 wmq->wmq_id);
6160 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6161 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6162 return ENOMEM;
6163 }
6164
6165 /*
6166 * Both single-interrupt MSI and INTx can use this function.
6167 */
6168 static int
6169 wm_setup_legacy(struct wm_softc *sc)
6170 {
6171 pci_chipset_tag_t pc = sc->sc_pc;
6172 const char *intrstr = NULL;
6173 char intrbuf[PCI_INTRSTR_LEN];
6174 int error;
6175
6176 error = wm_alloc_txrx_queues(sc);
6177 if (error) {
6178 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6179 error);
6180 return ENOMEM;
6181 }
6182 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
6183 sizeof(intrbuf));
6184 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
6185 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
6186 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
6187 if (sc->sc_ihs[0] == NULL) {
6188 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
6189 (pci_intr_type(pc, sc->sc_intrs[0])
6190 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6191 return ENOMEM;
6192 }
6193
6194 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
6195 sc->sc_nintrs = 1;
6196
6197 return wm_softint_establish_queue(sc, 0, 0);
6198 }
6199
6200 static int
6201 wm_setup_msix(struct wm_softc *sc)
6202 {
6203 void *vih;
6204 kcpuset_t *affinity;
6205 int qidx, error, intr_idx, txrx_established;
6206 pci_chipset_tag_t pc = sc->sc_pc;
6207 const char *intrstr = NULL;
6208 char intrbuf[PCI_INTRSTR_LEN];
6209 char intr_xname[INTRDEVNAMEBUF];
6210
6211 if (sc->sc_nqueues < ncpu) {
6212 /*
6213 * To stay clear of other devices' interrupts, the affinity of
6214 * Tx/Rx interrupts starts from CPU#1.
6215 */
6216 sc->sc_affinity_offset = 1;
6217 } else {
6218 /*
6219 * In this case, this device uses all CPUs, so for readability we
6220 * make the affinitized cpu_index match the MSI-X vector number.
6221 */
6222 sc->sc_affinity_offset = 0;
6223 }
6224
6225 error = wm_alloc_txrx_queues(sc);
6226 if (error) {
6227 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6228 error);
6229 return ENOMEM;
6230 }
6231
6232 kcpuset_create(&affinity, false);
6233 intr_idx = 0;
6234
6235 /*
6236 * TX and RX
6237 */
6238 txrx_established = 0;
6239 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6240 struct wm_queue *wmq = &sc->sc_queue[qidx];
6241 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
6242
6243 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6244 sizeof(intrbuf));
6245 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
6246 PCI_INTR_MPSAFE, true);
6247 memset(intr_xname, 0, sizeof(intr_xname));
6248 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
6249 device_xname(sc->sc_dev), qidx);
6250 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6251 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
6252 if (vih == NULL) {
6253 aprint_error_dev(sc->sc_dev,
6254 "unable to establish MSI-X(for TX and RX)%s%s\n",
6255 intrstr ? " at " : "",
6256 intrstr ? intrstr : "");
6257
6258 goto fail;
6259 }
6260 kcpuset_zero(affinity);
6261 /* Round-robin affinity */
6262 kcpuset_set(affinity, affinity_to);
6263 error = interrupt_distribute(vih, affinity, NULL);
6264 if (error == 0) {
6265 aprint_normal_dev(sc->sc_dev,
6266 "for TX and RX interrupting at %s affinity to %u\n",
6267 intrstr, affinity_to);
6268 } else {
6269 aprint_normal_dev(sc->sc_dev,
6270 "for TX and RX interrupting at %s\n", intrstr);
6271 }
6272 sc->sc_ihs[intr_idx] = vih;
6273 if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
6274 goto fail;
6275 txrx_established++;
6276 intr_idx++;
6277 }
6278
6279 /* LINK */
6280 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6281 sizeof(intrbuf));
6282 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
6283 memset(intr_xname, 0, sizeof(intr_xname));
6284 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
6285 device_xname(sc->sc_dev));
6286 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6287 IPL_NET, wm_linkintr_msix, sc, intr_xname);
6288 if (vih == NULL) {
6289 aprint_error_dev(sc->sc_dev,
6290 "unable to establish MSI-X(for LINK)%s%s\n",
6291 intrstr ? " at " : "",
6292 intrstr ? intrstr : "");
6293
6294 goto fail;
6295 }
6296 /* Keep default affinity to LINK interrupt */
6297 aprint_normal_dev(sc->sc_dev,
6298 "for LINK interrupting at %s\n", intrstr);
6299 sc->sc_ihs[intr_idx] = vih;
6300 sc->sc_link_intr_idx = intr_idx;
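/* Final vector layout: 0 .. sc_nqueues - 1 for Tx/Rx, sc_nqueues for LINK. */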
6301
6302 sc->sc_nintrs = sc->sc_nqueues + 1;
6303 kcpuset_destroy(affinity);
6304 return 0;
6305
6306 fail:
6307 for (qidx = 0; qidx < txrx_established; qidx++) {
6308 struct wm_queue *wmq = &sc->sc_queue[qidx];
6309 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6310 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6311 }
6312
6313 kcpuset_destroy(affinity);
6314 return ENOMEM;
6315 }
6316
6317 static void
6318 wm_unset_stopping_flags(struct wm_softc *sc)
6319 {
6320 int i;
6321
6322 KASSERT(mutex_owned(sc->sc_core_lock));
6323
6324 /* Must unset stopping flags in ascending order. */
6325 for (i = 0; i < sc->sc_nqueues; i++) {
6326 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6327 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6328
6329 mutex_enter(txq->txq_lock);
6330 txq->txq_stopping = false;
6331 mutex_exit(txq->txq_lock);
6332
6333 mutex_enter(rxq->rxq_lock);
6334 rxq->rxq_stopping = false;
6335 mutex_exit(rxq->rxq_lock);
6336 }
6337
6338 sc->sc_core_stopping = false;
6339 }
6340
6341 static void
6342 wm_set_stopping_flags(struct wm_softc *sc)
6343 {
6344 int i;
6345
6346 KASSERT(mutex_owned(sc->sc_core_lock));
6347
6348 sc->sc_core_stopping = true;
6349
6350 /* Must set stopping flags in ascending order. */
6351 for (i = 0; i < sc->sc_nqueues; i++) {
6352 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6353 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6354
6355 mutex_enter(rxq->rxq_lock);
6356 rxq->rxq_stopping = true;
6357 mutex_exit(rxq->rxq_lock);
6358
6359 mutex_enter(txq->txq_lock);
6360 txq->txq_stopping = true;
6361 mutex_exit(txq->txq_lock);
6362 }
6363 }
6364
6365 /*
6366 * Write interrupt interval value to ITR or EITR
6367 */
6368 static void
6369 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
6370 {
6371
6372 if (!wmq->wmq_set_itr)
6373 return;
6374
6375 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6376 uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
6377
6378 /*
6379 * The 82575 doesn't have the CNT_INGR field,
6380 * so overwrite the counter field in software.
6381 */
6382 if (sc->sc_type == WM_T_82575)
6383 eitr |= __SHIFTIN(wmq->wmq_itr,
6384 EITR_COUNTER_MASK_82575);
6385 else
6386 eitr |= EITR_CNT_INGR;
6387
6388 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
6389 } else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
6390 /*
6391 * The 82574 has both ITR and EITR. Set EITR when we use
6392 * the multiqueue function with MSI-X.
6393 */
6394 CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
6395 wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
6396 } else {
6397 KASSERT(wmq->wmq_id == 0);
6398 CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
6399 }
6400
6401 wmq->wmq_set_itr = false;
6402 }
6403
6404 /*
6405 * TODO
6406 * The dynamic ITR calculation below is almost the same as in Linux
6407 * igb, but it does not fit wm(4), so AIM stays disabled until we
6408 * find an appropriate ITR calculation.
6409 */
6410 /*
6411 * Calculate the interrupt interval value that wm_itrs_writereg() will
6412 * write to the register; this function itself does not write ITR/EITR.
6413 */
6414 static void
6415 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
6416 {
6417 #ifdef NOTYET
6418 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6419 struct wm_txqueue *txq = &wmq->wmq_txq;
6420 uint32_t avg_size = 0;
6421 uint32_t new_itr;
6422
6423 if (rxq->rxq_packets)
6424 avg_size = rxq->rxq_bytes / rxq->rxq_packets;
6425 if (txq->txq_packets)
6426 avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
6427
6428 if (avg_size == 0) {
6429 new_itr = 450; /* restore default value */
6430 goto out;
6431 }
6432
6433 /* Add 24 bytes to size to account for CRC, preamble, and gap */
6434 avg_size += 24;
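/* (24 = 4-byte CRC + 8-byte preamble/SFD + 12-byte inter-frame gap) */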
6435
6436 /* Don't starve jumbo frames */
6437 avg_size = uimin(avg_size, 3000);
6438
6439 /* Give a little boost to mid-size frames */
6440 if ((avg_size > 300) && (avg_size < 1200))
6441 new_itr = avg_size / 3;
6442 else
6443 new_itr = avg_size / 2;
6444
6445 out:
6446 /*
6447 * The usage of the 82574 and 82575 EITR is different from other NEWQUEUE
6448 * controllers. See sc->sc_itr_init setting in wm_init_locked().
6449 */
6450 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
6451 new_itr *= 4;
6452
6453 if (new_itr != wmq->wmq_itr) {
6454 wmq->wmq_itr = new_itr;
6455 wmq->wmq_set_itr = true;
6456 } else
6457 wmq->wmq_set_itr = false;
6458
6459 rxq->rxq_packets = 0;
6460 rxq->rxq_bytes = 0;
6461 txq->txq_packets = 0;
6462 txq->txq_bytes = 0;
6463 #endif
6464 }
6465
6466 static void
6467 wm_init_sysctls(struct wm_softc *sc)
6468 {
6469 struct sysctllog **log;
6470 const struct sysctlnode *rnode, *qnode, *cnode;
6471 int i, rv;
6472 const char *dvname;
6473
6474 log = &sc->sc_sysctllog;
6475 dvname = device_xname(sc->sc_dev);
6476
6477 rv = sysctl_createv(log, 0, NULL, &rnode,
6478 0, CTLTYPE_NODE, dvname,
6479 SYSCTL_DESCR("wm information and settings"),
6480 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6481 if (rv != 0)
6482 goto err;
6483
6484 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6485 CTLTYPE_BOOL, "txrx_workqueue",
6486 SYSCTL_DESCR("Use workqueue for packet processing"),
6487 NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
6488 if (rv != 0)
6489 goto teardown;
6490
6491 for (i = 0; i < sc->sc_nqueues; i++) {
6492 struct wm_queue *wmq = &sc->sc_queue[i];
6493 struct wm_txqueue *txq = &wmq->wmq_txq;
6494 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6495
6496 snprintf(sc->sc_queue[i].sysctlname,
6497 sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
6498
6499 if (sysctl_createv(log, 0, &rnode, &qnode,
6500 0, CTLTYPE_NODE,
6501 sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
6502 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
6503 break;
6504
6505 if (sysctl_createv(log, 0, &qnode, &cnode,
6506 CTLFLAG_READONLY, CTLTYPE_INT,
6507 "txq_free", SYSCTL_DESCR("TX queue free"),
6508 NULL, 0, &txq->txq_free,
6509 0, CTL_CREATE, CTL_EOL) != 0)
6510 break;
6511 if (sysctl_createv(log, 0, &qnode, &cnode,
6512 CTLFLAG_READONLY, CTLTYPE_INT,
6513 "txd_head", SYSCTL_DESCR("TX descriptor head"),
6514 wm_sysctl_tdh_handler, 0, (void *)txq,
6515 0, CTL_CREATE, CTL_EOL) != 0)
6516 break;
6517 if (sysctl_createv(log, 0, &qnode, &cnode,
6518 CTLFLAG_READONLY, CTLTYPE_INT,
6519 "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
6520 wm_sysctl_tdt_handler, 0, (void *)txq,
6521 0, CTL_CREATE, CTL_EOL) != 0)
6522 break;
6523 if (sysctl_createv(log, 0, &qnode, &cnode,
6524 CTLFLAG_READONLY, CTLTYPE_INT,
6525 "txq_next", SYSCTL_DESCR("TX queue next"),
6526 NULL, 0, &txq->txq_next,
6527 0, CTL_CREATE, CTL_EOL) != 0)
6528 break;
6529 if (sysctl_createv(log, 0, &qnode, &cnode,
6530 CTLFLAG_READONLY, CTLTYPE_INT,
6531 "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
6532 NULL, 0, &txq->txq_sfree,
6533 0, CTL_CREATE, CTL_EOL) != 0)
6534 break;
6535 if (sysctl_createv(log, 0, &qnode, &cnode,
6536 CTLFLAG_READONLY, CTLTYPE_INT,
6537 "txq_snext", SYSCTL_DESCR("TX queue snext"),
6538 NULL, 0, &txq->txq_snext,
6539 0, CTL_CREATE, CTL_EOL) != 0)
6540 break;
6541 if (sysctl_createv(log, 0, &qnode, &cnode,
6542 CTLFLAG_READONLY, CTLTYPE_INT,
6543 "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
6544 NULL, 0, &txq->txq_sdirty,
6545 0, CTL_CREATE, CTL_EOL) != 0)
6546 break;
6547 if (sysctl_createv(log, 0, &qnode, &cnode,
6548 CTLFLAG_READONLY, CTLTYPE_INT,
6549 "txq_flags", SYSCTL_DESCR("TX queue flags"),
6550 NULL, 0, &txq->txq_flags,
6551 0, CTL_CREATE, CTL_EOL) != 0)
6552 break;
6553 if (sysctl_createv(log, 0, &qnode, &cnode,
6554 CTLFLAG_READONLY, CTLTYPE_BOOL,
6555 "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
6556 NULL, 0, &txq->txq_stopping,
6557 0, CTL_CREATE, CTL_EOL) != 0)
6558 break;
6559 if (sysctl_createv(log, 0, &qnode, &cnode,
6560 CTLFLAG_READONLY, CTLTYPE_BOOL,
6561 "txq_sending", SYSCTL_DESCR("TX queue sending"),
6562 NULL, 0, &txq->txq_sending,
6563 0, CTL_CREATE, CTL_EOL) != 0)
6564 break;
6565
6566 if (sysctl_createv(log, 0, &qnode, &cnode,
6567 CTLFLAG_READONLY, CTLTYPE_INT,
6568 "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
6569 NULL, 0, &rxq->rxq_ptr,
6570 0, CTL_CREATE, CTL_EOL) != 0)
6571 break;
6572 }
6573
6574 #ifdef WM_DEBUG
6575 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6576 CTLTYPE_INT, "debug_flags",
6577 SYSCTL_DESCR(
6578 "Debug flags:\n" \
6579 "\t0x01 LINK\n" \
6580 "\t0x02 TX\n" \
6581 "\t0x04 RX\n" \
6582 "\t0x08 GMII\n" \
6583 "\t0x10 MANAGE\n" \
6584 "\t0x20 NVM\n" \
6585 "\t0x40 INIT\n" \
6586 "\t0x80 LOCK"),
6587 wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
6588 if (rv != 0)
6589 goto teardown;
6590 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6591 CTLTYPE_BOOL, "trigger_reset",
6592 SYSCTL_DESCR("Trigger an interface reset"),
6593 NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
6594 if (rv != 0)
6595 goto teardown;
6596 #endif
6597
6598 return;
6599
6600 teardown:
6601 sysctl_teardown(log);
6602 err:
6603 sc->sc_sysctllog = NULL;
6604 device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
6605 __func__, rv);
6606 }
6607
6608 static void
6609 wm_update_stats(struct wm_softc *sc)
6610 {
6611 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6612 uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
6613 cexterr;
6614
6615 crcerrs = CSR_READ(sc, WMREG_CRCERRS);
6616 symerrc = CSR_READ(sc, WMREG_SYMERRC);
6617 mpc = CSR_READ(sc, WMREG_MPC);
6618 colc = CSR_READ(sc, WMREG_COLC);
6619 sec = CSR_READ(sc, WMREG_SEC);
6620 rlec = CSR_READ(sc, WMREG_RLEC);
6621
6622 WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
6623 WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
6624 WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
6625 WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
6626 WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
6627 WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
6628
6629 if (sc->sc_type >= WM_T_82543) {
6630 algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
6631 rxerrc = CSR_READ(sc, WMREG_RXERRC);
6632 WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
6633 WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
6634 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) {
6635 cexterr = CSR_READ(sc, WMREG_CEXTERR);
6636 WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
6637 } else {
6638 cexterr = 0;
6639 /* Excessive collision + Link down */
6640 WM_EVCNT_ADD(&sc->sc_ev_htdpmc,
6641 CSR_READ(sc, WMREG_HTDPMC));
6642 }
6643
6644 WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
6645 WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
6646 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
6647 WM_EVCNT_ADD(&sc->sc_ev_tsctfc,
6648 CSR_READ(sc, WMREG_TSCTFC));
6649 else {
6650 WM_EVCNT_ADD(&sc->sc_ev_cbrdpc,
6651 CSR_READ(sc, WMREG_CBRDPC));
6652 WM_EVCNT_ADD(&sc->sc_ev_cbrmpc,
6653 CSR_READ(sc, WMREG_CBRMPC));
6654 }
6655 } else
6656 algnerrc = rxerrc = cexterr = 0;
6657
6658 if (sc->sc_type >= WM_T_82542_2_1) {
6659 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
6660 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
6661 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
6662 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
6663 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
6664 }
6665
6666 WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
6667 WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
6668 WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
6669 WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
6670
6671 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
6672 WM_EVCNT_ADD(&sc->sc_ev_cbtmpc, CSR_READ(sc, WMREG_CBTMPC));
6673 }
6674
6675 WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
6676 WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
6677 WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
6678 WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
6679 WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
6680 WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
6681 WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
6682 WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
6683 WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
6684 WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
6685 WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
6686
6687 WM_EVCNT_ADD(&sc->sc_ev_gorc,
6688 CSR_READ(sc, WMREG_GORCL) +
6689 ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
6690 WM_EVCNT_ADD(&sc->sc_ev_gotc,
6691 CSR_READ(sc, WMREG_GOTCL) +
6692 ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
6693
6694 WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
6695 WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
6696 WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
6697 WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
6698 WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
6699
6700 if (sc->sc_type >= WM_T_82540) {
6701 WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
6702 WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
6703 WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
6704 }
6705
6706 /*
6707 * The TOR(L) register includes:
6708 * - Error
6709 * - Flow control
6710 * - Broadcast rejected (This note appears in 82574 and newer
6711 * datasheets. What does "broadcast rejected" mean?)
6712 */
6713 WM_EVCNT_ADD(&sc->sc_ev_tor,
6714 CSR_READ(sc, WMREG_TORL) +
6715 ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
6716 WM_EVCNT_ADD(&sc->sc_ev_tot,
6717 CSR_READ(sc, WMREG_TOTL) +
6718 ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
6719
6720 WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
6721 WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
6722 WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
6723 WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
6724 WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
6725 WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
6726 WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
6727 WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
6728 WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
6729 WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
6730 if (sc->sc_type >= WM_T_82571)
6731 WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
6732 if (sc->sc_type < WM_T_82575) {
6733 WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
6734 WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
6735 WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
6736 WM_EVCNT_ADD(&sc->sc_ev_ictxatc, CSR_READ(sc, WMREG_ICTXATC));
6737 WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
6738 WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
6739 CSR_READ(sc, WMREG_ICTXQMTC));
6740 WM_EVCNT_ADD(&sc->sc_ev_rxdmtc,
6741 CSR_READ(sc, WMREG_ICRXDMTC));
6742 WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
6743 } else if (!WM_IS_ICHPCH(sc)) {
6744 WM_EVCNT_ADD(&sc->sc_ev_rpthc, CSR_READ(sc, WMREG_RPTHC));
6745 WM_EVCNT_ADD(&sc->sc_ev_debug1, CSR_READ(sc, WMREG_DEBUG1));
6746 WM_EVCNT_ADD(&sc->sc_ev_debug2, CSR_READ(sc, WMREG_DEBUG2));
6747 WM_EVCNT_ADD(&sc->sc_ev_debug3, CSR_READ(sc, WMREG_DEBUG3));
6748 WM_EVCNT_ADD(&sc->sc_ev_hgptc, CSR_READ(sc, WMREG_HGPTC));
6749 WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4));
6750 WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC));
6751 WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC));
6752
6753 WM_EVCNT_ADD(&sc->sc_ev_hgorc,
6754 CSR_READ(sc, WMREG_HGORCL) +
6755 ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32));
6756 WM_EVCNT_ADD(&sc->sc_ev_hgotc,
6757 CSR_READ(sc, WMREG_HGOTCL) +
6758 ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32));
6759 WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS));
6760 WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC));
6761 WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC));
6762 }
6763 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
6764 WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC));
6765 WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC));
6766 if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) {
6767 WM_EVCNT_ADD(&sc->sc_ev_b2ogprc,
6768 CSR_READ(sc, WMREG_B2OGPRC));
6769 WM_EVCNT_ADD(&sc->sc_ev_o2bspc,
6770 CSR_READ(sc, WMREG_O2BSPC));
6771 WM_EVCNT_ADD(&sc->sc_ev_b2ospc,
6772 CSR_READ(sc, WMREG_B2OSPC));
6773 WM_EVCNT_ADD(&sc->sc_ev_o2bgptc,
6774 CSR_READ(sc, WMREG_O2BGPTC));
6775 }
6776 }
6777 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
6778 if_statadd_ref(nsr, if_collisions, colc);
6779 if_statadd_ref(nsr, if_ierrors,
6780 crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
6781 /*
6782 * WMREG_RNBC is incremented when there are no available buffers in
6783 * host memory. It is not the number of dropped packets, because an
6784 * Ethernet controller can still receive packets in that case if there
6785 * is space in the PHY's FIFO.
6786 *
6787 * If you want to count WMREG_RNBC events, use a dedicated EVCNT
6788 * instead of if_iqdrops.
6789 */
6790 if_statadd_ref(nsr, if_iqdrops, mpc);
6791 IF_STAT_PUTREF(ifp);
6792 }
6793
6794 void
6795 wm_clear_evcnt(struct wm_softc *sc)
6796 {
6797 #ifdef WM_EVENT_COUNTERS
6798 int i;
6799
6800 /* RX queues */
6801 for (i = 0; i < sc->sc_nqueues; i++) {
6802 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6803
6804 WM_Q_EVCNT_STORE(rxq, intr, 0);
6805 WM_Q_EVCNT_STORE(rxq, defer, 0);
6806 WM_Q_EVCNT_STORE(rxq, ipsum, 0);
6807 WM_Q_EVCNT_STORE(rxq, tusum, 0);
6808 }
6809
6810 /* TX queues */
6811 for (i = 0; i < sc->sc_nqueues; i++) {
6812 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6813 int j;
6814
6815 WM_Q_EVCNT_STORE(txq, txsstall, 0);
6816 WM_Q_EVCNT_STORE(txq, txdstall, 0);
6817 WM_Q_EVCNT_STORE(txq, fifo_stall, 0);
6818 WM_Q_EVCNT_STORE(txq, txdw, 0);
6819 WM_Q_EVCNT_STORE(txq, txqe, 0);
6820 WM_Q_EVCNT_STORE(txq, ipsum, 0);
6821 WM_Q_EVCNT_STORE(txq, tusum, 0);
6822 WM_Q_EVCNT_STORE(txq, tusum6, 0);
6823 WM_Q_EVCNT_STORE(txq, tso, 0);
6824 WM_Q_EVCNT_STORE(txq, tso6, 0);
6825 WM_Q_EVCNT_STORE(txq, tsopain, 0);
6826
6827 for (j = 0; j < WM_NTXSEGS; j++)
6828 WM_EVCNT_STORE(&txq->txq_ev_txseg[j], 0);
6829
6830 WM_Q_EVCNT_STORE(txq, pcqdrop, 0);
6831 WM_Q_EVCNT_STORE(txq, descdrop, 0);
6832 WM_Q_EVCNT_STORE(txq, toomanyseg, 0);
6833 WM_Q_EVCNT_STORE(txq, defrag, 0);
6834 if (sc->sc_type <= WM_T_82544)
6835 WM_Q_EVCNT_STORE(txq, underrun, 0);
6836 WM_Q_EVCNT_STORE(txq, skipcontext, 0);
6837 }
6838
6839 /* Miscs */
6840 WM_EVCNT_STORE(&sc->sc_ev_linkintr, 0);
6841
6842 WM_EVCNT_STORE(&sc->sc_ev_crcerrs, 0);
6843 WM_EVCNT_STORE(&sc->sc_ev_symerrc, 0);
6844 WM_EVCNT_STORE(&sc->sc_ev_mpc, 0);
6845 WM_EVCNT_STORE(&sc->sc_ev_colc, 0);
6846 WM_EVCNT_STORE(&sc->sc_ev_sec, 0);
6847 WM_EVCNT_STORE(&sc->sc_ev_rlec, 0);
6848
6849 if (sc->sc_type >= WM_T_82543) {
6850 WM_EVCNT_STORE(&sc->sc_ev_algnerrc, 0);
6851 WM_EVCNT_STORE(&sc->sc_ev_rxerrc, 0);
6852 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
6853 WM_EVCNT_STORE(&sc->sc_ev_cexterr, 0);
6854 else
6855 WM_EVCNT_STORE(&sc->sc_ev_htdpmc, 0);
6856
6857 WM_EVCNT_STORE(&sc->sc_ev_tncrs, 0);
6858 WM_EVCNT_STORE(&sc->sc_ev_tsctc, 0);
6859 if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
6860 WM_EVCNT_STORE(&sc->sc_ev_tsctfc, 0);
6861 else {
6862 WM_EVCNT_STORE(&sc->sc_ev_cbrdpc, 0);
6863 WM_EVCNT_STORE(&sc->sc_ev_cbrmpc, 0);
6864 }
6865 }
6866
6867 if (sc->sc_type >= WM_T_82542_2_1) {
6868 WM_EVCNT_STORE(&sc->sc_ev_tx_xoff, 0);
6869 WM_EVCNT_STORE(&sc->sc_ev_tx_xon, 0);
6870 WM_EVCNT_STORE(&sc->sc_ev_rx_xoff, 0);
6871 WM_EVCNT_STORE(&sc->sc_ev_rx_xon, 0);
6872 WM_EVCNT_STORE(&sc->sc_ev_rx_macctl, 0);
6873 }
6874
6875 WM_EVCNT_STORE(&sc->sc_ev_scc, 0);
6876 WM_EVCNT_STORE(&sc->sc_ev_ecol, 0);
6877 WM_EVCNT_STORE(&sc->sc_ev_mcc, 0);
6878 WM_EVCNT_STORE(&sc->sc_ev_latecol, 0);
6879
6880 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
6881 WM_EVCNT_STORE(&sc->sc_ev_cbtmpc, 0);
6882
6883 WM_EVCNT_STORE(&sc->sc_ev_dc, 0);
6884 WM_EVCNT_STORE(&sc->sc_ev_prc64, 0);
6885 WM_EVCNT_STORE(&sc->sc_ev_prc127, 0);
6886 WM_EVCNT_STORE(&sc->sc_ev_prc255, 0);
6887 WM_EVCNT_STORE(&sc->sc_ev_prc511, 0);
6888 WM_EVCNT_STORE(&sc->sc_ev_prc1023, 0);
6889 WM_EVCNT_STORE(&sc->sc_ev_prc1522, 0);
6890 WM_EVCNT_STORE(&sc->sc_ev_gprc, 0);
6891 WM_EVCNT_STORE(&sc->sc_ev_bprc, 0);
6892 WM_EVCNT_STORE(&sc->sc_ev_mprc, 0);
6893 WM_EVCNT_STORE(&sc->sc_ev_gptc, 0);
6894 WM_EVCNT_STORE(&sc->sc_ev_gorc, 0);
6895 WM_EVCNT_STORE(&sc->sc_ev_gotc, 0);
6896 WM_EVCNT_STORE(&sc->sc_ev_rnbc, 0);
6897 WM_EVCNT_STORE(&sc->sc_ev_ruc, 0);
6898 WM_EVCNT_STORE(&sc->sc_ev_rfc, 0);
6899 WM_EVCNT_STORE(&sc->sc_ev_roc, 0);
6900 WM_EVCNT_STORE(&sc->sc_ev_rjc, 0);
6901 if (sc->sc_type >= WM_T_82540) {
6902 WM_EVCNT_STORE(&sc->sc_ev_mgtprc, 0);
6903 WM_EVCNT_STORE(&sc->sc_ev_mgtpdc, 0);
6904 WM_EVCNT_STORE(&sc->sc_ev_mgtptc, 0);
6905 }
6906 WM_EVCNT_STORE(&sc->sc_ev_tor, 0);
6907 WM_EVCNT_STORE(&sc->sc_ev_tot, 0);
6908 WM_EVCNT_STORE(&sc->sc_ev_tpr, 0);
6909 WM_EVCNT_STORE(&sc->sc_ev_tpt, 0);
6910 WM_EVCNT_STORE(&sc->sc_ev_ptc64, 0);
6911 WM_EVCNT_STORE(&sc->sc_ev_ptc127, 0);
6912 WM_EVCNT_STORE(&sc->sc_ev_ptc255, 0);
6913 WM_EVCNT_STORE(&sc->sc_ev_ptc511, 0);
6914 WM_EVCNT_STORE(&sc->sc_ev_ptc1023, 0);
6915 WM_EVCNT_STORE(&sc->sc_ev_ptc1522, 0);
6916 WM_EVCNT_STORE(&sc->sc_ev_mptc, 0);
6917 WM_EVCNT_STORE(&sc->sc_ev_bptc, 0);
6918 if (sc->sc_type >= WM_T_82571)
6919 WM_EVCNT_STORE(&sc->sc_ev_iac, 0);
6920 if (sc->sc_type < WM_T_82575) {
6921 WM_EVCNT_STORE(&sc->sc_ev_icrxptc, 0);
6922 WM_EVCNT_STORE(&sc->sc_ev_icrxatc, 0);
6923 WM_EVCNT_STORE(&sc->sc_ev_ictxptc, 0);
6924 WM_EVCNT_STORE(&sc->sc_ev_ictxatc, 0);
6925 WM_EVCNT_STORE(&sc->sc_ev_ictxqec, 0);
6926 WM_EVCNT_STORE(&sc->sc_ev_ictxqmtc, 0);
6927 WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
6928 WM_EVCNT_STORE(&sc->sc_ev_icrxoc, 0);
6929 } else if (!WM_IS_ICHPCH(sc)) {
6930 WM_EVCNT_STORE(&sc->sc_ev_rpthc, 0);
6931 WM_EVCNT_STORE(&sc->sc_ev_debug1, 0);
6932 WM_EVCNT_STORE(&sc->sc_ev_debug2, 0);
6933 WM_EVCNT_STORE(&sc->sc_ev_debug3, 0);
6934 WM_EVCNT_STORE(&sc->sc_ev_hgptc, 0);
6935 WM_EVCNT_STORE(&sc->sc_ev_debug4, 0);
6936 WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
6937 WM_EVCNT_STORE(&sc->sc_ev_htcbdpc, 0);
6938
6939 WM_EVCNT_STORE(&sc->sc_ev_hgorc, 0);
6940 WM_EVCNT_STORE(&sc->sc_ev_hgotc, 0);
6941 WM_EVCNT_STORE(&sc->sc_ev_lenerrs, 0);
6942 WM_EVCNT_STORE(&sc->sc_ev_scvpc, 0);
6943 WM_EVCNT_STORE(&sc->sc_ev_hrmpc, 0);
6944 }
6945 if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
6946 WM_EVCNT_STORE(&sc->sc_ev_tlpic, 0);
6947 WM_EVCNT_STORE(&sc->sc_ev_rlpic, 0);
6948 WM_EVCNT_STORE(&sc->sc_ev_b2ogprc, 0);
6949 WM_EVCNT_STORE(&sc->sc_ev_o2bspc, 0);
6950 WM_EVCNT_STORE(&sc->sc_ev_b2ospc, 0);
6951 WM_EVCNT_STORE(&sc->sc_ev_o2bgptc, 0);
6952 }
6953 #endif
6954 }
6955
6956 /*
6957 * wm_init: [ifnet interface function]
6958 *
6959 * Initialize the interface.
6960 */
6961 static int
6962 wm_init(struct ifnet *ifp)
6963 {
6964 struct wm_softc *sc = ifp->if_softc;
6965 int ret;
6966
6967 KASSERT(IFNET_LOCKED(ifp));
6968
6969 if (sc->sc_dying)
6970 return ENXIO;
6971
6972 mutex_enter(sc->sc_core_lock);
6973 ret = wm_init_locked(ifp);
6974 mutex_exit(sc->sc_core_lock);
6975
6976 return ret;
6977 }
6978
6979 static int
6980 wm_init_locked(struct ifnet *ifp)
6981 {
6982 struct wm_softc *sc = ifp->if_softc;
6983 struct ethercom *ec = &sc->sc_ethercom;
6984 int i, j, trynum, error = 0;
6985 uint32_t reg, sfp_mask = 0;
6986
6987 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6988 device_xname(sc->sc_dev), __func__));
6989 KASSERT(IFNET_LOCKED(ifp));
6990 KASSERT(mutex_owned(sc->sc_core_lock));
6991
	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal MTU,
	 * on such platforms. One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
7003 #ifdef __NO_STRICT_ALIGNMENT
7004 sc->sc_align_tweak = 0;
7005 #else
7006 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
7007 sc->sc_align_tweak = 0;
7008 else
7009 sc->sc_align_tweak = 2;
7010 #endif /* __NO_STRICT_ALIGNMENT */
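	/*
	 * A worked example of the tweak: ETHER_HDR_LEN is 14, so starting
	 * a received frame at offset 2 in the cluster puts the IP header
	 * at offset 16, giving the 4-byte alignment that strict-alignment
	 * platforms require.
	 */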
7011
7012 /* Cancel any pending I/O. */
7013 wm_stop_locked(ifp, false, false);
7014
7015 /* Update statistics before reset */
7016 if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
7017 if_ierrors, CSR_READ(sc, WMREG_RXERRC));
7018
7019 /* >= PCH_SPT hardware workaround before reset. */
7020 if (sc->sc_type >= WM_T_PCH_SPT)
7021 wm_flush_desc_rings(sc);
7022
7023 /* Reset the chip to a known state. */
7024 wm_reset(sc);
7025
	/*
	 * AMT-based hardware can now take control from firmware.
	 * Do this after reset.
	 */
7030 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
7031 wm_get_hw_control(sc);
7032
7033 if ((sc->sc_type >= WM_T_PCH_SPT) &&
7034 pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
7035 wm_legacy_irq_quirk_spt(sc);
7036
7037 /* Init hardware bits */
7038 wm_initialize_hardware_bits(sc);
7039
7040 /* Reset the PHY. */
7041 if (sc->sc_flags & WM_F_HAS_MII)
7042 wm_gmii_reset(sc);
7043
7044 if (sc->sc_type >= WM_T_ICH8) {
7045 reg = CSR_READ(sc, WMREG_GCR);
7046 /*
		 * The ICH8 no-snoop bits have the opposite polarity; set
		 * them so the chip snoops by default after reset.
7049 */
7050 if (sc->sc_type == WM_T_ICH8)
7051 reg |= GCR_NO_SNOOP_ALL;
7052 else
7053 reg &= ~GCR_NO_SNOOP_ALL;
7054 CSR_WRITE(sc, WMREG_GCR, reg);
7055 }
7056
7057 /* Ungate DMA clock to avoid packet loss */
7058 if (sc->sc_type >= WM_T_PCH_TGP) {
7059 reg = CSR_READ(sc, WMREG_FFLT_DBG);
7060 reg |= (1 << 12);
7061 CSR_WRITE(sc, WMREG_FFLT_DBG, reg);
7062 }
7063
7064 if ((sc->sc_type >= WM_T_ICH8)
7065 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
7066 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
7067
7068 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7069 reg |= CTRL_EXT_RO_DIS;
7070 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7071 }
7072
7073 /* Calculate (E)ITR value */
7074 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
		/*
		 * EITR for NEWQUEUE devices (except for the 82575).
		 * The 82575's EITR should be set to the same throttling
		 * value as the other old controllers' ITR, because the
		 * interrupts/sec calculation is the same, that is,
		 * 1,000,000,000 / (N * 256).
		 *
		 * The 82574's EITR should be set to the same throttling
		 * value as the ITR.
		 *
		 * For N interrupts/sec, set this value to:
		 * 1,000,000 / N, in contrast to the ITR throttling value.
		 */
7086 sc->sc_itr_init = 450;
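		/* That is, 1,000,000 / 450 ~= 2222 interrupts/sec. */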
7087 } else if (sc->sc_type >= WM_T_82543) {
7088 /*
7089 * Set up the interrupt throttling register (units of 256ns)
7090 * Note that a footnote in Intel's documentation says this
7091 * ticker runs at 1/4 the rate when the chip is in 100Mbit
7092 * or 10Mbit mode. Empirically, it appears to be the case
7093 * that that is also true for the 1024ns units of the other
7094 * interrupt-related timer registers -- so, really, we ought
7095 * to divide this value by 4 when the link speed is low.
7096 *
7097 * XXX implement this division at link speed change!
7098 */
7099
7100 /*
7101 * For N interrupts/sec, set this value to:
7102 * 1,000,000,000 / (N * 256). Note that we set the
7103 * absolute and packet timer values to this value
7104 * divided by 4 to get "simple timer" behavior.
7105 */
7106 sc->sc_itr_init = 1500; /* 2604 ints/sec */
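		/* That is, 1,000,000,000 / (1500 * 256) ~= 2604. */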
7107 }
7108
7109 error = wm_init_txrx_queues(sc);
7110 if (error)
7111 goto out;
7112
7113 if (((sc->sc_flags & WM_F_SGMII) == 0) &&
7114 (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
7115 (sc->sc_type >= WM_T_82575))
7116 wm_serdes_power_up_link_82575(sc);
7117
7118 /* Clear out the VLAN table -- we don't use it (yet). */
7119 CSR_WRITE(sc, WMREG_VET, 0);
7120 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
7121 trynum = 10; /* Due to hw errata */
7122 else
7123 trynum = 1;
7124 for (i = 0; i < WM_VLAN_TABSIZE; i++)
7125 for (j = 0; j < trynum; j++)
7126 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
7127
7128 /*
7129 * Set up flow-control parameters.
7130 *
7131 * XXX Values could probably stand some tuning.
7132 */
7133 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
7134 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
7135 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
7136 && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)
7137 && (sc->sc_type != WM_T_PCH_TGP)) {
7138 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
7139 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
7140 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
7141 }
7142
7143 sc->sc_fcrtl = FCRTL_DFLT;
7144 if (sc->sc_type < WM_T_82543) {
7145 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
7146 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
7147 } else {
7148 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
7149 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
7150 }
7151
7152 if (sc->sc_type == WM_T_80003)
7153 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
7154 else
7155 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
7156
7157 /* Writes the control register. */
7158 wm_set_vlan(sc);
7159
7160 if (sc->sc_flags & WM_F_HAS_MII) {
7161 uint16_t kmreg;
7162
7163 switch (sc->sc_type) {
7164 case WM_T_80003:
7165 case WM_T_ICH8:
7166 case WM_T_ICH9:
7167 case WM_T_ICH10:
7168 case WM_T_PCH:
7169 case WM_T_PCH2:
7170 case WM_T_PCH_LPT:
7171 case WM_T_PCH_SPT:
7172 case WM_T_PCH_CNP:
7173 case WM_T_PCH_TGP:
			/*
			 * Set the MAC to wait the maximum time between each
			 * iteration and increase the max iterations when
			 * polling the PHY; this fixes erroneous timeouts at
			 * 10Mbps.
			 */
7180 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
7181 0xFFFF);
7182 wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
7183 &kmreg);
7184 kmreg |= 0x3F;
7185 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
7186 kmreg);
7187 break;
7188 default:
7189 break;
7190 }
7191
7192 if (sc->sc_type == WM_T_80003) {
7193 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7194 reg &= ~CTRL_EXT_LINK_MODE_MASK;
7195 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7196
7197 /* Bypass RX and TX FIFOs */
7198 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
7199 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
7200 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
7201 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
7202 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
7203 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
7204 }
7205 }
7206 #if 0
7207 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
7208 #endif
7209
7210 /* Set up checksum offload parameters. */
7211 reg = CSR_READ(sc, WMREG_RXCSUM);
7212 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
7213 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
7214 reg |= RXCSUM_IPOFL;
7215 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
7216 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
7217 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
7218 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
7219 CSR_WRITE(sc, WMREG_RXCSUM, reg);
7220
7221 /* Set registers about MSI-X */
7222 if (wm_is_using_msix(sc)) {
7223 uint32_t ivar, qintr_idx;
7224 struct wm_queue *wmq;
7225 unsigned int qid;
7226
7227 if (sc->sc_type == WM_T_82575) {
7228 /* Interrupt control */
7229 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7230 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
7231 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7232
7233 /* TX and RX */
7234 for (i = 0; i < sc->sc_nqueues; i++) {
7235 wmq = &sc->sc_queue[i];
7236 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
7237 EITR_TX_QUEUE(wmq->wmq_id)
7238 | EITR_RX_QUEUE(wmq->wmq_id));
7239 }
7240 /* Link status */
7241 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
7242 EITR_OTHER);
7243 } else if (sc->sc_type == WM_T_82574) {
7244 /* Interrupt control */
7245 reg = CSR_READ(sc, WMREG_CTRL_EXT);
7246 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
7247 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7248
			/*
			 * Work around an issue with spurious interrupts
			 * in MSI-X mode.
			 * At wm_initialize_hardware_bits(), sc_nintrs has not
			 * been initialized yet, so re-initialize WMREG_RFCTL
			 * here.
			 */
7255 reg = CSR_READ(sc, WMREG_RFCTL);
7256 reg |= WMREG_RFCTL_ACKDIS;
7257 CSR_WRITE(sc, WMREG_RFCTL, reg);
7258
7259 ivar = 0;
7260 /* TX and RX */
7261 for (i = 0; i < sc->sc_nqueues; i++) {
7262 wmq = &sc->sc_queue[i];
7263 qid = wmq->wmq_id;
7264 qintr_idx = wmq->wmq_intr_idx;
7265
7266 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
7267 IVAR_TX_MASK_Q_82574(qid));
7268 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
7269 IVAR_RX_MASK_Q_82574(qid));
7270 }
7271 /* Link status */
7272 ivar |= __SHIFTIN((IVAR_VALID_82574
7273 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
7274 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
7275 } else {
7276 /* Interrupt control */
7277 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
7278 | GPIE_EIAME | GPIE_PBA);
7279
7280 switch (sc->sc_type) {
7281 case WM_T_82580:
7282 case WM_T_I350:
7283 case WM_T_I354:
7284 case WM_T_I210:
7285 case WM_T_I211:
7286 /* TX and RX */
7287 for (i = 0; i < sc->sc_nqueues; i++) {
7288 wmq = &sc->sc_queue[i];
7289 qid = wmq->wmq_id;
7290 qintr_idx = wmq->wmq_intr_idx;
7291
7292 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
7293 ivar &= ~IVAR_TX_MASK_Q(qid);
7294 ivar |= __SHIFTIN((qintr_idx
7295 | IVAR_VALID),
7296 IVAR_TX_MASK_Q(qid));
7297 ivar &= ~IVAR_RX_MASK_Q(qid);
7298 ivar |= __SHIFTIN((qintr_idx
7299 | IVAR_VALID),
7300 IVAR_RX_MASK_Q(qid));
7301 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
7302 }
7303 break;
7304 case WM_T_82576:
7305 /* TX and RX */
7306 for (i = 0; i < sc->sc_nqueues; i++) {
7307 wmq = &sc->sc_queue[i];
7308 qid = wmq->wmq_id;
7309 qintr_idx = wmq->wmq_intr_idx;
7310
7311 ivar = CSR_READ(sc,
7312 WMREG_IVAR_Q_82576(qid));
7313 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
7314 ivar |= __SHIFTIN((qintr_idx
7315 | IVAR_VALID),
7316 IVAR_TX_MASK_Q_82576(qid));
7317 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
7318 ivar |= __SHIFTIN((qintr_idx
7319 | IVAR_VALID),
7320 IVAR_RX_MASK_Q_82576(qid));
7321 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
7322 ivar);
7323 }
7324 break;
7325 default:
7326 break;
7327 }
7328
7329 /* Link status */
7330 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
7331 IVAR_MISC_OTHER);
7332 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
7333 }
7334
7335 if (wm_is_using_multiqueue(sc)) {
7336 wm_init_rss(sc);
7337
			/*
			 * NOTE: Receive Full-Packet Checksum Offload is
			 * mutually exclusive with Multiqueue. However, this
			 * is not the same as TCP/IP checksum offload, which
			 * still works.
			 */
7344 reg = CSR_READ(sc, WMREG_RXCSUM);
7345 reg |= RXCSUM_PCSD;
7346 CSR_WRITE(sc, WMREG_RXCSUM, reg);
7347 }
7348 }
7349
7350 /* Set up the interrupt registers. */
7351 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7352
7353 /* Enable SFP module insertion interrupt if it's required */
7354 if ((sc->sc_flags & WM_F_SFP) != 0) {
7355 sc->sc_ctrl |= CTRL_EXTLINK_EN;
7356 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7357 sfp_mask = ICR_GPI(0);
7358 }
7359
7360 if (wm_is_using_msix(sc)) {
7361 uint32_t mask;
7362 struct wm_queue *wmq;
7363
7364 switch (sc->sc_type) {
7365 case WM_T_82574:
7366 mask = 0;
7367 for (i = 0; i < sc->sc_nqueues; i++) {
7368 wmq = &sc->sc_queue[i];
7369 mask |= ICR_TXQ(wmq->wmq_id);
7370 mask |= ICR_RXQ(wmq->wmq_id);
7371 }
7372 mask |= ICR_OTHER;
7373 CSR_WRITE(sc, WMREG_EIAC_82574, mask);
7374 CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
7375 break;
7376 default:
7377 if (sc->sc_type == WM_T_82575) {
7378 mask = 0;
7379 for (i = 0; i < sc->sc_nqueues; i++) {
7380 wmq = &sc->sc_queue[i];
7381 mask |= EITR_TX_QUEUE(wmq->wmq_id);
7382 mask |= EITR_RX_QUEUE(wmq->wmq_id);
7383 }
7384 mask |= EITR_OTHER;
7385 } else {
7386 mask = 0;
7387 for (i = 0; i < sc->sc_nqueues; i++) {
7388 wmq = &sc->sc_queue[i];
7389 mask |= 1 << wmq->wmq_intr_idx;
7390 }
7391 mask |= 1 << sc->sc_link_intr_idx;
7392 }
7393 CSR_WRITE(sc, WMREG_EIAC, mask);
7394 CSR_WRITE(sc, WMREG_EIAM, mask);
7395 CSR_WRITE(sc, WMREG_EIMS, mask);
7396
7397 /* For other interrupts */
7398 CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
7399 break;
7400 }
7401 } else {
7402 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
7403 ICR_RXO | ICR_RXT0 | sfp_mask;
7404 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
7405 }
7406
7407 /* Set up the inter-packet gap. */
7408 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7409
7410 if (sc->sc_type >= WM_T_82543) {
7411 for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7412 struct wm_queue *wmq = &sc->sc_queue[qidx];
7413 wm_itrs_writereg(sc, wmq);
7414 }
		/*
		 * Link interrupts occur much less frequently than TX and
		 * RX interrupts, so we don't tune the
		 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's if_igb
		 * does.
		 */
7421 }
7422
7423 /* Set the VLAN EtherType. */
7424 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
7425
7426 /*
7427 * Set up the transmit control register; we start out with
7428 * a collision distance suitable for FDX, but update it when
7429 * we resolve the media type.
7430 */
7431 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
7432 | TCTL_CT(TX_COLLISION_THRESHOLD)
7433 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7434 if (sc->sc_type >= WM_T_82571)
7435 sc->sc_tctl |= TCTL_MULR;
7436 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7437
7438 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* Write TDT after TCTL.EN is set. See the documentation. */
7440 CSR_WRITE(sc, WMREG_TDT(0), 0);
7441 }
7442
7443 if (sc->sc_type == WM_T_80003) {
7444 reg = CSR_READ(sc, WMREG_TCTL_EXT);
7445 reg &= ~TCTL_EXT_GCEX_MASK;
7446 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
7447 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
7448 }
7449
7450 /* Set the media. */
7451 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
7452 goto out;
7453
7454 /* Configure for OS presence */
7455 wm_init_manageability(sc);
7456
7457 /*
7458 * Set up the receive control register; we actually program the
7459 * register when we set the receive filter. Use multicast address
7460 * offset type 0.
7461 *
7462 * Only the i82544 has the ability to strip the incoming CRC, so we
7463 * don't enable that feature.
7464 */
7465 sc->sc_mchash_type = 0;
7466 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
7467 | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
7468
	/* The 82574 uses the one-buffer extended Rx descriptor. */
7470 if (sc->sc_type == WM_T_82574)
7471 sc->sc_rctl |= RCTL_DTYP_ONEBUF;
7472
7473 if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
7474 sc->sc_rctl |= RCTL_SECRC;
7475
7476 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
7477 && (ifp->if_mtu > ETHERMTU)) {
7478 sc->sc_rctl |= RCTL_LPE;
7479 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7480 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
7481 }
7482
7483 if (MCLBYTES == 2048)
7484 sc->sc_rctl |= RCTL_2k;
7485 else {
7486 if (sc->sc_type >= WM_T_82543) {
7487 switch (MCLBYTES) {
7488 case 4096:
7489 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
7490 break;
7491 case 8192:
7492 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
7493 break;
7494 case 16384:
7495 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
7496 break;
7497 default:
7498 panic("wm_init: MCLBYTES %d unsupported",
7499 MCLBYTES);
7500 break;
7501 }
7502 } else
7503 panic("wm_init: i82542 requires MCLBYTES = 2048");
7504 }
7505
7506 /* Enable ECC */
7507 switch (sc->sc_type) {
7508 case WM_T_82571:
7509 reg = CSR_READ(sc, WMREG_PBA_ECC);
7510 reg |= PBA_ECC_CORR_EN;
7511 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
7512 break;
7513 case WM_T_PCH_LPT:
7514 case WM_T_PCH_SPT:
7515 case WM_T_PCH_CNP:
7516 case WM_T_PCH_TGP:
7517 reg = CSR_READ(sc, WMREG_PBECCSTS);
7518 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
7519 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
7520
7521 sc->sc_ctrl |= CTRL_MEHE;
7522 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7523 break;
7524 default:
7525 break;
7526 }
7527
7528 /*
7529 * Set the receive filter.
7530 *
7531 * For 82575 and 82576, the RX descriptors must be initialized after
7532 * the setting of RCTL.EN in wm_set_filter()
7533 */
7534 wm_set_filter(sc);
7535
	/* On 82575 and later, set RDT only if RX is enabled. */
7537 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7538 int qidx;
7539 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7540 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
			for (i = 0; i < WM_NRXDESC; i++) {
				mutex_enter(rxq->rxq_lock);
				wm_init_rxdesc(rxq, i);
				mutex_exit(rxq->rxq_lock);
			}
7547 }
7548 }
7549
7550 wm_unset_stopping_flags(sc);
7551
7552 /* Start the one second link check clock. */
7553 callout_schedule(&sc->sc_tick_ch, hz);
7554
7555 /*
7556 * ...all done! (IFNET_LOCKED asserted above.)
7557 */
7558 ifp->if_flags |= IFF_RUNNING;
7559
7560 out:
7561 /* Save last flags for the callback */
7562 sc->sc_if_flags = ifp->if_flags;
7563 sc->sc_ec_capenable = ec->ec_capenable;
7564 if (error)
7565 log(LOG_ERR, "%s: interface not running\n",
7566 device_xname(sc->sc_dev));
7567 return error;
7568 }
7569
7570 /*
7571 * wm_stop: [ifnet interface function]
7572 *
7573 * Stop transmission on the interface.
7574 */
7575 static void
7576 wm_stop(struct ifnet *ifp, int disable)
7577 {
7578 struct wm_softc *sc = ifp->if_softc;
7579
7580 ASSERT_SLEEPABLE();
7581 KASSERT(IFNET_LOCKED(ifp));
7582
7583 mutex_enter(sc->sc_core_lock);
	wm_stop_locked(ifp, disable != 0, true);
7585 mutex_exit(sc->sc_core_lock);
7586
	/*
	 * After wm_set_stopping_flags(), it is guaranteed that
	 * wm_handle_queue_work() does not call workqueue_enqueue().
	 * However, workqueue_wait() cannot be called in wm_stop_locked()
	 * because it can sleep, so call workqueue_wait() here instead.
	 */
7594 for (int i = 0; i < sc->sc_nqueues; i++)
7595 workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
7596 workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
7597 }
7598
7599 static void
7600 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
7601 {
7602 struct wm_softc *sc = ifp->if_softc;
7603 struct wm_txsoft *txs;
7604 int i, qidx;
7605
7606 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7607 device_xname(sc->sc_dev), __func__));
7608 KASSERT(IFNET_LOCKED(ifp));
7609 KASSERT(mutex_owned(sc->sc_core_lock));
7610
7611 wm_set_stopping_flags(sc);
7612
7613 if (sc->sc_flags & WM_F_HAS_MII) {
7614 /* Down the MII. */
7615 mii_down(&sc->sc_mii);
7616 } else {
7617 #if 0
7618 /* Should we clear PHY's status properly? */
7619 wm_reset(sc);
7620 #endif
7621 }
7622
7623 /* Stop the transmit and receive processes. */
7624 CSR_WRITE(sc, WMREG_TCTL, 0);
7625 CSR_WRITE(sc, WMREG_RCTL, 0);
7626 sc->sc_rctl &= ~RCTL_EN;
7627
7628 /*
7629 * Clear the interrupt mask to ensure the device cannot assert its
7630 * interrupt line.
7631 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
7632 * service any currently pending or shared interrupt.
7633 */
7634 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7635 sc->sc_icr = 0;
7636 if (wm_is_using_msix(sc)) {
7637 if (sc->sc_type != WM_T_82574) {
7638 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
7639 CSR_WRITE(sc, WMREG_EIAC, 0);
7640 } else
7641 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
7642 }
7643
7644 /*
7645 * Stop callouts after interrupts are disabled; if we have
7646 * to wait for them, we will be releasing the CORE_LOCK
7647 * briefly, which will unblock interrupts on the current CPU.
7648 */
7649
7650 /* Stop the one second clock. */
7651 if (wait)
7652 callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
7653 else
7654 callout_stop(&sc->sc_tick_ch);
7655
7656 /* Stop the 82547 Tx FIFO stall check timer. */
7657 if (sc->sc_type == WM_T_82547) {
7658 if (wait)
7659 callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
7660 else
7661 callout_stop(&sc->sc_txfifo_ch);
7662 }
7663
7664 /* Release any queued transmit buffers. */
7665 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7666 struct wm_queue *wmq = &sc->sc_queue[qidx];
7667 struct wm_txqueue *txq = &wmq->wmq_txq;
7668 struct mbuf *m;
7669
7670 mutex_enter(txq->txq_lock);
7671 txq->txq_sending = false; /* Ensure watchdog disabled */
7672 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7673 txs = &txq->txq_soft[i];
7674 if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat,
				    txs->txs_dmamap);
7676 m_freem(txs->txs_mbuf);
7677 txs->txs_mbuf = NULL;
7678 }
7679 }
7680 /* Drain txq_interq */
7681 while ((m = pcq_get(txq->txq_interq)) != NULL)
7682 m_freem(m);
7683 mutex_exit(txq->txq_lock);
7684 }
7685
7686 /* Mark the interface as down and cancel the watchdog timer. */
7687 ifp->if_flags &= ~IFF_RUNNING;
7688 sc->sc_if_flags = ifp->if_flags;
7689
7690 if (disable) {
7691 for (i = 0; i < sc->sc_nqueues; i++) {
7692 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7693 mutex_enter(rxq->rxq_lock);
7694 wm_rxdrain(rxq);
7695 mutex_exit(rxq->rxq_lock);
7696 }
7697 }
7698
7699 #if 0 /* notyet */
7700 if (sc->sc_type >= WM_T_82544)
7701 CSR_WRITE(sc, WMREG_WUC, 0);
7702 #endif
7703 }
7704
7705 static void
7706 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
7707 {
7708 struct mbuf *m;
7709 int i;
7710
7711 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
7712 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
7713 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
7714 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
7715 m->m_data, m->m_len, m->m_flags);
7716 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
7717 i, i == 1 ? "" : "s");
7718 }
7719
7720 /*
7721 * wm_82547_txfifo_stall:
7722 *
7723 * Callout used to wait for the 82547 Tx FIFO to drain,
7724 * reset the FIFO pointers, and restart packet transmission.
7725 */
7726 static void
7727 wm_82547_txfifo_stall(void *arg)
7728 {
7729 struct wm_softc *sc = arg;
7730 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7731
7732 mutex_enter(txq->txq_lock);
7733
7734 if (txq->txq_stopping)
7735 goto out;
7736
7737 if (txq->txq_fifo_stall) {
7738 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
7739 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
7740 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
7741 /*
7742 * Packets have drained. Stop transmitter, reset
7743 * FIFO pointers, restart transmitter, and kick
7744 * the packet queue.
7745 */
7746 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
7747 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
7748 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
7749 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
7750 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
7751 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
7752 CSR_WRITE(sc, WMREG_TCTL, tctl);
7753 CSR_WRITE_FLUSH(sc);
7754
7755 txq->txq_fifo_head = 0;
7756 txq->txq_fifo_stall = 0;
7757 wm_start_locked(&sc->sc_ethercom.ec_if);
7758 } else {
7759 /*
7760 * Still waiting for packets to drain; try again in
7761 * another tick.
7762 */
7763 callout_schedule(&sc->sc_txfifo_ch, 1);
7764 }
7765 }
7766
7767 out:
7768 mutex_exit(txq->txq_lock);
7769 }
7770
7771 /*
7772 * wm_82547_txfifo_bugchk:
7773 *
 *	Check for a bug condition in the 82547 Tx FIFO. We need to
 *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
7777 *
7778 * We do this by checking the amount of space before the end
7779 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
7780 * the Tx FIFO, wait for all remaining packets to drain, reset
7781 * the internal FIFO pointers to the beginning, and restart
7782 * transmission on the interface.
7783 */
7784 #define WM_FIFO_HDR 0x10
7785 #define WM_82547_PAD_LEN 0x3e0
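/*
 * Example of the length calculation below: a 1514-byte frame consumes
 * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO space,
 * i.e. the payload plus the 16-byte per-packet FIFO header, rounded up
 * to a 16-byte boundary. WM_82547_PAD_LEN is 0x3e0 (992) bytes.
 */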
7786 static int
7787 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
7788 {
7789 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7790 int space = txq->txq_fifo_size - txq->txq_fifo_head;
7791 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
7792
7793 /* Just return if already stalled. */
7794 if (txq->txq_fifo_stall)
7795 return 1;
7796
7797 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7798 /* Stall only occurs in half-duplex mode. */
7799 goto send_packet;
7800 }
7801
7802 if (len >= WM_82547_PAD_LEN + space) {
7803 txq->txq_fifo_stall = 1;
7804 callout_schedule(&sc->sc_txfifo_ch, 1);
7805 return 1;
7806 }
7807
7808 send_packet:
7809 txq->txq_fifo_head += len;
7810 if (txq->txq_fifo_head >= txq->txq_fifo_size)
7811 txq->txq_fifo_head -= txq->txq_fifo_size;
7812
7813 return 0;
7814 }
7815
7816 static int
7817 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7818 {
7819 int error;
7820
7821 /*
7822 * Allocate the control data structures, and create and load the
7823 * DMA map for it.
7824 *
7825 * NOTE: All Tx descriptors must be in the same 4G segment of
7826 * memory. So must Rx descriptors. We simplify by allocating
7827 * both sets within the same 4G segment.
7828 */
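	/*
	 * The 4G constraint is enforced below by the 0x100000000ULL
	 * boundary argument to bus_dmamem_alloc(), which keeps the
	 * allocation from crossing a 4GB boundary.
	 */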
7829 if (sc->sc_type < WM_T_82544)
7830 WM_NTXDESC(txq) = WM_NTXDESC_82542;
7831 else
7832 WM_NTXDESC(txq) = WM_NTXDESC_82544;
7833 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7834 txq->txq_descsize = sizeof(nq_txdesc_t);
7835 else
7836 txq->txq_descsize = sizeof(wiseman_txdesc_t);
7837
7838 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
7839 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
7840 1, &txq->txq_desc_rseg, 0)) != 0) {
7841 aprint_error_dev(sc->sc_dev,
7842 "unable to allocate TX control data, error = %d\n",
7843 error);
7844 goto fail_0;
7845 }
7846
7847 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
7848 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
7849 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
7850 aprint_error_dev(sc->sc_dev,
7851 "unable to map TX control data, error = %d\n", error);
7852 goto fail_1;
7853 }
7854
7855 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
7856 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
7857 aprint_error_dev(sc->sc_dev,
7858 "unable to create TX control data DMA map, error = %d\n",
7859 error);
7860 goto fail_2;
7861 }
7862
7863 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
7864 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
7865 aprint_error_dev(sc->sc_dev,
7866 "unable to load TX control data DMA map, error = %d\n",
7867 error);
7868 goto fail_3;
7869 }
7870
7871 return 0;
7872
7873 fail_3:
7874 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7875 fail_2:
7876 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7877 WM_TXDESCS_SIZE(txq));
7878 fail_1:
7879 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7880 fail_0:
7881 return error;
7882 }
7883
7884 static void
7885 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7886 {
7887
7888 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
7889 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7890 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7891 WM_TXDESCS_SIZE(txq));
7892 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7893 }
7894
7895 static int
7896 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7897 {
7898 int error;
7899 size_t rxq_descs_size;
7900
7901 /*
7902 * Allocate the control data structures, and create and load the
7903 * DMA map for it.
7904 *
7905 * NOTE: All Tx descriptors must be in the same 4G segment of
7906 * memory. So must Rx descriptors. We simplify by allocating
7907 * both sets within the same 4G segment.
7908 */
7909 rxq->rxq_ndesc = WM_NRXDESC;
7910 if (sc->sc_type == WM_T_82574)
7911 rxq->rxq_descsize = sizeof(ext_rxdesc_t);
7912 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7913 rxq->rxq_descsize = sizeof(nq_rxdesc_t);
7914 else
7915 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
7916 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
7917
7918 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
7919 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
7920 1, &rxq->rxq_desc_rseg, 0)) != 0) {
7921 aprint_error_dev(sc->sc_dev,
7922 "unable to allocate RX control data, error = %d\n",
7923 error);
7924 goto fail_0;
7925 }
7926
7927 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
7928 rxq->rxq_desc_rseg, rxq_descs_size,
7929 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
7930 aprint_error_dev(sc->sc_dev,
7931 "unable to map RX control data, error = %d\n", error);
7932 goto fail_1;
7933 }
7934
7935 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
7936 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
7937 aprint_error_dev(sc->sc_dev,
7938 "unable to create RX control data DMA map, error = %d\n",
7939 error);
7940 goto fail_2;
7941 }
7942
7943 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
7944 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
7945 aprint_error_dev(sc->sc_dev,
7946 "unable to load RX control data DMA map, error = %d\n",
7947 error);
7948 goto fail_3;
7949 }
7950
7951 return 0;
7952
7953 fail_3:
7954 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7955 fail_2:
7956 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7957 rxq_descs_size);
7958 fail_1:
7959 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7960 fail_0:
7961 return error;
7962 }
7963
7964 static void
7965 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7966 {
7967
7968 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
7969 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7970 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7971 rxq->rxq_descsize * rxq->rxq_ndesc);
7972 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7973 }
7974
7975
7976 static int
7977 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7978 {
7979 int i, error;
7980
7981 /* Create the transmit buffer DMA maps. */
7982 WM_TXQUEUELEN(txq) =
7983 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
7984 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
7985 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7986 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
7987 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
7988 &txq->txq_soft[i].txs_dmamap)) != 0) {
7989 aprint_error_dev(sc->sc_dev,
7990 "unable to create Tx DMA map %d, error = %d\n",
7991 i, error);
7992 goto fail;
7993 }
7994 }
7995
7996 return 0;
7997
7998 fail:
7999 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
8000 if (txq->txq_soft[i].txs_dmamap != NULL)
8001 bus_dmamap_destroy(sc->sc_dmat,
8002 txq->txq_soft[i].txs_dmamap);
8003 }
8004 return error;
8005 }
8006
8007 static void
8008 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
8009 {
8010 int i;
8011
8012 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
8013 if (txq->txq_soft[i].txs_dmamap != NULL)
8014 bus_dmamap_destroy(sc->sc_dmat,
8015 txq->txq_soft[i].txs_dmamap);
8016 }
8017 }
8018
8019 static int
8020 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8021 {
8022 int i, error;
8023
8024 /* Create the receive buffer DMA maps. */
8025 for (i = 0; i < rxq->rxq_ndesc; i++) {
8026 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
8027 MCLBYTES, 0, 0,
8028 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
8029 aprint_error_dev(sc->sc_dev,
8030 "unable to create Rx DMA map %d error = %d\n",
8031 i, error);
8032 goto fail;
8033 }
8034 rxq->rxq_soft[i].rxs_mbuf = NULL;
8035 }
8036
8037 return 0;
8038
8039 fail:
8040 for (i = 0; i < rxq->rxq_ndesc; i++) {
8041 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
8042 bus_dmamap_destroy(sc->sc_dmat,
8043 rxq->rxq_soft[i].rxs_dmamap);
8044 }
8045 return error;
8046 }
8047
8048 static void
8049 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8050 {
8051 int i;
8052
8053 for (i = 0; i < rxq->rxq_ndesc; i++) {
8054 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
8055 bus_dmamap_destroy(sc->sc_dmat,
8056 rxq->rxq_soft[i].rxs_dmamap);
8057 }
8058 }
8059
8060 /*
 * wm_alloc_txrx_queues:
8062 * Allocate {tx,rx}descs and {tx,rx} buffers
8063 */
8064 static int
8065 wm_alloc_txrx_queues(struct wm_softc *sc)
8066 {
8067 int i, error, tx_done, rx_done;
8068
8069 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
8070 KM_SLEEP);
8071 if (sc->sc_queue == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate wm_queue\n");
8073 error = ENOMEM;
8074 goto fail_0;
8075 }
8076
8077 /* For transmission */
8078 error = 0;
8079 tx_done = 0;
8080 for (i = 0; i < sc->sc_nqueues; i++) {
8081 #ifdef WM_EVENT_COUNTERS
8082 int j;
8083 const char *xname;
8084 #endif
8085 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8086 txq->txq_sc = sc;
8087 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
8088
8089 error = wm_alloc_tx_descs(sc, txq);
8090 if (error)
8091 break;
8092 error = wm_alloc_tx_buffer(sc, txq);
8093 if (error) {
8094 wm_free_tx_descs(sc, txq);
8095 break;
8096 }
8097 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
8098 if (txq->txq_interq == NULL) {
8099 wm_free_tx_descs(sc, txq);
8100 wm_free_tx_buffer(sc, txq);
8101 error = ENOMEM;
8102 break;
8103 }
8104
8105 #ifdef WM_EVENT_COUNTERS
8106 xname = device_xname(sc->sc_dev);
8107
8108 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
8109 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
8110 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
8111 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
8112 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
8113 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
8114 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
8115 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
8116 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
8117 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
8118 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
8119
8120 for (j = 0; j < WM_NTXSEGS; j++) {
8121 snprintf(txq->txq_txseg_evcnt_names[j],
8122 sizeof(txq->txq_txseg_evcnt_names[j]),
8123 "txq%02dtxseg%d", i, j);
8124 evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
8125 EVCNT_TYPE_MISC,
8126 NULL, xname, txq->txq_txseg_evcnt_names[j]);
8127 }
8128
8129 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
8130 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
8131 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
8132 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
8133 /* Only for 82544 (and earlier?) */
8134 if (sc->sc_type <= WM_T_82544)
8135 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
8136 WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
8137 #endif /* WM_EVENT_COUNTERS */
8138
8139 tx_done++;
8140 }
8141 if (error)
8142 goto fail_1;
8143
8144 /* For receive */
8145 error = 0;
8146 rx_done = 0;
8147 for (i = 0; i < sc->sc_nqueues; i++) {
8148 #ifdef WM_EVENT_COUNTERS
8149 const char *xname;
8150 #endif
8151 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8152 rxq->rxq_sc = sc;
8153 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
8154
8155 error = wm_alloc_rx_descs(sc, rxq);
8156 if (error)
8157 break;
8158
8159 error = wm_alloc_rx_buffer(sc, rxq);
8160 if (error) {
8161 wm_free_rx_descs(sc, rxq);
8162 break;
8163 }
8164
8165 #ifdef WM_EVENT_COUNTERS
8166 xname = device_xname(sc->sc_dev);
8167
8168 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
8169 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
8170
8171 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
8172 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
8173 #endif /* WM_EVENT_COUNTERS */
8174
8175 rx_done++;
8176 }
8177 if (error)
8178 goto fail_2;
8179
8180 return 0;
8181
8182 fail_2:
8183 for (i = 0; i < rx_done; i++) {
8184 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8185 wm_free_rx_buffer(sc, rxq);
8186 wm_free_rx_descs(sc, rxq);
8187 if (rxq->rxq_lock)
8188 mutex_obj_free(rxq->rxq_lock);
8189 }
8190 fail_1:
8191 for (i = 0; i < tx_done; i++) {
8192 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8193 pcq_destroy(txq->txq_interq);
8194 wm_free_tx_buffer(sc, txq);
8195 wm_free_tx_descs(sc, txq);
8196 if (txq->txq_lock)
8197 mutex_obj_free(txq->txq_lock);
8198 }
8199
8200 kmem_free(sc->sc_queue,
8201 sizeof(struct wm_queue) * sc->sc_nqueues);
8202 fail_0:
8203 return error;
8204 }
8205
8206 /*
 * wm_free_txrx_queues:
8208 * Free {tx,rx}descs and {tx,rx} buffers
8209 */
8210 static void
8211 wm_free_txrx_queues(struct wm_softc *sc)
8212 {
8213 int i;
8214
8215 for (i = 0; i < sc->sc_nqueues; i++) {
8216 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8217
8218 #ifdef WM_EVENT_COUNTERS
8219 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
8220 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
8221 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
8222 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
8223 #endif /* WM_EVENT_COUNTERS */
8224
8225 wm_free_rx_buffer(sc, rxq);
8226 wm_free_rx_descs(sc, rxq);
8227 if (rxq->rxq_lock)
8228 mutex_obj_free(rxq->rxq_lock);
8229 }
8230
8231 for (i = 0; i < sc->sc_nqueues; i++) {
8232 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8233 struct mbuf *m;
8234 #ifdef WM_EVENT_COUNTERS
8235 int j;
8236
8237 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
8238 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
8239 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
8240 WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
8241 WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
8242 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
8243 WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
8244 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
8245 WM_Q_EVCNT_DETACH(txq, tso, txq, i);
8246 WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
8247 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
8248
8249 for (j = 0; j < WM_NTXSEGS; j++)
8250 evcnt_detach(&txq->txq_ev_txseg[j]);
8251
8252 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
8253 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
8254 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
8255 WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
8256 if (sc->sc_type <= WM_T_82544)
8257 WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
8258 WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
8259 #endif /* WM_EVENT_COUNTERS */
8260
8261 /* Drain txq_interq */
8262 while ((m = pcq_get(txq->txq_interq)) != NULL)
8263 m_freem(m);
8264 pcq_destroy(txq->txq_interq);
8265
8266 wm_free_tx_buffer(sc, txq);
8267 wm_free_tx_descs(sc, txq);
8268 if (txq->txq_lock)
8269 mutex_obj_free(txq->txq_lock);
8270 }
8271
8272 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
8273 }
8274
8275 static void
8276 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
8277 {
8278
8279 KASSERT(mutex_owned(txq->txq_lock));
8280
8281 /* Initialize the transmit descriptor ring. */
8282 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
8283 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
8284 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8285 txq->txq_free = WM_NTXDESC(txq);
8286 txq->txq_next = 0;
8287 }
8288
8289 static void
8290 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
8291 struct wm_txqueue *txq)
8292 {
8293
8294 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8295 device_xname(sc->sc_dev), __func__));
8296 KASSERT(mutex_owned(txq->txq_lock));
8297
8298 if (sc->sc_type < WM_T_82543) {
8299 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
8300 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
8301 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
8302 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
8303 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
8304 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
8305 } else {
8306 int qid = wmq->wmq_id;
8307
8308 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
8309 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
8310 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
8311 CSR_WRITE(sc, WMREG_TDH(qid), 0);
8312
8313 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8314 /*
8315 * Don't write TDT before TCTL.EN is set.
			 * See the documentation.
8317 */
8318 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
8319 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
8320 | TXDCTL_WTHRESH(0));
8321 else {
8322 /* XXX should update with AIM? */
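			/*
			 * The ITR value is in 256ns units while TIDV/TADV
			 * are in 1024ns units, hence the division by 4
			 * (see the throttling comment in wm_init_locked()).
			 */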
8323 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
8324 if (sc->sc_type >= WM_T_82540) {
				/* TADV should be the same value as TIDV */
8326 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
8327 }
8328
8329 CSR_WRITE(sc, WMREG_TDT(qid), 0);
8330 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
8331 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
8332 }
8333 }
8334 }
8335
8336 static void
8337 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
8338 {
8339 int i;
8340
8341 KASSERT(mutex_owned(txq->txq_lock));
8342
8343 /* Initialize the transmit job descriptors. */
8344 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
8345 txq->txq_soft[i].txs_mbuf = NULL;
8346 txq->txq_sfree = WM_TXQUEUELEN(txq);
8347 txq->txq_snext = 0;
8348 txq->txq_sdirty = 0;
8349 }
8350
8351 static void
8352 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8353 struct wm_txqueue *txq)
8354 {
8355
8356 KASSERT(mutex_owned(txq->txq_lock));
8357
8358 /*
8359 * Set up some register offsets that are different between
8360 * the i82542 and the i82543 and later chips.
8361 */
8362 if (sc->sc_type < WM_T_82543)
8363 txq->txq_tdt_reg = WMREG_OLD_TDT;
8364 else
8365 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
8366
8367 wm_init_tx_descs(sc, txq);
8368 wm_init_tx_regs(sc, wmq, txq);
8369 wm_init_tx_buffer(sc, txq);
8370
8371 /* Clear other than WM_TXQ_LINKDOWN_DISCARD */
8372 txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
8373
8374 txq->txq_sending = false;
8375 }
8376
8377 static void
8378 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
8379 struct wm_rxqueue *rxq)
8380 {
8381
8382 KASSERT(mutex_owned(rxq->rxq_lock));
8383
8384 /*
8385 * Initialize the receive descriptor and receive job
8386 * descriptor rings.
8387 */
8388 if (sc->sc_type < WM_T_82543) {
8389 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
8390 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
8391 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
8392 rxq->rxq_descsize * rxq->rxq_ndesc);
8393 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
8394 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
8395 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
8396
8397 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
8398 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
8399 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
8400 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
8401 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
8402 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
8403 } else {
8404 int qid = wmq->wmq_id;
8405
8406 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
8407 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
8408 CSR_WRITE(sc, WMREG_RDLEN(qid),
8409 rxq->rxq_descsize * rxq->rxq_ndesc);
8410
8411 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
8412 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
8413 panic("%s: MCLBYTES %d unsupported for 82575 "
8414 "or higher\n", __func__, MCLBYTES);
8415
8416 /*
8417 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
8418 * only.
8419 */
8420 CSR_WRITE(sc, WMREG_SRRCTL(qid),
8421 SRRCTL_DESCTYPE_ADV_ONEBUF
8422 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
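			/*
			 * BSIZEPKT is in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes; e.g. with 1KB
			 * units and MCLBYTES = 2048, the field value written
			 * above is 2.
			 */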
8423 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
8424 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
8425 | RXDCTL_WTHRESH(1));
8426 CSR_WRITE(sc, WMREG_RDH(qid), 0);
8427 CSR_WRITE(sc, WMREG_RDT(qid), 0);
8428 } else {
8429 CSR_WRITE(sc, WMREG_RDH(qid), 0);
8430 CSR_WRITE(sc, WMREG_RDT(qid), 0);
8431 /* XXX should update with AIM? */
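			/*
			 * As with TIDV/TADV, RDTR/RADV are in 1024ns units
			 * while the ITR value is in 256ns units, hence
			 * wmq_itr / 4.
			 */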
8432 CSR_WRITE(sc, WMREG_RDTR,
8433 (wmq->wmq_itr / 4) | RDTR_FPD);
			/* MUST be the same value as RDTR */
8435 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
8436 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
8437 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
8438 }
8439 }
8440 }
8441
8442 static int
8443 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8444 {
8445 struct wm_rxsoft *rxs;
8446 int error, i;
8447
8448 KASSERT(mutex_owned(rxq->rxq_lock));
8449
8450 for (i = 0; i < rxq->rxq_ndesc; i++) {
8451 rxs = &rxq->rxq_soft[i];
8452 if (rxs->rxs_mbuf == NULL) {
8453 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
8454 log(LOG_ERR, "%s: unable to allocate or map "
8455 "rx buffer %d, error = %d\n",
8456 device_xname(sc->sc_dev), i, error);
8457 /*
8458 * XXX Should attempt to run with fewer receive
8459 * XXX buffers instead of just failing.
8460 */
8461 wm_rxdrain(rxq);
8462 return ENOMEM;
8463 }
8464 } else {
8465 /*
8466 * For 82575 and 82576, the RX descriptors must be
8467 * initialized after the setting of RCTL.EN in
8468 * wm_set_filter()
8469 */
8470 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
8471 wm_init_rxdesc(rxq, i);
8472 }
8473 }
8474 rxq->rxq_ptr = 0;
8475 rxq->rxq_discard = 0;
8476 WM_RXCHAIN_RESET(rxq);
8477
8478 return 0;
8479 }
8480
8481 static int
8482 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8483 struct wm_rxqueue *rxq)
8484 {
8485
8486 KASSERT(mutex_owned(rxq->rxq_lock));
8487
8488 /*
8489 * Set up some register offsets that are different between
8490 * the i82542 and the i82543 and later chips.
8491 */
8492 if (sc->sc_type < WM_T_82543)
8493 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
8494 else
8495 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
8496
8497 wm_init_rx_regs(sc, wmq, rxq);
8498 return wm_init_rx_buffer(sc, rxq);
8499 }
8500
8501 /*
 * wm_init_txrx_queues:
8503 * Initialize {tx,rx}descs and {tx,rx} buffers
8504 */
8505 static int
8506 wm_init_txrx_queues(struct wm_softc *sc)
8507 {
8508 int i, error = 0;
8509
8510 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8511 device_xname(sc->sc_dev), __func__));
8512
8513 for (i = 0; i < sc->sc_nqueues; i++) {
8514 struct wm_queue *wmq = &sc->sc_queue[i];
8515 struct wm_txqueue *txq = &wmq->wmq_txq;
8516 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8517
		/*
		 * TODO
		 * Currently, we use a constant value instead of AIM.
		 * Furthermore, the interrupt interval of a multiqueue
		 * configuration, which uses polling mode, is shorter than
		 * the default value. More tuning and AIM are required.
		 */
8525 if (wm_is_using_multiqueue(sc))
8526 wmq->wmq_itr = 50;
8527 else
8528 wmq->wmq_itr = sc->sc_itr_init;
8529 wmq->wmq_set_itr = true;
8530
8531 mutex_enter(txq->txq_lock);
8532 wm_init_tx_queue(sc, wmq, txq);
8533 mutex_exit(txq->txq_lock);
8534
8535 mutex_enter(rxq->rxq_lock);
8536 error = wm_init_rx_queue(sc, wmq, rxq);
8537 mutex_exit(rxq->rxq_lock);
8538 if (error)
8539 break;
8540 }
8541
8542 return error;
8543 }
8544
8545 /*
8546 * wm_tx_offload:
8547 *
8548 * Set up TCP/IP checksumming parameters for the
8549 * specified packet.
8550 */
8551 static void
8552 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8553 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
8554 {
8555 struct mbuf *m0 = txs->txs_mbuf;
8556 struct livengood_tcpip_ctxdesc *t;
8557 uint32_t ipcs, tucs, cmd, cmdlen, seg;
8558 uint32_t ipcse;
8559 struct ether_header *eh;
8560 int offset, iphl;
8561 uint8_t fields;
8562
8563 /*
8564 * XXX It would be nice if the mbuf pkthdr had offset
8565 * fields for the protocol headers.
8566 */
8567
8568 eh = mtod(m0, struct ether_header *);
8569 switch (htons(eh->ether_type)) {
8570 case ETHERTYPE_IP:
8571 case ETHERTYPE_IPV6:
8572 offset = ETHER_HDR_LEN;
8573 break;
8574
8575 case ETHERTYPE_VLAN:
8576 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8577 break;
8578
8579 default:
8580 /* Don't support this protocol or encapsulation. */
8581 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8582 txq->txq_last_hw_ipcs = 0;
8583 txq->txq_last_hw_tucs = 0;
8584 *fieldsp = 0;
8585 *cmdp = 0;
8586 return;
8587 }
8588
8589 if ((m0->m_pkthdr.csum_flags &
8590 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8591 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8592 } else
8593 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8594
8595 ipcse = offset + iphl - 1;
8596
8597 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
8598 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
8599 seg = 0;
8600 fields = 0;
8601
8602 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8603 int hlen = offset + iphl;
8604 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8605
8606 if (__predict_false(m0->m_len <
8607 (hlen + sizeof(struct tcphdr)))) {
8608 /*
8609 * TCP/IP headers are not in the first mbuf; we need
8610 * to do this the slow and painful way. Let's just
8611 * hope this doesn't happen very often.
8612 */
8613 struct tcphdr th;
8614
8615 WM_Q_EVCNT_INCR(txq, tsopain);
8616
8617 m_copydata(m0, hlen, sizeof(th), &th);
8618 if (v4) {
8619 struct ip ip;
8620
8621 m_copydata(m0, offset, sizeof(ip), &ip);
8622 ip.ip_len = 0;
8623 m_copyback(m0,
8624 offset + offsetof(struct ip, ip_len),
8625 sizeof(ip.ip_len), &ip.ip_len);
8626 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8627 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8628 } else {
8629 struct ip6_hdr ip6;
8630
8631 m_copydata(m0, offset, sizeof(ip6), &ip6);
8632 ip6.ip6_plen = 0;
8633 m_copyback(m0,
8634 offset + offsetof(struct ip6_hdr, ip6_plen),
8635 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8636 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8637 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8638 }
8639 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8640 sizeof(th.th_sum), &th.th_sum);
8641
8642 hlen += th.th_off << 2;
8643 } else {
8644 /*
8645 * TCP/IP headers are in the first mbuf; we can do
8646 * this the easy way.
8647 */
8648 struct tcphdr *th;
8649
8650 if (v4) {
8651 struct ip *ip =
8652 (void *)(mtod(m0, char *) + offset);
8653 th = (void *)(mtod(m0, char *) + hlen);
8654
8655 ip->ip_len = 0;
8656 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8657 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8658 } else {
8659 struct ip6_hdr *ip6 =
8660 (void *)(mtod(m0, char *) + offset);
8661 th = (void *)(mtod(m0, char *) + hlen);
8662
8663 ip6->ip6_plen = 0;
8664 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8665 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8666 }
8667 hlen += th->th_off << 2;
8668 }
8669
8670 if (v4) {
8671 WM_Q_EVCNT_INCR(txq, tso);
8672 cmdlen |= WTX_TCPIP_CMD_IP;
8673 } else {
8674 WM_Q_EVCNT_INCR(txq, tso6);
8675 ipcse = 0;
8676 }
8677 cmd |= WTX_TCPIP_CMD_TSE;
8678 cmdlen |= WTX_TCPIP_CMD_TSE |
8679 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
8680 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
8681 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
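		/*
		 * With TSE set, the hardware replicates the hlen bytes of
		 * headers in front of each MSS-sized chunk of payload it
		 * emits.
		 */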
8682 }
8683
8684 /*
8685 * NOTE: Even if we're not using the IP or TCP/UDP checksum
8686 * offload feature, if we load the context descriptor, we
8687 * MUST provide valid values for IPCSS and TUCSS fields.
8688 */
8689
8690 ipcs = WTX_TCPIP_IPCSS(offset) |
8691 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
8692 WTX_TCPIP_IPCSE(ipcse);
8693 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
8694 WM_Q_EVCNT_INCR(txq, ipsum);
8695 fields |= WTX_IXSM;
8696 }
8697
8698 offset += iphl;
8699
8700 if (m0->m_pkthdr.csum_flags &
8701 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
8702 WM_Q_EVCNT_INCR(txq, tusum);
8703 fields |= WTX_TXSM;
8704 tucs = WTX_TCPIP_TUCSS(offset) |
8705 WTX_TCPIP_TUCSO(offset +
8706 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
8707 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8708 } else if ((m0->m_pkthdr.csum_flags &
8709 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
8710 WM_Q_EVCNT_INCR(txq, tusum6);
8711 fields |= WTX_TXSM;
8712 tucs = WTX_TCPIP_TUCSS(offset) |
8713 WTX_TCPIP_TUCSO(offset +
8714 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
8715 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8716 } else {
8717 /* Just initialize it to a valid TCP context. */
8718 tucs = WTX_TCPIP_TUCSS(offset) |
8719 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
8720 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8721 }
8722
8723 *cmdp = cmd;
8724 *fieldsp = fields;
8725
	/*
	 * We don't have to write a context descriptor for every packet,
	 * except on the 82574. For the 82574, we must write a context
	 * descriptor for every packet when we use two descriptor queues.
	 *
	 * The 82574L can only remember the *last* context used,
	 * regardless of which queue it was used for. We cannot reuse
	 * contexts on this hardware platform and must generate a new
	 * context every time. 82574L hardware spec, section 7.2.6,
	 * second note.
	 */
8737 if (sc->sc_nqueues < 2) {
		/*
		 * Setting up a new checksum offload context for every
		 * frame takes a lot of processing time for the hardware.
		 * This also reduces performance a lot for small-sized
		 * frames, so avoid it if the driver can use a previously
		 * configured checksum offload context.
		 * For TSO, in theory we could reuse the same TSO context
		 * only if the frame is the same type (IP/TCP) and has the
		 * same MSS. However, checking whether a frame has the same
		 * IP/TCP structure is hard, so just ignore that and always
		 * establish a new TSO context.
		 */
8750 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
8751 == 0) {
8752 if (txq->txq_last_hw_cmd == cmd &&
8753 txq->txq_last_hw_fields == fields &&
8754 txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
8755 txq->txq_last_hw_tucs == (tucs & 0xffff)) {
8756 WM_Q_EVCNT_INCR(txq, skipcontext);
8757 return;
8758 }
8759 }
8760
8761 txq->txq_last_hw_cmd = cmd;
8762 txq->txq_last_hw_fields = fields;
8763 txq->txq_last_hw_ipcs = (ipcs & 0xffff);
8764 txq->txq_last_hw_tucs = (tucs & 0xffff);
8765 }
8766
8767 /* Fill in the context descriptor. */
8768 t = (struct livengood_tcpip_ctxdesc *)
8769 &txq->txq_descs[txq->txq_next];
8770 t->tcpip_ipcs = htole32(ipcs);
8771 t->tcpip_tucs = htole32(tucs);
8772 t->tcpip_cmdlen = htole32(cmdlen);
8773 t->tcpip_seg = htole32(seg);
8774 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8775
8776 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8777 txs->txs_ndesc++;
8778 }
8779
8780 static inline int
8781 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
8782 {
8783 struct wm_softc *sc = ifp->if_softc;
8784 u_int cpuid = cpu_index(curcpu());
8785
8786 	/*
8787 	 * Currently, a simple distribution strategy.
8788 	 * TODO:
8789 	 * Distribute by flow ID (i.e. the RSS hash value).
8790 	 */
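	/*
	 * Illustrative example (made-up numbers): with ncpu = 8,
	 * sc_nqueues = 4 and sc_affinity_offset = 2, a packet sent from
	 * CPU 5 maps to ((5 + 8 - 2) % 8) % 4 = 3, i.e. Tx queue 3.
	 */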
8791 return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
8792 }
8793
8794 static inline bool
8795 wm_linkdown_discard(struct wm_txqueue *txq)
8796 {
8797
8798 if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
8799 return true;
8800
8801 return false;
8802 }
8803
8804 /*
8805 * wm_start: [ifnet interface function]
8806 *
8807 * Start packet transmission on the interface.
8808 */
8809 static void
8810 wm_start(struct ifnet *ifp)
8811 {
8812 struct wm_softc *sc = ifp->if_softc;
8813 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8814
8815 KASSERT(if_is_mpsafe(ifp));
8816 /*
8817 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8818 */
8819
8820 mutex_enter(txq->txq_lock);
8821 if (!txq->txq_stopping)
8822 wm_start_locked(ifp);
8823 mutex_exit(txq->txq_lock);
8824 }
8825
8826 static void
8827 wm_start_locked(struct ifnet *ifp)
8828 {
8829 struct wm_softc *sc = ifp->if_softc;
8830 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8831
8832 wm_send_common_locked(ifp, txq, false);
8833 }
8834
8835 static int
8836 wm_transmit(struct ifnet *ifp, struct mbuf *m)
8837 {
8838 int qid;
8839 struct wm_softc *sc = ifp->if_softc;
8840 struct wm_txqueue *txq;
8841
8842 qid = wm_select_txqueue(ifp, m);
8843 txq = &sc->sc_queue[qid].wmq_txq;
8844
8845 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8846 m_freem(m);
8847 WM_Q_EVCNT_INCR(txq, pcqdrop);
8848 return ENOBUFS;
8849 }
8850
8851 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8852 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8853 if (m->m_flags & M_MCAST)
8854 if_statinc_ref(nsr, if_omcasts);
8855 IF_STAT_PUTREF(ifp);
8856
8857 if (mutex_tryenter(txq->txq_lock)) {
8858 if (!txq->txq_stopping)
8859 wm_transmit_locked(ifp, txq);
8860 mutex_exit(txq->txq_lock);
8861 }
8862
8863 return 0;
8864 }
8865
8866 static void
8867 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8868 {
8869
8870 wm_send_common_locked(ifp, txq, true);
8871 }
8872
8873 static void
8874 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8875 bool is_transmit)
8876 {
8877 struct wm_softc *sc = ifp->if_softc;
8878 struct mbuf *m0;
8879 struct wm_txsoft *txs;
8880 bus_dmamap_t dmamap;
8881 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
8882 bus_addr_t curaddr;
8883 bus_size_t seglen, curlen;
8884 uint32_t cksumcmd;
8885 uint8_t cksumfields;
8886 bool remap = true;
8887
8888 KASSERT(mutex_owned(txq->txq_lock));
8889 KASSERT(!txq->txq_stopping);
8890
8891 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8892 return;
8893
8894 if (__predict_false(wm_linkdown_discard(txq))) {
8895 do {
8896 if (is_transmit)
8897 m0 = pcq_get(txq->txq_interq);
8898 else
8899 IFQ_DEQUEUE(&ifp->if_snd, m0);
8900 			/*
8901 			 * Increment the successful-packet counter, as if the packet
8902 			 * had been sent and then discarded by the link-down PHY.
8903 			 */
8904 if (m0 != NULL) {
8905 if_statinc(ifp, if_opackets);
8906 m_freem(m0);
8907 }
8908 } while (m0 != NULL);
8909 return;
8910 }
8911
8912 /* Remember the previous number of free descriptors. */
8913 ofree = txq->txq_free;
8914
8915 /*
8916 * Loop through the send queue, setting up transmit descriptors
8917 * until we drain the queue, or use up all available transmit
8918 * descriptors.
8919 */
8920 for (;;) {
8921 m0 = NULL;
8922
8923 /* Get a work queue entry. */
8924 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8925 wm_txeof(txq, UINT_MAX);
8926 if (txq->txq_sfree == 0) {
8927 DPRINTF(sc, WM_DEBUG_TX,
8928 ("%s: TX: no free job descriptors\n",
8929 device_xname(sc->sc_dev)));
8930 WM_Q_EVCNT_INCR(txq, txsstall);
8931 break;
8932 }
8933 }
8934
8935 /* Grab a packet off the queue. */
8936 if (is_transmit)
8937 m0 = pcq_get(txq->txq_interq);
8938 else
8939 IFQ_DEQUEUE(&ifp->if_snd, m0);
8940 if (m0 == NULL)
8941 break;
8942
8943 DPRINTF(sc, WM_DEBUG_TX,
8944 ("%s: TX: have packet to transmit: %p\n",
8945 device_xname(sc->sc_dev), m0));
8946
8947 txs = &txq->txq_soft[txq->txq_snext];
8948 dmamap = txs->txs_dmamap;
8949
8950 use_tso = (m0->m_pkthdr.csum_flags &
8951 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
8952
8953 /*
8954 * So says the Linux driver:
8955 * The controller does a simple calculation to make sure
8956 * there is enough room in the FIFO before initiating the
8957 * DMA for each buffer. The calc is:
8958 * 4 = ceil(buffer len / MSS)
8959 * To make sure we don't overrun the FIFO, adjust the max
8960 * buffer len if the MSS drops.
8961 */
8962 dmamap->dm_maxsegsz =
8963 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
8964 ? m0->m_pkthdr.segsz << 2
8965 : WTX_MAX_LEN;
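		/*
		 * Illustrative example (made-up numbers): with an MSS of
		 * 1448, segsz << 2 = 5792, so each DMA segment is capped
		 * at 5792 bytes rather than WTX_MAX_LEN.
		 */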
8966
8967 /*
8968 * Load the DMA map. If this fails, the packet either
8969 * didn't fit in the allotted number of segments, or we
8970 * were short on resources. For the too-many-segments
8971 * case, we simply report an error and drop the packet,
8972 * since we can't sanely copy a jumbo packet to a single
8973 * buffer.
8974 */
8975 retry:
8976 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8977 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8978 if (__predict_false(error)) {
8979 if (error == EFBIG) {
8980 if (remap == true) {
8981 struct mbuf *m;
8982
8983 remap = false;
8984 m = m_defrag(m0, M_NOWAIT);
8985 if (m != NULL) {
8986 WM_Q_EVCNT_INCR(txq, defrag);
8987 m0 = m;
8988 goto retry;
8989 }
8990 }
8991 WM_Q_EVCNT_INCR(txq, toomanyseg);
8992 log(LOG_ERR, "%s: Tx packet consumes too many "
8993 "DMA segments, dropping...\n",
8994 device_xname(sc->sc_dev));
8995 wm_dump_mbuf_chain(sc, m0);
8996 m_freem(m0);
8997 continue;
8998 }
8999 /* Short on resources, just stop for now. */
9000 DPRINTF(sc, WM_DEBUG_TX,
9001 ("%s: TX: dmamap load failed: %d\n",
9002 device_xname(sc->sc_dev), error));
9003 break;
9004 }
9005
9006 segs_needed = dmamap->dm_nsegs;
9007 if (use_tso) {
9008 /* For sentinel descriptor; see below. */
9009 segs_needed++;
9010 }
9011
9012 /*
9013 * Ensure we have enough descriptors free to describe
9014 * the packet. Note, we always reserve one descriptor
9015 * at the end of the ring due to the semantics of the
9016 * TDT register, plus one more in the event we need
9017 * to load offload context.
9018 */
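		/*
		 * Illustrative example (made-up numbers): with txq_free =
		 * 10, a packet needing 9 descriptors is deferred, since
		 * 9 > 10 - 2 once the TDT slot and a possible context
		 * descriptor are reserved.
		 */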
9019 if (segs_needed > txq->txq_free - 2) {
9020 /*
9021 * Not enough free descriptors to transmit this
9022 * packet. We haven't committed anything yet,
9023 * so just unload the DMA map, put the packet
9024 			 * back on the queue, and punt. Notify the upper
9025 * layer that there are no more slots left.
9026 */
9027 DPRINTF(sc, WM_DEBUG_TX,
9028 ("%s: TX: need %d (%d) descriptors, have %d\n",
9029 device_xname(sc->sc_dev), dmamap->dm_nsegs,
9030 segs_needed, txq->txq_free - 1));
9031 txq->txq_flags |= WM_TXQ_NO_SPACE;
9032 bus_dmamap_unload(sc->sc_dmat, dmamap);
9033 WM_Q_EVCNT_INCR(txq, txdstall);
9034 break;
9035 }
9036
9037 /*
9038 * Check for 82547 Tx FIFO bug. We need to do this
9039 * once we know we can transmit the packet, since we
9040 * do some internal FIFO space accounting here.
9041 */
9042 if (sc->sc_type == WM_T_82547 &&
9043 wm_82547_txfifo_bugchk(sc, m0)) {
9044 DPRINTF(sc, WM_DEBUG_TX,
9045 ("%s: TX: 82547 Tx FIFO bug detected\n",
9046 device_xname(sc->sc_dev)));
9047 txq->txq_flags |= WM_TXQ_NO_SPACE;
9048 bus_dmamap_unload(sc->sc_dmat, dmamap);
9049 WM_Q_EVCNT_INCR(txq, fifo_stall);
9050 break;
9051 }
9052
9053 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
9054
9055 DPRINTF(sc, WM_DEBUG_TX,
9056 ("%s: TX: packet has %d (%d) DMA segments\n",
9057 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
9058
9059 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
9060
9061 /*
9062 * Store a pointer to the packet so that we can free it
9063 * later.
9064 *
9065 * Initially, we consider the number of descriptors the
9066 		 * packet uses to be the number of DMA segments. This may be
9067 * incremented by 1 if we do checksum offload (a descriptor
9068 * is used to set the checksum context).
9069 */
9070 txs->txs_mbuf = m0;
9071 txs->txs_firstdesc = txq->txq_next;
9072 txs->txs_ndesc = segs_needed;
9073
9074 /* Set up offload parameters for this packet. */
9075 if (m0->m_pkthdr.csum_flags &
9076 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
9077 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9078 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
9079 wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
9080 } else {
9081 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
9082 txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
9083 cksumcmd = 0;
9084 cksumfields = 0;
9085 }
9086
9087 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
9088
9089 /* Sync the DMA map. */
9090 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
9091 BUS_DMASYNC_PREWRITE);
9092
9093 /* Initialize the transmit descriptor. */
9094 for (nexttx = txq->txq_next, seg = 0;
9095 seg < dmamap->dm_nsegs; seg++) {
9096 for (seglen = dmamap->dm_segs[seg].ds_len,
9097 curaddr = dmamap->dm_segs[seg].ds_addr;
9098 seglen != 0;
9099 curaddr += curlen, seglen -= curlen,
9100 nexttx = WM_NEXTTX(txq, nexttx)) {
9101 curlen = seglen;
9102
9103 /*
9104 * So says the Linux driver:
9105 				 * Workaround for premature descriptor
9106 * write-backs in TSO mode. Append a
9107 * 4-byte sentinel descriptor.
9108 */
9109 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
9110 curlen > 8)
9111 curlen -= 4;
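				/*
				 * Illustrative example (made-up numbers):
				 * a final 1000-byte segment is described
				 * here as 996 bytes; the remaining 4 bytes
				 * become the sentinel descriptor on the
				 * next loop iteration.
				 */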
9112
9113 wm_set_dma_addr(
9114 &txq->txq_descs[nexttx].wtx_addr, curaddr);
9115 txq->txq_descs[nexttx].wtx_cmdlen
9116 = htole32(cksumcmd | curlen);
9117 txq->txq_descs[nexttx].wtx_fields.wtxu_status
9118 = 0;
9119 txq->txq_descs[nexttx].wtx_fields.wtxu_options
9120 = cksumfields;
9121 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
9122 lasttx = nexttx;
9123
9124 DPRINTF(sc, WM_DEBUG_TX,
9125 ("%s: TX: desc %d: low %#" PRIx64 ", "
9126 "len %#04zx\n",
9127 device_xname(sc->sc_dev), nexttx,
9128 (uint64_t)curaddr, curlen));
9129 }
9130 }
9131
9132 KASSERT(lasttx != -1);
9133
9134 /*
9135 * Set up the command byte on the last descriptor of
9136 * the packet. If we're in the interrupt delay window,
9137 * delay the interrupt.
9138 */
9139 txq->txq_descs[lasttx].wtx_cmdlen |=
9140 htole32(WTX_CMD_EOP | WTX_CMD_RS);
9141
9142 /*
9143 * If VLANs are enabled and the packet has a VLAN tag, set
9144 * up the descriptor to encapsulate the packet for us.
9145 *
9146 * This is only valid on the last descriptor of the packet.
9147 */
9148 if (vlan_has_tag(m0)) {
9149 txq->txq_descs[lasttx].wtx_cmdlen |=
9150 htole32(WTX_CMD_VLE);
9151 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
9152 = htole16(vlan_get_tag(m0));
9153 }
9154
9155 txs->txs_lastdesc = lasttx;
9156
9157 DPRINTF(sc, WM_DEBUG_TX,
9158 ("%s: TX: desc %d: cmdlen 0x%08x\n",
9159 device_xname(sc->sc_dev),
9160 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9161
9162 /* Sync the descriptors we're using. */
9163 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9164 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
9165
9166 /* Give the packet to the chip. */
9167 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9168
9169 DPRINTF(sc, WM_DEBUG_TX,
9170 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
9171
9172 DPRINTF(sc, WM_DEBUG_TX,
9173 ("%s: TX: finished transmitting packet, job %d\n",
9174 device_xname(sc->sc_dev), txq->txq_snext));
9175
9176 /* Advance the tx pointer. */
9177 txq->txq_free -= txs->txs_ndesc;
9178 txq->txq_next = nexttx;
9179
9180 txq->txq_sfree--;
9181 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9182
9183 /* Pass the packet to any BPF listeners. */
9184 bpf_mtap(ifp, m0, BPF_D_OUT);
9185 }
9186
9187 if (m0 != NULL) {
9188 txq->txq_flags |= WM_TXQ_NO_SPACE;
9189 WM_Q_EVCNT_INCR(txq, descdrop);
9190 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
9191 __func__));
9192 m_freem(m0);
9193 }
9194
9195 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9196 /* No more slots; notify upper layer. */
9197 txq->txq_flags |= WM_TXQ_NO_SPACE;
9198 }
9199
9200 if (txq->txq_free != ofree) {
9201 /* Set a watchdog timer in case the chip flakes out. */
9202 txq->txq_lastsent = time_uptime;
9203 txq->txq_sending = true;
9204 }
9205 }
9206
9207 /*
9208 * wm_nq_tx_offload:
9209 *
9210 * Set up TCP/IP checksumming parameters for the
9211 * specified packet, for NEWQUEUE devices
9212 */
9213 static void
9214 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
9215 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
9216 {
9217 struct mbuf *m0 = txs->txs_mbuf;
9218 uint32_t vl_len, mssidx, cmdc;
9219 struct ether_header *eh;
9220 int offset, iphl;
9221
9222 /*
9223 * XXX It would be nice if the mbuf pkthdr had offset
9224 * fields for the protocol headers.
9225 */
9226 *cmdlenp = 0;
9227 *fieldsp = 0;
9228
9229 eh = mtod(m0, struct ether_header *);
9230 switch (htons(eh->ether_type)) {
9231 case ETHERTYPE_IP:
9232 case ETHERTYPE_IPV6:
9233 offset = ETHER_HDR_LEN;
9234 break;
9235
9236 case ETHERTYPE_VLAN:
9237 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
9238 break;
9239
9240 default:
9241 /* Don't support this protocol or encapsulation. */
9242 *do_csum = false;
9243 return;
9244 }
9245 *do_csum = true;
9246 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
9247 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
9248
9249 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
9250 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
9251
9252 if ((m0->m_pkthdr.csum_flags &
9253 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
9254 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
9255 } else {
9256 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
9257 }
9258 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
9259 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
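	/*
	 * Illustrative example (made-up numbers): an untagged IPv4/TCP
	 * frame has offset = 14 (Ethernet header) and iphl = 20, so
	 * vl_len carries MACLEN = 14 and IPLEN = 20 in their fields.
	 */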
9260
9261 if (vlan_has_tag(m0)) {
9262 vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
9263 << NQTXC_VLLEN_VLAN_SHIFT);
9264 *cmdlenp |= NQTX_CMD_VLE;
9265 }
9266
9267 mssidx = 0;
9268
9269 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
9270 int hlen = offset + iphl;
9271 int tcp_hlen;
9272 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
9273
9274 if (__predict_false(m0->m_len <
9275 (hlen + sizeof(struct tcphdr)))) {
9276 /*
9277 * TCP/IP headers are not in the first mbuf; we need
9278 * to do this the slow and painful way. Let's just
9279 * hope this doesn't happen very often.
9280 */
9281 struct tcphdr th;
9282
9283 WM_Q_EVCNT_INCR(txq, tsopain);
9284
9285 m_copydata(m0, hlen, sizeof(th), &th);
9286 if (v4) {
9287 struct ip ip;
9288
9289 m_copydata(m0, offset, sizeof(ip), &ip);
9290 ip.ip_len = 0;
9291 m_copyback(m0,
9292 offset + offsetof(struct ip, ip_len),
9293 sizeof(ip.ip_len), &ip.ip_len);
9294 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
9295 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
9296 } else {
9297 struct ip6_hdr ip6;
9298
9299 m_copydata(m0, offset, sizeof(ip6), &ip6);
9300 ip6.ip6_plen = 0;
9301 m_copyback(m0,
9302 offset + offsetof(struct ip6_hdr, ip6_plen),
9303 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
9304 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
9305 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
9306 }
9307 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
9308 sizeof(th.th_sum), &th.th_sum);
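			/*
			 * Note: th_sum is seeded with the pseudo-header
			 * checksum computed over a zero length; the
			 * hardware adds the proper payload length to each
			 * segment during segmentation.
			 */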
9309
9310 tcp_hlen = th.th_off << 2;
9311 } else {
9312 /*
9313 * TCP/IP headers are in the first mbuf; we can do
9314 * this the easy way.
9315 */
9316 struct tcphdr *th;
9317
9318 if (v4) {
9319 struct ip *ip =
9320 (void *)(mtod(m0, char *) + offset);
9321 th = (void *)(mtod(m0, char *) + hlen);
9322
9323 ip->ip_len = 0;
9324 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
9325 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
9326 } else {
9327 struct ip6_hdr *ip6 =
9328 (void *)(mtod(m0, char *) + offset);
9329 th = (void *)(mtod(m0, char *) + hlen);
9330
9331 ip6->ip6_plen = 0;
9332 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
9333 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
9334 }
9335 tcp_hlen = th->th_off << 2;
9336 }
9337 hlen += tcp_hlen;
9338 *cmdlenp |= NQTX_CMD_TSE;
9339
9340 if (v4) {
9341 WM_Q_EVCNT_INCR(txq, tso);
9342 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
9343 } else {
9344 WM_Q_EVCNT_INCR(txq, tso6);
9345 *fieldsp |= NQTXD_FIELDS_TUXSM;
9346 }
9347 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
9348 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
9349 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
9350 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
9351 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
9352 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
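		/*
		 * Illustrative example (made-up numbers): MSS = 1448 and a
		 * 20-byte TCP header yield
		 * mssidx = (1448 << NQTXC_MSSIDX_MSS_SHIFT) |
		 *     (20 << NQTXC_MSSIDX_L4LEN_SHIFT).
		 */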
9353 } else {
9354 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
9355 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
9356 }
9357
9358 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
9359 *fieldsp |= NQTXD_FIELDS_IXSM;
9360 cmdc |= NQTXC_CMD_IP4;
9361 }
9362
9363 if (m0->m_pkthdr.csum_flags &
9364 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
9365 WM_Q_EVCNT_INCR(txq, tusum);
9366 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
9367 cmdc |= NQTXC_CMD_TCP;
9368 else
9369 cmdc |= NQTXC_CMD_UDP;
9370
9371 cmdc |= NQTXC_CMD_IP4;
9372 *fieldsp |= NQTXD_FIELDS_TUXSM;
9373 }
9374 if (m0->m_pkthdr.csum_flags &
9375 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
9376 WM_Q_EVCNT_INCR(txq, tusum6);
9377 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
9378 cmdc |= NQTXC_CMD_TCP;
9379 else
9380 cmdc |= NQTXC_CMD_UDP;
9381
9382 cmdc |= NQTXC_CMD_IP6;
9383 *fieldsp |= NQTXD_FIELDS_TUXSM;
9384 }
9385
9386 	/*
9387 	 * We don't have to write a context descriptor for every packet on
9388 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
9389 	 * I354, I210 and I211. Writing one context descriptor per Tx queue
9390 	 * is enough for these controllers.
9391 	 * Writing a context descriptor for every packet adds overhead,
9392 	 * but it does not cause problems.
9393 	 */
9394 /* Fill in the context descriptor. */
9395 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
9396 htole32(vl_len);
9397 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
9398 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
9399 htole32(cmdc);
9400 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
9401 htole32(mssidx);
9402 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
9403 DPRINTF(sc, WM_DEBUG_TX,
9404 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
9405 txq->txq_next, 0, vl_len));
9406 DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
9407 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
9408 txs->txs_ndesc++;
9409 }
9410
9411 /*
9412 * wm_nq_start: [ifnet interface function]
9413 *
9414 * Start packet transmission on the interface for NEWQUEUE devices
9415 */
9416 static void
9417 wm_nq_start(struct ifnet *ifp)
9418 {
9419 struct wm_softc *sc = ifp->if_softc;
9420 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
9421
9422 KASSERT(if_is_mpsafe(ifp));
9423 /*
9424 * if_obytes and if_omcasts are added in if_transmit()@if.c.
9425 */
9426
9427 mutex_enter(txq->txq_lock);
9428 if (!txq->txq_stopping)
9429 wm_nq_start_locked(ifp);
9430 mutex_exit(txq->txq_lock);
9431 }
9432
9433 static void
9434 wm_nq_start_locked(struct ifnet *ifp)
9435 {
9436 struct wm_softc *sc = ifp->if_softc;
9437 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
9438
9439 wm_nq_send_common_locked(ifp, txq, false);
9440 }
9441
9442 static int
9443 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
9444 {
9445 int qid;
9446 struct wm_softc *sc = ifp->if_softc;
9447 struct wm_txqueue *txq;
9448
9449 qid = wm_select_txqueue(ifp, m);
9450 txq = &sc->sc_queue[qid].wmq_txq;
9451
9452 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
9453 m_freem(m);
9454 WM_Q_EVCNT_INCR(txq, pcqdrop);
9455 return ENOBUFS;
9456 }
9457
9458 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
9459 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
9460 if (m->m_flags & M_MCAST)
9461 if_statinc_ref(nsr, if_omcasts);
9462 IF_STAT_PUTREF(ifp);
9463
9464 	/*
9465 	 * There are two situations in which this mutex_tryenter() can
9466 	 * fail at run time:
9467 	 * (1) contention with the interrupt handler (wm_txrxintr_msix())
9468 	 * (2) contention with the deferred if_start softint (wm_handle_queue())
9469 	 * In case (1), the last packet enqueued to txq->txq_interq is
9470 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
9471 	 * In case (2), the last packet enqueued to txq->txq_interq is also
9472 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck,
9473 	 * either.
9474 	 */
9475 if (mutex_tryenter(txq->txq_lock)) {
9476 if (!txq->txq_stopping)
9477 wm_nq_transmit_locked(ifp, txq);
9478 mutex_exit(txq->txq_lock);
9479 }
9480
9481 return 0;
9482 }
9483
9484 static void
9485 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
9486 {
9487
9488 wm_nq_send_common_locked(ifp, txq, true);
9489 }
9490
9491 static void
9492 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
9493 bool is_transmit)
9494 {
9495 struct wm_softc *sc = ifp->if_softc;
9496 struct mbuf *m0;
9497 struct wm_txsoft *txs;
9498 bus_dmamap_t dmamap;
9499 int error, nexttx, lasttx = -1, seg, segs_needed;
9500 bool do_csum, sent;
9501 bool remap = true;
9502
9503 KASSERT(mutex_owned(txq->txq_lock));
9504 KASSERT(!txq->txq_stopping);
9505
9506 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
9507 return;
9508
9509 if (__predict_false(wm_linkdown_discard(txq))) {
9510 do {
9511 if (is_transmit)
9512 m0 = pcq_get(txq->txq_interq);
9513 else
9514 IFQ_DEQUEUE(&ifp->if_snd, m0);
9515 			/*
9516 			 * Increment the successful-packet counter, as if the packet
9517 			 * had been sent and then discarded by the link-down PHY.
9518 			 */
9519 if (m0 != NULL) {
9520 if_statinc(ifp, if_opackets);
9521 m_freem(m0);
9522 }
9523 } while (m0 != NULL);
9524 return;
9525 }
9526
9527 sent = false;
9528
9529 /*
9530 * Loop through the send queue, setting up transmit descriptors
9531 * until we drain the queue, or use up all available transmit
9532 * descriptors.
9533 */
9534 for (;;) {
9535 m0 = NULL;
9536
9537 /* Get a work queue entry. */
9538 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
9539 wm_txeof(txq, UINT_MAX);
9540 if (txq->txq_sfree == 0) {
9541 DPRINTF(sc, WM_DEBUG_TX,
9542 ("%s: TX: no free job descriptors\n",
9543 device_xname(sc->sc_dev)));
9544 WM_Q_EVCNT_INCR(txq, txsstall);
9545 break;
9546 }
9547 }
9548
9549 /* Grab a packet off the queue. */
9550 if (is_transmit)
9551 m0 = pcq_get(txq->txq_interq);
9552 else
9553 IFQ_DEQUEUE(&ifp->if_snd, m0);
9554 if (m0 == NULL)
9555 break;
9556
9557 DPRINTF(sc, WM_DEBUG_TX,
9558 ("%s: TX: have packet to transmit: %p\n",
9559 device_xname(sc->sc_dev), m0));
9560
9561 txs = &txq->txq_soft[txq->txq_snext];
9562 dmamap = txs->txs_dmamap;
9563
9564 /*
9565 * Load the DMA map. If this fails, the packet either
9566 * didn't fit in the allotted number of segments, or we
9567 * were short on resources. For the too-many-segments
9568 * case, we simply report an error and drop the packet,
9569 * since we can't sanely copy a jumbo packet to a single
9570 * buffer.
9571 */
9572 retry:
9573 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
9574 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
9575 if (__predict_false(error)) {
9576 if (error == EFBIG) {
9577 if (remap == true) {
9578 struct mbuf *m;
9579
9580 remap = false;
9581 m = m_defrag(m0, M_NOWAIT);
9582 if (m != NULL) {
9583 WM_Q_EVCNT_INCR(txq, defrag);
9584 m0 = m;
9585 goto retry;
9586 }
9587 }
9588 WM_Q_EVCNT_INCR(txq, toomanyseg);
9589 log(LOG_ERR, "%s: Tx packet consumes too many "
9590 "DMA segments, dropping...\n",
9591 device_xname(sc->sc_dev));
9592 wm_dump_mbuf_chain(sc, m0);
9593 m_freem(m0);
9594 continue;
9595 }
9596 /* Short on resources, just stop for now. */
9597 DPRINTF(sc, WM_DEBUG_TX,
9598 ("%s: TX: dmamap load failed: %d\n",
9599 device_xname(sc->sc_dev), error));
9600 break;
9601 }
9602
9603 segs_needed = dmamap->dm_nsegs;
9604
9605 /*
9606 * Ensure we have enough descriptors free to describe
9607 * the packet. Note, we always reserve one descriptor
9608 * at the end of the ring due to the semantics of the
9609 * TDT register, plus one more in the event we need
9610 * to load offload context.
9611 */
9612 if (segs_needed > txq->txq_free - 2) {
9613 /*
9614 * Not enough free descriptors to transmit this
9615 * packet. We haven't committed anything yet,
9616 * so just unload the DMA map, put the packet
9617 			 * back on the queue, and punt. Notify the upper
9618 * layer that there are no more slots left.
9619 */
9620 DPRINTF(sc, WM_DEBUG_TX,
9621 ("%s: TX: need %d (%d) descriptors, have %d\n",
9622 device_xname(sc->sc_dev), dmamap->dm_nsegs,
9623 segs_needed, txq->txq_free - 1));
9624 txq->txq_flags |= WM_TXQ_NO_SPACE;
9625 bus_dmamap_unload(sc->sc_dmat, dmamap);
9626 WM_Q_EVCNT_INCR(txq, txdstall);
9627 break;
9628 }
9629
9630 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
9631
9632 DPRINTF(sc, WM_DEBUG_TX,
9633 ("%s: TX: packet has %d (%d) DMA segments\n",
9634 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
9635
9636 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
9637
9638 /*
9639 * Store a pointer to the packet so that we can free it
9640 * later.
9641 *
9642 * Initially, we consider the number of descriptors the
9643 		 * packet uses to be the number of DMA segments. This may be
9644 * incremented by 1 if we do checksum offload (a descriptor
9645 * is used to set the checksum context).
9646 */
9647 txs->txs_mbuf = m0;
9648 txs->txs_firstdesc = txq->txq_next;
9649 txs->txs_ndesc = segs_needed;
9650
9651 /* Set up offload parameters for this packet. */
9652 uint32_t cmdlen, fields, dcmdlen;
9653 if (m0->m_pkthdr.csum_flags &
9654 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
9655 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9656 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
9657 wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
9658 &do_csum);
9659 } else {
9660 do_csum = false;
9661 cmdlen = 0;
9662 fields = 0;
9663 }
9664
9665 /* Sync the DMA map. */
9666 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
9667 BUS_DMASYNC_PREWRITE);
9668
9669 /* Initialize the first transmit descriptor. */
9670 nexttx = txq->txq_next;
9671 if (!do_csum) {
9672 /* Set up a legacy descriptor */
9673 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
9674 dmamap->dm_segs[0].ds_addr);
9675 txq->txq_descs[nexttx].wtx_cmdlen =
9676 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
9677 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
9678 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
9679 if (vlan_has_tag(m0)) {
9680 txq->txq_descs[nexttx].wtx_cmdlen |=
9681 htole32(WTX_CMD_VLE);
9682 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
9683 htole16(vlan_get_tag(m0));
9684 } else
9685 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
9686
9687 dcmdlen = 0;
9688 } else {
9689 /* Set up an advanced data descriptor */
9690 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9691 htole64(dmamap->dm_segs[0].ds_addr);
9692 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
9693 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9694 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
9695 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
9696 htole32(fields);
9697 DPRINTF(sc, WM_DEBUG_TX,
9698 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
9699 device_xname(sc->sc_dev), nexttx,
9700 (uint64_t)dmamap->dm_segs[0].ds_addr));
9701 DPRINTF(sc, WM_DEBUG_TX,
9702 ("\t 0x%08x%08x\n", fields,
9703 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
9704 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
9705 }
9706
9707 lasttx = nexttx;
9708 nexttx = WM_NEXTTX(txq, nexttx);
9709 /*
9710 		 * Fill in the remaining descriptors. The legacy and advanced
9711 		 * formats are identical from here on.
9712 */
9713 for (seg = 1; seg < dmamap->dm_nsegs;
9714 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
9715 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9716 htole64(dmamap->dm_segs[seg].ds_addr);
9717 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9718 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
9719 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
9720 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
9721 lasttx = nexttx;
9722
9723 DPRINTF(sc, WM_DEBUG_TX,
9724 ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
9725 device_xname(sc->sc_dev), nexttx,
9726 (uint64_t)dmamap->dm_segs[seg].ds_addr,
9727 dmamap->dm_segs[seg].ds_len));
9728 }
9729
9730 KASSERT(lasttx != -1);
9731
9732 /*
9733 * Set up the command byte on the last descriptor of
9734 * the packet. If we're in the interrupt delay window,
9735 * delay the interrupt.
9736 */
9737 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
9738 (NQTX_CMD_EOP | NQTX_CMD_RS));
9739 txq->txq_descs[lasttx].wtx_cmdlen |=
9740 htole32(WTX_CMD_EOP | WTX_CMD_RS);
9741
9742 txs->txs_lastdesc = lasttx;
9743
9744 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
9745 device_xname(sc->sc_dev),
9746 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9747
9748 /* Sync the descriptors we're using. */
9749 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9750 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
9751
9752 /* Give the packet to the chip. */
9753 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9754 sent = true;
9755
9756 DPRINTF(sc, WM_DEBUG_TX,
9757 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
9758
9759 DPRINTF(sc, WM_DEBUG_TX,
9760 ("%s: TX: finished transmitting packet, job %d\n",
9761 device_xname(sc->sc_dev), txq->txq_snext));
9762
9763 /* Advance the tx pointer. */
9764 txq->txq_free -= txs->txs_ndesc;
9765 txq->txq_next = nexttx;
9766
9767 txq->txq_sfree--;
9768 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9769
9770 /* Pass the packet to any BPF listeners. */
9771 bpf_mtap(ifp, m0, BPF_D_OUT);
9772 }
9773
9774 if (m0 != NULL) {
9775 txq->txq_flags |= WM_TXQ_NO_SPACE;
9776 WM_Q_EVCNT_INCR(txq, descdrop);
9777 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
9778 __func__));
9779 m_freem(m0);
9780 }
9781
9782 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9783 /* No more slots; notify upper layer. */
9784 txq->txq_flags |= WM_TXQ_NO_SPACE;
9785 }
9786
9787 if (sent) {
9788 /* Set a watchdog timer in case the chip flakes out. */
9789 txq->txq_lastsent = time_uptime;
9790 txq->txq_sending = true;
9791 }
9792 }
9793
9794 static void
9795 wm_deferred_start_locked(struct wm_txqueue *txq)
9796 {
9797 struct wm_softc *sc = txq->txq_sc;
9798 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9799 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
9800 int qid = wmq->wmq_id;
9801
9802 KASSERT(mutex_owned(txq->txq_lock));
9803 KASSERT(!txq->txq_stopping);
9804
9805 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
9806 		/* XXX needed for ALTQ or single-CPU systems */
9807 if (qid == 0)
9808 wm_nq_start_locked(ifp);
9809 wm_nq_transmit_locked(ifp, txq);
9810 } else {
9811 		/* XXX needed for ALTQ or single-CPU systems */
9812 if (qid == 0)
9813 wm_start_locked(ifp);
9814 wm_transmit_locked(ifp, txq);
9815 }
9816 }
9817
9818 /* Interrupt */
9819
9820 /*
9821 * wm_txeof:
9822 *
9823 * Helper; handle transmit interrupts.
9824 */
9825 static bool
9826 wm_txeof(struct wm_txqueue *txq, u_int limit)
9827 {
9828 struct wm_softc *sc = txq->txq_sc;
9829 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9830 struct wm_txsoft *txs;
9831 int count = 0;
9832 int i;
9833 uint8_t status;
9834 bool more = false;
9835
9836 KASSERT(mutex_owned(txq->txq_lock));
9837
9838 if (txq->txq_stopping)
9839 return false;
9840
9841 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
9842
9843 /*
9844 * Go through the Tx list and free mbufs for those
9845 * frames which have been transmitted.
9846 */
9847 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
9848 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
9849 txs = &txq->txq_soft[i];
9850
9851 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
9852 device_xname(sc->sc_dev), i));
9853
9854 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
9855 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9856
9857 status =
9858 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
9859 if ((status & WTX_ST_DD) == 0) {
9860 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
9861 BUS_DMASYNC_PREREAD);
9862 break;
9863 }
9864
9865 if (limit-- == 0) {
9866 more = true;
9867 DPRINTF(sc, WM_DEBUG_TX,
9868 ("%s: TX: loop limited, job %d is not processed\n",
9869 device_xname(sc->sc_dev), i));
9870 break;
9871 }
9872
9873 count++;
9874 DPRINTF(sc, WM_DEBUG_TX,
9875 ("%s: TX: job %d done: descs %d..%d\n",
9876 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
9877 txs->txs_lastdesc));
9878
9879 #ifdef WM_EVENT_COUNTERS
9880 if ((status & WTX_ST_TU) && (sc->sc_type <= WM_T_82544))
9881 WM_Q_EVCNT_INCR(txq, underrun);
9882 #endif /* WM_EVENT_COUNTERS */
9883
9884 		/*
9885 		 * The 82574 and newer documents say the status field has neither
9886 		 * the EC (Excessive Collision) nor the LC (Late Collision) bit
9887 		 * (both reserved). Refer to the "PCIe GbE Controller Open Source
9888 		 * Software Developer's Manual", the 82574 datasheet, and newer.
9889 		 *
9890 		 * XXX I saw the LC bit set on an I218 even though the media was
9891 		 * full duplex, so the bit might have some other meaning
9892 		 * ... (I have no documentation for it).
9893 		 */
9894
9895 if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
9896 && ((sc->sc_type < WM_T_82574)
9897 || (sc->sc_type == WM_T_80003))) {
9898 if_statinc(ifp, if_oerrors);
9899 if (status & WTX_ST_LC)
9900 log(LOG_WARNING, "%s: late collision\n",
9901 device_xname(sc->sc_dev));
9902 else if (status & WTX_ST_EC) {
9903 if_statadd(ifp, if_collisions,
9904 TX_COLLISION_THRESHOLD + 1);
9905 log(LOG_WARNING, "%s: excessive collisions\n",
9906 device_xname(sc->sc_dev));
9907 }
9908 } else
9909 if_statinc(ifp, if_opackets);
9910
9911 txq->txq_packets++;
9912 txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
9913
9914 txq->txq_free += txs->txs_ndesc;
9915 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
9916 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
9917 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
9918 m_freem(txs->txs_mbuf);
9919 txs->txs_mbuf = NULL;
9920 }
9921
9922 /* Update the dirty transmit buffer pointer. */
9923 txq->txq_sdirty = i;
9924 DPRINTF(sc, WM_DEBUG_TX,
9925 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
9926
9927 if (count != 0)
9928 rnd_add_uint32(&sc->rnd_source, count);
9929
9930 /*
9931 * If there are no more pending transmissions, cancel the watchdog
9932 * timer.
9933 */
9934 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
9935 txq->txq_sending = false;
9936
9937 return more;
9938 }
9939
9940 static inline uint32_t
9941 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
9942 {
9943 struct wm_softc *sc = rxq->rxq_sc;
9944
9945 if (sc->sc_type == WM_T_82574)
9946 return EXTRXC_STATUS(
9947 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9948 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9949 return NQRXC_STATUS(
9950 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9951 else
9952 return rxq->rxq_descs[idx].wrx_status;
9953 }
9954
9955 static inline uint32_t
9956 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
9957 {
9958 struct wm_softc *sc = rxq->rxq_sc;
9959
9960 if (sc->sc_type == WM_T_82574)
9961 return EXTRXC_ERROR(
9962 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9963 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9964 return NQRXC_ERROR(
9965 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9966 else
9967 return rxq->rxq_descs[idx].wrx_errors;
9968 }
9969
9970 static inline uint16_t
9971 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
9972 {
9973 struct wm_softc *sc = rxq->rxq_sc;
9974
9975 if (sc->sc_type == WM_T_82574)
9976 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
9977 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9978 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
9979 else
9980 return rxq->rxq_descs[idx].wrx_special;
9981 }
9982
9983 static inline int
9984 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
9985 {
9986 struct wm_softc *sc = rxq->rxq_sc;
9987
9988 if (sc->sc_type == WM_T_82574)
9989 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
9990 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9991 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
9992 else
9993 return rxq->rxq_descs[idx].wrx_len;
9994 }
9995
9996 #ifdef WM_DEBUG
9997 static inline uint32_t
9998 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
9999 {
10000 struct wm_softc *sc = rxq->rxq_sc;
10001
10002 if (sc->sc_type == WM_T_82574)
10003 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
10004 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10005 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
10006 else
10007 return 0;
10008 }
10009
10010 static inline uint8_t
10011 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
10012 {
10013 struct wm_softc *sc = rxq->rxq_sc;
10014
10015 if (sc->sc_type == WM_T_82574)
10016 return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
10017 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10018 return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
10019 else
10020 return 0;
10021 }
10022 #endif /* WM_DEBUG */
10023
10024 static inline bool
10025 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
10026 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
10027 {
10028
10029 if (sc->sc_type == WM_T_82574)
10030 return (status & ext_bit) != 0;
10031 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10032 return (status & nq_bit) != 0;
10033 else
10034 return (status & legacy_bit) != 0;
10035 }
10036
10037 static inline bool
10038 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
10039 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
10040 {
10041
10042 if (sc->sc_type == WM_T_82574)
10043 return (error & ext_bit) != 0;
10044 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10045 return (error & nq_bit) != 0;
10046 else
10047 return (error & legacy_bit) != 0;
10048 }
10049
10050 static inline bool
10051 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
10052 {
10053
10054 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
10055 WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
10056 return true;
10057 else
10058 return false;
10059 }
10060
10061 static inline bool
10062 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
10063 {
10064 struct wm_softc *sc = rxq->rxq_sc;
10065
10066 /* XXX missing error bit for newqueue? */
10067 if (wm_rxdesc_is_set_error(sc, errors,
10068 WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
10069 EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
10070 | EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
10071 NQRXC_ERROR_RXE)) {
10072 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
10073 EXTRXC_ERROR_SE, 0))
10074 log(LOG_WARNING, "%s: symbol error\n",
10075 device_xname(sc->sc_dev));
10076 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
10077 EXTRXC_ERROR_SEQ, 0))
10078 log(LOG_WARNING, "%s: receive sequence error\n",
10079 device_xname(sc->sc_dev));
10080 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
10081 EXTRXC_ERROR_CE, 0))
10082 log(LOG_WARNING, "%s: CRC error\n",
10083 device_xname(sc->sc_dev));
10084 return true;
10085 }
10086
10087 return false;
10088 }
10089
10090 static inline bool
10091 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
10092 {
10093 struct wm_softc *sc = rxq->rxq_sc;
10094
10095 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
10096 NQRXC_STATUS_DD)) {
10097 /* We have processed all of the receive descriptors. */
10098 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
10099 return false;
10100 }
10101
10102 return true;
10103 }
10104
10105 static inline bool
10106 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
10107 uint16_t vlantag, struct mbuf *m)
10108 {
10109
10110 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
10111 WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
10112 vlan_set_tag(m, le16toh(vlantag));
10113 }
10114
10115 return true;
10116 }
10117
10118 static inline void
10119 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
10120 uint32_t errors, struct mbuf *m)
10121 {
10122 struct wm_softc *sc = rxq->rxq_sc;
10123
10124 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
10125 if (wm_rxdesc_is_set_status(sc, status,
10126 WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
10127 WM_Q_EVCNT_INCR(rxq, ipsum);
10128 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
10129 if (wm_rxdesc_is_set_error(sc, errors,
10130 WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
10131 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
10132 }
10133 if (wm_rxdesc_is_set_status(sc, status,
10134 WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
10135 /*
10136 * Note: we don't know if this was TCP or UDP,
10137 * so we just set both bits, and expect the
10138 * upper layers to deal.
10139 */
10140 WM_Q_EVCNT_INCR(rxq, tusum);
10141 m->m_pkthdr.csum_flags |=
10142 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
10143 M_CSUM_TCPv6 | M_CSUM_UDPv6;
10144 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
10145 EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
10146 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
10147 }
10148 }
10149 }
10150
10151 /*
10152 * wm_rxeof:
10153 *
10154 * Helper; handle receive interrupts.
10155 */
10156 static bool
10157 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
10158 {
10159 struct wm_softc *sc = rxq->rxq_sc;
10160 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10161 struct wm_rxsoft *rxs;
10162 struct mbuf *m;
10163 int i, len;
10164 int count = 0;
10165 uint32_t status, errors;
10166 uint16_t vlantag;
10167 bool more = false;
10168
10169 KASSERT(mutex_owned(rxq->rxq_lock));
10170
10171 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
10172 rxs = &rxq->rxq_soft[i];
10173
10174 DPRINTF(sc, WM_DEBUG_RX,
10175 ("%s: RX: checking descriptor %d\n",
10176 device_xname(sc->sc_dev), i));
10177 wm_cdrxsync(rxq, i,
10178 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
10179
10180 status = wm_rxdesc_get_status(rxq, i);
10181 errors = wm_rxdesc_get_errors(rxq, i);
10182 len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
10183 vlantag = wm_rxdesc_get_vlantag(rxq, i);
10184 #ifdef WM_DEBUG
10185 uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
10186 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
10187 #endif
10188
10189 if (!wm_rxdesc_dd(rxq, i, status))
10190 break;
10191
10192 if (limit-- == 0) {
10193 more = true;
10194 DPRINTF(sc, WM_DEBUG_RX,
10195 ("%s: RX: loop limited, descriptor %d is not processed\n",
10196 device_xname(sc->sc_dev), i));
10197 break;
10198 }
10199
10200 count++;
10201 if (__predict_false(rxq->rxq_discard)) {
10202 DPRINTF(sc, WM_DEBUG_RX,
10203 ("%s: RX: discarding contents of descriptor %d\n",
10204 device_xname(sc->sc_dev), i));
10205 wm_init_rxdesc(rxq, i);
10206 if (wm_rxdesc_is_eop(rxq, status)) {
10207 /* Reset our state. */
10208 DPRINTF(sc, WM_DEBUG_RX,
10209 ("%s: RX: resetting rxdiscard -> 0\n",
10210 device_xname(sc->sc_dev)));
10211 rxq->rxq_discard = 0;
10212 }
10213 continue;
10214 }
10215
10216 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
10217 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
10218
10219 m = rxs->rxs_mbuf;
10220
10221 /*
10222 * Add a new receive buffer to the ring, unless of
10223 * course the length is zero. Treat the latter as a
10224 * failed mapping.
10225 */
10226 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
10227 /*
10228 * Failed, throw away what we've done so
10229 * far, and discard the rest of the packet.
10230 */
10231 if_statinc(ifp, if_ierrors);
10232 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
10233 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
10234 wm_init_rxdesc(rxq, i);
10235 if (!wm_rxdesc_is_eop(rxq, status))
10236 rxq->rxq_discard = 1;
10237 if (rxq->rxq_head != NULL)
10238 m_freem(rxq->rxq_head);
10239 WM_RXCHAIN_RESET(rxq);
10240 DPRINTF(sc, WM_DEBUG_RX,
10241 ("%s: RX: Rx buffer allocation failed, "
10242 "dropping packet%s\n", device_xname(sc->sc_dev),
10243 rxq->rxq_discard ? " (discard)" : ""));
10244 continue;
10245 }
10246
10247 m->m_len = len;
10248 rxq->rxq_len += len;
10249 DPRINTF(sc, WM_DEBUG_RX,
10250 ("%s: RX: buffer at %p len %d\n",
10251 device_xname(sc->sc_dev), m->m_data, len));
10252
10253 /* If this is not the end of the packet, keep looking. */
10254 if (!wm_rxdesc_is_eop(rxq, status)) {
10255 WM_RXCHAIN_LINK(rxq, m);
10256 DPRINTF(sc, WM_DEBUG_RX,
10257 ("%s: RX: not yet EOP, rxlen -> %d\n",
10258 device_xname(sc->sc_dev), rxq->rxq_len));
10259 continue;
10260 }
10261
10262 		/*
10263 		 * Okay, we have the entire packet now. The chip is configured
10264 		 * to include the FCS (not all chips can be configured to strip
10265 		 * it), so we need to trim it, except on the I35[04] and
10266 		 * I21[01]: those chips have an erratum where the RCTL_SECRC
10267 		 * bit in the RCTL register is always set, so the FCS is
10268 		 * already stripped and we don't trim it. PCH2 and newer chips
10269 		 * also exclude the FCS when jumbo frames are used, to work
10270 		 * around an erratum. We may need to adjust the length of the
10271 		 * previous mbuf in the chain if the current mbuf is too short.
10272 		 */
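		/*
		 * Illustrative example: if the final mbuf holds only 2
		 * bytes, m->m_len is set to 0 and the remaining 2 FCS
		 * bytes are trimmed from the previous mbuf in the chain.
		 */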
10273 if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
10274 if (m->m_len < ETHER_CRC_LEN) {
10275 rxq->rxq_tail->m_len
10276 -= (ETHER_CRC_LEN - m->m_len);
10277 m->m_len = 0;
10278 } else
10279 m->m_len -= ETHER_CRC_LEN;
10280 len = rxq->rxq_len - ETHER_CRC_LEN;
10281 } else
10282 len = rxq->rxq_len;
10283
10284 WM_RXCHAIN_LINK(rxq, m);
10285
10286 *rxq->rxq_tailp = NULL;
10287 m = rxq->rxq_head;
10288
10289 WM_RXCHAIN_RESET(rxq);
10290
10291 DPRINTF(sc, WM_DEBUG_RX,
10292 ("%s: RX: have entire packet, len -> %d\n",
10293 device_xname(sc->sc_dev), len));
10294
10295 /* If an error occurred, update stats and drop the packet. */
10296 if (wm_rxdesc_has_errors(rxq, errors)) {
10297 m_freem(m);
10298 continue;
10299 }
10300
10301 /* No errors. Receive the packet. */
10302 m_set_rcvif(m, ifp);
10303 m->m_pkthdr.len = len;
10304 		/*
10305 		 * TODO
10306 		 * We should save the RSS hash and RSS type in this mbuf.
10307 		 */
10308 DPRINTF(sc, WM_DEBUG_RX,
10309 ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
10310 device_xname(sc->sc_dev), rsstype, rsshash));
10311
10312 /*
10313 * If VLANs are enabled, VLAN packets have been unwrapped
10314 * for us. Associate the tag with the packet.
10315 */
10316 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
10317 continue;
10318
10319 /* Set up checksum info for this packet. */
10320 wm_rxdesc_ensure_checksum(rxq, status, errors, m);
10321
10322 rxq->rxq_packets++;
10323 rxq->rxq_bytes += len;
10324 /* Pass it on. */
10325 if_percpuq_enqueue(sc->sc_ipq, m);
10326
10327 if (rxq->rxq_stopping)
10328 break;
10329 }
10330 rxq->rxq_ptr = i;
10331
10332 if (count != 0)
10333 rnd_add_uint32(&sc->rnd_source, count);
10334
10335 DPRINTF(sc, WM_DEBUG_RX,
10336 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
10337
10338 return more;
10339 }
10340
10341 /*
10342 * wm_linkintr_gmii:
10343 *
10344 * Helper; handle link interrupts for GMII.
10345 */
10346 static void
10347 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
10348 {
10349 device_t dev = sc->sc_dev;
10350 uint32_t status, reg;
10351 bool link;
10352 bool dopoll = true;
10353 int rv;
10354
10355 KASSERT(mutex_owned(sc->sc_core_lock));
10356
10357 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
10358 __func__));
10359
10360 if ((icr & ICR_LSC) == 0) {
10361 if (icr & ICR_RXSEQ)
10362 DPRINTF(sc, WM_DEBUG_LINK,
10363 ("%s: LINK Receive sequence error\n",
10364 device_xname(dev)));
10365 return;
10366 }
10367
10368 /* Link status changed */
10369 status = CSR_READ(sc, WMREG_STATUS);
10370 link = status & STATUS_LU;
10371 if (link) {
10372 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
10373 device_xname(dev),
10374 (status & STATUS_FD) ? "FDX" : "HDX"));
10375 if (wm_phy_need_linkdown_discard(sc)) {
10376 DPRINTF(sc, WM_DEBUG_LINK,
10377 ("%s: linkintr: Clear linkdown discard flag\n",
10378 device_xname(dev)));
10379 wm_clear_linkdown_discard(sc);
10380 }
10381 } else {
10382 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10383 device_xname(dev)));
10384 if (wm_phy_need_linkdown_discard(sc)) {
10385 DPRINTF(sc, WM_DEBUG_LINK,
10386 ("%s: linkintr: Set linkdown discard flag\n",
10387 device_xname(dev)));
10388 wm_set_linkdown_discard(sc);
10389 }
10390 }
10391 if ((sc->sc_type == WM_T_ICH8) && (link == false))
10392 wm_gig_downshift_workaround_ich8lan(sc);
10393
10394 if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
10395 wm_kmrn_lock_loss_workaround_ich8lan(sc);
10396
10397 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
10398 device_xname(dev)));
10399 if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
10400 if (link) {
10401 			/*
10402 			 * To work around the problem, it's required to wait
10403 			 * several hundred milliseconds. The time depends
10404 			 * on the environment. Wait 1 second to be safe.
10405 			 */
10406 dopoll = false;
10407 getmicrotime(&sc->sc_linkup_delay_time);
10408 sc->sc_linkup_delay_time.tv_sec += 1;
10409 } else if (sc->sc_linkup_delay_time.tv_sec != 0) {
10410 			/*
10411 			 * Simplify by checking tv_sec only; that's enough.
10412 			 *
10413 			 * Clearing the time is not currently required; it is
10414 			 * done just so we can tell (for debugging) that the
10415 			 * timer has stopped.
10416 			 */
10417
10418 sc->sc_linkup_delay_time.tv_sec = 0;
10419 sc->sc_linkup_delay_time.tv_usec = 0;
10420 }
10421 }
10422
10423 	/*
10424 	 * Call mii_pollstat().
10425 	 *
10426 	 * Some (not all) systems using the I35[04] or I21[01] can't send
10427 	 * packets right after link-up: the MAC hands a packet to the PHY,
10428 	 * no error is observed, but the packet is lost, so gratuitous ARP
10429 	 * and/or IPv6 DAD packets are silently dropped. To avoid this,
10430 	 * don't call mii_pollstat() here, as it would send a LINK_STATE_UP
10431 	 * notification to the upper layer. Instead, mii_pollstat() will be
10432 	 * called in wm_gmii_mediastatus(), or mii_tick() in wm_tick().
10433 	 */
10434 if (dopoll)
10435 mii_pollstat(&sc->sc_mii);
10436
10437 /* Do some workarounds soon after link status is changed. */
10438
10439 if (sc->sc_type == WM_T_82543) {
10440 int miistatus, active;
10441
10442 /*
10443 		 * With the 82543, we need to force the speed and
10444 		 * duplex on the MAC to match the PHY's speed and
10445 		 * duplex configuration.
10446 */
10447 miistatus = sc->sc_mii.mii_media_status;
10448
10449 if (miistatus & IFM_ACTIVE) {
10450 active = sc->sc_mii.mii_media_active;
10451 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
10452 switch (IFM_SUBTYPE(active)) {
10453 case IFM_10_T:
10454 sc->sc_ctrl |= CTRL_SPEED_10;
10455 break;
10456 case IFM_100_TX:
10457 sc->sc_ctrl |= CTRL_SPEED_100;
10458 break;
10459 case IFM_1000_T:
10460 sc->sc_ctrl |= CTRL_SPEED_1000;
10461 break;
10462 default:
10463 /*
10464 * Fiber?
10465 * Shoud not enter here.
10466 				 * Should not get here.
10467 device_printf(dev, "unknown media (%x)\n",
10468 active);
10469 break;
10470 }
10471 if (active & IFM_FDX)
10472 sc->sc_ctrl |= CTRL_FD;
10473 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10474 }
10475 } else if (sc->sc_type == WM_T_PCH) {
10476 wm_k1_gig_workaround_hv(sc,
10477 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
10478 }
10479
10480 /*
10481 * When connected at 10Mbps half-duplex, some parts are excessively
10482 * aggressive resulting in many collisions. To avoid this, increase
10483 * the IPG and reduce Rx latency in the PHY.
10484 */
10485 if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_TGP)
10486 && link) {
10487 uint32_t tipg_reg;
10488 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
10489 bool fdx;
10490 uint16_t emi_addr, emi_val;
10491
10492 tipg_reg = CSR_READ(sc, WMREG_TIPG);
10493 tipg_reg &= ~TIPG_IPGT_MASK;
10494 fdx = status & STATUS_FD;
10495
10496 if (!fdx && (speed == STATUS_SPEED_10)) {
10497 tipg_reg |= 0xff;
10498 /* Reduce Rx latency in analog PHY */
10499 emi_val = 0;
10500 } else if ((sc->sc_type >= WM_T_PCH_SPT) &&
10501 fdx && speed != STATUS_SPEED_1000) {
10502 tipg_reg |= 0xc;
10503 emi_val = 1;
10504 } else {
10505 			/* Roll back to the default values */
10506 tipg_reg |= 0x08;
10507 emi_val = 1;
10508 }
10509
10510 CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
10511
10512 rv = sc->phy.acquire(sc);
10513 if (rv)
10514 return;
10515
10516 if (sc->sc_type == WM_T_PCH2)
10517 emi_addr = I82579_RX_CONFIG;
10518 else
10519 emi_addr = I217_RX_CONFIG;
10520 rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
10521
10522 if (sc->sc_type >= WM_T_PCH_LPT) {
10523 uint16_t phy_reg;
10524
10525 sc->phy.readreg_locked(dev, 2,
10526 I217_PLL_CLOCK_GATE_REG, &phy_reg);
10527 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
10528 if (speed == STATUS_SPEED_100
10529 || speed == STATUS_SPEED_10)
10530 phy_reg |= 0x3e8;
10531 else
10532 phy_reg |= 0xfa;
10533 sc->phy.writereg_locked(dev, 2,
10534 I217_PLL_CLOCK_GATE_REG, phy_reg);
10535
10536 if (speed == STATUS_SPEED_1000) {
10537 sc->phy.readreg_locked(dev, 2,
10538 HV_PM_CTRL, &phy_reg);
10539
10540 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
10541
10542 sc->phy.writereg_locked(dev, 2,
10543 HV_PM_CTRL, phy_reg);
10544 }
10545 }
10546 sc->phy.release(sc);
10547
10548 if (rv)
10549 return;
10550
10551 if (sc->sc_type >= WM_T_PCH_SPT) {
10552 uint16_t data, ptr_gap;
10553
10554 if (speed == STATUS_SPEED_1000) {
10555 rv = sc->phy.acquire(sc);
10556 if (rv)
10557 return;
10558
10559 rv = sc->phy.readreg_locked(dev, 2,
10560 I82579_UNKNOWN1, &data);
10561 if (rv) {
10562 sc->phy.release(sc);
10563 return;
10564 }
10565
10566 ptr_gap = (data & (0x3ff << 2)) >> 2;
10567 if (ptr_gap < 0x18) {
10568 data &= ~(0x3ff << 2);
10569 data |= (0x18 << 2);
10570 rv = sc->phy.writereg_locked(dev,
10571 2, I82579_UNKNOWN1, data);
10572 }
10573 sc->phy.release(sc);
10574 if (rv)
10575 return;
10576 } else {
10577 rv = sc->phy.acquire(sc);
10578 if (rv)
10579 return;
10580
10581 rv = sc->phy.writereg_locked(dev, 2,
10582 I82579_UNKNOWN1, 0xc023);
10583 sc->phy.release(sc);
10584 if (rv)
10585 return;
10586
10587 }
10588 }
10589 }
10590
10591 	/*
10592 	 * I217 packet loss issue:
10593 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
10594 	 * on power up.
10595 	 * Set the Beacon Duration for the I217 to 8 usec.
10596 	 */
10597 if (sc->sc_type >= WM_T_PCH_LPT) {
10598 reg = CSR_READ(sc, WMREG_FEXTNVM4);
10599 reg &= ~FEXTNVM4_BEACON_DURATION;
10600 reg |= FEXTNVM4_BEACON_DURATION_8US;
10601 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
10602 }
10603
10604 /* Work-around I218 hang issue */
10605 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
10606 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
10607 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
10608 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
10609 wm_k1_workaround_lpt_lp(sc, link);
10610
10611 if (sc->sc_type >= WM_T_PCH_LPT) {
10612 /*
10613 * Set platform power management values for Latency
10614 * Tolerance Reporting (LTR)
10615 */
10616 wm_platform_pm_pch_lpt(sc,
10617 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
10618 }
10619
10620 /* Clear link partner's EEE ability */
10621 sc->eee_lp_ability = 0;
10622
10623 /* FEXTNVM6 K1-off workaround */
10624 if (sc->sc_type == WM_T_PCH_SPT) {
10625 reg = CSR_READ(sc, WMREG_FEXTNVM6);
10626 if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
10627 reg |= FEXTNVM6_K1_OFF_ENABLE;
10628 else
10629 reg &= ~FEXTNVM6_K1_OFF_ENABLE;
10630 CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
10631 }
10632
10633 if (!link)
10634 return;
10635
10636 switch (sc->sc_type) {
10637 case WM_T_PCH2:
10638 wm_k1_workaround_lv(sc);
10639 /* FALLTHROUGH */
10640 case WM_T_PCH:
10641 if (sc->sc_phytype == WMPHY_82578)
10642 wm_link_stall_workaround_hv(sc);
10643 break;
10644 default:
10645 break;
10646 }
10647
10648 /* Enable/Disable EEE after link up */
10649 if (sc->sc_phytype > WMPHY_82579)
10650 wm_set_eee_pchlan(sc);
10651 }
10652
10653 /*
10654 * wm_linkintr_tbi:
10655 *
10656 * Helper; handle link interrupts for TBI mode.
10657 */
10658 static void
10659 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
10660 {
10661 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10662 uint32_t status;
10663
10664 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10665 __func__));
10666
10667 status = CSR_READ(sc, WMREG_STATUS);
10668 if (icr & ICR_LSC) {
10669 wm_check_for_link(sc);
10670 if (status & STATUS_LU) {
10671 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
10672 device_xname(sc->sc_dev),
10673 (status & STATUS_FD) ? "FDX" : "HDX"));
10674 /*
10675 * NOTE: CTRL will update TFCE and RFCE automatically,
10676 * so we should update sc->sc_ctrl
10677 */
10678
10679 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
10680 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10681 sc->sc_fcrtl &= ~FCRTL_XONE;
10682 if (status & STATUS_FD)
10683 sc->sc_tctl |=
10684 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10685 else
10686 sc->sc_tctl |=
10687 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10688 if (sc->sc_ctrl & CTRL_TFCE)
10689 sc->sc_fcrtl |= FCRTL_XONE;
10690 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10691 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
10692 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
10693 sc->sc_tbi_linkup = 1;
10694 if_link_state_change(ifp, LINK_STATE_UP);
10695 } else {
10696 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10697 device_xname(sc->sc_dev)));
10698 sc->sc_tbi_linkup = 0;
10699 if_link_state_change(ifp, LINK_STATE_DOWN);
10700 }
10701 /* Update LED */
10702 wm_tbi_serdes_set_linkled(sc);
10703 } else if (icr & ICR_RXSEQ)
10704 DPRINTF(sc, WM_DEBUG_LINK,
10705 ("%s: LINK: Receive sequence error\n",
10706 device_xname(sc->sc_dev)));
10707 }
10708
10709 /*
10710 * wm_linkintr_serdes:
10711 *
10712  *	Helper; handle link interrupts for SERDES mode.
10713 */
10714 static void
10715 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
10716 {
10717 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10718 struct mii_data *mii = &sc->sc_mii;
10719 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10720 uint32_t pcs_adv, pcs_lpab, reg;
10721
10722 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10723 __func__));
10724
10725 if (icr & ICR_LSC) {
10726 /* Check PCS */
10727 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10728 if ((reg & PCS_LSTS_LINKOK) != 0) {
10729 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
10730 device_xname(sc->sc_dev)));
10731 mii->mii_media_status |= IFM_ACTIVE;
10732 sc->sc_tbi_linkup = 1;
10733 if_link_state_change(ifp, LINK_STATE_UP);
10734 } else {
10735 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10736 device_xname(sc->sc_dev)));
10737 mii->mii_media_status |= IFM_NONE;
10738 sc->sc_tbi_linkup = 0;
10739 if_link_state_change(ifp, LINK_STATE_DOWN);
10740 wm_tbi_serdes_set_linkled(sc);
10741 return;
10742 }
10743 mii->mii_media_active |= IFM_1000_SX;
10744 if ((reg & PCS_LSTS_FDX) != 0)
10745 mii->mii_media_active |= IFM_FDX;
10746 else
10747 mii->mii_media_active |= IFM_HDX;
10748 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
10749 /* Check flow */
10750 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10751 if ((reg & PCS_LSTS_AN_COMP) == 0) {
10752 DPRINTF(sc, WM_DEBUG_LINK,
10753 ("XXX LINKOK but not ACOMP\n"));
10754 return;
10755 }
10756 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
10757 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
10758 DPRINTF(sc, WM_DEBUG_LINK,
10759 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
10760 if ((pcs_adv & TXCW_SYM_PAUSE)
10761 && (pcs_lpab & TXCW_SYM_PAUSE)) {
10762 mii->mii_media_active |= IFM_FLOW
10763 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
10764 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
10765 && (pcs_adv & TXCW_ASYM_PAUSE)
10766 && (pcs_lpab & TXCW_SYM_PAUSE)
10767 && (pcs_lpab & TXCW_ASYM_PAUSE))
10768 mii->mii_media_active |= IFM_FLOW
10769 | IFM_ETH_TXPAUSE;
10770 else if ((pcs_adv & TXCW_SYM_PAUSE)
10771 && (pcs_adv & TXCW_ASYM_PAUSE)
10772 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
10773 && (pcs_lpab & TXCW_ASYM_PAUSE))
10774 mii->mii_media_active |= IFM_FLOW
10775 | IFM_ETH_RXPAUSE;
10776 }
10777 /* Update LED */
10778 wm_tbi_serdes_set_linkled(sc);
10779 } else
10780 DPRINTF(sc, WM_DEBUG_LINK,
10781 ("%s: LINK: Receive sequence error\n",
10782 device_xname(sc->sc_dev)));
10783 }
10784
10785 /*
10786 * wm_linkintr:
10787 *
10788 * Helper; handle link interrupts.
10789 */
10790 static void
10791 wm_linkintr(struct wm_softc *sc, uint32_t icr)
10792 {
10793
10794 KASSERT(mutex_owned(sc->sc_core_lock));
10795
10796 if (sc->sc_flags & WM_F_HAS_MII)
10797 wm_linkintr_gmii(sc, icr);
10798 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
10799 && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
10800 wm_linkintr_serdes(sc, icr);
10801 else
10802 wm_linkintr_tbi(sc, icr);
10803 }
10804
10805
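/*
 * wm_sched_handle_queue:
 *
 *	Defer Tx/Rx processing for a queue to either a workqueue or a
 *	softint, depending on the queue's txrx_use_workqueue setting.
 */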
10806 static inline void
10807 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
10808 {
10809
10810 if (wmq->wmq_txrx_use_workqueue) {
10811 if (!wmq->wmq_wq_enqueued) {
10812 wmq->wmq_wq_enqueued = true;
10813 workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
10814 curcpu());
10815 }
10816 } else
10817 softint_schedule(wmq->wmq_si);
10818 }
10819
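/*
 * wm_legacy_intr_disable:
 *
 *	Disable all interrupts by setting every bit in the interrupt
 *	mask clear (IMC) register.
 */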
10820 static inline void
10821 wm_legacy_intr_disable(struct wm_softc *sc)
10822 {
10823
10824 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
10825 }
10826
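/*
 * wm_legacy_intr_enable:
 *
 *	Re-enable the interrupt causes this driver uses by writing the
 *	saved mask to the interrupt mask set (IMS) register.
 */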
10827 static inline void
10828 wm_legacy_intr_enable(struct wm_softc *sc)
10829 {
10830
10831 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
10832 }
10833
10834 /*
10835 * wm_intr_legacy:
10836 *
10837 * Interrupt service routine for INTx and MSI.
10838 */
10839 static int
10840 wm_intr_legacy(void *arg)
10841 {
10842 struct wm_softc *sc = arg;
10843 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10844 struct wm_queue *wmq = &sc->sc_queue[0];
10845 struct wm_txqueue *txq = &wmq->wmq_txq;
10846 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10847 u_int txlimit = sc->sc_tx_intr_process_limit;
10848 u_int rxlimit = sc->sc_rx_intr_process_limit;
10849 uint32_t icr, rndval = 0;
10850 bool more = false;
10851
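	/*
	 * Reading ICR clears the asserted interrupt causes. If none of
	 * the causes we enabled is set, the interrupt was raised by some
	 * other device sharing the INTx line and is not ours.
	 */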
10852 icr = CSR_READ(sc, WMREG_ICR);
10853 if ((icr & sc->sc_icr) == 0)
10854 return 0;
10855
10856 DPRINTF(sc, WM_DEBUG_TX,
10857	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
10858 if (rndval == 0)
10859 rndval = icr;
10860
10861 mutex_enter(txq->txq_lock);
10862
10863 if (txq->txq_stopping) {
10864 mutex_exit(txq->txq_lock);
10865 return 1;
10866 }
10867
10868 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10869 if (icr & ICR_TXDW) {
10870 DPRINTF(sc, WM_DEBUG_TX,
10871 ("%s: TX: got TXDW interrupt\n",
10872 device_xname(sc->sc_dev)));
10873 WM_Q_EVCNT_INCR(txq, txdw);
10874 }
10875 #endif
10876 if (txlimit > 0) {
10877 more |= wm_txeof(txq, txlimit);
10878 if (!IF_IS_EMPTY(&ifp->if_snd))
10879 more = true;
10880 } else
10881 more = true;
10882 mutex_exit(txq->txq_lock);
10883
10884 mutex_enter(rxq->rxq_lock);
10885
10886 if (rxq->rxq_stopping) {
10887 mutex_exit(rxq->rxq_lock);
10888 return 1;
10889 }
10890
10891 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10892 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
10893 DPRINTF(sc, WM_DEBUG_RX,
10894 ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
10895 device_xname(sc->sc_dev),
10896 icr & (ICR_RXDMT0 | ICR_RXT0)));
10897 WM_Q_EVCNT_INCR(rxq, intr);
10898 }
10899 #endif
10900 if (rxlimit > 0) {
10901		/*
10902		 * wm_rxeof() does *not* call upper layer functions directly,
10903		 * as if_percpuq_enqueue() just calls softint_schedule().
10904		 * So, we can call wm_rxeof() in interrupt context.
10905		 */
10906 more = wm_rxeof(rxq, rxlimit);
10907 } else
10908 more = true;
10909
10910 mutex_exit(rxq->rxq_lock);
10911
10912 mutex_enter(sc->sc_core_lock);
10913
10914 if (sc->sc_core_stopping) {
10915 mutex_exit(sc->sc_core_lock);
10916 return 1;
10917 }
10918
10919 if (icr & (ICR_LSC | ICR_RXSEQ)) {
10920 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10921 wm_linkintr(sc, icr);
10922 }
10923 if ((icr & ICR_GPI(0)) != 0)
10924 device_printf(sc->sc_dev, "got module interrupt\n");
10925
10926 mutex_exit(sc->sc_core_lock);
10927
10928 if (icr & ICR_RXO) {
10929 #if defined(WM_DEBUG)
10930 log(LOG_WARNING, "%s: Receive overrun\n",
10931 device_xname(sc->sc_dev));
10932 #endif /* defined(WM_DEBUG) */
10933 }
10934
10935 rnd_add_uint32(&sc->rnd_source, rndval);
10936
10937 if (more) {
10938 /* Try to get more packets going. */
10939 wm_legacy_intr_disable(sc);
10940 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10941 wm_sched_handle_queue(sc, wmq);
10942 }
10943
10944 return 1;
10945 }
10946
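/*
 * wm_txrxintr_disable:
 *
 *	Disable the Tx/Rx interrupts of the given queue. The register to
 *	write depends on the MAC type (legacy, 82574, 82575 or newer).
 */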
10947 static inline void
10948 wm_txrxintr_disable(struct wm_queue *wmq)
10949 {
10950 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10951
10952 if (__predict_false(!wm_is_using_msix(sc))) {
10953 wm_legacy_intr_disable(sc);
10954 return;
10955 }
10956
10957 if (sc->sc_type == WM_T_82574)
10958 CSR_WRITE(sc, WMREG_IMC,
10959 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
10960 else if (sc->sc_type == WM_T_82575)
10961 CSR_WRITE(sc, WMREG_EIMC,
10962 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10963 else
10964 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
10965 }
10966
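/*
 * wm_txrxintr_enable:
 *
 *	Re-enable the Tx/Rx interrupts of the given queue once deferred
 *	processing has drained it.
 */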
10967 static inline void
10968 wm_txrxintr_enable(struct wm_queue *wmq)
10969 {
10970 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10971
10972 wm_itrs_calculate(sc, wmq);
10973
10974 if (__predict_false(!wm_is_using_msix(sc))) {
10975 wm_legacy_intr_enable(sc);
10976 return;
10977 }
10978
10979	/*
10980	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled here.
10981	 * There is no need to care which of RXQ(0) and RXQ(1) enables
10982	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
10983	 * its wm_handle_queue(wmq) is running.
10984	 */
10985 if (sc->sc_type == WM_T_82574)
10986 CSR_WRITE(sc, WMREG_IMS,
10987 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
10988 else if (sc->sc_type == WM_T_82575)
10989 CSR_WRITE(sc, WMREG_EIMS,
10990 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10991 else
10992 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
10993 }
10994
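/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx interrupts of a queue pair
 *	(MSI-X). Work beyond the interrupt process limits is deferred to
 *	wm_handle_queue().
 */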
10995 static int
10996 wm_txrxintr_msix(void *arg)
10997 {
10998 struct wm_queue *wmq = arg;
10999 struct wm_txqueue *txq = &wmq->wmq_txq;
11000 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
11001 struct wm_softc *sc = txq->txq_sc;
11002 u_int txlimit = sc->sc_tx_intr_process_limit;
11003 u_int rxlimit = sc->sc_rx_intr_process_limit;
11004 bool txmore;
11005 bool rxmore;
11006
11007 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
11008
11009 DPRINTF(sc, WM_DEBUG_TX,
11010 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
11011
11012 wm_txrxintr_disable(wmq);
11013
11014 mutex_enter(txq->txq_lock);
11015
11016 if (txq->txq_stopping) {
11017 mutex_exit(txq->txq_lock);
11018 return 1;
11019 }
11020
11021 WM_Q_EVCNT_INCR(txq, txdw);
11022 if (txlimit > 0) {
11023 txmore = wm_txeof(txq, txlimit);
11024		/* wm_deferred_start() is done in wm_handle_queue(). */
11025 } else
11026 txmore = true;
11027 mutex_exit(txq->txq_lock);
11028
11029 DPRINTF(sc, WM_DEBUG_RX,
11030 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
11031 mutex_enter(rxq->rxq_lock);
11032
11033 if (rxq->rxq_stopping) {
11034 mutex_exit(rxq->rxq_lock);
11035 return 1;
11036 }
11037
11038 WM_Q_EVCNT_INCR(rxq, intr);
11039 if (rxlimit > 0) {
11040 rxmore = wm_rxeof(rxq, rxlimit);
11041 } else
11042 rxmore = true;
11043 mutex_exit(rxq->rxq_lock);
11044
11045 wm_itrs_writereg(sc, wmq);
11046
11047 if (txmore || rxmore) {
11048 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
11049 wm_sched_handle_queue(sc, wmq);
11050 } else
11051 wm_txrxintr_enable(wmq);
11052
11053 return 1;
11054 }
11055
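/*
 * wm_handle_queue:
 *
 *	Softint/workqueue handler; continue the Tx/Rx processing that
 *	wm_txrxintr_msix() or wm_intr_legacy() deferred, and re-enable
 *	the queue's interrupts when no more work remains.
 */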
11056 static void
11057 wm_handle_queue(void *arg)
11058 {
11059 struct wm_queue *wmq = arg;
11060 struct wm_txqueue *txq = &wmq->wmq_txq;
11061 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
11062 struct wm_softc *sc = txq->txq_sc;
11063 u_int txlimit = sc->sc_tx_process_limit;
11064 u_int rxlimit = sc->sc_rx_process_limit;
11065 bool txmore;
11066 bool rxmore;
11067
11068 mutex_enter(txq->txq_lock);
11069 if (txq->txq_stopping) {
11070 mutex_exit(txq->txq_lock);
11071 return;
11072 }
11073 txmore = wm_txeof(txq, txlimit);
11074 wm_deferred_start_locked(txq);
11075 mutex_exit(txq->txq_lock);
11076
11077 mutex_enter(rxq->rxq_lock);
11078 if (rxq->rxq_stopping) {
11079 mutex_exit(rxq->rxq_lock);
11080 return;
11081 }
11082 WM_Q_EVCNT_INCR(rxq, defer);
11083 rxmore = wm_rxeof(rxq, rxlimit);
11084 mutex_exit(rxq->rxq_lock);
11085
11086 if (txmore || rxmore) {
11087 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
11088 wm_sched_handle_queue(sc, wmq);
11089 } else
11090 wm_txrxintr_enable(wmq);
11091 }
11092
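/*
 * wm_handle_queue_work:
 *
 *	Workqueue wrapper around wm_handle_queue().
 */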
11093 static void
11094 wm_handle_queue_work(struct work *wk, void *context)
11095 {
11096 struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
11097
11098	/*
11099	 * Workaround for some QEMU environments that do not stop the
11100	 * interrupt immediately: clear the enqueued flag before handling.
11101	 */
11102 wmq->wmq_wq_enqueued = false;
11103 wm_handle_queue(wmq);
11104 }
11105
11106 /*
11107 * wm_linkintr_msix:
11108 *
11109 * Interrupt service routine for link status change for MSI-X.
11110 */
11111 static int
11112 wm_linkintr_msix(void *arg)
11113 {
11114 struct wm_softc *sc = arg;
11115 uint32_t reg;
11116	bool has_rxo = false;
11117
11118 reg = CSR_READ(sc, WMREG_ICR);
11119 mutex_enter(sc->sc_core_lock);
11120 DPRINTF(sc, WM_DEBUG_LINK,
11121 ("%s: LINK: got link intr. ICR = %08x\n",
11122 device_xname(sc->sc_dev), reg));
11123
11124 if (sc->sc_core_stopping)
11125 goto out;
11126
11127 if ((reg & ICR_LSC) != 0) {
11128 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
11129 wm_linkintr(sc, ICR_LSC);
11130 }
11131 if ((reg & ICR_GPI(0)) != 0)
11132 device_printf(sc->sc_dev, "got module interrupt\n");
11133
11134	/*
11135	 * XXX 82574 MSI-X mode workaround
11136	 *
11137	 * In 82574 MSI-X mode, a receive overrun (RXO) interrupt is delivered
11138	 * as the ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor
11139	 * the ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
11140	 * interrupts by writing to WMREG_ICS to process receive packets.
11141	 */
11142 if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
11143 #if defined(WM_DEBUG)
11144 log(LOG_WARNING, "%s: Receive overrun\n",
11145 device_xname(sc->sc_dev));
11146 #endif /* defined(WM_DEBUG) */
11147
11148 has_rxo = true;
11149		/*
11150		 * The RXO interrupt fires at a very high rate when receive
11151		 * traffic is heavy, so we handle ICR_OTHER in polling mode, as
11152		 * we do for the Tx/Rx interrupts. ICR_OTHER will be re-enabled
11153		 * at the end of wm_txrxintr_msix(), which is kicked by both the
11154		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
11155		 */
11156 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
11157
11158 CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
11159 }
11160
11161
11162
11163 out:
11164 mutex_exit(sc->sc_core_lock);
11165
11166 if (sc->sc_type == WM_T_82574) {
11167 if (!has_rxo)
11168 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
11169 else
11170 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
11171 } else if (sc->sc_type == WM_T_82575)
11172 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
11173 else
11174 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
11175
11176 return 1;
11177 }
11178
11179 /*
11180 * Media related.
11181 * GMII, SGMII, TBI (and SERDES)
11182 */
11183
11184 /* Common */
11185
11186 /*
11187 * wm_tbi_serdes_set_linkled:
11188 *
11189 * Update the link LED on TBI and SERDES devices.
11190 */
11191 static void
11192 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
11193 {
11194
11195 if (sc->sc_tbi_linkup)
11196 sc->sc_ctrl |= CTRL_SWDPIN(0);
11197 else
11198 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
11199
11200 /* 82540 or newer devices are active low */
11201 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
11202
11203 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11204 }
11205
11206 /* GMII related */
11207
11208 /*
11209 * wm_gmii_reset:
11210 *
11211 * Reset the PHY.
11212 */
11213 static void
11214 wm_gmii_reset(struct wm_softc *sc)
11215 {
11216 uint32_t reg;
11217 int rv;
11218
11219 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11220 device_xname(sc->sc_dev), __func__));
11221
11222 rv = sc->phy.acquire(sc);
11223 if (rv != 0) {
11224 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11225 __func__);
11226 return;
11227 }
11228
11229 switch (sc->sc_type) {
11230 case WM_T_82542_2_0:
11231 case WM_T_82542_2_1:
11232 /* null */
11233 break;
11234 case WM_T_82543:
11235 /*
11236		 * With the 82543, we need to force the speed and duplex on the
11237		 * MAC to match the PHY's speed and duplex configuration. In
11238		 * addition, we need to toggle a hardware reset on the PHY to
11239		 * take it out of reset.
11240 */
11241 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11242 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11243
11244 /* The PHY reset pin is active-low. */
11245 reg = CSR_READ(sc, WMREG_CTRL_EXT);
11246 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
11247 CTRL_EXT_SWDPIN(4));
11248 reg |= CTRL_EXT_SWDPIO(4);
11249
11250 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11251 CSR_WRITE_FLUSH(sc);
11252 delay(10*1000);
11253
11254 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
11255 CSR_WRITE_FLUSH(sc);
11256 delay(150);
11257 #if 0
11258 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
11259 #endif
11260 delay(20*1000); /* XXX extra delay to get PHY ID? */
11261 break;
11262 case WM_T_82544: /* Reset 10000us */
11263 case WM_T_82540:
11264 case WM_T_82545:
11265 case WM_T_82545_3:
11266 case WM_T_82546:
11267 case WM_T_82546_3:
11268 case WM_T_82541:
11269 case WM_T_82541_2:
11270 case WM_T_82547:
11271 case WM_T_82547_2:
11272 case WM_T_82571: /* Reset 100us */
11273 case WM_T_82572:
11274 case WM_T_82573:
11275 case WM_T_82574:
11276 case WM_T_82575:
11277 case WM_T_82576:
11278 case WM_T_82580:
11279 case WM_T_I350:
11280 case WM_T_I354:
11281 case WM_T_I210:
11282 case WM_T_I211:
11283 case WM_T_82583:
11284 case WM_T_80003:
11285 /* Generic reset */
11286 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11287 CSR_WRITE_FLUSH(sc);
11288 delay(20000);
11289 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11290 CSR_WRITE_FLUSH(sc);
11291 delay(20000);
11292
11293 if ((sc->sc_type == WM_T_82541)
11294 || (sc->sc_type == WM_T_82541_2)
11295 || (sc->sc_type == WM_T_82547)
11296 || (sc->sc_type == WM_T_82547_2)) {
11297			/* Workarounds for IGP are done in igp_reset() */
11298			/* XXX Add code to set the LED after PHY reset */
11299 }
11300 break;
11301 case WM_T_ICH8:
11302 case WM_T_ICH9:
11303 case WM_T_ICH10:
11304 case WM_T_PCH:
11305 case WM_T_PCH2:
11306 case WM_T_PCH_LPT:
11307 case WM_T_PCH_SPT:
11308 case WM_T_PCH_CNP:
11309 case WM_T_PCH_TGP:
11310 /* Generic reset */
11311 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11312 CSR_WRITE_FLUSH(sc);
11313 delay(100);
11314 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11315 CSR_WRITE_FLUSH(sc);
11316 delay(150);
11317 break;
11318 default:
11319 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
11320 __func__);
11321 break;
11322 }
11323
11324 sc->phy.release(sc);
11325
11326 /* get_cfg_done */
11327 wm_get_cfg_done(sc);
11328
11329 /* Extra setup */
11330 switch (sc->sc_type) {
11331 case WM_T_82542_2_0:
11332 case WM_T_82542_2_1:
11333 case WM_T_82543:
11334 case WM_T_82544:
11335 case WM_T_82540:
11336 case WM_T_82545:
11337 case WM_T_82545_3:
11338 case WM_T_82546:
11339 case WM_T_82546_3:
11340 case WM_T_82541_2:
11341 case WM_T_82547_2:
11342 case WM_T_82571:
11343 case WM_T_82572:
11344 case WM_T_82573:
11345 case WM_T_82574:
11346 case WM_T_82583:
11347 case WM_T_82575:
11348 case WM_T_82576:
11349 case WM_T_82580:
11350 case WM_T_I350:
11351 case WM_T_I354:
11352 case WM_T_I210:
11353 case WM_T_I211:
11354 case WM_T_80003:
11355 /* Null */
11356 break;
11357 case WM_T_82541:
11358 case WM_T_82547:
11359		/* XXX Actively configure the LED after PHY reset */
11360 break;
11361 case WM_T_ICH8:
11362 case WM_T_ICH9:
11363 case WM_T_ICH10:
11364 case WM_T_PCH:
11365 case WM_T_PCH2:
11366 case WM_T_PCH_LPT:
11367 case WM_T_PCH_SPT:
11368 case WM_T_PCH_CNP:
11369 case WM_T_PCH_TGP:
11370 wm_phy_post_reset(sc);
11371 break;
11372 default:
11373 panic("%s: unknown type\n", __func__);
11374 break;
11375 }
11376 }
11377
11378 /*
11379 * Set up sc_phytype and mii_{read|write}reg.
11380 *
11381 * To identify the PHY type, the correct read/write functions must be
11382 * selected. To select the correct read/write functions, the PCI ID or
11383 * MAC type must be used, because PHY registers cannot be accessed yet.
11384 *
11385 * On the first call of this function, the PHY ID is not known yet, so
11386 * check the PCI ID or MAC type. The list of PCI IDs may not be complete,
11387 * so the result might be incorrect.
11388 *
11389 * In the second call, the PHY OUI and model are used to identify the PHY
11390 * type. It might not be perfect because some entries are missing from
11391 * the comparison, but it should be better than the first call.
11392 *
11393 * If the newly detected result differs from the previous assumption,
11394 * a diagnostic message will be printed.
11395 */
11396 static void
11397 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
11398 uint16_t phy_model)
11399 {
11400 device_t dev = sc->sc_dev;
11401 struct mii_data *mii = &sc->sc_mii;
11402 uint16_t new_phytype = WMPHY_UNKNOWN;
11403 uint16_t doubt_phytype = WMPHY_UNKNOWN;
11404 mii_readreg_t new_readreg;
11405 mii_writereg_t new_writereg;
11406 bool dodiag = true;
11407
11408 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11409 device_xname(sc->sc_dev), __func__));
11410
11411 /*
11412	 * A 1000BASE-T SFP uses SGMII, and the first assumed PHY type is
11413	 * always incorrect, so don't print diag output on the second call.
11414 */
11415 if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
11416 dodiag = false;
11417
11418 if (mii->mii_readreg == NULL) {
11419 /*
11420 * This is the first call of this function. For ICH and PCH
11421 * variants, it's difficult to determine the PHY access method
11422 * by sc_type, so use the PCI product ID for some devices.
11423 */
11424
11425 switch (sc->sc_pcidevid) {
11426 case PCI_PRODUCT_INTEL_PCH_M_LM:
11427 case PCI_PRODUCT_INTEL_PCH_M_LC:
11428 /* 82577 */
11429 new_phytype = WMPHY_82577;
11430 break;
11431 case PCI_PRODUCT_INTEL_PCH_D_DM:
11432 case PCI_PRODUCT_INTEL_PCH_D_DC:
11433 /* 82578 */
11434 new_phytype = WMPHY_82578;
11435 break;
11436 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
11437 case PCI_PRODUCT_INTEL_PCH2_LV_V:
11438 /* 82579 */
11439 new_phytype = WMPHY_82579;
11440 break;
11441 case PCI_PRODUCT_INTEL_82801H_82567V_3:
11442 case PCI_PRODUCT_INTEL_82801I_BM:
11443 case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
11444 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
11445 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
11446 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
11447 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
11448 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
11449 /* ICH8, 9, 10 with 82567 */
11450 new_phytype = WMPHY_BM;
11451 break;
11452 default:
11453 break;
11454 }
11455 } else {
11456 /* It's not the first call. Use PHY OUI and model */
11457 switch (phy_oui) {
11458 case MII_OUI_ATTANSIC: /* atphy(4) */
11459 switch (phy_model) {
11460 case MII_MODEL_ATTANSIC_AR8021:
11461 new_phytype = WMPHY_82578;
11462 break;
11463 default:
11464 break;
11465 }
11466 break;
11467 case MII_OUI_xxMARVELL:
11468 switch (phy_model) {
11469 case MII_MODEL_xxMARVELL_I210:
11470 new_phytype = WMPHY_I210;
11471 break;
11472 case MII_MODEL_xxMARVELL_E1011:
11473 case MII_MODEL_xxMARVELL_E1000_3:
11474 case MII_MODEL_xxMARVELL_E1000_5:
11475 case MII_MODEL_xxMARVELL_E1112:
11476 new_phytype = WMPHY_M88;
11477 break;
11478 case MII_MODEL_xxMARVELL_E1149:
11479 new_phytype = WMPHY_BM;
11480 break;
11481 case MII_MODEL_xxMARVELL_E1111:
11482 case MII_MODEL_xxMARVELL_I347:
11483 case MII_MODEL_xxMARVELL_E1512:
11484 case MII_MODEL_xxMARVELL_E1340M:
11485 case MII_MODEL_xxMARVELL_E1543:
11486 new_phytype = WMPHY_M88;
11487 break;
11488 case MII_MODEL_xxMARVELL_I82563:
11489 new_phytype = WMPHY_GG82563;
11490 break;
11491 default:
11492 break;
11493 }
11494 break;
11495 case MII_OUI_INTEL:
11496 switch (phy_model) {
11497 case MII_MODEL_INTEL_I82577:
11498 new_phytype = WMPHY_82577;
11499 break;
11500 case MII_MODEL_INTEL_I82579:
11501 new_phytype = WMPHY_82579;
11502 break;
11503 case MII_MODEL_INTEL_I217:
11504 new_phytype = WMPHY_I217;
11505 break;
11506 case MII_MODEL_INTEL_I82580:
11507 new_phytype = WMPHY_82580;
11508 break;
11509 case MII_MODEL_INTEL_I350:
11510 new_phytype = WMPHY_I350;
11511 break;
11512 default:
11513 break;
11514 }
11515 break;
11516 case MII_OUI_yyINTEL:
11517 switch (phy_model) {
11518 case MII_MODEL_yyINTEL_I82562G:
11519 case MII_MODEL_yyINTEL_I82562EM:
11520 case MII_MODEL_yyINTEL_I82562ET:
11521 new_phytype = WMPHY_IFE;
11522 break;
11523 case MII_MODEL_yyINTEL_IGP01E1000:
11524 new_phytype = WMPHY_IGP;
11525 break;
11526 case MII_MODEL_yyINTEL_I82566:
11527 new_phytype = WMPHY_IGP_3;
11528 break;
11529 default:
11530 break;
11531 }
11532 break;
11533 default:
11534 break;
11535 }
11536
11537 if (dodiag) {
11538 if (new_phytype == WMPHY_UNKNOWN)
11539 aprint_verbose_dev(dev,
11540 "%s: Unknown PHY model. OUI=%06x, "
11541 "model=%04x\n", __func__, phy_oui,
11542 phy_model);
11543
11544 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11545 && (sc->sc_phytype != new_phytype)) {
11546				aprint_error_dev(dev, "Previously assumed PHY "
11547				    "type(%u) was incorrect. PHY type from PHY "
11548				    "ID = %u\n", sc->sc_phytype, new_phytype);
11549 }
11550 }
11551 }
11552
11553 /* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
11554 if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
11555 /* SGMII */
11556 new_readreg = wm_sgmii_readreg;
11557 new_writereg = wm_sgmii_writereg;
11558	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
11559 /* BM2 (phyaddr == 1) */
11560 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11561 && (new_phytype != WMPHY_BM)
11562 && (new_phytype != WMPHY_UNKNOWN))
11563 doubt_phytype = new_phytype;
11564 new_phytype = WMPHY_BM;
11565 new_readreg = wm_gmii_bm_readreg;
11566 new_writereg = wm_gmii_bm_writereg;
11567 } else if (sc->sc_type >= WM_T_PCH) {
11568 /* All PCH* use _hv_ */
11569 new_readreg = wm_gmii_hv_readreg;
11570 new_writereg = wm_gmii_hv_writereg;
11571 } else if (sc->sc_type >= WM_T_ICH8) {
11572 /* non-82567 ICH8, 9 and 10 */
11573 new_readreg = wm_gmii_i82544_readreg;
11574 new_writereg = wm_gmii_i82544_writereg;
11575 } else if (sc->sc_type >= WM_T_80003) {
11576 /* 80003 */
11577 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11578 && (new_phytype != WMPHY_GG82563)
11579 && (new_phytype != WMPHY_UNKNOWN))
11580 doubt_phytype = new_phytype;
11581 new_phytype = WMPHY_GG82563;
11582 new_readreg = wm_gmii_i80003_readreg;
11583 new_writereg = wm_gmii_i80003_writereg;
11584 } else if (sc->sc_type >= WM_T_I210) {
11585 /* I210 and I211 */
11586 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11587 && (new_phytype != WMPHY_I210)
11588 && (new_phytype != WMPHY_UNKNOWN))
11589 doubt_phytype = new_phytype;
11590 new_phytype = WMPHY_I210;
11591 new_readreg = wm_gmii_gs40g_readreg;
11592 new_writereg = wm_gmii_gs40g_writereg;
11593 } else if (sc->sc_type >= WM_T_82580) {
11594 /* 82580, I350 and I354 */
11595 new_readreg = wm_gmii_82580_readreg;
11596 new_writereg = wm_gmii_82580_writereg;
11597 } else if (sc->sc_type >= WM_T_82544) {
11598		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
11599 new_readreg = wm_gmii_i82544_readreg;
11600 new_writereg = wm_gmii_i82544_writereg;
11601 } else {
11602 new_readreg = wm_gmii_i82543_readreg;
11603 new_writereg = wm_gmii_i82543_writereg;
11604 }
11605
11606 if (new_phytype == WMPHY_BM) {
11607 /* All BM use _bm_ */
11608 new_readreg = wm_gmii_bm_readreg;
11609 new_writereg = wm_gmii_bm_writereg;
11610 }
11611 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_TGP)) {
11612 /* All PCH* use _hv_ */
11613 new_readreg = wm_gmii_hv_readreg;
11614 new_writereg = wm_gmii_hv_writereg;
11615 }
11616
11617 /* Diag output */
11618 if (dodiag) {
11619 if (doubt_phytype != WMPHY_UNKNOWN)
11620 aprint_error_dev(dev, "Assumed new PHY type was "
11621 "incorrect. old = %u, new = %u\n", sc->sc_phytype,
11622 new_phytype);
11623 else if ((sc->sc_phytype != WMPHY_UNKNOWN)
11624 && (sc->sc_phytype != new_phytype))
11625			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
11626			    "was incorrect. New PHY type = %u\n",
11627			    sc->sc_phytype, new_phytype);
11628
11629 if ((mii->mii_readreg != NULL) &&
11630 (new_phytype == WMPHY_UNKNOWN))
11631 aprint_error_dev(dev, "PHY type is still unknown.\n");
11632
11633 if ((mii->mii_readreg != NULL) &&
11634 (mii->mii_readreg != new_readreg))
11635 aprint_error_dev(dev, "Previously assumed PHY "
11636 "read/write function was incorrect.\n");
11637 }
11638
11639 /* Update now */
11640 sc->sc_phytype = new_phytype;
11641 mii->mii_readreg = new_readreg;
11642 mii->mii_writereg = new_writereg;
11643 if (new_readreg == wm_gmii_hv_readreg) {
11644 sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
11645 sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
11646 } else if (new_readreg == wm_sgmii_readreg) {
11647 sc->phy.readreg_locked = wm_sgmii_readreg_locked;
11648 sc->phy.writereg_locked = wm_sgmii_writereg_locked;
11649 } else if (new_readreg == wm_gmii_i82544_readreg) {
11650 sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
11651 sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
11652 }
11653 }
11654
11655 /*
11656 * wm_get_phy_id_82575:
11657 *
11658  *	Return the PHY ID, or -1 if it failed.
11659 */
11660 static int
11661 wm_get_phy_id_82575(struct wm_softc *sc)
11662 {
11663 uint32_t reg;
11664 int phyid = -1;
11665
11666 /* XXX */
11667 if ((sc->sc_flags & WM_F_SGMII) == 0)
11668 return -1;
11669
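	/*
	 * When SGMII uses MDIO, the hardware latches the PHY address in
	 * MDIC (82575/82576) or MDICNFG (82580 and newer), so read it
	 * back from the appropriate register.
	 */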
11670 if (wm_sgmii_uses_mdio(sc)) {
11671 switch (sc->sc_type) {
11672 case WM_T_82575:
11673 case WM_T_82576:
11674 reg = CSR_READ(sc, WMREG_MDIC);
11675 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
11676 break;
11677 case WM_T_82580:
11678 case WM_T_I350:
11679 case WM_T_I354:
11680 case WM_T_I210:
11681 case WM_T_I211:
11682 reg = CSR_READ(sc, WMREG_MDICNFG);
11683 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
11684 break;
11685 default:
11686 return -1;
11687 }
11688 }
11689
11690 return phyid;
11691 }
11692
11693 /*
11694 * wm_gmii_mediainit:
11695 *
11696 * Initialize media for use on 1000BASE-T devices.
11697 */
11698 static void
11699 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
11700 {
11701 device_t dev = sc->sc_dev;
11702 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11703 struct mii_data *mii = &sc->sc_mii;
11704
11705 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11706 device_xname(sc->sc_dev), __func__));
11707
11708 /* We have GMII. */
11709 sc->sc_flags |= WM_F_HAS_MII;
11710
11711 if (sc->sc_type == WM_T_80003)
11712 sc->sc_tipg = TIPG_1000T_80003_DFLT;
11713 else
11714 sc->sc_tipg = TIPG_1000T_DFLT;
11715
11716 /*
11717 * Let the chip set speed/duplex on its own based on
11718 * signals from the PHY.
11719 * XXXbouyer - I'm not sure this is right for the 80003,
11720 * the em driver only sets CTRL_SLU here - but it seems to work.
11721 */
11722 sc->sc_ctrl |= CTRL_SLU;
11723 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11724
11725 /* Initialize our media structures and probe the GMII. */
11726 mii->mii_ifp = ifp;
11727
11728 mii->mii_statchg = wm_gmii_statchg;
11729
11730 /* get PHY control from SMBus to PCIe */
11731 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
11732 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
11733 || (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP))
11734 wm_init_phy_workarounds_pchlan(sc);
11735
11736 wm_gmii_reset(sc);
11737
11738 sc->sc_ethercom.ec_mii = &sc->sc_mii;
11739 ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
11740 wm_gmii_mediastatus, sc->sc_core_lock);
11741
11742 /* Setup internal SGMII PHY for SFP */
11743 wm_sgmii_sfp_preconfig(sc);
11744
11745 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
11746 || (sc->sc_type == WM_T_82580)
11747 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
11748 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
11749 if ((sc->sc_flags & WM_F_SGMII) == 0) {
11750 /* Attach only one port */
11751 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
11752 MII_OFFSET_ANY, MIIF_DOPAUSE);
11753 } else {
11754 int i, id;
11755 uint32_t ctrl_ext;
11756
11757 id = wm_get_phy_id_82575(sc);
11758 if (id != -1) {
11759 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
11760 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
11761 }
11762 if ((id == -1)
11763 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
11764				/* Power on the SGMII PHY if it is disabled */
11765 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11766 CSR_WRITE(sc, WMREG_CTRL_EXT,
11767				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
11768 CSR_WRITE_FLUSH(sc);
11769 delay(300*1000); /* XXX too long */
11770
11771				/*
11772				 * Scan PHY addresses 1 to 7.
11773				 *
11774				 * I2C access can fail with the I2C
11775				 * register's ERROR bit set, so suppress
11776				 * error messages while scanning.
11777				 */
11778 sc->phy.no_errprint = true;
11779 for (i = 1; i < 8; i++)
11780 mii_attach(sc->sc_dev, &sc->sc_mii,
11781 0xffffffff, i, MII_OFFSET_ANY,
11782 MIIF_DOPAUSE);
11783 sc->phy.no_errprint = false;
11784
11785 /* Restore previous sfp cage power state */
11786 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11787 }
11788 }
11789 } else
11790 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11791 MII_OFFSET_ANY, MIIF_DOPAUSE);
11792
11793	/*
11794	 * If the MAC is a PCH2 or newer variant and it failed to detect the
11795	 * MII PHY, call wm_set_mdio_slow_mode_hv() as a workaround and retry.
11796	 */
11797 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
11798 (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
11799 || (sc->sc_type == WM_T_PCH_TGP))
11800 && (LIST_FIRST(&mii->mii_phys) == NULL)) {
11801 wm_set_mdio_slow_mode_hv(sc);
11802 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11803 MII_OFFSET_ANY, MIIF_DOPAUSE);
11804 }
11805
11806 /*
11807 * (For ICH8 variants)
11808 * If PHY detection failed, use BM's r/w function and retry.
11809 */
11810 if (LIST_FIRST(&mii->mii_phys) == NULL) {
11811 /* if failed, retry with *_bm_* */
11812 aprint_verbose_dev(dev, "Assumed PHY access function "
11813 "(type = %d) might be incorrect. Use BM and retry.\n",
11814 sc->sc_phytype);
11815 sc->sc_phytype = WMPHY_BM;
11816 mii->mii_readreg = wm_gmii_bm_readreg;
11817 mii->mii_writereg = wm_gmii_bm_writereg;
11818
11819 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11820 MII_OFFSET_ANY, MIIF_DOPAUSE);
11821 }
11822
11823 if (LIST_FIRST(&mii->mii_phys) == NULL) {
11824		/* No PHY was found */
11825 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
11826 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
11827 sc->sc_phytype = WMPHY_NONE;
11828 } else {
11829 struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
11830
11831		/*
11832		 * PHY found! Check the PHY type again with the second call of
11833		 * wm_gmii_setup_phytype().
11834		 */
11835 wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
11836 child->mii_mpd_model);
11837
11838 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
11839 }
11840 }
11841
11842 /*
11843 * wm_gmii_mediachange: [ifmedia interface function]
11844 *
11845 * Set hardware to newly-selected media on a 1000BASE-T device.
11846 */
11847 static int
11848 wm_gmii_mediachange(struct ifnet *ifp)
11849 {
11850 struct wm_softc *sc = ifp->if_softc;
11851 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11852 uint32_t reg;
11853 int rc;
11854
11855 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11856 device_xname(sc->sc_dev), __func__));
11857
11858 KASSERT(mutex_owned(sc->sc_core_lock));
11859
11860 if ((sc->sc_if_flags & IFF_UP) == 0)
11861 return 0;
11862
11863 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
11864 if ((sc->sc_type == WM_T_82580)
11865 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
11866 || (sc->sc_type == WM_T_I211)) {
11867 reg = CSR_READ(sc, WMREG_PHPM);
11868 reg &= ~PHPM_GO_LINK_D;
11869 CSR_WRITE(sc, WMREG_PHPM, reg);
11870 }
11871
11872 /* Disable D0 LPLU. */
11873 wm_lplu_d0_disable(sc);
11874
11875 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
11876 sc->sc_ctrl |= CTRL_SLU;
11877 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11878 || (sc->sc_type > WM_T_82543)) {
11879 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
11880 } else {
11881 sc->sc_ctrl &= ~CTRL_ASDE;
11882 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11883 if (ife->ifm_media & IFM_FDX)
11884 sc->sc_ctrl |= CTRL_FD;
11885 switch (IFM_SUBTYPE(ife->ifm_media)) {
11886 case IFM_10_T:
11887 sc->sc_ctrl |= CTRL_SPEED_10;
11888 break;
11889 case IFM_100_TX:
11890 sc->sc_ctrl |= CTRL_SPEED_100;
11891 break;
11892 case IFM_1000_T:
11893 sc->sc_ctrl |= CTRL_SPEED_1000;
11894 break;
11895 case IFM_NONE:
11896 /* There is no specific setting for IFM_NONE */
11897 break;
11898 default:
11899 panic("wm_gmii_mediachange: bad media 0x%x",
11900 ife->ifm_media);
11901 }
11902 }
11903 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11904 CSR_WRITE_FLUSH(sc);
11905
11906 if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
11907 wm_serdes_mediachange(ifp);
11908
11909 if (sc->sc_type <= WM_T_82543)
11910 wm_gmii_reset(sc);
11911 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
11912 && ((sc->sc_flags & WM_F_SGMII) != 0)) {
11913		/* Allow time for the SFP cage to power up the PHY */
11914 delay(300 * 1000);
11915 wm_gmii_reset(sc);
11916 }
11917
11918 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
11919 return 0;
11920 return rc;
11921 }
11922
11923 /*
11924 * wm_gmii_mediastatus: [ifmedia interface function]
11925 *
11926 * Get the current interface media status on a 1000BASE-T device.
11927 */
11928 static void
11929 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11930 {
11931 struct wm_softc *sc = ifp->if_softc;
11932 struct ethercom *ec = &sc->sc_ethercom;
11933 struct mii_data *mii;
11934 bool dopoll = true;
11935
11936 /*
11937 * In normal drivers, ether_mediastatus() is called here.
11938 * To avoid calling mii_pollstat(), ether_mediastatus() is open coded.
11939 */
11940 KASSERT(mutex_owned(sc->sc_core_lock));
11941 KASSERT(ec->ec_mii != NULL);
11942 KASSERT(mii_locked(ec->ec_mii));
11943
11944 mii = ec->ec_mii;
11945 if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
11946 struct timeval now;
11947
11948 getmicrotime(&now);
11949 if (timercmp(&now, &sc->sc_linkup_delay_time, <))
11950 dopoll = false;
11951 else if (sc->sc_linkup_delay_time.tv_sec != 0) {
11952 /* Simplify by checking tv_sec only. It's enough. */
11953
11954 sc->sc_linkup_delay_time.tv_sec = 0;
11955 sc->sc_linkup_delay_time.tv_usec = 0;
11956 }
11957 }
11958
11959 /*
11960 * Don't call mii_pollstat() while doing workaround.
11961 * See also wm_linkintr_gmii() and wm_tick().
11962 */
11963 if (dopoll)
11964 mii_pollstat(mii);
11965 ifmr->ifm_active = mii->mii_media_active;
11966 ifmr->ifm_status = mii->mii_media_status;
11967
11968 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
11969 | sc->sc_flowflags;
11970 }
11971
11972 #define MDI_IO CTRL_SWDPIN(2)
11973 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
11974 #define MDI_CLK CTRL_SWDPIN(3)
11975
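/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang the given value, MSB first, onto the MDIO line of the
 *	i82543, toggling MDC via the software-definable pins in CTRL.
 */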
11976 static void
11977 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
11978 {
11979 uint32_t i, v;
11980
11981 v = CSR_READ(sc, WMREG_CTRL);
11982 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11983 v |= MDI_DIR | CTRL_SWDPIO(3);
11984
11985 for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
11986 if (data & i)
11987 v |= MDI_IO;
11988 else
11989 v &= ~MDI_IO;
11990 CSR_WRITE(sc, WMREG_CTRL, v);
11991 CSR_WRITE_FLUSH(sc);
11992 delay(10);
11993 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11994 CSR_WRITE_FLUSH(sc);
11995 delay(10);
11996 CSR_WRITE(sc, WMREG_CTRL, v);
11997 CSR_WRITE_FLUSH(sc);
11998 delay(10);
11999 }
12000 }
12001
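/*
 * wm_i82543_mii_recvbits:
 *
 *	Turn the MDIO line around and clock in a 16-bit value from the
 *	PHY, MSB first.
 */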
12002 static uint16_t
12003 wm_i82543_mii_recvbits(struct wm_softc *sc)
12004 {
12005 uint32_t v, i;
12006 uint16_t data = 0;
12007
12008 v = CSR_READ(sc, WMREG_CTRL);
12009 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
12010 v |= CTRL_SWDPIO(3);
12011
12012 CSR_WRITE(sc, WMREG_CTRL, v);
12013 CSR_WRITE_FLUSH(sc);
12014 delay(10);
12015 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
12016 CSR_WRITE_FLUSH(sc);
12017 delay(10);
12018 CSR_WRITE(sc, WMREG_CTRL, v);
12019 CSR_WRITE_FLUSH(sc);
12020 delay(10);
12021
12022 for (i = 0; i < 16; i++) {
12023 data <<= 1;
12024 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
12025 CSR_WRITE_FLUSH(sc);
12026 delay(10);
12027 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
12028 data |= 1;
12029 CSR_WRITE(sc, WMREG_CTRL, v);
12030 CSR_WRITE_FLUSH(sc);
12031 delay(10);
12032 }
12033
12034 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
12035 CSR_WRITE_FLUSH(sc);
12036 delay(10);
12037 CSR_WRITE(sc, WMREG_CTRL, v);
12038 CSR_WRITE_FLUSH(sc);
12039 delay(10);
12040
12041 return data;
12042 }
12043
12044 #undef MDI_IO
12045 #undef MDI_DIR
12046 #undef MDI_CLK
12047
12048 /*
12049 * wm_gmii_i82543_readreg: [mii interface function]
12050 *
12051 * Read a PHY register on the GMII (i82543 version).
12052 */
12053 static int
12054 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
12055 {
12056 struct wm_softc *sc = device_private(dev);
12057
12058 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
12059 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
12060 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
12061 *val = wm_i82543_mii_recvbits(sc) & 0xffff;
12062
12063 DPRINTF(sc, WM_DEBUG_GMII,
12064 ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
12065 device_xname(dev), phy, reg, *val));
12066
12067 return 0;
12068 }
12069
12070 /*
12071 * wm_gmii_i82543_writereg: [mii interface function]
12072 *
12073 * Write a PHY register on the GMII (i82543 version).
12074 */
12075 static int
12076 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
12077 {
12078 struct wm_softc *sc = device_private(dev);
12079
12080 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
12081 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
12082 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
12083 (MII_COMMAND_START << 30), 32);
12084
12085 return 0;
12086 }
12087
12088 /*
12089 * wm_gmii_mdic_readreg: [mii interface function]
12090 *
12091 * Read a PHY register on the GMII.
12092 */
12093 static int
12094 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
12095 {
12096 struct wm_softc *sc = device_private(dev);
12097 uint32_t mdic = 0;
12098 int i;
12099
12100 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
12101 && (reg > MII_ADDRMASK)) {
12102 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12103 __func__, sc->sc_phytype, reg);
12104 reg &= MII_ADDRMASK;
12105 }
12106
12107 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
12108 MDIC_REGADD(reg));
12109
12110 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
12111 delay(50);
12112 mdic = CSR_READ(sc, WMREG_MDIC);
12113 if (mdic & MDIC_READY)
12114 break;
12115 }
12116
12117 if ((mdic & MDIC_READY) == 0) {
12118 DPRINTF(sc, WM_DEBUG_GMII,
12119 ("%s: MDIC read timed out: phy %d reg %d\n",
12120 device_xname(dev), phy, reg));
12121 return ETIMEDOUT;
12122 } else if (mdic & MDIC_E) {
12123 /* This is normal if no PHY is present. */
12124 DPRINTF(sc, WM_DEBUG_GMII,
12125 ("%s: MDIC read error: phy %d reg %d\n",
12126 device_xname(sc->sc_dev), phy, reg));
12127 return -1;
12128 } else
12129 *val = MDIC_DATA(mdic);
12130
12131 /*
12132 * Allow some time after each MDIC transaction to avoid
12133 * reading duplicate data in the next MDIC transaction.
12134 */
12135 if (sc->sc_type == WM_T_PCH2)
12136 delay(100);
12137
12138 return 0;
12139 }
12140
12141 /*
12142 * wm_gmii_mdic_writereg: [mii interface function]
12143 *
12144 * Write a PHY register on the GMII.
12145 */
12146 static int
12147 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
12148 {
12149 struct wm_softc *sc = device_private(dev);
12150 uint32_t mdic = 0;
12151 int i;
12152
12153 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
12154 && (reg > MII_ADDRMASK)) {
12155 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12156 __func__, sc->sc_phytype, reg);
12157 reg &= MII_ADDRMASK;
12158 }
12159
12160 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
12161 MDIC_REGADD(reg) | MDIC_DATA(val));
12162
12163 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
12164 delay(50);
12165 mdic = CSR_READ(sc, WMREG_MDIC);
12166 if (mdic & MDIC_READY)
12167 break;
12168 }
12169
12170 if ((mdic & MDIC_READY) == 0) {
12171 DPRINTF(sc, WM_DEBUG_GMII,
12172 ("%s: MDIC write timed out: phy %d reg %d\n",
12173 device_xname(dev), phy, reg));
12174 return ETIMEDOUT;
12175 } else if (mdic & MDIC_E) {
12176 DPRINTF(sc, WM_DEBUG_GMII,
12177 ("%s: MDIC write error: phy %d reg %d\n",
12178 device_xname(dev), phy, reg));
12179 return -1;
12180 }
12181
12182 /*
12183 * Allow some time after each MDIC transaction to avoid
12184 * reading duplicate data in the next MDIC transaction.
12185 */
12186 if (sc->sc_type == WM_T_PCH2)
12187 delay(100);
12188
12189 return 0;
12190 }
12191
12192 /*
12193 * wm_gmii_i82544_readreg: [mii interface function]
12194 *
12195 * Read a PHY register on the GMII.
12196 */
12197 static int
12198 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
12199 {
12200 struct wm_softc *sc = device_private(dev);
12201 int rv;
12202
12203 rv = sc->phy.acquire(sc);
12204 if (rv != 0) {
12205 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12206 return rv;
12207 }
12208
12209 rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
12210
12211 sc->phy.release(sc);
12212
12213 return rv;
12214 }
12215
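/*
 * wm_gmii_i82544_readreg_locked:
 *
 *	As wm_gmii_i82544_readreg(), but with the PHY semaphore already
 *	held. IGP PHYs require a page select write for registers above
 *	BME1000_MAX_MULTI_PAGE_REG.
 */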
12216 static int
12217 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12218 {
12219 struct wm_softc *sc = device_private(dev);
12220 int rv;
12221
12222 switch (sc->sc_phytype) {
12223 case WMPHY_IGP:
12224 case WMPHY_IGP_2:
12225 case WMPHY_IGP_3:
12226 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12227 rv = wm_gmii_mdic_writereg(dev, phy,
12228 IGPHY_PAGE_SELECT, reg);
12229 if (rv != 0)
12230 return rv;
12231 }
12232 break;
12233 default:
12234 #ifdef WM_DEBUG
12235 if ((reg >> MII_ADDRBITS) != 0)
12236 device_printf(dev,
12237 "%s: PHYTYPE = 0x%x, addr = 0x%02x\n",
12238 __func__, sc->sc_phytype, reg);
12239 #endif
12240 break;
12241 }
12242
12243 return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12244 }
12245
12246 /*
12247 * wm_gmii_i82544_writereg: [mii interface function]
12248 *
12249 * Write a PHY register on the GMII.
12250 */
12251 static int
12252 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
12253 {
12254 struct wm_softc *sc = device_private(dev);
12255 int rv;
12256
12257 rv = sc->phy.acquire(sc);
12258 if (rv != 0) {
12259 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12260 return rv;
12261 }
12262
12263 rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
12264 sc->phy.release(sc);
12265
12266 return rv;
12267 }
12268
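/*
 * wm_gmii_i82544_writereg_locked:
 *
 *	As wm_gmii_i82544_writereg(), but with the PHY semaphore already
 *	held.
 */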
12269 static int
12270 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12271 {
12272 struct wm_softc *sc = device_private(dev);
12273 int rv;
12274
12275 switch (sc->sc_phytype) {
12276 case WMPHY_IGP:
12277 case WMPHY_IGP_2:
12278 case WMPHY_IGP_3:
12279 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12280 rv = wm_gmii_mdic_writereg(dev, phy,
12281 IGPHY_PAGE_SELECT, reg);
12282 if (rv != 0)
12283 return rv;
12284 }
12285 break;
12286 default:
12287 #ifdef WM_DEBUG
12288 if ((reg >> MII_ADDRBITS) != 0)
12289 device_printf(dev,
12290 "%s: PHYTYPE == 0x%x, addr = 0x%02x",
12291 __func__, sc->sc_phytype, reg);
12292 #endif
12293 break;
12294 }
12295
12296 return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12297 }
12298
12299 /*
12300 * wm_gmii_i80003_readreg: [mii interface function]
12301 *
12302  *	Read a PHY register on the Kumeran bus.
12303 * This could be handled by the PHY layer if we didn't have to lock the
12304 * resource ...
12305 */
12306 static int
12307 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
12308 {
12309 struct wm_softc *sc = device_private(dev);
12310 int page_select;
12311 uint16_t temp, temp2;
12312 int rv;
12313
12314	if (phy != 1) /* Only one PHY on Kumeran bus */
12315 return -1;
12316
12317 rv = sc->phy.acquire(sc);
12318 if (rv != 0) {
12319 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12320 return rv;
12321 }
12322
12323 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
12324 page_select = GG82563_PHY_PAGE_SELECT;
12325 else {
12326 /*
12327 * Use Alternative Page Select register to access registers
12328 * 30 and 31.
12329 */
12330 page_select = GG82563_PHY_PAGE_SELECT_ALT;
12331 }
12332 temp = reg >> GG82563_PAGE_SHIFT;
12333 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
12334 goto out;
12335
12336 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
12337 /*
12338		 * Wait an extra 200us to work around a bug in the ready bit
12339		 * of the MDIC register.
12340 */
12341 delay(200);
12342 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
12343 if ((rv != 0) || (temp2 != temp)) {
12344 device_printf(dev, "%s failed\n", __func__);
12345 rv = -1;
12346 goto out;
12347 }
12348 delay(200);
12349 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12350 delay(200);
12351 } else
12352 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12353
12354 out:
12355 sc->phy.release(sc);
12356 return rv;
12357 }
12358
12359 /*
12360 * wm_gmii_i80003_writereg: [mii interface function]
12361 *
12362  *	Write a PHY register on the Kumeran bus.
12363 * This could be handled by the PHY layer if we didn't have to lock the
12364 * resource ...
12365 */
12366 static int
12367 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
12368 {
12369 struct wm_softc *sc = device_private(dev);
12370 int page_select, rv;
12371 uint16_t temp, temp2;
12372
12373	if (phy != 1) /* Only one PHY on Kumeran bus */
12374 return -1;
12375
12376 rv = sc->phy.acquire(sc);
12377 if (rv != 0) {
12378 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12379 return rv;
12380 }
12381
12382 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
12383 page_select = GG82563_PHY_PAGE_SELECT;
12384 else {
12385 /*
12386 * Use Alternative Page Select register to access registers
12387 * 30 and 31.
12388 */
12389 page_select = GG82563_PHY_PAGE_SELECT_ALT;
12390 }
12391 temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
12392 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
12393 goto out;
12394
12395 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
12396 /*
12397		 * Wait an extra 200us to work around a bug in the ready bit
12398		 * of the MDIC register.
12399 */
12400 delay(200);
12401 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
12402 if ((rv != 0) || (temp2 != temp)) {
12403 device_printf(dev, "%s failed\n", __func__);
12404 rv = -1;
12405 goto out;
12406 }
12407 delay(200);
12408 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12409 delay(200);
12410 } else
12411 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12412
12413 out:
12414 sc->phy.release(sc);
12415 return rv;
12416 }
12417
12418 /*
12419 * wm_gmii_bm_readreg: [mii interface function]
12420 *
12421  *	Read a PHY register on the BM PHY (82567).
12422 * This could be handled by the PHY layer if we didn't have to lock the
12423 * resource ...
12424 */
12425 static int
12426 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
12427 {
12428 struct wm_softc *sc = device_private(dev);
12429 uint16_t page = reg >> BME1000_PAGE_SHIFT;
12430 int rv;
12431
12432 rv = sc->phy.acquire(sc);
12433 if (rv != 0) {
12434 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12435 return rv;
12436 }
12437
12438 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
12439 phy = ((page >= 768) || ((page == 0) && (reg == 25))
12440 || (reg == 31)) ? 1 : phy;
12441	/* Page 800 works differently from the rest, so it has its own function */
12442 if (page == BM_WUC_PAGE) {
12443 rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
12444 goto release;
12445 }
12446
12447 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12448 if ((phy == 1) && (sc->sc_type != WM_T_82574)
12449 && (sc->sc_type != WM_T_82583))
12450 rv = wm_gmii_mdic_writereg(dev, phy,
12451 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12452 else
12453 rv = wm_gmii_mdic_writereg(dev, phy,
12454 BME1000_PHY_PAGE_SELECT, page);
12455 if (rv != 0)
12456 goto release;
12457 }
12458
12459 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12460
12461 release:
12462 sc->phy.release(sc);
12463 return rv;
12464 }
12465
12466 /*
12467 * wm_gmii_bm_writereg: [mii interface function]
12468 *
12469  *	Write a PHY register on the BM PHY (82567).
12470 * This could be handled by the PHY layer if we didn't have to lock the
12471 * resource ...
12472 */
12473 static int
12474 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
12475 {
12476 struct wm_softc *sc = device_private(dev);
12477 uint16_t page = reg >> BME1000_PAGE_SHIFT;
12478 int rv;
12479
12480 rv = sc->phy.acquire(sc);
12481 if (rv != 0) {
12482 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12483 return rv;
12484 }
12485
12486 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
12487 phy = ((page >= 768) || ((page == 0) && (reg == 25))
12488 || (reg == 31)) ? 1 : phy;
12489	/* Page 800 works differently from the rest, so it has its own function */
12490 if (page == BM_WUC_PAGE) {
12491 rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
12492 goto release;
12493 }
12494
12495 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12496 if ((phy == 1) && (sc->sc_type != WM_T_82574)
12497 && (sc->sc_type != WM_T_82583))
12498 rv = wm_gmii_mdic_writereg(dev, phy,
12499 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12500 else
12501 rv = wm_gmii_mdic_writereg(dev, phy,
12502 BME1000_PHY_PAGE_SELECT, page);
12503 if (rv != 0)
12504 goto release;
12505 }
12506
12507 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12508
12509 release:
12510 sc->phy.release(sc);
12511 return rv;
12512 }
12513
12514 /*
12515 * wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
12516 * @dev: pointer to the HW structure
12517 * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
12518 *
12519 * Assumes semaphore already acquired and phy_reg points to a valid memory
12520 * address to store contents of the BM_WUC_ENABLE_REG register.
12521 */
12522 static int
12523 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12524 {
12525 #ifdef WM_DEBUG
12526 struct wm_softc *sc = device_private(dev);
12527 #endif
12528 uint16_t temp;
12529 int rv;
12530
12531 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12532 device_xname(dev), __func__));
12533
12534 if (!phy_regp)
12535 return -1;
12536
12537 /* All page select, port ctrl and wakeup registers use phy address 1 */
12538
12539 /* Select Port Control Registers page */
12540 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12541 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12542 if (rv != 0)
12543 return rv;
12544
12545 /* Read WUCE and save it */
12546 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
12547 if (rv != 0)
12548 return rv;
12549
12550 /* Enable both PHY wakeup mode and Wakeup register page writes.
12551 * Prevent a power state change by disabling ME and Host PHY wakeup.
12552 */
12553 temp = *phy_regp;
12554 temp |= BM_WUC_ENABLE_BIT;
12555 temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
12556
12557 if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
12558 return rv;
12559
12560	/* Select the Host Wakeup Registers page - the caller is now able to
12561	 * write registers on the Wakeup registers page.
12562	 */
12563 return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12564 BM_WUC_PAGE << IGP3_PAGE_SHIFT);
12565 }
12566
12567 /*
12568 * wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
12569 * @dev: pointer to the HW structure
12570 * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
12571 *
12572 * Restore BM_WUC_ENABLE_REG to its original value.
12573 *
12574 * Assumes semaphore already acquired and *phy_reg is the contents of the
12575 * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
12576 * caller.
12577 */
12578 static int
12579 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12580 {
12581 #ifdef WM_DEBUG
12582 struct wm_softc *sc = device_private(dev);
12583 #endif
12584
12585 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12586 device_xname(dev), __func__));
12587
12588 if (!phy_regp)
12589 return -1;
12590
12591 /* Select Port Control Registers page */
12592 wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12593 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12594
12595 /* Restore 769.17 to its original value */
12596 wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
12597
12598 return 0;
12599 }
12600
12601 /*
12602 * wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
12603 * @sc: pointer to the HW structure
12604 * @offset: register offset to be read or written
12605 * @val: pointer to the data to read or write
12606 * @rd: determines if operation is read or write
12607 * @page_set: BM_WUC_PAGE already set and access enabled
12608 *
12609 * Read the PHY register at offset and store the retrieved information in
12610  * data, or write data to the PHY register at offset. Note that the
12611  * procedure for accessing the PHY wakeup registers differs from that for
12612  * the other PHY registers. It works as follows:
12613  * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
12614  * 2) Set page to 800 for host (801 if the access is for manageability)
12615 * 3) Write the address using the address opcode (0x11)
12616 * 4) Read or write the data using the data opcode (0x12)
12617 * 5) Restore 769.17.2 to its original value
12618 *
12619 * Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
12620 * step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
12621 *
12622 * Assumes semaphore is already acquired. When page_set==TRUE, assumes
12623 * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
12624  * is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
12625 */
12626 static int
12627 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
12628 bool page_set)
12629 {
12630 struct wm_softc *sc = device_private(dev);
12631 uint16_t regnum = BM_PHY_REG_NUM(offset);
12632 uint16_t page = BM_PHY_REG_PAGE(offset);
12633 uint16_t wuce;
12634 int rv = 0;
12635
12636 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12637 device_xname(dev), __func__));
12638 /* XXX Gig must be disabled for MDIO accesses to page 800 */
12639 if ((sc->sc_type == WM_T_PCH)
12640 && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
12641 device_printf(dev,
12642 "Attempting to access page %d while gig enabled.\n", page);
12643 }
12644
12645 if (!page_set) {
12646 /* Enable access to PHY wakeup registers */
12647 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
12648 if (rv != 0) {
12649 device_printf(dev,
12650 "%s: Could not enable PHY wakeup reg access\n",
12651 __func__);
12652 return rv;
12653 }
12654 }
12655 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
12656 device_xname(sc->sc_dev), __func__, page, regnum));
12657
12658 	/*
12659 	 * 3) and 4): write the register address with the address opcode
12660 	 * (0x11), then transfer the data with the data opcode (0x12).
12661 	 */
12662
12663 /* Write the Wakeup register page offset value using opcode 0x11 */
12664 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
12665 	if (rv != 0)
12666 		goto release;
12667
12668 if (rd) {
12669 /* Read the Wakeup register page value using opcode 0x12 */
12670 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
12671 } else {
12672 /* Write the Wakeup register page value using opcode 0x12 */
12673 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
12674 	}
12675 
12676 release:
12677 	if (!page_set) {
12678 		int rv2 = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
12679 		rv = (rv != 0) ? rv : rv2;
12680 	}
12681 	return rv;
12682 }
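
/*
 * Example (illustrative): with the semaphore held, a wakeup register is
 * read by passing an offset whose BM_PHY_REG_PAGE() decodes to
 * BM_WUC_PAGE (800). Assuming a hypothetical BM_PHY_REG(page, reg)
 * encoding macro (the inverse of BM_PHY_REG_PAGE()/BM_PHY_REG_NUM()),
 * reading wakeup register 1 would look like:
 *
 *	uint16_t data;
 *	rv = wm_access_phy_wakeup_reg_bm(dev,
 *	    BM_PHY_REG(BM_WUC_PAGE, 1), &data, 1, false);
 */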
12683
12684 /*
12685 * wm_gmii_hv_readreg: [mii interface function]
12686 *
12687  *	Read a PHY register on the HV (PCH and newer) PHY.
12688 * This could be handled by the PHY layer if we didn't have to lock the
12689 * resource ...
12690 */
12691 static int
12692 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
12693 {
12694 struct wm_softc *sc = device_private(dev);
12695 int rv;
12696
12697 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12698 device_xname(dev), __func__));
12699
12700 rv = sc->phy.acquire(sc);
12701 if (rv != 0) {
12702 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12703 return rv;
12704 }
12705
12706 rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
12707 sc->phy.release(sc);
12708 return rv;
12709 }
12710
12711 static int
12712 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12713 {
12714 uint16_t page = BM_PHY_REG_PAGE(reg);
12715 uint16_t regnum = BM_PHY_REG_NUM(reg);
12716 int rv;
12717
12718 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12719
12720 /* Page 800 works differently than the rest so it has its own func */
12721 if (page == BM_WUC_PAGE)
12722 return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
12723
12724 /*
12725 * Lower than page 768 works differently than the rest so it has its
12726 * own func
12727 */
12728 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12729 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
12730 return -1;
12731 }
12732
12733 /*
12734 * XXX I21[789] documents say that the SMBus Address register is at
12735 * PHY address 01, Page 0 (not 768), Register 26.
12736 */
12737 if (page == HV_INTC_FC_PAGE_START)
12738 page = 0;
12739
12740 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12741 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12742 page << BME1000_PAGE_SHIFT);
12743 if (rv != 0)
12744 return rv;
12745 }
12746
12747 return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
12748 }
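
/*
 * Note (illustrative): the "reg" argument of the HV access functions
 * packs a page number and a register number into a single integer;
 * BM_PHY_REG_PAGE() and BM_PHY_REG_NUM() unpack the two fields.
 * Assuming HV_INTC_FC_PAGE_START is page 768, as the comments above
 * suggest, a reg decoding to page 769 or higher is always routed to PHY
 * address 1, while page 0 registers use the caller-supplied address.
 */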
12749
12750 /*
12751 * wm_gmii_hv_writereg: [mii interface function]
12752 *
12753  *	Write a PHY register on the HV (PCH and newer) PHY.
12754 * This could be handled by the PHY layer if we didn't have to lock the
12755 * resource ...
12756 */
12757 static int
12758 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
12759 {
12760 struct wm_softc *sc = device_private(dev);
12761 int rv;
12762
12763 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12764 device_xname(dev), __func__));
12765
12766 rv = sc->phy.acquire(sc);
12767 if (rv != 0) {
12768 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12769 return rv;
12770 }
12771
12772 rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
12773 sc->phy.release(sc);
12774
12775 return rv;
12776 }
12777
12778 static int
12779 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12780 {
12781 struct wm_softc *sc = device_private(dev);
12782 uint16_t page = BM_PHY_REG_PAGE(reg);
12783 uint16_t regnum = BM_PHY_REG_NUM(reg);
12784 int rv;
12785
12786 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12787
12788 /* Page 800 works differently than the rest so it has its own func */
12789 if (page == BM_WUC_PAGE)
12790 return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
12791 false);
12792
12793 /*
12794 * Lower than page 768 works differently than the rest so it has its
12795 * own func
12796 */
12797 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12798 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
12799 return -1;
12800 }
12801
12802 {
12803 /*
12804 * XXX I21[789] documents say that the SMBus Address register
12805 * is at PHY address 01, Page 0 (not 768), Register 26.
12806 */
12807 if (page == HV_INTC_FC_PAGE_START)
12808 page = 0;
12809
12810 /*
12811 * XXX Workaround MDIO accesses being disabled after entering
12812 * IEEE Power Down (whenever bit 11 of the PHY control
12813 * register is set)
12814 */
12815 if (sc->sc_phytype == WMPHY_82578) {
12816 struct mii_softc *child;
12817
12818 child = LIST_FIRST(&sc->sc_mii.mii_phys);
12819 if ((child != NULL) && (child->mii_mpd_rev >= 1)
12820 && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
12821 && ((val & (1 << 11)) != 0)) {
12822 device_printf(dev, "XXX need workaround\n");
12823 }
12824 }
12825
12826 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12827 rv = wm_gmii_mdic_writereg(dev, 1,
12828 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12829 if (rv != 0)
12830 return rv;
12831 }
12832 }
12833
12834 return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
12835 }
12836
12837 /*
12838 * wm_gmii_82580_readreg: [mii interface function]
12839 *
12840 * Read a PHY register on the 82580 and I350.
12841 * This could be handled by the PHY layer if we didn't have to lock the
12842 * resource ...
12843 */
12844 static int
12845 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
12846 {
12847 struct wm_softc *sc = device_private(dev);
12848 int rv;
12849
12850 rv = sc->phy.acquire(sc);
12851 if (rv != 0) {
12852 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12853 return rv;
12854 }
12855
12856 #ifdef DIAGNOSTIC
12857 if (reg > MII_ADDRMASK) {
12858 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12859 __func__, sc->sc_phytype, reg);
12860 reg &= MII_ADDRMASK;
12861 }
12862 #endif
12863 rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
12864
12865 sc->phy.release(sc);
12866 return rv;
12867 }
12868
12869 /*
12870 * wm_gmii_82580_writereg: [mii interface function]
12871 *
12872 * Write a PHY register on the 82580 and I350.
12873 * This could be handled by the PHY layer if we didn't have to lock the
12874 * resource ...
12875 */
12876 static int
12877 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
12878 {
12879 struct wm_softc *sc = device_private(dev);
12880 int rv;
12881
12882 rv = sc->phy.acquire(sc);
12883 if (rv != 0) {
12884 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12885 return rv;
12886 }
12887
12888 #ifdef DIAGNOSTIC
12889 if (reg > MII_ADDRMASK) {
12890 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12891 __func__, sc->sc_phytype, reg);
12892 reg &= MII_ADDRMASK;
12893 }
12894 #endif
12895 rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
12896
12897 sc->phy.release(sc);
12898 return rv;
12899 }
12900
12901 /*
12902 * wm_gmii_gs40g_readreg: [mii interface function]
12903 *
12904  *	Read a PHY register on the I210 and I211.
12905 * This could be handled by the PHY layer if we didn't have to lock the
12906 * resource ...
12907 */
12908 static int
12909 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
12910 {
12911 struct wm_softc *sc = device_private(dev);
12912 int page, offset;
12913 int rv;
12914
12915 /* Acquire semaphore */
12916 rv = sc->phy.acquire(sc);
12917 if (rv != 0) {
12918 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12919 return rv;
12920 }
12921
12922 /* Page select */
12923 page = reg >> GS40G_PAGE_SHIFT;
12924 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12925 if (rv != 0)
12926 goto release;
12927
12928 /* Read reg */
12929 offset = reg & GS40G_OFFSET_MASK;
12930 rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
12931
12932 release:
12933 sc->phy.release(sc);
12934 return rv;
12935 }
12936
12937 /*
12938 * wm_gmii_gs40g_writereg: [mii interface function]
12939 *
12940 * Write a PHY register on the I210 and I211.
12941 * This could be handled by the PHY layer if we didn't have to lock the
12942 * resource ...
12943 */
12944 static int
12945 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
12946 {
12947 struct wm_softc *sc = device_private(dev);
12948 uint16_t page;
12949 int offset, rv;
12950
12951 /* Acquire semaphore */
12952 rv = sc->phy.acquire(sc);
12953 if (rv != 0) {
12954 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12955 return rv;
12956 }
12957
12958 /* Page select */
12959 page = reg >> GS40G_PAGE_SHIFT;
12960 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12961 if (rv != 0)
12962 goto release;
12963
12964 /* Write reg */
12965 offset = reg & GS40G_OFFSET_MASK;
12966 rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
12967
12968 release:
12969 /* Release semaphore */
12970 sc->phy.release(sc);
12971 return rv;
12972 }
12973
12974 /*
12975 * wm_gmii_statchg: [mii interface function]
12976 *
12977 * Callback from MII layer when media changes.
12978 */
12979 static void
12980 wm_gmii_statchg(struct ifnet *ifp)
12981 {
12982 struct wm_softc *sc = ifp->if_softc;
12983 struct mii_data *mii = &sc->sc_mii;
12984
12985 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
12986 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12987 sc->sc_fcrtl &= ~FCRTL_XONE;
12988
12989 /* Get flow control negotiation result. */
12990 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
12991 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
12992 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
12993 mii->mii_media_active &= ~IFM_ETH_FMASK;
12994 }
12995
12996 if (sc->sc_flowflags & IFM_FLOW) {
12997 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
12998 sc->sc_ctrl |= CTRL_TFCE;
12999 sc->sc_fcrtl |= FCRTL_XONE;
13000 }
13001 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
13002 sc->sc_ctrl |= CTRL_RFCE;
13003 }
13004
13005 if (mii->mii_media_active & IFM_FDX) {
13006 DPRINTF(sc, WM_DEBUG_LINK,
13007 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
13008 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
13009 } else {
13010 DPRINTF(sc, WM_DEBUG_LINK,
13011 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
13012 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
13013 }
13014
13015 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13016 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
13017 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
13018 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
13019 if (sc->sc_type == WM_T_80003) {
13020 switch (IFM_SUBTYPE(mii->mii_media_active)) {
13021 case IFM_1000_T:
13022 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
13023 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
13024 sc->sc_tipg = TIPG_1000T_80003_DFLT;
13025 break;
13026 default:
13027 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
13028 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
13029 sc->sc_tipg = TIPG_10_100_80003_DFLT;
13030 break;
13031 }
13032 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
13033 }
13034 }
13035
13036 /* kumeran related (80003, ICH* and PCH*) */
13037
13038 /*
13039 * wm_kmrn_readreg:
13040 *
13041 * Read a kumeran register
13042 */
13043 static int
13044 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
13045 {
13046 int rv;
13047
13048 if (sc->sc_type == WM_T_80003)
13049 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
13050 else
13051 rv = sc->phy.acquire(sc);
13052 if (rv != 0) {
13053 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
13054 __func__);
13055 return rv;
13056 }
13057
13058 rv = wm_kmrn_readreg_locked(sc, reg, val);
13059
13060 if (sc->sc_type == WM_T_80003)
13061 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
13062 else
13063 sc->phy.release(sc);
13064
13065 return rv;
13066 }
13067
13068 static int
13069 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
13070 {
13071
13072 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
13073 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
13074 KUMCTRLSTA_REN);
13075 CSR_WRITE_FLUSH(sc);
13076 delay(2);
13077
13078 *val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
13079
13080 return 0;
13081 }
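
/*
 * Layout note (informal): KUMCTRLSTA multiplexes the register offset
 * (KUMCTRLSTA_OFFSET), a read-enable bit (KUMCTRLSTA_REN) and a 16-bit
 * data field (KUMCTRLSTA_MASK) in a single CSR. A read is therefore
 * "write offset | REN, wait, read the data back", while a write, as in
 * wm_kmrn_writereg_locked() below, is a single "write offset | data".
 */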
13082
13083 /*
13084 * wm_kmrn_writereg:
13085 *
13086 * Write a kumeran register
13087 */
13088 static int
13089 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
13090 {
13091 int rv;
13092
13093 if (sc->sc_type == WM_T_80003)
13094 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
13095 else
13096 rv = sc->phy.acquire(sc);
13097 if (rv != 0) {
13098 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
13099 __func__);
13100 return rv;
13101 }
13102
13103 rv = wm_kmrn_writereg_locked(sc, reg, val);
13104
13105 if (sc->sc_type == WM_T_80003)
13106 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
13107 else
13108 sc->phy.release(sc);
13109
13110 return rv;
13111 }
13112
13113 static int
13114 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
13115 {
13116
13117 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
13118 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
13119
13120 return 0;
13121 }
13122
13123 /*
13124  * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
13125 * This access method is different from IEEE MMD.
13126 */
13127 static int
13128 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
13129 {
13130 struct wm_softc *sc = device_private(dev);
13131 int rv;
13132
13133 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
13134 if (rv != 0)
13135 return rv;
13136
13137 if (rd)
13138 rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
13139 else
13140 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
13141 return rv;
13142 }
13143
13144 static int
13145 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
13146 {
13147
13148 return wm_access_emi_reg_locked(dev, reg, val, true);
13149 }
13150
13151 static int
13152 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
13153 {
13154
13155 return wm_access_emi_reg_locked(dev, reg, &val, false);
13156 }
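
/*
 * Usage sketch (illustrative only; EMI_EXAMPLE_REG is a placeholder
 * name, not a register this driver necessarily defines). Every EMI
 * register is reached the same indirect way: its number is written to
 * I82579_EMI_ADDR and the payload moves through I82579_EMI_DATA, with
 * the PHY semaphore held by the caller:
 *
 *	uint16_t data;
 *	rv = wm_read_emi_reg_locked(dev, EMI_EXAMPLE_REG, &data);
 */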
13157
13158 /* SGMII related */
13159
13160 /*
13161 * wm_sgmii_uses_mdio
13162 *
13163 * Check whether the transaction is to the internal PHY or the external
13164 * MDIO interface. Return true if it's MDIO.
13165 */
13166 static bool
13167 wm_sgmii_uses_mdio(struct wm_softc *sc)
13168 {
13169 uint32_t reg;
13170 bool ismdio = false;
13171
13172 switch (sc->sc_type) {
13173 case WM_T_82575:
13174 case WM_T_82576:
13175 reg = CSR_READ(sc, WMREG_MDIC);
13176 ismdio = ((reg & MDIC_DEST) != 0);
13177 break;
13178 case WM_T_82580:
13179 case WM_T_I350:
13180 case WM_T_I354:
13181 case WM_T_I210:
13182 case WM_T_I211:
13183 reg = CSR_READ(sc, WMREG_MDICNFG);
13184 ismdio = ((reg & MDICNFG_DEST) != 0);
13185 break;
13186 default:
13187 break;
13188 }
13189
13190 return ismdio;
13191 }
13192
13193 /* Setup internal SGMII PHY for SFP */
13194 static void
13195 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
13196 {
13197 uint16_t id1, id2, phyreg;
13198 int i, rv;
13199
13200 if (((sc->sc_flags & WM_F_SGMII) == 0)
13201 || ((sc->sc_flags & WM_F_SFP) == 0))
13202 return;
13203
13204 for (i = 0; i < MII_NPHY; i++) {
13205 sc->phy.no_errprint = true;
13206 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
13207 if (rv != 0)
13208 continue;
13209 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
13210 if (rv != 0)
13211 continue;
13212 if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
13213 continue;
13214 sc->phy.no_errprint = false;
13215
13216 sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
13217 phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
13218 phyreg |= ESSR_SGMII_WOC_COPPER;
13219 sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
13220 break;
13221 }
13223 }
13224
13225 /*
13226 * wm_sgmii_readreg: [mii interface function]
13227 *
13228 * Read a PHY register on the SGMII
13229 * This could be handled by the PHY layer if we didn't have to lock the
13230 * resource ...
13231 */
13232 static int
13233 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
13234 {
13235 struct wm_softc *sc = device_private(dev);
13236 int rv;
13237
13238 rv = sc->phy.acquire(sc);
13239 if (rv != 0) {
13240 device_printf(dev, "%s: failed to get semaphore\n", __func__);
13241 return rv;
13242 }
13243
13244 rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
13245
13246 sc->phy.release(sc);
13247 return rv;
13248 }
13249
13250 static int
13251 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
13252 {
13253 struct wm_softc *sc = device_private(dev);
13254 uint32_t i2ccmd;
13255 int i, rv = 0;
13256
13257 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
13258 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
13259 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13260
13261 /* Poll the ready bit */
13262 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13263 delay(50);
13264 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13265 if (i2ccmd & I2CCMD_READY)
13266 break;
13267 }
13268 if ((i2ccmd & I2CCMD_READY) == 0) {
13269 device_printf(dev, "I2CCMD Read did not complete\n");
13270 rv = ETIMEDOUT;
13271 }
13272 if ((i2ccmd & I2CCMD_ERROR) != 0) {
13273 if (!sc->phy.no_errprint)
13274 device_printf(dev, "I2CCMD Error bit set\n");
13275 rv = EIO;
13276 }
13277
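	/*
	 * The I2CCMD data field arrives byte-swapped with respect to
	 * host order (see the matching swap in wm_sgmii_writereg_locked()),
	 * so swap the two bytes of the 16-bit result.
	 */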
13278 *val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
13279
13280 return rv;
13281 }
13282
13283 /*
13284 * wm_sgmii_writereg: [mii interface function]
13285 *
13286 * Write a PHY register on the SGMII.
13287 * This could be handled by the PHY layer if we didn't have to lock the
13288 * resource ...
13289 */
13290 static int
13291 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
13292 {
13293 struct wm_softc *sc = device_private(dev);
13294 int rv;
13295
13296 rv = sc->phy.acquire(sc);
13297 if (rv != 0) {
13298 device_printf(dev, "%s: failed to get semaphore\n", __func__);
13299 return rv;
13300 }
13301
13302 rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
13303
13304 sc->phy.release(sc);
13305
13306 return rv;
13307 }
13308
13309 static int
13310 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
13311 {
13312 struct wm_softc *sc = device_private(dev);
13313 uint32_t i2ccmd;
13314 uint16_t swapdata;
13315 int rv = 0;
13316 int i;
13317
13318 /* Swap the data bytes for the I2C interface */
13319 swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
13320 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
13321 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
13322 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13323
13324 /* Poll the ready bit */
13325 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13326 delay(50);
13327 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13328 if (i2ccmd & I2CCMD_READY)
13329 break;
13330 }
13331 if ((i2ccmd & I2CCMD_READY) == 0) {
13332 device_printf(dev, "I2CCMD Write did not complete\n");
13333 rv = ETIMEDOUT;
13334 }
13335 if ((i2ccmd & I2CCMD_ERROR) != 0) {
13336 device_printf(dev, "I2CCMD Error bit set\n");
13337 rv = EIO;
13338 }
13339
13340 return rv;
13341 }
13342
13343 /* TBI related */
13344
13345 static bool
13346 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
13347 {
13348 bool sig;
13349
13350 sig = ctrl & CTRL_SWDPIN(1);
13351
13352 /*
13353 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
13354 * detect a signal, 1 if they don't.
13355 */
13356 if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
13357 sig = !sig;
13358
13359 return sig;
13360 }
13361
13362 /*
13363 * wm_tbi_mediainit:
13364 *
13365 * Initialize media for use on 1000BASE-X devices.
13366 */
13367 static void
13368 wm_tbi_mediainit(struct wm_softc *sc)
13369 {
13370 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
13371 const char *sep = "";
13372
13373 if (sc->sc_type < WM_T_82543)
13374 sc->sc_tipg = TIPG_WM_DFLT;
13375 else
13376 sc->sc_tipg = TIPG_LG_DFLT;
13377
13378 sc->sc_tbi_serdes_anegticks = 5;
13379
13380 /* Initialize our media structures */
13381 sc->sc_mii.mii_ifp = ifp;
13382 sc->sc_ethercom.ec_mii = &sc->sc_mii;
13383
13384 ifp->if_baudrate = IF_Gbps(1);
13385 if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
13386 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
13387 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
13388 wm_serdes_mediachange, wm_serdes_mediastatus,
13389 sc->sc_core_lock);
13390 } else {
13391 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
13392 wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
13393 }
13394
13395 /*
13396 * SWD Pins:
13397 *
13398 * 0 = Link LED (output)
13399 * 1 = Loss Of Signal (input)
13400 */
13401 sc->sc_ctrl |= CTRL_SWDPIO(0);
13402
13403 /* XXX Perhaps this is only for TBI */
13404 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13405 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
13406
13407 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
13408 sc->sc_ctrl &= ~CTRL_LRST;
13409
13410 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13411
13412 #define ADD(ss, mm, dd) \
13413 do { \
13414 aprint_normal("%s%s", sep, ss); \
13415 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
13416 sep = ", "; \
13417 } while (/*CONSTCOND*/0)
13418
13419 aprint_normal_dev(sc->sc_dev, "");
13420
13421 if (sc->sc_type == WM_T_I354) {
13422 uint32_t status;
13423
13424 status = CSR_READ(sc, WMREG_STATUS);
13425 if (((status & STATUS_2P5_SKU) != 0)
13426 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
13427 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
13428 		} else
13429 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
13430 } else if (sc->sc_type == WM_T_82545) {
13431 /* Only 82545 is LX (XXX except SFP) */
13432 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
13433 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
13434 } else if (sc->sc_sfptype != 0) {
13435 /* XXX wm(4) fiber/serdes don't use ifm_data */
13436 switch (sc->sc_sfptype) {
13437 default:
13438 case SFF_SFP_ETH_FLAGS_1000SX:
13439 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
13440 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
13441 break;
13442 case SFF_SFP_ETH_FLAGS_1000LX:
13443 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
13444 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
13445 break;
13446 case SFF_SFP_ETH_FLAGS_1000CX:
13447 ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
13448 ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
13449 break;
13450 case SFF_SFP_ETH_FLAGS_1000T:
13451 ADD("1000baseT", IFM_1000_T, 0);
13452 ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
13453 break;
13454 case SFF_SFP_ETH_FLAGS_100FX:
13455 ADD("100baseFX", IFM_100_FX, ANAR_TX);
13456 ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
13457 break;
13458 }
13459 } else {
13460 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
13461 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
13462 }
13463 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
13464 aprint_normal("\n");
13465
13466 #undef ADD
13467
13468 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
13469 }
13470
13471 /*
13472 * wm_tbi_mediachange: [ifmedia interface function]
13473 *
13474 * Set hardware to newly-selected media on a 1000BASE-X device.
13475 */
13476 static int
13477 wm_tbi_mediachange(struct ifnet *ifp)
13478 {
13479 struct wm_softc *sc = ifp->if_softc;
13480 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
13481 uint32_t status, ctrl;
13482 bool signal;
13483 int i;
13484
13485 KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
13486 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
13487 /* XXX need some work for >= 82571 and < 82575 */
13488 if (sc->sc_type < WM_T_82575)
13489 return 0;
13490 }
13491
13492 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
13493 || (sc->sc_type >= WM_T_82575))
13494 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
13495
13496 sc->sc_ctrl &= ~CTRL_LRST;
13497 sc->sc_txcw = TXCW_ANE;
13498 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13499 sc->sc_txcw |= TXCW_FD | TXCW_HD;
13500 else if (ife->ifm_media & IFM_FDX)
13501 sc->sc_txcw |= TXCW_FD;
13502 else
13503 sc->sc_txcw |= TXCW_HD;
13504
13505 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
13506 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
13507
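	/*
	 * Informal note: TXCW plays the role that ANAR plays on copper
	 * PHYs: ANE enables 802.3z autonegotiation, FD/HD advertise the
	 * duplex abilities, and the pause bits advertise symmetric and
	 * asymmetric flow control to the link partner.
	 */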
13508 DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
13509 device_xname(sc->sc_dev), sc->sc_txcw));
13510 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13511 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13512 CSR_WRITE_FLUSH(sc);
13513 delay(1000);
13514
13515 ctrl = CSR_READ(sc, WMREG_CTRL);
13516 signal = wm_tbi_havesignal(sc, ctrl);
13517
13518 DPRINTF(sc, WM_DEBUG_LINK,
13519 ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
13520
13521 if (signal) {
13522 /* Have signal; wait for the link to come up. */
13523 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
13524 delay(10000);
13525 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
13526 break;
13527 }
13528
13529 DPRINTF(sc, WM_DEBUG_LINK,
13530 ("%s: i = %d after waiting for link\n",
13531 device_xname(sc->sc_dev), i));
13532
13533 status = CSR_READ(sc, WMREG_STATUS);
13534 DPRINTF(sc, WM_DEBUG_LINK,
13535 ("%s: status after final read = 0x%x, STATUS_LU = %#"
13536 __PRIxBIT "\n",
13537 device_xname(sc->sc_dev), status, STATUS_LU));
13538 if (status & STATUS_LU) {
13539 /* Link is up. */
13540 DPRINTF(sc, WM_DEBUG_LINK,
13541 ("%s: LINK: set media -> link up %s\n",
13542 device_xname(sc->sc_dev),
13543 (status & STATUS_FD) ? "FDX" : "HDX"));
13544
13545 /*
13546 * NOTE: CTRL will update TFCE and RFCE automatically,
13547 * so we should update sc->sc_ctrl
13548 */
13549 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
13550 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
13551 sc->sc_fcrtl &= ~FCRTL_XONE;
13552 if (status & STATUS_FD)
13553 sc->sc_tctl |=
13554 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
13555 else
13556 sc->sc_tctl |=
13557 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
13558 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
13559 sc->sc_fcrtl |= FCRTL_XONE;
13560 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
13561 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
13562 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
13563 sc->sc_tbi_linkup = 1;
13564 } else {
13565 if (i == WM_LINKUP_TIMEOUT)
13566 wm_check_for_link(sc);
13567 /* Link is down. */
13568 DPRINTF(sc, WM_DEBUG_LINK,
13569 ("%s: LINK: set media -> link down\n",
13570 device_xname(sc->sc_dev)));
13571 sc->sc_tbi_linkup = 0;
13572 }
13573 } else {
13574 DPRINTF(sc, WM_DEBUG_LINK,
13575 ("%s: LINK: set media -> no signal\n",
13576 device_xname(sc->sc_dev)));
13577 sc->sc_tbi_linkup = 0;
13578 }
13579
13580 wm_tbi_serdes_set_linkled(sc);
13581
13582 return 0;
13583 }
13584
13585 /*
13586 * wm_tbi_mediastatus: [ifmedia interface function]
13587 *
13588 * Get the current interface media status on a 1000BASE-X device.
13589 */
13590 static void
13591 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13592 {
13593 struct wm_softc *sc = ifp->if_softc;
13594 uint32_t ctrl, status;
13595
13596 ifmr->ifm_status = IFM_AVALID;
13597 ifmr->ifm_active = IFM_ETHER;
13598
13599 status = CSR_READ(sc, WMREG_STATUS);
13600 if ((status & STATUS_LU) == 0) {
13601 ifmr->ifm_active |= IFM_NONE;
13602 return;
13603 }
13604
13605 ifmr->ifm_status |= IFM_ACTIVE;
13606 /* Only 82545 is LX */
13607 if (sc->sc_type == WM_T_82545)
13608 ifmr->ifm_active |= IFM_1000_LX;
13609 else
13610 ifmr->ifm_active |= IFM_1000_SX;
13611 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
13612 ifmr->ifm_active |= IFM_FDX;
13613 else
13614 ifmr->ifm_active |= IFM_HDX;
13615 ctrl = CSR_READ(sc, WMREG_CTRL);
13616 if (ctrl & CTRL_RFCE)
13617 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
13618 if (ctrl & CTRL_TFCE)
13619 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
13620 }
13621
13622 /* XXX TBI only */
13623 static int
13624 wm_check_for_link(struct wm_softc *sc)
13625 {
13626 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
13627 uint32_t rxcw;
13628 uint32_t ctrl;
13629 uint32_t status;
13630 bool signal;
13631
13632 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
13633 device_xname(sc->sc_dev), __func__));
13634
13635 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
13636 /* XXX need some work for >= 82571 */
13637 if (sc->sc_type >= WM_T_82571) {
13638 sc->sc_tbi_linkup = 1;
13639 return 0;
13640 }
13641 }
13642
13643 rxcw = CSR_READ(sc, WMREG_RXCW);
13644 ctrl = CSR_READ(sc, WMREG_CTRL);
13645 status = CSR_READ(sc, WMREG_STATUS);
13646 signal = wm_tbi_havesignal(sc, ctrl);
13647
13648 DPRINTF(sc, WM_DEBUG_LINK,
13649 ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
13650 device_xname(sc->sc_dev), __func__, signal,
13651 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
13652
13653 /*
13654 * SWDPIN LU RXCW
13655 * 0 0 0
13656 * 0 0 1 (should not happen)
13657 * 0 1 0 (should not happen)
13658 * 0 1 1 (should not happen)
13659 * 1 0 0 Disable autonego and force linkup
13660 * 1 0 1 got /C/ but not linkup yet
13661 * 1 1 0 (linkup)
13662 * 1 1 1 If IFM_AUTO, back to autonego
13663 *
13664 */
13665 if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
13666 DPRINTF(sc, WM_DEBUG_LINK,
13667 ("%s: %s: force linkup and fullduplex\n",
13668 device_xname(sc->sc_dev), __func__));
13669 sc->sc_tbi_linkup = 0;
13670 /* Disable auto-negotiation in the TXCW register */
13671 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
13672
13673 /*
13674 * Force link-up and also force full-duplex.
13675 *
13676 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
13677 			 * automatically, so update sc->sc_ctrl from the register.
13678 */
13679 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
13680 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13681 } else if (((status & STATUS_LU) != 0)
13682 && ((rxcw & RXCW_C) != 0)
13683 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
13684 sc->sc_tbi_linkup = 1;
13685 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
13686 device_xname(sc->sc_dev), __func__));
13687 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13688 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
13689 } else if (signal && ((rxcw & RXCW_C) != 0)) {
13690 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
13691 device_xname(sc->sc_dev), __func__));
13692 } else {
13693 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
13694 device_xname(sc->sc_dev), __func__, rxcw, ctrl,
13695 status));
13696 }
13697
13698 return 0;
13699 }
13700
13701 /*
13702 * wm_tbi_tick:
13703 *
13704 * Check the link on TBI devices.
13705 * This function acts as mii_tick().
13706 */
13707 static void
13708 wm_tbi_tick(struct wm_softc *sc)
13709 {
13710 struct mii_data *mii = &sc->sc_mii;
13711 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13712 uint32_t status;
13713
13714 KASSERT(mutex_owned(sc->sc_core_lock));
13715
13716 status = CSR_READ(sc, WMREG_STATUS);
13717
13718 /* XXX is this needed? */
13719 (void)CSR_READ(sc, WMREG_RXCW);
13720 (void)CSR_READ(sc, WMREG_CTRL);
13721
13722 /* set link status */
13723 if ((status & STATUS_LU) == 0) {
13724 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
13725 device_xname(sc->sc_dev)));
13726 sc->sc_tbi_linkup = 0;
13727 } else if (sc->sc_tbi_linkup == 0) {
13728 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
13729 device_xname(sc->sc_dev),
13730 (status & STATUS_FD) ? "FDX" : "HDX"));
13731 sc->sc_tbi_linkup = 1;
13732 sc->sc_tbi_serdes_ticks = 0;
13733 }
13734
13735 if ((sc->sc_if_flags & IFF_UP) == 0)
13736 goto setled;
13737
13738 if ((status & STATUS_LU) == 0) {
13739 sc->sc_tbi_linkup = 0;
13740 /* If the timer expired, retry autonegotiation */
13741 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13742 && (++sc->sc_tbi_serdes_ticks
13743 >= sc->sc_tbi_serdes_anegticks)) {
13744 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13745 device_xname(sc->sc_dev), __func__));
13746 sc->sc_tbi_serdes_ticks = 0;
13747 /*
13748 * Reset the link, and let autonegotiation do
13749 * its thing
13750 */
13751 sc->sc_ctrl |= CTRL_LRST;
13752 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13753 CSR_WRITE_FLUSH(sc);
13754 delay(1000);
13755 sc->sc_ctrl &= ~CTRL_LRST;
13756 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13757 CSR_WRITE_FLUSH(sc);
13758 delay(1000);
13759 CSR_WRITE(sc, WMREG_TXCW,
13760 sc->sc_txcw & ~TXCW_ANE);
13761 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13762 }
13763 }
13764
13765 setled:
13766 wm_tbi_serdes_set_linkled(sc);
13767 }
13768
13769 /* SERDES related */
13770 static void
13771 wm_serdes_power_up_link_82575(struct wm_softc *sc)
13772 {
13773 uint32_t reg;
13774
13775 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13776 && ((sc->sc_flags & WM_F_SGMII) == 0))
13777 return;
13778
13779 /* Enable PCS to turn on link */
13780 reg = CSR_READ(sc, WMREG_PCS_CFG);
13781 reg |= PCS_CFG_PCS_EN;
13782 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
13783
13784 /* Power up the laser */
13785 reg = CSR_READ(sc, WMREG_CTRL_EXT);
13786 reg &= ~CTRL_EXT_SWDPIN(3);
13787 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13788
13789 /* Flush the write to verify completion */
13790 CSR_WRITE_FLUSH(sc);
13791 delay(1000);
13792 }
13793
13794 static int
13795 wm_serdes_mediachange(struct ifnet *ifp)
13796 {
13797 struct wm_softc *sc = ifp->if_softc;
13798 bool pcs_autoneg = true; /* XXX */
13799 uint32_t ctrl_ext, pcs_lctl, reg;
13800
13801 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13802 && ((sc->sc_flags & WM_F_SGMII) == 0))
13803 return 0;
13804
13805 /* XXX Currently, this function is not called on 8257[12] */
13806 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
13807 || (sc->sc_type >= WM_T_82575))
13808 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
13809
13810 	/* Power on the SFP cage if present */
13811 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13812 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13813 ctrl_ext |= CTRL_EXT_I2C_ENA;
13814 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13815
13816 sc->sc_ctrl |= CTRL_SLU;
13817
13818 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
13819 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
13820
13821 reg = CSR_READ(sc, WMREG_CONNSW);
13822 reg |= CONNSW_ENRGSRC;
13823 CSR_WRITE(sc, WMREG_CONNSW, reg);
13824 }
13825
13826 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
13827 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
13828 case CTRL_EXT_LINK_MODE_SGMII:
13829 /* SGMII mode lets the phy handle forcing speed/duplex */
13830 pcs_autoneg = true;
13831 		/* Autoneg timeout should be disabled for SGMII mode */
13832 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
13833 break;
13834 case CTRL_EXT_LINK_MODE_1000KX:
13835 pcs_autoneg = false;
13836 /* FALLTHROUGH */
13837 default:
13838 if ((sc->sc_type == WM_T_82575)
13839 || (sc->sc_type == WM_T_82576)) {
13840 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
13841 pcs_autoneg = false;
13842 }
13843 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
13844 | CTRL_FRCFDX;
13845
13846 /* Set speed of 1000/Full if speed/duplex is forced */
13847 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
13848 }
13849 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13850
13851 pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
13852 PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
13853
13854 if (pcs_autoneg) {
13855 /* Set PCS register for autoneg */
13856 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
13857
13858 /* Disable force flow control for autoneg */
13859 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
13860
13861 /* Configure flow control advertisement for autoneg */
13862 reg = CSR_READ(sc, WMREG_PCS_ANADV);
13863 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
13864 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
13865 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
13866 } else
13867 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
13868
13869 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
13870
13871 return 0;
13872 }
13873
13874 static void
13875 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13876 {
13877 struct wm_softc *sc = ifp->if_softc;
13878 struct mii_data *mii = &sc->sc_mii;
13879 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13880 uint32_t pcs_adv, pcs_lpab, reg;
13881
13882 ifmr->ifm_status = IFM_AVALID;
13883 ifmr->ifm_active = IFM_ETHER;
13884
13885 /* Check PCS */
13886 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13887 if ((reg & PCS_LSTS_LINKOK) == 0) {
13888 ifmr->ifm_active |= IFM_NONE;
13889 sc->sc_tbi_linkup = 0;
13890 goto setled;
13891 }
13892
13893 sc->sc_tbi_linkup = 1;
13894 ifmr->ifm_status |= IFM_ACTIVE;
13895 if (sc->sc_type == WM_T_I354) {
13896 uint32_t status;
13897
13898 status = CSR_READ(sc, WMREG_STATUS);
13899 if (((status & STATUS_2P5_SKU) != 0)
13900 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
13901 ifmr->ifm_active |= IFM_2500_KX;
13902 } else
13903 ifmr->ifm_active |= IFM_1000_KX;
13904 } else {
13905 switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
13906 case PCS_LSTS_SPEED_10:
13907 ifmr->ifm_active |= IFM_10_T; /* XXX */
13908 break;
13909 case PCS_LSTS_SPEED_100:
13910 ifmr->ifm_active |= IFM_100_FX; /* XXX */
13911 break;
13912 case PCS_LSTS_SPEED_1000:
13913 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13914 break;
13915 default:
13916 device_printf(sc->sc_dev, "Unknown speed\n");
13917 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13918 break;
13919 }
13920 }
13921 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
13922 if ((reg & PCS_LSTS_FDX) != 0)
13923 ifmr->ifm_active |= IFM_FDX;
13924 else
13925 ifmr->ifm_active |= IFM_HDX;
13926 mii->mii_media_active &= ~IFM_ETH_FMASK;
13927 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
13928 /* Check flow */
13929 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13930 if ((reg & PCS_LSTS_AN_COMP) == 0) {
13931 DPRINTF(sc, WM_DEBUG_LINK,
13932 ("XXX LINKOK but not ACOMP\n"));
13933 goto setled;
13934 }
13935 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
13936 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
13937 DPRINTF(sc, WM_DEBUG_LINK,
13938 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
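		/*
		 * Resolve flow control as in IEEE 802.3 Annex 28B: if
		 * both sides advertise symmetric pause, enable pause in
		 * both directions; otherwise the asymmetric combinations
		 * below yield TX-only or RX-only pause, respectively.
		 */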
13939 if ((pcs_adv & TXCW_SYM_PAUSE)
13940 && (pcs_lpab & TXCW_SYM_PAUSE)) {
13941 mii->mii_media_active |= IFM_FLOW
13942 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
13943 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
13944 && (pcs_adv & TXCW_ASYM_PAUSE)
13945 && (pcs_lpab & TXCW_SYM_PAUSE)
13946 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13947 mii->mii_media_active |= IFM_FLOW
13948 | IFM_ETH_TXPAUSE;
13949 } else if ((pcs_adv & TXCW_SYM_PAUSE)
13950 && (pcs_adv & TXCW_ASYM_PAUSE)
13951 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
13952 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13953 mii->mii_media_active |= IFM_FLOW
13954 | IFM_ETH_RXPAUSE;
13955 }
13956 }
13957 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
13958 | (mii->mii_media_active & IFM_ETH_FMASK);
13959 setled:
13960 wm_tbi_serdes_set_linkled(sc);
13961 }
13962
13963 /*
13964 * wm_serdes_tick:
13965 *
13966 * Check the link on serdes devices.
13967 */
13968 static void
13969 wm_serdes_tick(struct wm_softc *sc)
13970 {
13971 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
13972 struct mii_data *mii = &sc->sc_mii;
13973 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13974 uint32_t reg;
13975
13976 KASSERT(mutex_owned(sc->sc_core_lock));
13977
13978 mii->mii_media_status = IFM_AVALID;
13979 mii->mii_media_active = IFM_ETHER;
13980
13981 /* Check PCS */
13982 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13983 if ((reg & PCS_LSTS_LINKOK) != 0) {
13984 mii->mii_media_status |= IFM_ACTIVE;
13985 sc->sc_tbi_linkup = 1;
13986 sc->sc_tbi_serdes_ticks = 0;
13987 mii->mii_media_active |= IFM_1000_SX; /* XXX */
13988 if ((reg & PCS_LSTS_FDX) != 0)
13989 mii->mii_media_active |= IFM_FDX;
13990 else
13991 mii->mii_media_active |= IFM_HDX;
13992 } else {
13993 		mii->mii_media_active |= IFM_NONE;
13994 sc->sc_tbi_linkup = 0;
13995 /* If the timer expired, retry autonegotiation */
13996 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13997 && (++sc->sc_tbi_serdes_ticks
13998 >= sc->sc_tbi_serdes_anegticks)) {
13999 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
14000 device_xname(sc->sc_dev), __func__));
14001 sc->sc_tbi_serdes_ticks = 0;
14002 /* XXX */
14003 wm_serdes_mediachange(ifp);
14004 }
14005 }
14006
14007 wm_tbi_serdes_set_linkled(sc);
14008 }
14009
14010 /* SFP related */
14011
14012 static int
14013 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
14014 {
14015 uint32_t i2ccmd;
14016 int i;
14017
14018 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
14019 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
14020
14021 /* Poll the ready bit */
14022 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
14023 delay(50);
14024 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
14025 if (i2ccmd & I2CCMD_READY)
14026 break;
14027 }
14028 if ((i2ccmd & I2CCMD_READY) == 0)
14029 return -1;
14030 if ((i2ccmd & I2CCMD_ERROR) != 0)
14031 return -1;
14032
14033 *data = i2ccmd & 0x00ff;
14034
14035 return 0;
14036 }
14037
14038 static uint32_t
14039 wm_sfp_get_media_type(struct wm_softc *sc)
14040 {
14041 uint32_t ctrl_ext;
14042 uint8_t val = 0;
14043 int timeout = 3;
14044 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
14045 int rv = -1;
14046
14047 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
14048 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
14049 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
14050 CSR_WRITE_FLUSH(sc);
14051
14052 /* Read SFP module data */
14053 while (timeout) {
14054 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
14055 if (rv == 0)
14056 break;
14057 delay(100*1000); /* XXX too big */
14058 timeout--;
14059 }
14060 if (rv != 0)
14061 goto out;
14062
14063 switch (val) {
14064 case SFF_SFP_ID_SFF:
14065 aprint_normal_dev(sc->sc_dev,
14066 "Module/Connector soldered to board\n");
14067 break;
14068 case SFF_SFP_ID_SFP:
14069 sc->sc_flags |= WM_F_SFP;
14070 break;
14071 case SFF_SFP_ID_UNKNOWN:
14072 goto out;
14073 default:
14074 break;
14075 }
14076
14077 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
14078 if (rv != 0)
14079 goto out;
14080
14081 sc->sc_sfptype = val;
14082 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
14083 mediatype = WM_MEDIATYPE_SERDES;
14084 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
14085 sc->sc_flags |= WM_F_SGMII;
14086 mediatype = WM_MEDIATYPE_COPPER;
14087 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
14088 sc->sc_flags |= WM_F_SGMII;
14089 mediatype = WM_MEDIATYPE_SERDES;
14090 } else {
14091 device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
14092 __func__, sc->sc_sfptype);
14093 sc->sc_sfptype = 0; /* XXX unknown */
14094 }
14095
14096 out:
14097 /* Restore I2C interface setting */
14098 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
14099
14100 return mediatype;
14101 }
14102
14103 /*
14104 * NVM related.
14105 * Microwire, SPI (w/wo EERD) and Flash.
14106 */
14107
14108 /* Both spi and uwire */
14109
14110 /*
14111 * wm_eeprom_sendbits:
14112 *
14113 * Send a series of bits to the EEPROM.
14114 */
14115 static void
14116 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
14117 {
14118 uint32_t reg;
14119 int x;
14120
14121 reg = CSR_READ(sc, WMREG_EECD);
14122
14123 for (x = nbits; x > 0; x--) {
14124 if (bits & (1U << (x - 1)))
14125 reg |= EECD_DI;
14126 else
14127 reg &= ~EECD_DI;
14128 CSR_WRITE(sc, WMREG_EECD, reg);
14129 CSR_WRITE_FLUSH(sc);
14130 delay(2);
14131 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
14132 CSR_WRITE_FLUSH(sc);
14133 delay(2);
14134 CSR_WRITE(sc, WMREG_EECD, reg);
14135 CSR_WRITE_FLUSH(sc);
14136 delay(2);
14137 }
14138 }
14139
14140 /*
14141 * wm_eeprom_recvbits:
14142 *
14143 * Receive a series of bits from the EEPROM.
14144 */
14145 static void
14146 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
14147 {
14148 uint32_t reg, val;
14149 int x;
14150
14151 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
14152
14153 val = 0;
14154 for (x = nbits; x > 0; x--) {
14155 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
14156 CSR_WRITE_FLUSH(sc);
14157 delay(2);
14158 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
14159 val |= (1U << (x - 1));
14160 CSR_WRITE(sc, WMREG_EECD, reg);
14161 CSR_WRITE_FLUSH(sc);
14162 delay(2);
14163 }
14164 *valp = val;
14165 }
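
/*
 * Together, wm_eeprom_sendbits() and wm_eeprom_recvbits() bit-bang a
 * complete EEPROM transaction: the caller asserts chip select, clocks
 * out an opcode and an address, then clocks the result back in (see
 * wm_nvm_read_uwire() and wm_nvm_read_spi() below). Each bit costs a
 * few EECD writes with 2us settling delays, so a word read is roughly
 * on the order of 100us, which is acceptable for the driver's
 * occasional NVM accesses.
 */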
14166
14167 /* Microwire */
14168
14169 /*
14170 * wm_nvm_read_uwire:
14171 *
14172 * Read a word from the EEPROM using the MicroWire protocol.
14173 */
14174 static int
14175 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14176 {
14177 uint32_t reg, val;
14178 int i, rv;
14179
14180 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14181 device_xname(sc->sc_dev), __func__));
14182
14183 rv = sc->nvm.acquire(sc);
14184 if (rv != 0)
14185 return rv;
14186
14187 for (i = 0; i < wordcnt; i++) {
14188 /* Clear SK and DI. */
14189 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
14190 CSR_WRITE(sc, WMREG_EECD, reg);
14191
14192 /*
14193 		 * XXX: Workaround for a bug in qemu-0.12.x and prior,
14194 		 * and in Xen.
14195 		 *
14196 		 * We use this workaround only for the 82540 because
14197 		 * qemu's e1000 acts as an 82540.
14198 */
14199 if (sc->sc_type == WM_T_82540) {
14200 reg |= EECD_SK;
14201 CSR_WRITE(sc, WMREG_EECD, reg);
14202 reg &= ~EECD_SK;
14203 CSR_WRITE(sc, WMREG_EECD, reg);
14204 CSR_WRITE_FLUSH(sc);
14205 delay(2);
14206 }
14207 /* XXX: end of workaround */
14208
14209 /* Set CHIP SELECT. */
14210 reg |= EECD_CS;
14211 CSR_WRITE(sc, WMREG_EECD, reg);
14212 CSR_WRITE_FLUSH(sc);
14213 delay(2);
14214
14215 /* Shift in the READ command. */
14216 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
14217
14218 /* Shift in address. */
14219 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
14220
14221 /* Shift out the data. */
14222 wm_eeprom_recvbits(sc, &val, 16);
14223 data[i] = val & 0xffff;
14224
14225 /* Clear CHIP SELECT. */
14226 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
14227 CSR_WRITE(sc, WMREG_EECD, reg);
14228 CSR_WRITE_FLUSH(sc);
14229 delay(2);
14230 }
14231
14232 sc->nvm.release(sc);
14233 return 0;
14234 }
14235
14236 /* SPI */
14237
14238 /*
14239 * Set SPI and FLASH related information from the EECD register.
14240 * For 82541 and 82547, the word size is taken from EEPROM.
14241 */
14242 static int
14243 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
14244 {
14245 int size;
14246 uint32_t reg;
14247 uint16_t data;
14248
14249 reg = CSR_READ(sc, WMREG_EECD);
14250 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
14251
14252 /* Read the size of NVM from EECD by default */
14253 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
14254 switch (sc->sc_type) {
14255 case WM_T_82541:
14256 case WM_T_82541_2:
14257 case WM_T_82547:
14258 case WM_T_82547_2:
14259 /* Set dummy value to access EEPROM */
14260 sc->sc_nvm_wordsize = 64;
14261 if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
14262 aprint_error_dev(sc->sc_dev,
14263 "%s: failed to read EEPROM size\n", __func__);
14264 }
14265 reg = data;
14266 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
14267 if (size == 0)
14268 size = 6; /* 64 word size */
14269 else
14270 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
14271 break;
14272 case WM_T_80003:
14273 case WM_T_82571:
14274 case WM_T_82572:
14275 case WM_T_82573: /* SPI case */
14276 case WM_T_82574: /* SPI case */
14277 case WM_T_82583: /* SPI case */
14278 size += NVM_WORD_SIZE_BASE_SHIFT;
14279 if (size > 14)
14280 size = 14;
14281 break;
14282 case WM_T_82575:
14283 case WM_T_82576:
14284 case WM_T_82580:
14285 case WM_T_I350:
14286 case WM_T_I354:
14287 case WM_T_I210:
14288 case WM_T_I211:
14289 size += NVM_WORD_SIZE_BASE_SHIFT;
14290 if (size > 15)
14291 size = 15;
14292 break;
14293 default:
14294 		aprint_error_dev(sc->sc_dev,
14295 		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
14296 		return -1;
14298 }
14299
14300 sc->sc_nvm_wordsize = 1 << size;
14301
14302 return 0;
14303 }
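
/*
 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, which the
 * "size == 0 -> 64 words" case above implies): an EECD size field of 1
 * on an 80003 gives size = 1 + 6 = 7, i.e. a 128-word NVM, while the
 * same field on an 82541 gives size = 1 + 6 + 1 = 8, i.e. 256 words,
 * because those parts store the field with an extra bias of one.
 */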
14304
14305 /*
14306 * wm_nvm_ready_spi:
14307 *
14308 * Wait for a SPI EEPROM to be ready for commands.
14309 */
14310 static int
14311 wm_nvm_ready_spi(struct wm_softc *sc)
14312 {
14313 uint32_t val;
14314 int usec;
14315
14316 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14317 device_xname(sc->sc_dev), __func__));
14318
14319 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
14320 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
14321 wm_eeprom_recvbits(sc, &val, 8);
14322 if ((val & SPI_SR_RDY) == 0)
14323 break;
14324 }
14325 if (usec >= SPI_MAX_RETRIES) {
14326 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
14327 return -1;
14328 }
14329 return 0;
14330 }
14331
14332 /*
14333 * wm_nvm_read_spi:
14334 *
14335  *	Read a word from the EEPROM using the SPI protocol.
14336 */
14337 static int
14338 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14339 {
14340 uint32_t reg, val;
14341 int i;
14342 uint8_t opc;
14343 int rv;
14344
14345 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14346 device_xname(sc->sc_dev), __func__));
14347
14348 rv = sc->nvm.acquire(sc);
14349 if (rv != 0)
14350 return rv;
14351
14352 /* Clear SK and CS. */
14353 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
14354 CSR_WRITE(sc, WMREG_EECD, reg);
14355 CSR_WRITE_FLUSH(sc);
14356 delay(2);
14357
14358 if ((rv = wm_nvm_ready_spi(sc)) != 0)
14359 goto out;
14360
14361 /* Toggle CS to flush commands. */
14362 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
14363 CSR_WRITE_FLUSH(sc);
14364 delay(2);
14365 CSR_WRITE(sc, WMREG_EECD, reg);
14366 CSR_WRITE_FLUSH(sc);
14367 delay(2);
14368
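	/*
	 * SPI EEPROMs are byte-addressed. Parts with 8 address bits
	 * carry the ninth address bit in the opcode's A8 bit, so word
	 * addresses >= 128 (byte addresses >= 256) set SPI_OPC_A8, and
	 * the word address is converted to a byte address ("word << 1")
	 * when it is clocked out below.
	 */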
14369 opc = SPI_OPC_READ;
14370 if (sc->sc_nvm_addrbits == 8 && word >= 128)
14371 opc |= SPI_OPC_A8;
14372
14373 wm_eeprom_sendbits(sc, opc, 8);
14374 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
14375
14376 for (i = 0; i < wordcnt; i++) {
14377 wm_eeprom_recvbits(sc, &val, 16);
14378 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
14379 }
14380
14381 /* Raise CS and clear SK. */
14382 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
14383 CSR_WRITE(sc, WMREG_EECD, reg);
14384 CSR_WRITE_FLUSH(sc);
14385 delay(2);
14386
14387 out:
14388 sc->nvm.release(sc);
14389 return rv;
14390 }
14391
14392 /* Reading via EERD */
14393
14394 static int
14395 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
14396 {
14397 uint32_t attempts = 100000;
14398 uint32_t i, reg = 0;
14399 int32_t done = -1;
14400
14401 for (i = 0; i < attempts; i++) {
14402 reg = CSR_READ(sc, rw);
14403
14404 if (reg & EERD_DONE) {
14405 done = 0;
14406 break;
14407 }
14408 delay(5);
14409 }
14410
14411 return done;
14412 }
14413
14414 static int
14415 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
14416 {
14417 int i, eerd = 0;
14418 int rv;
14419
14420 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14421 device_xname(sc->sc_dev), __func__));
14422
14423 rv = sc->nvm.acquire(sc);
14424 if (rv != 0)
14425 return rv;
14426
14427 for (i = 0; i < wordcnt; i++) {
14428 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
14429 CSR_WRITE(sc, WMREG_EERD, eerd);
14430 rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
14431 if (rv != 0) {
14432 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
14433 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
14434 break;
14435 }
14436 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
14437 }
14438
14439 sc->nvm.release(sc);
14440 return rv;
14441 }
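
/*
 * The EERD path needs no bit-banging: each word is read by writing
 * "address | EERD_START" to the EERD register, polling EERD_DONE and
 * extracting the data field. A minimal usage sketch (0x04 is just a
 * hypothetical word offset):
 *
 *	uint16_t word;
 *	if (wm_nvm_read_eerd(sc, 0x04, 1, &word) == 0)
 *		... "word" now holds NVM word 0x04 ...
 */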
14442
14443 /* Flash */
14444
14445 static int
14446 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
14447 {
14448 uint32_t eecd;
14449 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
14450 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
14451 uint32_t nvm_dword = 0;
14452 uint8_t sig_byte = 0;
14453 int rv;
14454
14455 switch (sc->sc_type) {
14456 case WM_T_PCH_SPT:
14457 case WM_T_PCH_CNP:
14458 case WM_T_PCH_TGP:
14459 bank1_offset = sc->sc_ich8_flash_bank_size * 2;
14460 act_offset = ICH_NVM_SIG_WORD * 2;
14461
14462 /* Set bank to 0 in case flash read fails. */
14463 *bank = 0;
14464
14465 /* Check bank 0 */
14466 rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
14467 if (rv != 0)
14468 return rv;
14469 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
14470 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14471 *bank = 0;
14472 return 0;
14473 }
14474
14475 /* Check bank 1 */
14476 rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
14477 &nvm_dword);
14478 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
14479 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14480 *bank = 1;
14481 return 0;
14482 }
14483 aprint_error_dev(sc->sc_dev,
14484 "%s: no valid NVM bank present (%u)\n", __func__, *bank);
14485 return -1;
14486 case WM_T_ICH8:
14487 case WM_T_ICH9:
14488 eecd = CSR_READ(sc, WMREG_EECD);
14489 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
14490 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
14491 return 0;
14492 }
14493 /* FALLTHROUGH */
14494 default:
14495 /* Default to 0 */
14496 *bank = 0;
14497
14498 /* Check bank 0 */
14499 wm_read_ich8_byte(sc, act_offset, &sig_byte);
14500 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14501 *bank = 0;
14502 return 0;
14503 }
14504
14505 /* Check bank 1 */
14506 wm_read_ich8_byte(sc, act_offset + bank1_offset,
14507 &sig_byte);
14508 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14509 *bank = 1;
14510 return 0;
14511 }
14512 }
14513
14514 DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
14515 device_xname(sc->sc_dev)));
14516 return -1;
14517 }
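
/*
 * Offset arithmetic note (informal): the flash holds two NVM banks of
 * sc_ich8_flash_bank_size words each, so bank 1 starts bank_size * 2
 * bytes into the flash. The signature lives in the high byte of word
 * ICH_NVM_SIG_WORD of each bank; the byte-access path above therefore
 * reads byte offset ICH_NVM_SIG_WORD * 2 + 1, while the SPT+ path reads
 * the containing dword and extracts bits 15:8.
 */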
14518
14519 /******************************************************************************
14520 * This function does initial flash setup so that a new read/write/erase cycle
14521 * can be started.
14522 *
14523 * sc - The pointer to the hw structure
14524 ****************************************************************************/
14525 static int32_t
14526 wm_ich8_cycle_init(struct wm_softc *sc)
14527 {
14528 uint16_t hsfsts;
14529 int32_t error = 1;
14530 int32_t i = 0;
14531
14532 if (sc->sc_type >= WM_T_PCH_SPT)
14533 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
14534 else
14535 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14536
14537 	/* Check that the Flash Descriptor Valid bit is set in HW status */
14538 if ((hsfsts & HSFSTS_FLDVAL) == 0)
14539 return error;
14540
14541 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
14543 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
14544
14545 if (sc->sc_type >= WM_T_PCH_SPT)
14546 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
14547 else
14548 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14549
14550 /*
14551 	 * Either the hardware should provide a cycle-in-progress bit that can
14552 	 * be checked before starting a new cycle, or the FDONE bit should be
14553 	 * set to 1 after a hardware reset so that it can indicate whether a
14554 	 * cycle is in progress or has completed. There should also be a
14555 	 * software semaphore guarding FDONE and the cycle-in-progress bit so
14556 	 * that accesses from two threads are serialized and two threads
14557 	 * cannot start a cycle at the same time.
14559 */
14560
14561 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/* No cycle is running at present, so we can start one. */
14566
14567 /* Begin by setting Flash Cycle Done. */
14568 hsfsts |= HSFSTS_DONE;
14569 if (sc->sc_type >= WM_T_PCH_SPT)
14570 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14571 hsfsts & 0xffffUL);
14572 else
14573 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14574 error = 0;
14575 } else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
14580 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
14581 if (sc->sc_type >= WM_T_PCH_SPT)
14582 hsfsts = ICH8_FLASH_READ32(sc,
14583 ICH_FLASH_HSFSTS) & 0xffffUL;
14584 else
14585 hsfsts = ICH8_FLASH_READ16(sc,
14586 ICH_FLASH_HSFSTS);
14587 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14588 error = 0;
14589 break;
14590 }
14591 delay(1);
14592 }
14593 if (error == 0) {
			/*
			 * The previous cycle ended within the timeout, so
			 * now set the Flash Cycle Done.
			 */
14598 hsfsts |= HSFSTS_DONE;
14599 if (sc->sc_type >= WM_T_PCH_SPT)
14600 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14601 hsfsts & 0xffffUL);
14602 else
14603 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
14604 hsfsts);
14605 }
14606 }
14607 return error;
14608 }
14609
14610 /******************************************************************************
14611 * This function starts a flash cycle and waits for its completion
14612 *
14613 * sc - The pointer to the hw structure
14614 ****************************************************************************/
14615 static int32_t
14616 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
14617 {
14618 uint16_t hsflctl;
14619 uint16_t hsfsts;
14620 int32_t error = 1;
14621 uint32_t i = 0;
14622
14623 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
14624 if (sc->sc_type >= WM_T_PCH_SPT)
14625 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
14626 else
14627 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14628 hsflctl |= HSFCTL_GO;
14629 if (sc->sc_type >= WM_T_PCH_SPT)
14630 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14631 (uint32_t)hsflctl << 16);
14632 else
14633 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14634
14635 /* Wait till FDONE bit is set to 1 */
14636 do {
14637 if (sc->sc_type >= WM_T_PCH_SPT)
14638 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14639 & 0xffffUL;
14640 else
14641 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14642 if (hsfsts & HSFSTS_DONE)
14643 break;
14644 delay(1);
14645 i++;
14646 } while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
14648 error = 0;
14649
14650 return error;
14651 }
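
/*
 * The two helpers above implement the generic ICH flash handshake.  A
 * minimal sketch of the full sequence a caller performs, assuming the
 * pre-SPT (16-bit HSFCTL) register layout; wm_read_ich8_data() below is
 * the real implementation, with retries:
 */
#if 0	/* example only, not compiled */
	uint32_t addr = 0 /* linear flash address */, data;
	uint16_t ctl;

	if (wm_ich8_cycle_init(sc) == 0) {
		ctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);

		/* Program the byte count (size - 1) and the READ cycle. */
		ctl |= ((4 - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
		ctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, ctl);

		/* Set the address, then start the cycle and wait for FDONE. */
		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, addr);
		if (wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT) == 0)
			data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
	}
#endif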
14652
14653 /******************************************************************************
14654 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
14655 *
14656 * sc - The pointer to the hw structure
14657 * index - The index of the byte or word to read.
14658 * size - Size of data to read, 1=byte 2=word, 4=dword
14659 * data - Pointer to the word to store the value read.
14660 *****************************************************************************/
14661 static int32_t
14662 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
14663 uint32_t size, uint32_t *data)
14664 {
14665 uint16_t hsfsts;
14666 uint16_t hsflctl;
14667 uint32_t flash_linear_address;
14668 uint32_t flash_data = 0;
14669 int32_t error = 1;
14670 int32_t count = 0;
14671
	if (size < 1 || size > 4 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
14674 return error;
14675
14676 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
14677 sc->sc_ich8_flash_base;
14678
14679 do {
14680 delay(1);
14681 /* Steps */
14682 error = wm_ich8_cycle_init(sc);
14683 if (error)
14684 break;
14685
14686 if (sc->sc_type >= WM_T_PCH_SPT)
14687 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14688 >> 16;
14689 else
14690 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* Bcount is size - 1: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes */
14692 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
14693 & HSFCTL_BCOUNT_MASK;
14694 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
14695 if (sc->sc_type >= WM_T_PCH_SPT) {
			/*
			 * In SPT, this register is in LAN memory space,
			 * not flash.  Therefore, only 32 bit access is
			 * supported.
			 */
14700 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14701 (uint32_t)hsflctl << 16);
14702 } else
14703 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14704
14705 /*
14706 * Write the last 24 bits of index into Flash Linear address
14707 * field in Flash Address
14708 */
14709 /* TODO: TBD maybe check the index against the size of flash */
14710
14711 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
14712
14713 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
14714
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read in (shift in) the Flash
		 * Data0 register, least significant byte first.
		 */
14721 if (error == 0) {
14722 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
14723 if (size == 1)
14724 *data = (uint8_t)(flash_data & 0x000000FF);
14725 else if (size == 2)
14726 *data = (uint16_t)(flash_data & 0x0000FFFF);
14727 else if (size == 4)
14728 *data = (uint32_t)flash_data;
14729 break;
14730 } else {
14731 /*
14732 * If we've gotten here, then things are probably
14733 * completely hosed, but if the error condition is
14734 * detected, it won't hurt to give it another try...
14735 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
14736 */
14737 if (sc->sc_type >= WM_T_PCH_SPT)
14738 hsfsts = ICH8_FLASH_READ32(sc,
14739 ICH_FLASH_HSFSTS) & 0xffffUL;
14740 else
14741 hsfsts = ICH8_FLASH_READ16(sc,
14742 ICH_FLASH_HSFSTS);
14743
14744 if (hsfsts & HSFSTS_ERR) {
14745 /* Repeat for some time before giving up. */
14746 continue;
14747 } else if ((hsfsts & HSFSTS_DONE) == 0)
14748 break;
14749 }
14750 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
14751
14752 return error;
14753 }
14754
14755 /******************************************************************************
14756 * Reads a single byte from the NVM using the ICH8 flash access registers.
14757 *
14758 * sc - pointer to wm_hw structure
14759 * index - The index of the byte to read.
14760 * data - Pointer to a byte to store the value read.
14761 *****************************************************************************/
14762 static int32_t
14763 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
14764 {
14765 int32_t status;
14766 uint32_t word = 0;
14767
14768 status = wm_read_ich8_data(sc, index, 1, &word);
14769 if (status == 0)
14770 *data = (uint8_t)word;
14771 else
14772 *data = 0;
14773
14774 return status;
14775 }
14776
14777 /******************************************************************************
14778 * Reads a word from the NVM using the ICH8 flash access registers.
14779 *
14780 * sc - pointer to wm_hw structure
14781 * index - The starting byte index of the word to read.
14782 * data - Pointer to a word to store the value read.
14783 *****************************************************************************/
14784 static int32_t
14785 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
14786 {
14787 int32_t status;
14788 uint32_t word = 0;
14789
14790 status = wm_read_ich8_data(sc, index, 2, &word);
14791 if (status == 0)
14792 *data = (uint16_t)word;
14793 else
14794 *data = 0;
14795
14796 return status;
14797 }
14798
14799 /******************************************************************************
14800 * Reads a dword from the NVM using the ICH8 flash access registers.
14801 *
14802 * sc - pointer to wm_hw structure
14803 * index - The starting byte index of the word to read.
14804 * data - Pointer to a word to store the value read.
14805 *****************************************************************************/
14806 static int32_t
14807 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
14808 {
14809 int32_t status;
14810
14811 status = wm_read_ich8_data(sc, index, 4, data);
14812 return status;
14813 }
14814
14815 /******************************************************************************
14816 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
14817 * register.
14818 *
14819 * sc - Struct containing variables accessed by shared code
14820 * offset - offset of word in the EEPROM to read
14821 * data - word read from the EEPROM
14822 * words - number of words to read
14823 *****************************************************************************/
14824 static int
14825 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
14826 {
14827 int rv;
14828 uint32_t flash_bank = 0;
14829 uint32_t act_offset = 0;
14830 uint32_t bank_offset = 0;
14831 uint16_t word = 0;
14832 uint16_t i = 0;
14833
14834 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14835 device_xname(sc->sc_dev), __func__));
14836
14837 rv = sc->nvm.acquire(sc);
14838 if (rv != 0)
14839 return rv;
14840
14841 /*
14842 * We need to know which is the valid flash bank. In the event
14843 * that we didn't allocate eeprom_shadow_ram, we may not be
14844 * managing flash_bank. So it cannot be trusted and needs
14845 * to be updated with each read.
14846 */
14847 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14848 if (rv) {
14849 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14850 device_xname(sc->sc_dev)));
14851 flash_bank = 0;
14852 }
14853
	/*
	 * Adjust the offset if we're on bank 1, accounting for the word
	 * size (two bytes per word).
	 */
14858 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14859
14860 for (i = 0; i < words; i++) {
14861 /* The NVM part needs a byte offset, hence * 2 */
14862 act_offset = bank_offset + ((offset + i) * 2);
14863 rv = wm_read_ich8_word(sc, act_offset, &word);
14864 if (rv) {
14865 aprint_error_dev(sc->sc_dev,
14866 "%s: failed to read NVM\n", __func__);
14867 break;
14868 }
14869 data[i] = word;
14870 }
14871
14872 sc->nvm.release(sc);
14873 return rv;
14874 }
14875
14876 /******************************************************************************
14877 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
14878 * register.
14879 *
14880 * sc - Struct containing variables accessed by shared code
14881 * offset - offset of word in the EEPROM to read
14882 * data - word read from the EEPROM
14883 * words - number of words to read
14884 *****************************************************************************/
14885 static int
14886 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
14887 {
14888 int rv;
14889 uint32_t flash_bank = 0;
14890 uint32_t act_offset = 0;
14891 uint32_t bank_offset = 0;
14892 uint32_t dword = 0;
14893 uint16_t i = 0;
14894
14895 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14896 device_xname(sc->sc_dev), __func__));
14897
14898 rv = sc->nvm.acquire(sc);
14899 if (rv != 0)
14900 return rv;
14901
14902 /*
14903 * We need to know which is the valid flash bank. In the event
14904 * that we didn't allocate eeprom_shadow_ram, we may not be
14905 * managing flash_bank. So it cannot be trusted and needs
14906 * to be updated with each read.
14907 */
14908 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14909 if (rv) {
14910 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14911 device_xname(sc->sc_dev)));
14912 flash_bank = 0;
14913 }
14914
	/*
	 * Adjust the offset if we're on bank 1, accounting for the word
	 * size (two bytes per word).
	 */
14919 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14920
14921 for (i = 0; i < words; i++) {
14922 /* The NVM part needs a byte offset, hence * 2 */
14923 act_offset = bank_offset + ((offset + i) * 2);
14924 /* but we must read dword aligned, so mask ... */
14925 rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
14926 if (rv) {
14927 aprint_error_dev(sc->sc_dev,
14928 "%s: failed to read NVM\n", __func__);
14929 break;
14930 }
14931 /* ... and pick out low or high word */
14932 if ((act_offset & 0x2) == 0)
14933 data[i] = (uint16_t)(dword & 0xFFFF);
14934 else
14935 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
14936 }
14937
14938 sc->nvm.release(sc);
14939 return rv;
14940 }
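
/*
 * Worked example of the offset arithmetic above, assuming a flash bank
 * size of 0x800 words and flash_bank == 1: bank_offset is
 * 1 * (0x800 * 2) = 0x1000 bytes.  Reading NVM word 0x11 then gives
 * act_offset = 0x1000 + 0x22 = 0x1022; the dword-aligned read fetches
 * byte offset 0x1020 and, because bit 1 of act_offset is set, the word
 * is taken from the high 16 bits of the dword.
 */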
14941
14942 /* iNVM */
14943
14944 static int
14945 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
14946 {
	int32_t rv = -1;
14948 uint32_t invm_dword;
14949 uint16_t i;
14950 uint8_t record_type, word_address;
14951
14952 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14953 device_xname(sc->sc_dev), __func__));
14954
14955 for (i = 0; i < INVM_SIZE; i++) {
14956 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
14957 /* Get record type */
14958 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
14959 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
14960 break;
14961 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
14962 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
14963 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
14964 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
14965 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
14966 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
14967 if (word_address == address) {
14968 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
14969 rv = 0;
14970 break;
14971 }
14972 }
14973 }
14974
14975 return rv;
14976 }
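
/*
 * A note on the walk above: each iNVM dword begins with a record type.
 * CSR-autoload and RSA-key records are skipped by advancing i past their
 * payload dwords; a word-autoload record carries a word address and 16
 * bits of data in the same dword.  A minimal sketch of decoding one
 * word-autoload record with this file's own macros:
 */
#if 0	/* example only, not compiled */
	uint32_t dw = CSR_READ(sc, WM_INVM_DATA_REG(i));

	if (INVM_DWORD_TO_RECORD_TYPE(dw) == INVM_WORD_AUTOLOAD_STRUCTURE) {
		uint8_t addr = INVM_DWORD_TO_WORD_ADDRESS(dw);
		uint16_t val = INVM_DWORD_TO_WORD_DATA(dw);
		/* addr/val now hold the NVM word address and its data. */
	}
#endif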
14977
14978 static int
14979 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
14980 {
14981 int i, rv;
14982
14983 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14984 device_xname(sc->sc_dev), __func__));
14985
14986 rv = sc->nvm.acquire(sc);
14987 if (rv != 0)
14988 return rv;
14989
14990 for (i = 0; i < words; i++) {
14991 switch (offset + i) {
14992 case NVM_OFF_MACADDR:
14993 case NVM_OFF_MACADDR1:
14994 case NVM_OFF_MACADDR2:
14995 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14996 if (rv != 0) {
14997 data[i] = 0xffff;
14998 rv = -1;
14999 }
15000 break;
15001 case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
15002 rv = wm_nvm_read_word_invm(sc, offset, data);
15003 if (rv != 0) {
15004 *data = INVM_DEFAULT_AL;
15005 rv = 0;
15006 }
15007 break;
15008 case NVM_OFF_CFG2:
15009 rv = wm_nvm_read_word_invm(sc, offset, data);
15010 if (rv != 0) {
15011 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
15012 rv = 0;
15013 }
15014 break;
15015 case NVM_OFF_CFG4:
15016 rv = wm_nvm_read_word_invm(sc, offset, data);
15017 if (rv != 0) {
15018 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
15019 rv = 0;
15020 }
15021 break;
15022 case NVM_OFF_LED_1_CFG:
15023 rv = wm_nvm_read_word_invm(sc, offset, data);
15024 if (rv != 0) {
15025 *data = NVM_LED_1_CFG_DEFAULT_I211;
15026 rv = 0;
15027 }
15028 break;
15029 case NVM_OFF_LED_0_2_CFG:
15030 rv = wm_nvm_read_word_invm(sc, offset, data);
15031 if (rv != 0) {
15032 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
15033 rv = 0;
15034 }
15035 break;
15036 case NVM_OFF_ID_LED_SETTINGS:
15037 rv = wm_nvm_read_word_invm(sc, offset, data);
15038 if (rv != 0) {
15039 *data = ID_LED_RESERVED_FFFF;
15040 rv = 0;
15041 }
15042 break;
15043 default:
15044 DPRINTF(sc, WM_DEBUG_NVM,
15045 ("NVM word 0x%02x is not mapped.\n", offset));
15046 *data = NVM_RESERVED_WORD;
15047 break;
15048 }
15049 }
15050
15051 sc->nvm.release(sc);
15052 return rv;
15053 }
15054
/* Locking, NVM type detection, checksum validation, version check and read */
15056
15057 static int
15058 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
15059 {
15060 uint32_t eecd = 0;
15061
15062 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
15063 || sc->sc_type == WM_T_82583) {
15064 eecd = CSR_READ(sc, WMREG_EECD);
15065
15066 /* Isolate bits 15 & 16 */
15067 eecd = ((eecd >> 15) & 0x03);
15068
15069 /* If both bits are set, device is Flash type */
15070 if (eecd == 0x03)
15071 return 0;
15072 }
15073 return 1;
15074 }
15075
15076 static int
15077 wm_nvm_flash_presence_i210(struct wm_softc *sc)
15078 {
15079 uint32_t eec;
15080
15081 eec = CSR_READ(sc, WMREG_EEC);
15082 if ((eec & EEC_FLASH_DETECTED) != 0)
15083 return 1;
15084
15085 return 0;
15086 }
15087
15088 /*
15089 * wm_nvm_validate_checksum
15090 *
15091 * The checksum is defined as the sum of the first 64 (16 bit) words.
15092 */
15093 static int
15094 wm_nvm_validate_checksum(struct wm_softc *sc)
15095 {
15096 uint16_t checksum;
15097 uint16_t eeprom_data;
15098 #ifdef WM_DEBUG
15099 uint16_t csum_wordaddr, valid_checksum;
15100 #endif
15101 int i;
15102
15103 checksum = 0;
15104
15105 /* Don't check for I211 */
15106 if (sc->sc_type == WM_T_I211)
15107 return 0;
15108
15109 #ifdef WM_DEBUG
15110 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) ||
15111 (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) {
15112 csum_wordaddr = NVM_OFF_COMPAT;
15113 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
15114 } else {
15115 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
15116 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
15117 }
15118
15119 /* Dump EEPROM image for debug */
15120 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
15121 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
15122 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
15123 /* XXX PCH_SPT? */
15124 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
15125 if ((eeprom_data & valid_checksum) == 0)
15126 DPRINTF(sc, WM_DEBUG_NVM,
15127 ("%s: NVM need to be updated (%04x != %04x)\n",
15128 device_xname(sc->sc_dev), eeprom_data,
15129 valid_checksum));
15130 }
15131
15132 if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
15133 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
15134 for (i = 0; i < NVM_SIZE; i++) {
15135 if (wm_nvm_read(sc, i, 1, &eeprom_data))
15136 printf("XXXX ");
15137 else
15138 printf("%04hx ", eeprom_data);
15139 if (i % 8 == 7)
15140 printf("\n");
15141 }
15142 }
15143
15144 #endif /* WM_DEBUG */
15145
15146 for (i = 0; i < NVM_SIZE; i++) {
15147 if (wm_nvm_read(sc, i, 1, &eeprom_data))
15148 return -1;
15149 checksum += eeprom_data;
15150 }
15151
15152 if (checksum != (uint16_t) NVM_CHECKSUM) {
15153 #ifdef WM_DEBUG
15154 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
15155 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
15156 #endif
15157 }
15158
15159 return 0;
15160 }
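
/*
 * A sketch of the checksum convention checked above: the 16-bit sum of
 * words 0..NVM_SIZE-1, including the checksum word itself, must equal
 * NVM_CHECKSUM.  A tool preparing an image (the 64-word "image" buffer
 * here is hypothetical) would therefore store:
 */
#if 0	/* example only, not compiled */
	uint16_t sum = 0;
	int w;

	for (w = 0; w < NVM_SIZE - 1; w++)
		sum += image[w];
	image[NVM_SIZE - 1] = (uint16_t)(NVM_CHECKSUM - sum);
#endif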
15161
15162 static void
15163 wm_nvm_version_invm(struct wm_softc *sc)
15164 {
15165 uint32_t dword;
15166
15167 /*
	 * Linux's code to decode the version is very strange, so we don't
	 * obey that algorithm and just use word 61 as the document
	 * describes.  Perhaps it's not perfect, though...
15171 *
15172 * Example:
15173 *
15174 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
15175 */
15176 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
15177 dword = __SHIFTOUT(dword, INVM_VER_1);
15178 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
15179 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
15180 }
15181
15182 static void
15183 wm_nvm_version(struct wm_softc *sc)
15184 {
15185 uint16_t major, minor, build, patch;
15186 uint16_t uid0, uid1;
15187 uint16_t nvm_data;
15188 uint16_t off;
15189 bool check_version = false;
15190 bool check_optionrom = false;
15191 bool have_build = false;
15192 bool have_uid = true;
15193
15194 /*
15195 * Version format:
15196 *
15197 * XYYZ
15198 * X0YZ
15199 * X0YY
15200 *
15201 * Example:
15202 *
15203 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
15204 * 82571 0x50a6 5.10.6?
15205 * 82572 0x506a 5.6.10?
15206 * 82572EI 0x5069 5.6.9?
15207 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
15208 * 0x2013 2.1.3?
15209 * 82583 0x10a0 1.10.0? (document says it's default value)
15210 * ICH8+82567 0x0040 0.4.0?
15211 * ICH9+82566 0x1040 1.4.0?
	 * ICH10+82567	0x0043	0.4.3?
15213 * PCH+82577 0x00c1 0.12.1?
15214 * PCH2+82579 0x00d3 0.13.3?
15215 * 0x00d4 0.13.4?
15216 * LPT+I218 0x0023 0.2.3?
15217 * SPT+I219 0x0084 0.8.4?
15218 * CNP+I219 0x0054 0.5.4?
15219 */
15220
	/*
	 * XXX
	 * The SPI ROM of qemu's e1000e emulation (82574L) has only 64
	 * words.  I've never seen real 82574 hardware with such a small
	 * SPI ROM.
	 */
15226 if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
15227 || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
15228 have_uid = false;
15229
15230 switch (sc->sc_type) {
15231 case WM_T_82571:
15232 case WM_T_82572:
15233 case WM_T_82574:
15234 case WM_T_82583:
15235 check_version = true;
15236 check_optionrom = true;
15237 have_build = true;
15238 break;
15239 case WM_T_ICH8:
15240 case WM_T_ICH9:
15241 case WM_T_ICH10:
15242 case WM_T_PCH:
15243 case WM_T_PCH2:
15244 case WM_T_PCH_LPT:
15245 case WM_T_PCH_SPT:
15246 case WM_T_PCH_CNP:
15247 case WM_T_PCH_TGP:
15248 check_version = true;
15249 have_build = true;
15250 have_uid = false;
15251 break;
15252 case WM_T_82575:
15253 case WM_T_82576:
15254 case WM_T_82580:
15255 if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
15256 check_version = true;
15257 break;
15258 case WM_T_I211:
15259 wm_nvm_version_invm(sc);
15260 have_uid = false;
15261 goto printver;
15262 case WM_T_I210:
15263 if (!wm_nvm_flash_presence_i210(sc)) {
15264 wm_nvm_version_invm(sc);
15265 have_uid = false;
15266 goto printver;
15267 }
15268 /* FALLTHROUGH */
15269 case WM_T_I350:
15270 case WM_T_I354:
15271 check_version = true;
15272 check_optionrom = true;
15273 break;
15274 default:
15275 return;
15276 }
15277 if (check_version
15278 && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
15279 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
15280 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
15281 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
15282 build = nvm_data & NVM_BUILD_MASK;
15283 have_build = true;
15284 } else
15285 minor = nvm_data & 0x00ff;
15286
15287 /* Decimal */
15288 minor = (minor / 16) * 10 + (minor % 16);
15289 sc->sc_nvm_ver_major = major;
15290 sc->sc_nvm_ver_minor = minor;
15291
15292 printver:
15293 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
15294 sc->sc_nvm_ver_minor);
15295 if (have_build) {
15296 sc->sc_nvm_ver_build = build;
15297 aprint_verbose(".%d", build);
15298 }
15299 }
15300
	/* Assume the Option ROM area is above NVM_SIZE */
15302 if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
15303 && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
15304 /* Option ROM Version */
15305 if ((off != 0x0000) && (off != 0xffff)) {
15306 int rv;
15307
15308 off += NVM_COMBO_VER_OFF;
15309 rv = wm_nvm_read(sc, off + 1, 1, &uid1);
15310 rv |= wm_nvm_read(sc, off, 1, &uid0);
15311 if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
15312 && (uid1 != 0) && (uid1 != 0xffff)) {
15313 /* 16bits */
15314 major = uid0 >> 8;
15315 build = (uid0 << 8) | (uid1 >> 8);
15316 patch = uid1 & 0x00ff;
15317 aprint_verbose(", option ROM Version %d.%d.%d",
15318 major, build, patch);
15319 }
15320 }
15321 }
15322
15323 if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
15324 aprint_verbose(", Image Unique ID %08x",
15325 ((uint32_t)uid1 << 16) | uid0);
15326 }
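
/*
 * Worked example of the decode above for nvm_data 0x50a2 (82571-class,
 * so have_build is true): major = 0x5, the minor field is 0x0a, which the
 * BCD-style conversion turns into (0x0a / 16) * 10 + (0x0a % 16) = 10,
 * and build = 0x2, printing as "version 5.10.2".
 */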
15327
15328 /*
15329 * wm_nvm_read:
15330 *
15331 * Read data from the serial EEPROM.
15332 */
15333 static int
15334 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
15335 {
15336 int rv;
15337
15338 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
15339 device_xname(sc->sc_dev), __func__));
15340
15341 if (sc->sc_flags & WM_F_EEPROM_INVALID)
15342 return -1;
15343
15344 rv = sc->nvm.read(sc, word, wordcnt, data);
15345
15346 return rv;
15347 }
15348
/*
 * Hardware semaphores.
 * Very complex...
 */
15353
15354 static int
15355 wm_get_null(struct wm_softc *sc)
15356 {
15357
15358 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15359 device_xname(sc->sc_dev), __func__));
15360 return 0;
15361 }
15362
15363 static void
15364 wm_put_null(struct wm_softc *sc)
15365 {
15366
15367 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15368 device_xname(sc->sc_dev), __func__));
15369 return;
15370 }
15371
15372 static int
15373 wm_get_eecd(struct wm_softc *sc)
15374 {
15375 uint32_t reg;
15376 int x;
15377
15378 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
15379 device_xname(sc->sc_dev), __func__));
15380
15381 reg = CSR_READ(sc, WMREG_EECD);
15382
15383 /* Request EEPROM access. */
15384 reg |= EECD_EE_REQ;
15385 CSR_WRITE(sc, WMREG_EECD, reg);
15386
15387 /* ..and wait for it to be granted. */
15388 for (x = 0; x < 1000; x++) {
15389 reg = CSR_READ(sc, WMREG_EECD);
15390 if (reg & EECD_EE_GNT)
15391 break;
15392 delay(5);
15393 }
15394 if ((reg & EECD_EE_GNT) == 0) {
15395 aprint_error_dev(sc->sc_dev,
15396 "could not acquire EEPROM GNT\n");
15397 reg &= ~EECD_EE_REQ;
15398 CSR_WRITE(sc, WMREG_EECD, reg);
15399 return -1;
15400 }
15401
15402 return 0;
15403 }
15404
15405 static void
15406 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
15407 {
15408
15409 *eecd |= EECD_SK;
15410 CSR_WRITE(sc, WMREG_EECD, *eecd);
15411 CSR_WRITE_FLUSH(sc);
15412 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
15413 delay(1);
15414 else
15415 delay(50);
15416 }
15417
15418 static void
15419 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
15420 {
15421
15422 *eecd &= ~EECD_SK;
15423 CSR_WRITE(sc, WMREG_EECD, *eecd);
15424 CSR_WRITE_FLUSH(sc);
15425 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
15426 delay(1);
15427 else
15428 delay(50);
15429 }
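
/*
 * A minimal sketch of how these helpers pulse one EEPROM clock around a
 * data-bit sample (EECD_DO is the EEPROM's data-out bit); the shift-in/out
 * routines elsewhere in this driver presumably follow this pattern:
 */
#if 0	/* example only, not compiled */
	uint32_t eecd = CSR_READ(sc, WMREG_EECD);

	wm_nvm_eec_clock_raise(sc, &eecd);
	if ((CSR_READ(sc, WMREG_EECD) & EECD_DO) != 0)
		/* ... shift a 1 into the result ... */;
	wm_nvm_eec_clock_lower(sc, &eecd);
#endif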
15430
15431 static void
15432 wm_put_eecd(struct wm_softc *sc)
15433 {
15434 uint32_t reg;
15435
15436 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15437 device_xname(sc->sc_dev), __func__));
15438
15439 /* Stop nvm */
15440 reg = CSR_READ(sc, WMREG_EECD);
15441 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
15442 /* Pull CS high */
15443 reg |= EECD_CS;
		wm_nvm_eec_clock_lower(sc, &reg);
15445 } else {
15446 /* CS on Microwire is active-high */
15447 reg &= ~(EECD_CS | EECD_DI);
15448 CSR_WRITE(sc, WMREG_EECD, reg);
		wm_nvm_eec_clock_raise(sc, &reg);
		wm_nvm_eec_clock_lower(sc, &reg);
15451 }
15452
15453 reg = CSR_READ(sc, WMREG_EECD);
15454 reg &= ~EECD_EE_REQ;
15455 CSR_WRITE(sc, WMREG_EECD, reg);
15456
15457 return;
15458 }
15459
15460 /*
15461 * Get hardware semaphore.
15462 * Same as e1000_get_hw_semaphore_generic()
15463 */
15464 static int
15465 wm_get_swsm_semaphore(struct wm_softc *sc)
15466 {
15467 int32_t timeout;
15468 uint32_t swsm;
15469
15470 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15471 device_xname(sc->sc_dev), __func__));
15472 KASSERT(sc->sc_nvm_wordsize > 0);
15473
15474 retry:
15475 /* Get the SW semaphore. */
15476 timeout = sc->sc_nvm_wordsize + 1;
15477 while (timeout) {
15478 swsm = CSR_READ(sc, WMREG_SWSM);
15479
15480 if ((swsm & SWSM_SMBI) == 0)
15481 break;
15482
15483 delay(50);
15484 timeout--;
15485 }
15486
15487 if (timeout == 0) {
15488 if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
15489 /*
15490 * In rare circumstances, the SW semaphore may already
15491 * be held unintentionally. Clear the semaphore once
15492 * before giving up.
15493 */
15494 sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
15495 wm_put_swsm_semaphore(sc);
15496 goto retry;
15497 }
15498 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
15499 return -1;
15500 }
15501
15502 /* Get the FW semaphore. */
15503 timeout = sc->sc_nvm_wordsize + 1;
15504 while (timeout) {
15505 swsm = CSR_READ(sc, WMREG_SWSM);
15506 swsm |= SWSM_SWESMBI;
15507 CSR_WRITE(sc, WMREG_SWSM, swsm);
15508 /* If we managed to set the bit we got the semaphore. */
15509 swsm = CSR_READ(sc, WMREG_SWSM);
15510 if (swsm & SWSM_SWESMBI)
15511 break;
15512
15513 delay(50);
15514 timeout--;
15515 }
15516
15517 if (timeout == 0) {
15518 aprint_error_dev(sc->sc_dev,
15519 "could not acquire SWSM SWESMBI\n");
15520 /* Release semaphores */
15521 wm_put_swsm_semaphore(sc);
15522 return -1;
15523 }
15524 return 0;
15525 }
15526
15527 /*
15528 * Put hardware semaphore.
15529 * Same as e1000_put_hw_semaphore_generic()
15530 */
15531 static void
15532 wm_put_swsm_semaphore(struct wm_softc *sc)
15533 {
15534 uint32_t swsm;
15535
15536 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15537 device_xname(sc->sc_dev), __func__));
15538
15539 swsm = CSR_READ(sc, WMREG_SWSM);
15540 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
15541 CSR_WRITE(sc, WMREG_SWSM, swsm);
15542 }
15543
15544 /*
15545 * Get SW/FW semaphore.
15546 * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
15547 */
15548 static int
15549 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15550 {
15551 uint32_t swfw_sync;
15552 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
15553 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
15554 int timeout;
15555
15556 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15557 device_xname(sc->sc_dev), __func__));
15558
15559 if (sc->sc_type == WM_T_80003)
15560 timeout = 50;
15561 else
15562 timeout = 200;
15563
15564 while (timeout) {
15565 if (wm_get_swsm_semaphore(sc)) {
15566 aprint_error_dev(sc->sc_dev,
15567 "%s: failed to get semaphore\n",
15568 __func__);
15569 return -1;
15570 }
15571 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15572 if ((swfw_sync & (swmask | fwmask)) == 0) {
15573 swfw_sync |= swmask;
15574 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15575 wm_put_swsm_semaphore(sc);
15576 return 0;
15577 }
15578 wm_put_swsm_semaphore(sc);
15579 delay(5000);
15580 timeout--;
15581 }
15582 device_printf(sc->sc_dev,
15583 "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
15584 mask, swfw_sync);
15585 return -1;
15586 }
15587
15588 static void
15589 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15590 {
15591 uint32_t swfw_sync;
15592
15593 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15594 device_xname(sc->sc_dev), __func__));
15595
15596 while (wm_get_swsm_semaphore(sc) != 0)
15597 continue;
15598
15599 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15600 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
15601 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15602
15603 wm_put_swsm_semaphore(sc);
15604 }
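
/*
 * A note on the SW_FW_SYNC layout assumed above: each resource has one
 * software-owned bit and one firmware-owned bit, at the same position
 * within the two halves of the register.  For example, for the EEPROM
 * resource:
 */
#if 0	/* example only, not compiled */
	uint32_t sw = SWFW_EEP_SM << SWFW_SOFT_SHIFT;	/* driver's bit */
	uint32_t fw = SWFW_EEP_SM << SWFW_FIRM_SHIFT;	/* firmware's bit */

	/* The resource is free only when both bits are clear. */
	if ((CSR_READ(sc, WMREG_SW_FW_SYNC) & (sw | fw)) == 0)
		/* ... claim it by setting sw, under the SWSM semaphore. */;
#endif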
15605
15606 static int
15607 wm_get_nvm_80003(struct wm_softc *sc)
15608 {
15609 int rv;
15610
15611 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
15612 device_xname(sc->sc_dev), __func__));
15613
15614 if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
15615 aprint_error_dev(sc->sc_dev,
15616 "%s: failed to get semaphore(SWFW)\n", __func__);
15617 return rv;
15618 }
15619
15620 if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15621 && (rv = wm_get_eecd(sc)) != 0) {
15622 aprint_error_dev(sc->sc_dev,
15623 "%s: failed to get semaphore(EECD)\n", __func__);
15624 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15625 return rv;
15626 }
15627
15628 return 0;
15629 }
15630
15631 static void
15632 wm_put_nvm_80003(struct wm_softc *sc)
15633 {
15634
15635 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15636 device_xname(sc->sc_dev), __func__));
15637
15638 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15639 wm_put_eecd(sc);
15640 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15641 }
15642
15643 static int
15644 wm_get_nvm_82571(struct wm_softc *sc)
15645 {
15646 int rv;
15647
15648 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15649 device_xname(sc->sc_dev), __func__));
15650
15651 if ((rv = wm_get_swsm_semaphore(sc)) != 0)
15652 return rv;
15653
15654 switch (sc->sc_type) {
15655 case WM_T_82573:
15656 break;
15657 default:
15658 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15659 rv = wm_get_eecd(sc);
15660 break;
15661 }
15662
15663 if (rv != 0) {
15664 aprint_error_dev(sc->sc_dev,
15665 "%s: failed to get semaphore\n",
15666 __func__);
15667 wm_put_swsm_semaphore(sc);
15668 }
15669
15670 return rv;
15671 }
15672
15673 static void
15674 wm_put_nvm_82571(struct wm_softc *sc)
15675 {
15676
15677 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15678 device_xname(sc->sc_dev), __func__));
15679
15680 switch (sc->sc_type) {
15681 case WM_T_82573:
15682 break;
15683 default:
15684 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15685 wm_put_eecd(sc);
15686 break;
15687 }
15688
15689 wm_put_swsm_semaphore(sc);
15690 }
15691
15692 static int
15693 wm_get_phy_82575(struct wm_softc *sc)
15694 {
15695
15696 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15697 device_xname(sc->sc_dev), __func__));
15698 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15699 }
15700
15701 static void
15702 wm_put_phy_82575(struct wm_softc *sc)
15703 {
15704
15705 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15706 device_xname(sc->sc_dev), __func__));
15707 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15708 }
15709
15710 static int
15711 wm_get_swfwhw_semaphore(struct wm_softc *sc)
15712 {
15713 uint32_t ext_ctrl;
	int timeout;
15715
15716 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15717 device_xname(sc->sc_dev), __func__));
15718
15719 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15720 for (timeout = 0; timeout < 200; timeout++) {
15721 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15722 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15723 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15724
15725 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15726 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15727 return 0;
15728 delay(5000);
15729 }
15730 device_printf(sc->sc_dev,
15731 "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
15732 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15733 return -1;
15734 }
15735
15736 static void
15737 wm_put_swfwhw_semaphore(struct wm_softc *sc)
15738 {
15739 uint32_t ext_ctrl;
15740
15741 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15742 device_xname(sc->sc_dev), __func__));
15743
15744 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15745 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15746 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15747
15748 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15749 }
15750
15751 static int
15752 wm_get_swflag_ich8lan(struct wm_softc *sc)
15753 {
15754 uint32_t ext_ctrl;
15755 int timeout;
15756
15757 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15758 device_xname(sc->sc_dev), __func__));
15759 mutex_enter(sc->sc_ich_phymtx);
15760 for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
15761 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15762 if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
15763 break;
15764 delay(1000);
15765 }
15766 if (timeout >= WM_PHY_CFG_TIMEOUT) {
15767 device_printf(sc->sc_dev,
15768 "SW has already locked the resource\n");
15769 goto out;
15770 }
15771
15772 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15773 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15774 for (timeout = 0; timeout < 1000; timeout++) {
15775 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15776 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15777 break;
15778 delay(1000);
15779 }
15780 if (timeout >= 1000) {
15781 device_printf(sc->sc_dev, "failed to acquire semaphore\n");
15782 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15783 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15784 goto out;
15785 }
15786 return 0;
15787
15788 out:
15789 mutex_exit(sc->sc_ich_phymtx);
15790 return -1;
15791 }
15792
15793 static void
15794 wm_put_swflag_ich8lan(struct wm_softc *sc)
15795 {
15796 uint32_t ext_ctrl;
15797
15798 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15799 device_xname(sc->sc_dev), __func__));
15800 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15801 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
15802 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15803 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15804 } else
15805 device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
15806
15807 mutex_exit(sc->sc_ich_phymtx);
15808 }
15809
15810 static int
15811 wm_get_nvm_ich8lan(struct wm_softc *sc)
15812 {
15813
15814 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15815 device_xname(sc->sc_dev), __func__));
15816 mutex_enter(sc->sc_ich_nvmmtx);
15817
15818 return 0;
15819 }
15820
15821 static void
15822 wm_put_nvm_ich8lan(struct wm_softc *sc)
15823 {
15824
15825 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15826 device_xname(sc->sc_dev), __func__));
15827 mutex_exit(sc->sc_ich_nvmmtx);
15828 }
15829
15830 static int
15831 wm_get_hw_semaphore_82573(struct wm_softc *sc)
15832 {
15833 int i = 0;
15834 uint32_t reg;
15835
15836 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15837 device_xname(sc->sc_dev), __func__));
15838
15839 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15840 do {
15841 CSR_WRITE(sc, WMREG_EXTCNFCTR,
15842 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
15843 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15844 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
15845 break;
15846 delay(2*1000);
15847 i++;
15848 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
15849
15850 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
15851 wm_put_hw_semaphore_82573(sc);
15852 log(LOG_ERR, "%s: Driver can't access the PHY\n",
15853 device_xname(sc->sc_dev));
15854 return -1;
15855 }
15856
15857 return 0;
15858 }
15859
15860 static void
15861 wm_put_hw_semaphore_82573(struct wm_softc *sc)
15862 {
15863 uint32_t reg;
15864
15865 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15866 device_xname(sc->sc_dev), __func__));
15867
15868 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15869 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15870 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15871 }
15872
15873 /*
15874 * Management mode and power management related subroutines.
15875 * BMC, AMT, suspend/resume and EEE.
15876 */
15877
15878 #ifdef WM_WOL
15879 static int
15880 wm_check_mng_mode(struct wm_softc *sc)
15881 {
15882 int rv;
15883
15884 switch (sc->sc_type) {
15885 case WM_T_ICH8:
15886 case WM_T_ICH9:
15887 case WM_T_ICH10:
15888 case WM_T_PCH:
15889 case WM_T_PCH2:
15890 case WM_T_PCH_LPT:
15891 case WM_T_PCH_SPT:
15892 case WM_T_PCH_CNP:
15893 case WM_T_PCH_TGP:
15894 rv = wm_check_mng_mode_ich8lan(sc);
15895 break;
15896 case WM_T_82574:
15897 case WM_T_82583:
15898 rv = wm_check_mng_mode_82574(sc);
15899 break;
15900 case WM_T_82571:
15901 case WM_T_82572:
15902 case WM_T_82573:
15903 case WM_T_80003:
15904 rv = wm_check_mng_mode_generic(sc);
15905 break;
15906 default:
		/* Nothing to do */
15908 rv = 0;
15909 break;
15910 }
15911
15912 return rv;
15913 }
15914
15915 static int
15916 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
15917 {
15918 uint32_t fwsm;
15919
15920 fwsm = CSR_READ(sc, WMREG_FWSM);
15921
15922 if (((fwsm & FWSM_FW_VALID) != 0)
15923 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15924 return 1;
15925
15926 return 0;
15927 }
15928
15929 static int
15930 wm_check_mng_mode_82574(struct wm_softc *sc)
15931 {
15932 uint16_t data;
15933
15934 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15935
15936 if ((data & NVM_CFG2_MNGM_MASK) != 0)
15937 return 1;
15938
15939 return 0;
15940 }
15941
15942 static int
15943 wm_check_mng_mode_generic(struct wm_softc *sc)
15944 {
15945 uint32_t fwsm;
15946
15947 fwsm = CSR_READ(sc, WMREG_FWSM);
15948
15949 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
15950 return 1;
15951
15952 return 0;
15953 }
15954 #endif /* WM_WOL */
15955
15956 static int
15957 wm_enable_mng_pass_thru(struct wm_softc *sc)
15958 {
15959 uint32_t manc, fwsm, factps;
15960
15961 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
15962 return 0;
15963
15964 manc = CSR_READ(sc, WMREG_MANC);
15965
15966 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
15967 device_xname(sc->sc_dev), manc));
15968 if ((manc & MANC_RECV_TCO_EN) == 0)
15969 return 0;
15970
15971 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
15972 fwsm = CSR_READ(sc, WMREG_FWSM);
15973 factps = CSR_READ(sc, WMREG_FACTPS);
15974 if (((factps & FACTPS_MNGCG) == 0)
15975 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15976 return 1;
15977 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
15978 uint16_t data;
15979
15980 factps = CSR_READ(sc, WMREG_FACTPS);
15981 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15982 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
15983 device_xname(sc->sc_dev), factps, data));
15984 if (((factps & FACTPS_MNGCG) == 0)
15985 && ((data & NVM_CFG2_MNGM_MASK)
15986 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
15987 return 1;
15988 } else if (((manc & MANC_SMBUS_EN) != 0)
15989 && ((manc & MANC_ASF_EN) == 0))
15990 return 1;
15991
15992 return 0;
15993 }
15994
15995 static bool
15996 wm_phy_resetisblocked(struct wm_softc *sc)
15997 {
15998 bool blocked = false;
15999 uint32_t reg;
16000 int i = 0;
16001
16002 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16003 device_xname(sc->sc_dev), __func__));
16004
16005 switch (sc->sc_type) {
16006 case WM_T_ICH8:
16007 case WM_T_ICH9:
16008 case WM_T_ICH10:
16009 case WM_T_PCH:
16010 case WM_T_PCH2:
16011 case WM_T_PCH_LPT:
16012 case WM_T_PCH_SPT:
16013 case WM_T_PCH_CNP:
16014 case WM_T_PCH_TGP:
16015 do {
16016 reg = CSR_READ(sc, WMREG_FWSM);
16017 if ((reg & FWSM_RSPCIPHY) == 0) {
16018 blocked = true;
16019 delay(10*1000);
16020 continue;
16021 }
16022 blocked = false;
16023 } while (blocked && (i++ < 30));
		return blocked;
16026 case WM_T_82571:
16027 case WM_T_82572:
16028 case WM_T_82573:
16029 case WM_T_82574:
16030 case WM_T_82583:
16031 case WM_T_80003:
16032 reg = CSR_READ(sc, WMREG_MANC);
		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
			return true;
		else
			return false;
16038 default:
16039 /* No problem */
16040 break;
16041 }
16042
16043 return false;
16044 }
16045
16046 static void
16047 wm_get_hw_control(struct wm_softc *sc)
16048 {
16049 uint32_t reg;
16050
16051 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
16052 device_xname(sc->sc_dev), __func__));
16053
16054 if (sc->sc_type == WM_T_82573) {
16055 reg = CSR_READ(sc, WMREG_SWSM);
16056 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
16057 } else if (sc->sc_type >= WM_T_82571) {
16058 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16059 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
16060 }
16061 }
16062
16063 static void
16064 wm_release_hw_control(struct wm_softc *sc)
16065 {
16066 uint32_t reg;
16067
16068 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
16069 device_xname(sc->sc_dev), __func__));
16070
16071 if (sc->sc_type == WM_T_82573) {
16072 reg = CSR_READ(sc, WMREG_SWSM);
16073 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
16074 } else if (sc->sc_type >= WM_T_82571) {
16075 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16076 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
16077 }
16078 }
16079
16080 static void
16081 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
16082 {
16083 uint32_t reg;
16084
16085 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16086 device_xname(sc->sc_dev), __func__));
16087
16088 if (sc->sc_type < WM_T_PCH2)
16089 return;
16090
16091 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
16092
16093 if (gate)
16094 reg |= EXTCNFCTR_GATE_PHY_CFG;
16095 else
16096 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
16097
16098 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
16099 }
16100
16101 static int
16102 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
16103 {
16104 uint32_t fwsm, reg;
16105 int rv;
16106
16107 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16108 device_xname(sc->sc_dev), __func__));
16109
16110 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
16111 wm_gate_hw_phy_config_ich8lan(sc, true);
16112
16113 /* Disable ULP */
16114 wm_ulp_disable(sc);
16115
16116 /* Acquire PHY semaphore */
16117 rv = sc->phy.acquire(sc);
16118 if (rv != 0) {
16119 DPRINTF(sc, WM_DEBUG_INIT,
16120 ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
16121 return rv;
16122 }
16123
16124 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
16125 * inaccessible and resetting the PHY is not blocked, toggle the
16126 * LANPHYPC Value bit to force the interconnect to PCIe mode.
16127 */
16128 fwsm = CSR_READ(sc, WMREG_FWSM);
16129 switch (sc->sc_type) {
16130 case WM_T_PCH_LPT:
16131 case WM_T_PCH_SPT:
16132 case WM_T_PCH_CNP:
16133 case WM_T_PCH_TGP:
16134 if (wm_phy_is_accessible_pchlan(sc))
16135 break;
16136
16137 /* Before toggling LANPHYPC, see if PHY is accessible by
16138 * forcing MAC to SMBus mode first.
16139 */
16140 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16141 reg |= CTRL_EXT_FORCE_SMBUS;
16142 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16143 #if 0
16144 /* XXX Isn't this required??? */
16145 CSR_WRITE_FLUSH(sc);
16146 #endif
16147 /* Wait 50 milliseconds for MAC to finish any retries
16148 * that it might be trying to perform from previous
16149 * attempts to acknowledge any phy read requests.
16150 */
16151 delay(50 * 1000);
16152 /* FALLTHROUGH */
16153 case WM_T_PCH2:
16154 if (wm_phy_is_accessible_pchlan(sc) == true)
16155 break;
16156 /* FALLTHROUGH */
16157 case WM_T_PCH:
16158 if (sc->sc_type == WM_T_PCH)
16159 if ((fwsm & FWSM_FW_VALID) != 0)
16160 break;
16161
16162 if (wm_phy_resetisblocked(sc) == true) {
16163 device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
16164 break;
16165 }
16166
16167 /* Toggle LANPHYPC Value bit */
16168 wm_toggle_lanphypc_pch_lpt(sc);
16169
16170 if (sc->sc_type >= WM_T_PCH_LPT) {
16171 if (wm_phy_is_accessible_pchlan(sc) == true)
16172 break;
16173
16174 /* Toggling LANPHYPC brings the PHY out of SMBus mode
16175 * so ensure that the MAC is also out of SMBus mode
16176 */
16177 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16178 reg &= ~CTRL_EXT_FORCE_SMBUS;
16179 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16180
16181 if (wm_phy_is_accessible_pchlan(sc) == true)
16182 break;
16183 rv = -1;
16184 }
16185 break;
16186 default:
16187 break;
16188 }
16189
16190 /* Release semaphore */
16191 sc->phy.release(sc);
16192
16193 if (rv == 0) {
16194 /* Check to see if able to reset PHY. Print error if not */
16195 if (wm_phy_resetisblocked(sc)) {
16196 device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
16197 goto out;
16198 }
16199
		/* Reset the PHY before any access to it.  Doing so ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
16205 if (wm_reset_phy(sc) != 0)
16206 goto out;
16207
16208 /* On a successful reset, possibly need to wait for the PHY
16209 * to quiesce to an accessible state before returning control
16210 * to the calling function. If the PHY does not quiesce, then
16211 * return E1000E_BLK_PHY_RESET, as this is the condition that
16212 * the PHY is in.
16213 */
16214 if (wm_phy_resetisblocked(sc))
16215 device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
16216 }
16217
16218 out:
16219 /* Ungate automatic PHY configuration on non-managed 82579 */
16220 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
16221 delay(10*1000);
16222 wm_gate_hw_phy_config_ich8lan(sc, false);
16223 }
16224
16225 return 0;
16226 }
16227
16228 static void
16229 wm_init_manageability(struct wm_softc *sc)
16230 {
16231
16232 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16233 device_xname(sc->sc_dev), __func__));
16234 KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
16235
16236 if (sc->sc_flags & WM_F_HAS_MANAGE) {
16237 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
16238 uint32_t manc = CSR_READ(sc, WMREG_MANC);
16239
16240 /* Disable hardware interception of ARP */
16241 manc &= ~MANC_ARP_EN;
16242
16243 /* Enable receiving management packets to the host */
16244 if (sc->sc_type >= WM_T_82571) {
16245 manc |= MANC_EN_MNG2HOST;
16246 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
16247 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
16248 }
16249
16250 CSR_WRITE(sc, WMREG_MANC, manc);
16251 }
16252 }
16253
16254 static void
16255 wm_release_manageability(struct wm_softc *sc)
16256 {
16257
16258 if (sc->sc_flags & WM_F_HAS_MANAGE) {
16259 uint32_t manc = CSR_READ(sc, WMREG_MANC);
16260
16261 manc |= MANC_ARP_EN;
16262 if (sc->sc_type >= WM_T_82571)
16263 manc &= ~MANC_EN_MNG2HOST;
16264
16265 CSR_WRITE(sc, WMREG_MANC, manc);
16266 }
16267 }
16268
16269 static void
16270 wm_get_wakeup(struct wm_softc *sc)
16271 {
16272
16273 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
16274 switch (sc->sc_type) {
16275 case WM_T_82573:
16276 case WM_T_82583:
16277 sc->sc_flags |= WM_F_HAS_AMT;
16278 /* FALLTHROUGH */
16279 case WM_T_80003:
16280 case WM_T_82575:
16281 case WM_T_82576:
16282 case WM_T_82580:
16283 case WM_T_I350:
16284 case WM_T_I354:
16285 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
16286 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
16287 /* FALLTHROUGH */
16288 case WM_T_82541:
16289 case WM_T_82541_2:
16290 case WM_T_82547:
16291 case WM_T_82547_2:
16292 case WM_T_82571:
16293 case WM_T_82572:
16294 case WM_T_82574:
16295 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
16296 break;
16297 case WM_T_ICH8:
16298 case WM_T_ICH9:
16299 case WM_T_ICH10:
16300 case WM_T_PCH:
16301 case WM_T_PCH2:
16302 case WM_T_PCH_LPT:
16303 case WM_T_PCH_SPT:
16304 case WM_T_PCH_CNP:
16305 case WM_T_PCH_TGP:
16306 sc->sc_flags |= WM_F_HAS_AMT;
16307 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
16308 break;
16309 default:
16310 break;
16311 }
16312
16313 /* 1: HAS_MANAGE */
16314 if (wm_enable_mng_pass_thru(sc) != 0)
16315 sc->sc_flags |= WM_F_HAS_MANAGE;
16316
	/*
	 * Note that the WOL flags are set after the resetting of the EEPROM
	 * stuff.
	 */
16321 }
16322
16323 /*
16324 * Unconfigure Ultra Low Power mode.
16325 * Only for I217 and newer (see below).
16326 */
16327 static int
16328 wm_ulp_disable(struct wm_softc *sc)
16329 {
16330 uint32_t reg;
16331 uint16_t phyreg;
16332 int i = 0, rv;
16333
16334 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16335 device_xname(sc->sc_dev), __func__));
16336 /* Exclude old devices */
16337 if ((sc->sc_type < WM_T_PCH_LPT)
16338 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
16339 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
16340 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
16341 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
16342 return 0;
16343
16344 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
16345 /* Request ME un-configure ULP mode in the PHY */
16346 reg = CSR_READ(sc, WMREG_H2ME);
16347 reg &= ~H2ME_ULP;
16348 reg |= H2ME_ENFORCE_SETTINGS;
16349 CSR_WRITE(sc, WMREG_H2ME, reg);
16350
16351 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
16352 while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
16353 if (i++ == 30) {
16354 device_printf(sc->sc_dev, "%s timed out\n",
16355 __func__);
16356 return -1;
16357 }
16358 delay(10 * 1000);
16359 }
16360 reg = CSR_READ(sc, WMREG_H2ME);
16361 reg &= ~H2ME_ENFORCE_SETTINGS;
16362 CSR_WRITE(sc, WMREG_H2ME, reg);
16363
16364 return 0;
16365 }
16366
16367 /* Acquire semaphore */
16368 rv = sc->phy.acquire(sc);
16369 if (rv != 0) {
16370 DPRINTF(sc, WM_DEBUG_INIT,
16371 ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
16372 return rv;
16373 }
16374
16375 /* Toggle LANPHYPC */
16376 wm_toggle_lanphypc_pch_lpt(sc);
16377
16378 /* Unforce SMBus mode in PHY */
16379 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
16380 if (rv != 0) {
16381 uint32_t reg2;
16382
16383 aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
16384 __func__);
16385 reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
16386 reg2 |= CTRL_EXT_FORCE_SMBUS;
16387 CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
16388 delay(50 * 1000);
16389
16390 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
16391 &phyreg);
16392 if (rv != 0)
16393 goto release;
16394 }
16395 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
16396 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
16397
16398 /* Unforce SMBus mode in MAC */
16399 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16400 reg &= ~CTRL_EXT_FORCE_SMBUS;
16401 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16402
16403 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
16404 if (rv != 0)
16405 goto release;
16406 phyreg |= HV_PM_CTRL_K1_ENA;
16407 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
16408
16409 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
16410 &phyreg);
16411 if (rv != 0)
16412 goto release;
16413 phyreg &= ~(I218_ULP_CONFIG1_IND
16414 | I218_ULP_CONFIG1_STICKY_ULP
16415 | I218_ULP_CONFIG1_RESET_TO_SMBUS
16416 | I218_ULP_CONFIG1_WOL_HOST
16417 | I218_ULP_CONFIG1_INBAND_EXIT
16418 | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
16419 | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
16420 | I218_ULP_CONFIG1_DIS_SMB_PERST);
16421 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
16422 phyreg |= I218_ULP_CONFIG1_START;
16423 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
16424
16425 reg = CSR_READ(sc, WMREG_FEXTNVM7);
16426 reg &= ~FEXTNVM7_DIS_SMB_PERST;
16427 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
16428
16429 release:
16430 /* Release semaphore */
16431 sc->phy.release(sc);
16432 wm_gmii_reset(sc);
16433 delay(50 * 1000);
16434
16435 return rv;
16436 }
16437
16438 /* WOL in the newer chipset interfaces (pchlan) */
16439 static int
16440 wm_enable_phy_wakeup(struct wm_softc *sc)
16441 {
16442 device_t dev = sc->sc_dev;
16443 uint32_t mreg, moff;
16444 uint16_t wuce, wuc, wufc, preg;
16445 int i, rv;
16446
16447 KASSERT(sc->sc_type >= WM_T_PCH);
16448
16449 /* Copy MAC RARs to PHY RARs */
16450 wm_copy_rx_addrs_to_phy_ich8lan(sc);
16451
16452 /* Activate PHY wakeup */
16453 rv = sc->phy.acquire(sc);
16454 if (rv != 0) {
16455 device_printf(dev, "%s: failed to acquire semaphore\n",
16456 __func__);
16457 return rv;
16458 }
16459
16460 /*
16461 * Enable access to PHY wakeup registers.
16462 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
16463 */
16464 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
16465 if (rv != 0) {
16466 device_printf(dev,
16467 "%s: Could not enable PHY wakeup reg access\n", __func__);
16468 goto release;
16469 }
16470
16471 /* Copy MAC MTA to PHY MTA */
16472 for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
16473 uint16_t lo, hi;
16474
16475 mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
16476 lo = (uint16_t)(mreg & 0xffff);
16477 hi = (uint16_t)((mreg >> 16) & 0xffff);
16478 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
16479 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
16480 }
16481
16482 /* Configure PHY Rx Control register */
16483 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
16484 mreg = CSR_READ(sc, WMREG_RCTL);
16485 if (mreg & RCTL_UPE)
16486 preg |= BM_RCTL_UPE;
16487 if (mreg & RCTL_MPE)
16488 preg |= BM_RCTL_MPE;
16489 preg &= ~(BM_RCTL_MO_MASK);
16490 moff = __SHIFTOUT(mreg, RCTL_MO);
16491 if (moff != 0)
16492 preg |= moff << BM_RCTL_MO_SHIFT;
16493 if (mreg & RCTL_BAM)
16494 preg |= BM_RCTL_BAM;
16495 if (mreg & RCTL_PMCF)
16496 preg |= BM_RCTL_PMCF;
16497 mreg = CSR_READ(sc, WMREG_CTRL);
16498 if (mreg & CTRL_RFCE)
16499 preg |= BM_RCTL_RFCE;
16500 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
16501
16502 wuc = WUC_APME | WUC_PME_EN;
16503 wufc = WUFC_MAG;
16504 /* Enable PHY wakeup in MAC register */
16505 CSR_WRITE(sc, WMREG_WUC,
16506 WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
16507 CSR_WRITE(sc, WMREG_WUFC, wufc);
16508
16509 /* Configure and enable PHY wakeup in PHY registers */
16510 wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
16511 wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
16512
16513 wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
16514 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
16515
16516 release:
16517 sc->phy.release(sc);
16518
16519 return 0;
16520 }
16521
16522 /* Power down workaround on D3 */
16523 static void
16524 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
16525 {
16526 uint32_t reg;
16527 uint16_t phyreg;
16528 int i;
16529
16530 for (i = 0; i < 2; i++) {
16531 /* Disable link */
16532 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16533 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16534 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16535
16536 /*
16537 * Call gig speed drop workaround on Gig disable before
16538 * accessing any PHY registers
16539 */
16540 if (sc->sc_type == WM_T_ICH8)
16541 wm_gig_downshift_workaround_ich8lan(sc);
16542
16543 /* Write VR power-down enable */
16544 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16545 phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16546 phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
16547 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
16548
16549 /* Read it back and test */
16550 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16551 phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16552 if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
16553 break;
16554
16555 /* Issue PHY reset and repeat at most one more time */
16556 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
16557 }
16558 }
16559
16560 /*
16561 * wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
16562 * @sc: pointer to the HW structure
16563 *
16564 * During S0 to Sx transition, it is possible the link remains at gig
16565 * instead of negotiating to a lower speed. Before going to Sx, set
16566 * 'Gig Disable' to force link speed negotiation to a lower speed based on
16567 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
16568 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
16569 * needs to be written.
16570 * Parts that support (and are linked to a partner which supports) EEE in
16571 * 100Mbps should disable LPLU, since 100Mbps w/ EEE requires less power
16572 * than 10Mbps w/o EEE.
16573 */
16574 static void
16575 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
16576 {
16577 device_t dev = sc->sc_dev;
16578 struct ethercom *ec = &sc->sc_ethercom;
16579 uint32_t phy_ctrl;
16580 int rv;
16581
16582 phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
16583 phy_ctrl |= PHY_CTRL_GBE_DIS;
16584
16585 KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_TGP));
16586
16587 if (sc->sc_phytype == WMPHY_I217) {
16588 uint16_t devid = sc->sc_pcidevid;
16589
16590 if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
16591 (devid == PCI_PRODUCT_INTEL_I218_V) ||
16592 (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
16593 (devid == PCI_PRODUCT_INTEL_I218_V3) ||
16594 (sc->sc_type >= WM_T_PCH_SPT))
16595 CSR_WRITE(sc, WMREG_FEXTNVM6,
16596 CSR_READ(sc, WMREG_FEXTNVM6)
16597 & ~FEXTNVM6_REQ_PLL_CLK);
16598
16599 if (sc->phy.acquire(sc) != 0)
16600 goto out;
16601
16602 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16603 uint16_t eee_advert;
16604
16605 rv = wm_read_emi_reg_locked(dev,
16606 I217_EEE_ADVERTISEMENT, &eee_advert);
16607 if (rv)
16608 goto release;
16609
16610 /*
16611 * Disable LPLU if both link partners support 100BaseT
16612 * EEE and 100Full is advertised on both ends of the
16613 * link, and enable Auto Enable LPI since there will
16614 * be no driver to enable LPI while in Sx.
16615 */
16616 if ((eee_advert & AN_EEEADVERT_100_TX) &&
16617 (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
16618 uint16_t anar, phy_reg;
16619
16620 sc->phy.readreg_locked(dev, 2, MII_ANAR,
16621 &anar);
16622 if (anar & ANAR_TX_FD) {
16623 phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
16624 PHY_CTRL_NOND0A_LPLU);
16625
16626 /* Set Auto Enable LPI after link up */
16627 sc->phy.readreg_locked(dev, 2,
16628 I217_LPI_GPIO_CTRL, &phy_reg);
16629 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16630 sc->phy.writereg_locked(dev, 2,
16631 I217_LPI_GPIO_CTRL, phy_reg);
16632 }
16633 }
16634 }
16635
16636 /*
16637 * For i217 Intel Rapid Start Technology support,
16638 * when the system is going into Sx and no manageability engine
16639 * is present, the driver must configure proxy to reset only on
16640 * power good. LPI (Low Power Idle) state must also reset only
16641 * on power good, as well as the MTA (Multicast table array).
16642 * The SMBus release must also be disabled on LCD reset.
16643 */
16644
16645 /*
16646 * Enable MTA to reset for Intel Rapid Start Technology
16647 * Support
16648 */
16649
16650 release:
16651 sc->phy.release(sc);
16652 }
16653 out:
16654 CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
16655
16656 if (sc->sc_type == WM_T_ICH8)
16657 wm_gig_downshift_workaround_ich8lan(sc);
16658
16659 if (sc->sc_type >= WM_T_PCH) {
16660 wm_oem_bits_config_ich8lan(sc, false);
16661
16662 /* Reset PHY to activate OEM bits on 82577/8 */
16663 if (sc->sc_type == WM_T_PCH)
16664 wm_reset_phy(sc);
16665
16666 if (sc->phy.acquire(sc) != 0)
16667 return;
16668 wm_write_smbus_addr(sc);
16669 sc->phy.release(sc);
16670 }
16671 }
16672
16673 /*
16674 * wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
16675 * @sc: pointer to the HW structure
16676 *
16677 * During Sx to S0 transitions on non-managed devices or managed devices
16678 * on which PHY resets are not blocked, if the PHY registers cannot be
16679 * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
16680 * the PHY.
16681 * On i217, setup Intel Rapid Start Technology.
16682 */
16683 static int
16684 wm_resume_workarounds_pchlan(struct wm_softc *sc)
16685 {
16686 device_t dev = sc->sc_dev;
16687 int rv;
16688
16689 if (sc->sc_type < WM_T_PCH2)
16690 return 0;
16691
16692 rv = wm_init_phy_workarounds_pchlan(sc);
16693 if (rv != 0)
16694 return rv;
16695
16696 /* For i217 Intel Rapid Start Technology support, when the system
16697 * is transitioning from Sx and no manageability engine is present,
16698 * configure SMBus to restore on reset, disable proxy, and enable
16699 * the reset on MTA (Multicast table array).
16700 */
16701 if (sc->sc_phytype == WMPHY_I217) {
16702 uint16_t phy_reg;
16703
16704 rv = sc->phy.acquire(sc);
16705 if (rv != 0)
16706 return rv;
16707
16708 /* Clear Auto Enable LPI after link up */
16709 sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
16710 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16711 sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
16712
16713 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16714 /* Restore clear on SMB if no manageability engine
16715 * is present
16716 */
16717 rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
16718 &phy_reg);
16719 if (rv != 0)
16720 goto release;
16721 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
16722 sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
16723
16724 /* Disable Proxy */
16725 sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
16726 }
16727 /* Enable reset on MTA */
16728 rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
16729 if (rv != 0)
16730 goto release;
16731 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
16732 sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
16733
16734 release:
16735 sc->phy.release(sc);
16736 return rv;
16737 }
16738
16739 return 0;
16740 }
16741
16742 static void
16743 wm_enable_wakeup(struct wm_softc *sc)
16744 {
16745 uint32_t reg, pmreg;
16746 pcireg_t pmode;
16747 int rv = 0;
16748
16749 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16750 device_xname(sc->sc_dev), __func__));
16751
16752 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16753 &pmreg, NULL) == 0)
16754 return;
16755
16756 if ((sc->sc_flags & WM_F_WOL) == 0)
16757 goto pme;
16758
16759 /* Advertise the wakeup capability */
16760 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
16761 | CTRL_SWDPIN(3));
16762
16763 /* Keep the laser running on fiber adapters */
16764 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
16765 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
16766 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16767 reg |= CTRL_EXT_SWDPIN(3);
16768 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16769 }
16770
16771 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
16772 (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
16773 (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
16774 (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP) ||
16775 (sc->sc_type == WM_T_PCH_TGP))
16776 wm_suspend_workarounds_ich8lan(sc);
16777
16778 #if 0 /* For the multicast packet */
16779 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
16780 reg |= WUFC_MC;
16781 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
16782 #endif
16783
16784 if (sc->sc_type >= WM_T_PCH) {
16785 rv = wm_enable_phy_wakeup(sc);
16786 if (rv != 0)
16787 goto pme;
16788 } else {
16789 /* Enable wakeup by the MAC */
16790 CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
16791 CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
16792 }
16793
16794 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
16795 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
16796 || (sc->sc_type == WM_T_PCH2))
16797 && (sc->sc_phytype == WMPHY_IGP_3))
16798 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
16799
16800 pme:
16801 /* Request PME */
16802 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
16803 pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
16804 if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
16805 /* For WOL */
16806 pmode |= PCI_PMCSR_PME_EN;
16807 } else {
16808 /* Disable WOL */
16809 pmode &= ~PCI_PMCSR_PME_EN;
16810 }
16811 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
16812 }
16813
16814 /* Disable ASPM L0s and/or L1 for workaround */
16815 static void
16816 wm_disable_aspm(struct wm_softc *sc)
16817 {
16818 pcireg_t reg, mask = 0;
16819 const char *str = "";
16820
16821 /*
16822 * Only for PCIe device which has PCIe capability in the PCI config
16823 * space.
16824 */
16825 if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
16826 return;
16827
16828 switch (sc->sc_type) {
16829 case WM_T_82571:
16830 case WM_T_82572:
16831 /*
16832 * 8257[12] Errata 13: Device Does Not Support PCIe Active
16833 * State Power management L1 State (ASPM L1).
16834 */
16835 mask = PCIE_LCSR_ASPM_L1;
16836 str = "L1 is";
16837 break;
16838 case WM_T_82573:
16839 case WM_T_82574:
16840 case WM_T_82583:
16841 /*
16842 * The 82573 disappears when PCIe ASPM L0s is enabled.
16843 *
16844 * The 82574 and 82583 do not support PCIe ASPM L0s with
16845 * some chipsets. The 82574 and 82583 documents say that
16846 * disabling L0s with those specific chipsets is sufficient,
16847 * but we follow what the Intel em driver does.
16848 *
16849 * References:
16850 * Errata 8 of the Specification Update of i82573.
16851 * Errata 20 of the Specification Update of i82574.
16852 * Errata 9 of the Specification Update of i82583.
16853 */
16854 mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
16855 str = "L0s and L1 are";
16856 break;
16857 default:
16858 return;
16859 }
16860
16861 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16862 sc->sc_pcixe_capoff + PCIE_LCSR);
16863 reg &= ~mask;
16864 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16865 sc->sc_pcixe_capoff + PCIE_LCSR, reg);
16866
16867 /* Print only in wm_attach() */
16868 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
16869 aprint_verbose_dev(sc->sc_dev,
16870 "ASPM %s disabled to workaround the errata.\n", str);
16871 }
16872
16873 /* LPLU */
16874
16875 static void
16876 wm_lplu_d0_disable(struct wm_softc *sc)
16877 {
16878 struct mii_data *mii = &sc->sc_mii;
16879 uint32_t reg;
16880 uint16_t phyval;
16881
16882 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16883 device_xname(sc->sc_dev), __func__));
16884
16885 if (sc->sc_phytype == WMPHY_IFE)
16886 return;
16887
16888 switch (sc->sc_type) {
16889 case WM_T_82571:
16890 case WM_T_82572:
16891 case WM_T_82573:
16892 case WM_T_82575:
16893 case WM_T_82576:
16894 mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
16895 phyval &= ~PMR_D0_LPLU;
16896 mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
16897 break;
16898 case WM_T_82580:
16899 case WM_T_I350:
16900 case WM_T_I210:
16901 case WM_T_I211:
16902 reg = CSR_READ(sc, WMREG_PHPM);
16903 reg &= ~PHPM_D0A_LPLU;
16904 CSR_WRITE(sc, WMREG_PHPM, reg);
16905 break;
16906 case WM_T_82574:
16907 case WM_T_82583:
16908 case WM_T_ICH8:
16909 case WM_T_ICH9:
16910 case WM_T_ICH10:
16911 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16912 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
16913 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16914 CSR_WRITE_FLUSH(sc);
16915 break;
16916 case WM_T_PCH:
16917 case WM_T_PCH2:
16918 case WM_T_PCH_LPT:
16919 case WM_T_PCH_SPT:
16920 case WM_T_PCH_CNP:
16921 case WM_T_PCH_TGP:
16922 wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
16923 phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
16924 if (wm_phy_resetisblocked(sc) == false)
16925 phyval |= HV_OEM_BITS_ANEGNOW;
16926 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
16927 break;
16928 default:
16929 break;
16930 }
16931 }
16932
16933 /* EEE */
16934
16935 static int
16936 wm_set_eee_i350(struct wm_softc *sc)
16937 {
16938 struct ethercom *ec = &sc->sc_ethercom;
16939 uint32_t ipcnfg, eeer;
16940 uint32_t ipcnfg_mask
16941 = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
16942 uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
16943
16944 KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
16945
16946 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
16947 eeer = CSR_READ(sc, WMREG_EEER);
16948
16949 /* Enable or disable per user setting */
16950 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16951 ipcnfg |= ipcnfg_mask;
16952 eeer |= eeer_mask;
16953 } else {
16954 ipcnfg &= ~ipcnfg_mask;
16955 eeer &= ~eeer_mask;
16956 }
16957
16958 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
16959 CSR_WRITE(sc, WMREG_EEER, eeer);
16960 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
16961 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
16962
16963 return 0;
16964 }
16965
16966 static int
16967 wm_set_eee_pchlan(struct wm_softc *sc)
16968 {
16969 device_t dev = sc->sc_dev;
16970 struct ethercom *ec = &sc->sc_ethercom;
16971 uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
16972 int rv;
16973
16974 switch (sc->sc_phytype) {
16975 case WMPHY_82579:
16976 lpa = I82579_EEE_LP_ABILITY;
16977 pcs_status = I82579_EEE_PCS_STATUS;
16978 adv_addr = I82579_EEE_ADVERTISEMENT;
16979 break;
16980 case WMPHY_I217:
16981 lpa = I217_EEE_LP_ABILITY;
16982 pcs_status = I217_EEE_PCS_STATUS;
16983 adv_addr = I217_EEE_ADVERTISEMENT;
16984 break;
16985 default:
16986 return 0;
16987 }
16988
16989 rv = sc->phy.acquire(sc);
16990 if (rv != 0) {
16991 device_printf(dev, "%s: failed to get semaphore\n", __func__);
16992 return rv;
16993 }
16994
16995 rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
16996 if (rv != 0)
16997 goto release;
16998
16999 /* Clear bits that enable EEE in various speeds */
17000 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
17001
17002 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
17003 /* Save off link partner's EEE ability */
17004 rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
17005 if (rv != 0)
17006 goto release;
17007
17008 /* Read EEE advertisement */
17009 if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
17010 goto release;
17011
17012 /*
17013 * Enable EEE only for speeds in which the link partner is
17014 * EEE capable and for which we advertise EEE.
17015 */
17016 if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
17017 lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
17018 if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
17019 sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
17020 if ((data & ANLPAR_TX_FD) != 0)
17021 lpi_ctrl |= I82579_LPI_CTRL_EN_100;
17022 else {
17023 /*
17024 * EEE is not supported in 100Half, so ignore
17025 * partner's EEE in 100 ability if full-duplex
17026 * is not advertised.
17027 */
17028 sc->eee_lp_ability
17029 &= ~AN_EEEADVERT_100_TX;
17030 }
17031 }
17032 }
17033
17034 if (sc->sc_phytype == WMPHY_82579) {
17035 rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
17036 if (rv != 0)
17037 goto release;
17038
17039 data &= ~I82579_LPI_PLL_SHUT_100;
17040 rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
17041 }
17042
17043 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
17044 if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
17045 goto release;
17046
17047 rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
17048 release:
17049 sc->phy.release(sc);
17050
17051 return rv;
17052 }
17053
17054 static int
17055 wm_set_eee(struct wm_softc *sc)
17056 {
17057 struct ethercom *ec = &sc->sc_ethercom;
17058
17059 if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
17060 return 0;
17061
17062 if (sc->sc_type == WM_T_I354) {
17063 /* I354 uses an external PHY */
17064 return 0; /* not yet */
17065 } else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
17066 return wm_set_eee_i350(sc);
17067 else if (sc->sc_type >= WM_T_PCH2)
17068 return wm_set_eee_pchlan(sc);
17069
17070 return 0;
17071 }
17072
17073 /*
17074 * Workarounds (mainly PHY related).
17075 * Basically, PHY's workarounds are in the PHY drivers.
17076 */
17077
17078 /* Workaround for 82566 Kumeran PCS lock loss */
17079 static int
17080 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
17081 {
17082 struct mii_data *mii = &sc->sc_mii;
17083 uint32_t status = CSR_READ(sc, WMREG_STATUS);
17084 int i, reg, rv;
17085 uint16_t phyreg;
17086
17087 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17088 device_xname(sc->sc_dev), __func__));
17089
17090 /* If the link is not up, do nothing */
17091 if ((status & STATUS_LU) == 0)
17092 return 0;
17093
17094 /* Nothing to do if the link speed is other than 1Gbps */
17095 if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
17096 return 0;
17097
17098 for (i = 0; i < 10; i++) {
17099 /* read twice */
17100 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
17101 if (rv != 0)
17102 return rv;
17103 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
17104 if (rv != 0)
17105 return rv;
17106
17107 if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
17108 goto out; /* GOOD! */
17109
17110 /* Reset the PHY */
17111 wm_reset_phy(sc);
17112 delay(5*1000);
17113 }
17114
17115 /* Disable GigE link negotiation */
17116 reg = CSR_READ(sc, WMREG_PHY_CTRL);
17117 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
17118 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
17119
17120 /*
17121 * Call gig speed drop workaround on Gig disable before accessing
17122 * any PHY registers.
17123 */
17124 wm_gig_downshift_workaround_ich8lan(sc);
17125
17126 out:
17127 return 0;
17128 }
17129
17130 /*
17131 * wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
17132 * @sc: pointer to the HW structure
17133 *
17134 * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
17135 * LPLU, Gig disable, MDIC PHY reset):
17136 * 1) Set Kumeran Near-end loopback
17137 * 2) Clear Kumeran Near-end loopback
17138 * Should only be called for ICH8[m] devices with any 1G Phy.
17139 */
17140 static void
17141 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
17142 {
17143 uint16_t kmreg;
17144
17145 /* Only for igp3 */
17146 if (sc->sc_phytype == WMPHY_IGP_3) {
17147 if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
17148 return;
17149 kmreg |= KUMCTRLSTA_DIAG_NELPBK;
17150 if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
17151 return;
17152 kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
17153 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
17154 }
17155 }
17156
17157 /*
17158 * Workaround for pch's PHYs
17159 * XXX should be moved to a new PHY driver?
17160 */
17161 static int
17162 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
17163 {
17164 device_t dev = sc->sc_dev;
17165 struct mii_data *mii = &sc->sc_mii;
17166 struct mii_softc *child;
17167 uint16_t phy_data, phyrev = 0;
17168 int phytype = sc->sc_phytype;
17169 int rv;
17170
17171 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17172 device_xname(dev), __func__));
17173 KASSERT(sc->sc_type == WM_T_PCH);
17174
17175 /* Set MDIO slow mode before any other MDIO access */
17176 if (phytype == WMPHY_82577)
17177 if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
17178 return rv;
17179
17180 child = LIST_FIRST(&mii->mii_phys);
17181 if (child != NULL)
17182 phyrev = child->mii_mpd_rev;
17183
17184 /* (82577 && (phy rev 1 or 2)) || (82578 && (phy rev 1)) */
17185 if ((child != NULL) &&
17186 (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
17187 ((phytype == WMPHY_82578) && (phyrev == 1)))) {
17188 /* Disable generation of early preamble (0x4431) */
17189 rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
17190 &phy_data);
17191 if (rv != 0)
17192 return rv;
17193 phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
17194 BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
17195 rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
17196 phy_data);
17197 if (rv != 0)
17198 return rv;
17199
17200 /* Preamble tuning for SSC */
17201 rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
17202 if (rv != 0)
17203 return rv;
17204 }
17205
17206 /* 82578 */
17207 if (phytype == WMPHY_82578) {
17208 /*
17209 * Return registers to default by doing a soft reset then
17210 * writing 0x3140 to the control register
17211 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
17212 */
17213 if ((child != NULL) && (phyrev < 2)) {
17214 PHY_RESET(child);
17215 rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
17216 if (rv != 0)
17217 return rv;
17218 }
17219 }
17220
17221 /* Select page 0 */
17222 if ((rv = sc->phy.acquire(sc)) != 0)
17223 return rv;
17224 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
17225 sc->phy.release(sc);
17226 if (rv != 0)
17227 return rv;
17228
17229 /*
17230 * Configure the K1 Si workaround during phy reset assuming there is
17231 * link so that it disables K1 if link is at 1Gbps.
17232 */
17233 if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
17234 return rv;
17235
17236 /* Workaround for link disconnects on a busy hub in half duplex */
17237 rv = sc->phy.acquire(sc);
17238 if (rv)
17239 return rv;
17240 rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
17241 if (rv)
17242 goto release;
17243 rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
17244 phy_data & 0x00ff);
17245 if (rv)
17246 goto release;
17247
17248 /* Set MSE higher to enable link to stay up when noise is high */
17249 rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
17250 release:
17251 sc->phy.release(sc);
17252
17253 return rv;
17254 }
17255
17256 /*
17257 * wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
17258 * @sc: pointer to the HW structure
17259 */
17260 static void
17261 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
17262 {
17263
17264 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17265 device_xname(sc->sc_dev), __func__));
17266
17267 if (sc->phy.acquire(sc) != 0)
17268 return;
17269
17270 wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
17271
17272 sc->phy.release(sc);
17273 }
17274
17275 static void
17276 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
17277 {
17278 device_t dev = sc->sc_dev;
17279 uint32_t mac_reg;
17280 uint16_t i, wuce;
17281 int count;
17282
17283 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17284 device_xname(dev), __func__));
17285
17286 if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
17287 return;
17288
17289 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
17290 count = wm_rar_count(sc);
17291 for (i = 0; i < count; i++) {
17292 uint16_t lo, hi;
17293 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
17294 lo = (uint16_t)(mac_reg & 0xffff);
17295 hi = (uint16_t)((mac_reg >> 16) & 0xffff);
17296 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
17297 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
17298
17299 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
17300 lo = (uint16_t)(mac_reg & 0xffff);
17301 hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
17302 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
17303 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
17304 }
17305
17306 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
17307 }
17308
17309 /*
17310 * wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
17311 * with 82579 PHY
17312 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
17313 */
17314 static int
17315 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
17316 {
17317 device_t dev = sc->sc_dev;
17318 int rar_count;
17319 int rv;
17320 uint32_t mac_reg;
17321 uint16_t dft_ctrl, data;
17322 uint16_t i;
17323
17324 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17325 device_xname(dev), __func__));
17326
17327 if (sc->sc_type < WM_T_PCH2)
17328 return 0;
17329
17330 /* Acquire PHY semaphore */
17331 rv = sc->phy.acquire(sc);
17332 if (rv != 0)
17333 return rv;
17334
17335 /* Disable Rx path while enabling/disabling workaround */
17336 rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
17337 if (rv != 0)
17338 goto out;
17339 rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
17340 dft_ctrl | (1 << 14));
17341 if (rv != 0)
17342 goto out;
17343
17344 if (enable) {
17345 /* Write Rx addresses (rar_entry_count for RAL/H, and
17346 * SHRAL/H) and initial CRC values to the MAC
17347 */
17348 rar_count = wm_rar_count(sc);
17349 for (i = 0; i < rar_count; i++) {
17350 uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
17351 uint32_t addr_high, addr_low;
17352
17353 addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
17354 if (!(addr_high & RAL_AV))
17355 continue;
17356 addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
17357 mac_addr[0] = (addr_low & 0xFF);
17358 mac_addr[1] = ((addr_low >> 8) & 0xFF);
17359 mac_addr[2] = ((addr_low >> 16) & 0xFF);
17360 mac_addr[3] = ((addr_low >> 24) & 0xFF);
17361 mac_addr[4] = (addr_high & 0xFF);
17362 mac_addr[5] = ((addr_high >> 8) & 0xFF);
17363
17364 CSR_WRITE(sc, WMREG_PCH_RAICC(i),
17365 ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
17366 }
17367
17368 /* Write Rx addresses to the PHY */
17369 wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
17370 }
17371
17372 /*
17373 * If enable ==
17374 * true: Enable jumbo frame workaround in the MAC.
17375 * false: Write MAC register values back to h/w defaults.
17376 */
17377 mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
17378 if (enable) {
17379 mac_reg &= ~(1 << 14);
17380 mac_reg |= (7 << 15);
17381 } else
17382 mac_reg &= ~(0xf << 14);
17383 CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
17384
17385 mac_reg = CSR_READ(sc, WMREG_RCTL);
17386 if (enable) {
17387 mac_reg |= RCTL_SECRC;
17388 sc->sc_rctl |= RCTL_SECRC;
17389 sc->sc_flags |= WM_F_CRC_STRIP;
17390 } else {
17391 mac_reg &= ~RCTL_SECRC;
17392 sc->sc_rctl &= ~RCTL_SECRC;
17393 sc->sc_flags &= ~WM_F_CRC_STRIP;
17394 }
17395 CSR_WRITE(sc, WMREG_RCTL, mac_reg);
17396
17397 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
17398 if (rv != 0)
17399 goto out;
17400 if (enable)
17401 data |= 1 << 0;
17402 else
17403 data &= ~(1 << 0);
17404 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
17405 if (rv != 0)
17406 goto out;
17407
17408 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
17409 if (rv != 0)
17410 goto out;
17411 /*
17412 * XXX FreeBSD and Linux do the same thing: they set the same value
17413 * in both the enable case and the disable case. Is that correct?
17414 */
17415 data &= ~(0xf << 8);
17416 data |= (0xb << 8);
17417 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
17418 if (rv != 0)
17419 goto out;
17420
17421 /*
17422 * If enable ==
17423 * true: Enable jumbo frame workaround in the PHY.
17424 * false: Write PHY register values back to h/w defaults.
17425 */
17426 rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
17427 if (rv != 0)
17428 goto out;
17429 data &= ~(0x7F << 5);
17430 if (enable)
17431 data |= (0x37 << 5);
17432 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
17433 if (rv != 0)
17434 goto out;
17435
17436 rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
17437 if (rv != 0)
17438 goto out;
17439 if (enable)
17440 data &= ~(1 << 13);
17441 else
17442 data |= (1 << 13);
17443 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
17444 if (rv != 0)
17445 goto out;
17446
17447 rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
17448 if (rv != 0)
17449 goto out;
17450 data &= ~(0x3FF << 2);
17451 if (enable)
17452 data |= (I82579_TX_PTR_GAP << 2);
17453 else
17454 data |= (0x8 << 2);
17455 rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
17456 if (rv != 0)
17457 goto out;
17458
17459 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
17460 enable ? 0xf100 : 0x7e00);
17461 if (rv != 0)
17462 goto out;
17463
17464 rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
17465 if (rv != 0)
17466 goto out;
17467 if (enable)
17468 data |= 1 << 10;
17469 else
17470 data &= ~(1 << 10);
17471 rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
17472 if (rv != 0)
17473 goto out;
17474
17475 /* Re-enable Rx path after enabling/disabling workaround */
17476 rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
17477 dft_ctrl & ~(1 << 14));
17478
17479 out:
17480 sc->phy.release(sc);
17481
17482 return rv;
17483 }
17484
17485 /*
17486 * wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
17487 * done after every PHY reset.
17488 */
17489 static int
17490 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
17491 {
17492 device_t dev = sc->sc_dev;
17493 int rv;
17494
17495 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17496 device_xname(dev), __func__));
17497 KASSERT(sc->sc_type == WM_T_PCH2);
17498
17499 /* Set MDIO slow mode before any other MDIO access */
17500 rv = wm_set_mdio_slow_mode_hv(sc);
17501 if (rv != 0)
17502 return rv;
17503
17504 rv = sc->phy.acquire(sc);
17505 if (rv != 0)
17506 return rv;
17507 /* Set MSE higher to enable link to stay up when noise is high */
17508 rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
17509 if (rv != 0)
17510 goto release;
17511 /* Drop link after 5 times MSE threshold was reached */
17512 rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
17513 release:
17514 sc->phy.release(sc);
17515
17516 return rv;
17517 }
17518
17519 /**
17520 * wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
17521 * @link: link up bool flag
17522 *
17523 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
17524 * preventing further DMA write requests. Workaround the issue by disabling
17525 * the de-assertion of the clock request when in 1Gbps mode.
17526 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
17527 * speeds in order to avoid Tx hangs.
17528 **/
17529 static int
17530 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
17531 {
17532 uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
17533 uint32_t status = CSR_READ(sc, WMREG_STATUS);
17534 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
17535 uint16_t phyreg;
17536
17537 if (link && (speed == STATUS_SPEED_1000)) {
17538 int rv;
17539
17540 rv = sc->phy.acquire(sc);
17541 if (rv != 0)
17542 return rv;
17543 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17544 &phyreg);
17545 if (rv != 0)
17546 goto release;
17547 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17548 phyreg & ~KUMCTRLSTA_K1_ENABLE);
17549 if (rv != 0)
17550 goto release;
17551 delay(20);
17552 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
17553
17554 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17555 &phyreg);
17556 release:
17557 sc->phy.release(sc);
17558 return rv;
17559 }
17560
17561 fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
17562
17563 struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
17564 if (((child != NULL) && (child->mii_mpd_rev > 5))
17565 || !link
17566 || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
17567 goto update_fextnvm6;
17568
17569 wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
17570
17571 /* Clear link status transmit timeout */
17572 phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
17573 if (speed == STATUS_SPEED_100) {
17574 /* Set inband Tx timeout to 5x10us for 100Half */
17575 phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
17576
17577 /* Do not extend the K1 entry latency for 100Half */
17578 fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
17579 } else {
17580 /* Set inband Tx timeout to 50x10us for 10Full/Half */
17581 phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
17582
17583 /* Extend the K1 entry latency for 10 Mbps */
17584 fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
17585 }
17586
17587 wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
17588
17589 update_fextnvm6:
17590 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
17591 return 0;
17592 }
17593
17594 /*
17595 * wm_k1_gig_workaround_hv - K1 Si workaround
17596 * @sc: pointer to the HW structure
17597 * @link: link up bool flag
17598 *
17599 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
17600 * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
17601 * If link is down, the function will restore the default K1 setting located
17602 * in the NVM.
17603 */
17604 static int
17605 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
17606 {
17607 int k1_enable = sc->sc_nvm_k1_enabled;
17608 int rv;
17609
17610 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17611 device_xname(sc->sc_dev), __func__));
17612
17613 rv = sc->phy.acquire(sc);
17614 if (rv != 0)
17615 return rv;
17616
17617 if (link) {
17618 k1_enable = 0;
17619
17620 /* Link stall fix for link up */
17621 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17622 0x0100);
17623 } else {
17624 /* Link stall fix for link down */
17625 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17626 0x4100);
17627 }
17628
17629 wm_configure_k1_ich8lan(sc, k1_enable);
17630 sc->phy.release(sc);
17631
17632 return 0;
17633 }
17634
17635 /*
17636 * wm_k1_workaround_lv - K1 Si workaround
17637 * @sc: pointer to the HW structure
17638 *
17639 * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
17640 * Disable K1 for 1000 and 100 speeds.
17641 */
17642 static int
17643 wm_k1_workaround_lv(struct wm_softc *sc)
17644 {
17645 uint32_t reg;
17646 uint16_t phyreg;
17647 int rv;
17648
17649 if (sc->sc_type != WM_T_PCH2)
17650 return 0;
17651
17652 /* Set K1 beacon duration based on 10Mbps speed */
17653 rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
17654 if (rv != 0)
17655 return rv;
17656
17657 if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
17658 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
17659 if (phyreg &
17660 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
17661 /* LV 1G/100 Packet drop issue wa */
17662 rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
17663 &phyreg);
17664 if (rv != 0)
17665 return rv;
17666 phyreg &= ~HV_PM_CTRL_K1_ENA;
17667 rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
17668 phyreg);
17669 if (rv != 0)
17670 return rv;
17671 } else {
17672 /* For 10Mbps */
17673 reg = CSR_READ(sc, WMREG_FEXTNVM4);
17674 reg &= ~FEXTNVM4_BEACON_DURATION;
17675 reg |= FEXTNVM4_BEACON_DURATION_16US;
17676 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
17677 }
17678 }
17679
17680 return 0;
17681 }
17682
17683 /*
17684 * wm_link_stall_workaround_hv - Si workaround
17685 * @sc: pointer to the HW structure
17686 *
17687 * This function works around a Si bug where the link partner can get
17688 * a link up indication before the PHY does. If small packets are sent
17689 * by the link partner they can be placed in the packet buffer without
17690 * being properly accounted for by the PHY and will stall preventing
17691 * further packets from being received. The workaround is to clear the
17692 * packet buffer after the PHY detects link up.
17693 */
17694 static int
17695 wm_link_stall_workaround_hv(struct wm_softc *sc)
17696 {
17697 uint16_t phyreg;
17698
17699 if (sc->sc_phytype != WMPHY_82578)
17700 return 0;
17701
17702 /* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
17703 wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
17704 if ((phyreg & BMCR_LOOP) != 0)
17705 return 0;
17706
17707 /* Check if link is up and at 1Gbps */
17708 wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
17709 phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17710 | BM_CS_STATUS_SPEED_MASK;
17711 if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17712 | BM_CS_STATUS_SPEED_1000))
17713 return 0;
17714
17715 delay(200 * 1000); /* XXX too big */
17716
17717 /* Flush the packets in the fifo buffer */
17718 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17719 HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
17720 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17721 HV_MUX_DATA_CTRL_GEN_TO_MAC);
17722
17723 return 0;
17724 }
17725
17726 static int
17727 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
17728 {
17729 int rv;
17730
17731 rv = sc->phy.acquire(sc);
17732 if (rv != 0) {
17733 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
17734 __func__);
17735 return rv;
17736 }
17737
17738 rv = wm_set_mdio_slow_mode_hv_locked(sc);
17739
17740 sc->phy.release(sc);
17741
17742 return rv;
17743 }
17744
17745 static int
17746 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
17747 {
17748 int rv;
17749 uint16_t reg;
17750
17751 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
17752 if (rv != 0)
17753 return rv;
17754
17755 return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
17756 reg | HV_KMRN_MDIO_SLOW);
17757 }
17758
17759 /*
17760 * wm_configure_k1_ich8lan - Configure K1 power state
17761 * @sc: pointer to the HW structure
17762 * @enable: K1 state to configure
17763 *
17764 * Configure the K1 power state based on the provided parameter.
17765 * Assumes semaphore already acquired.
17766 */
17767 static void
17768 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
17769 {
17770 uint32_t ctrl, ctrl_ext, tmp;
17771 uint16_t kmreg;
17772 int rv;
17773
17774 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17775
17776 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
17777 if (rv != 0)
17778 return;
17779
17780 if (k1_enable)
17781 kmreg |= KUMCTRLSTA_K1_ENABLE;
17782 else
17783 kmreg &= ~KUMCTRLSTA_K1_ENABLE;
17784
17785 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
17786 if (rv != 0)
17787 return;
17788
17789 delay(20);
17790
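/*
 * Briefly force the MAC speed with the speed-bypass bit set and then
 * restore CTRL/CTRL_EXT. The reason is not documented here; presumably
 * this forced-speed window lets the new K1 setting take effect.
 */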
17791 ctrl = CSR_READ(sc, WMREG_CTRL);
17792 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
17793
17794 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
17795 tmp |= CTRL_FRCSPD;
17796
17797 CSR_WRITE(sc, WMREG_CTRL, tmp);
17798 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
17799 CSR_WRITE_FLUSH(sc);
17800 delay(20);
17801
17802 CSR_WRITE(sc, WMREG_CTRL, ctrl);
17803 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
17804 CSR_WRITE_FLUSH(sc);
17805 delay(20);
17806
17807 return;
17808 }
17809
17810 /* special case - for 82575 - need to do manual init ... */
17811 static void
17812 wm_reset_init_script_82575(struct wm_softc *sc)
17813 {
17814 /*
17815 * Remark: this is untested code - we have no board without EEPROM
17816 * same setup as mentioned in the FreeBSD driver for the i82575
17817 */
17818
17819 /* SerDes configuration via SERDESCTRL */
17820 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
17821 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
17822 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
17823 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
17824
17825 /* CCM configuration via CCMCTL register */
17826 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
17827 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
17828
17829 /* PCIe lanes configuration */
17830 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
17831 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
17832 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
17833 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
17834
17835 /* PCIe PLL Configuration */
17836 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
17837 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
17838 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
17839 }
17840
17841 static void
17842 wm_reset_mdicnfg_82580(struct wm_softc *sc)
17843 {
17844 uint32_t reg;
17845 uint16_t nvmword;
17846 int rv;
17847
17848 if (sc->sc_type != WM_T_82580)
17849 return;
17850 if ((sc->sc_flags & WM_F_SGMII) == 0)
17851 return;
17852
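/* Mirror the NVM's CFG3 PORTA MDIO routing bits into MDICNFG. */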
17853 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
17854 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
17855 if (rv != 0) {
17856 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
17857 __func__);
17858 return;
17859 }
17860
17861 reg = CSR_READ(sc, WMREG_MDICNFG);
17862 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
17863 reg |= MDICNFG_DEST;
17864 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
17865 reg |= MDICNFG_COM_MDIO;
17866 CSR_WRITE(sc, WMREG_MDICNFG, reg);
17867 }
17868
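/* A PHY ID word of all zeros or all ones means no usable PHY. */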
17869 #define MII_INVALIDID(x) (((x) == 0x0000) || ((x) == 0xffff))
17870
17871 static bool
17872 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
17873 {
17874 uint32_t reg;
17875 uint16_t id1, id2;
17876 int i, rv;
17877
17878 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17879 device_xname(sc->sc_dev), __func__));
17880 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17881
17882 id1 = id2 = 0xffff;
17883 for (i = 0; i < 2; i++) {
17884 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17885 &id1);
17886 if ((rv != 0) || MII_INVALIDID(id1))
17887 continue;
17888 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17889 &id2);
17890 if ((rv != 0) || MII_INVALIDID(id2))
17891 continue;
17892 break;
17893 }
17894 if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
17895 goto out;
17896
17897 /*
17898 * In case the PHY needs to be in mdio slow mode,
17899 * set slow mode and try to get the PHY id again.
17900 */
17901 rv = 0;
17902 if (sc->sc_type < WM_T_PCH_LPT) {
17903 wm_set_mdio_slow_mode_hv_locked(sc);
17904 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17905 &id1);
17906 rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17907 &id2);
17908 }
17909 if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
17910 device_printf(sc->sc_dev, "XXX return with false\n");
17911 return false;
17912 }
17913 out:
17914 if (sc->sc_type >= WM_T_PCH_LPT) {
17915 /* Only unforce SMBus if ME is not active */
17916 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
17917 uint16_t phyreg;
17918
17919 /* Unforce SMBus mode in PHY */
17920 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
17921 CV_SMB_CTRL, &phyreg);
17922 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
17923 wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
17924 CV_SMB_CTRL, phyreg);
17925
17926 /* Unforce SMBus mode in MAC */
17927 reg = CSR_READ(sc, WMREG_CTRL_EXT);
17928 reg &= ~CTRL_EXT_FORCE_SMBUS;
17929 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
17930 }
17931 }
17932 return true;
17933 }
17934
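/*
 * wm_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 * @sc: pointer to the HW structure
 *
 * Toggling LANPHYPC power cycles the PHY; it is used e.g. on resume
 * when the PHY registers cannot be accessed properly (see
 * wm_resume_workarounds_pchlan above).
 */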
17935 static void
17936 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
17937 {
17938 uint32_t reg;
17939 int i;
17940
17941 /* Set PHY Config Counter to 50msec */
17942 reg = CSR_READ(sc, WMREG_FEXTNVM3);
17943 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
17944 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
17945 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
17946
17947 /* Toggle LANPHYPC */
17948 reg = CSR_READ(sc, WMREG_CTRL);
17949 reg |= CTRL_LANPHYPC_OVERRIDE;
17950 reg &= ~CTRL_LANPHYPC_VALUE;
17951 CSR_WRITE(sc, WMREG_CTRL, reg);
17952 CSR_WRITE_FLUSH(sc);
17953 delay(1000);
17954 reg &= ~CTRL_LANPHYPC_OVERRIDE;
17955 CSR_WRITE(sc, WMREG_CTRL, reg);
17956 CSR_WRITE_FLUSH(sc);
17957
17958 if (sc->sc_type < WM_T_PCH_LPT)
17959 delay(50 * 1000);
17960 else {
17961 i = 20;
17962
17963 do {
17964 delay(5 * 1000);
17965 } while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
17966 && i--);
17967
17968 delay(30 * 1000);
17969 }
17970 }
17971
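/*
 * wm_platform_pm_pch_lpt - set platform power management values (LTR/OBFF)
 * @sc: pointer to the HW structure
 * @link: link up bool flag
 *
 * Report the device's maximum tolerable latency to the platform via an
 * LTR message and program the OBFF high water mark to match.
 */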
17972 static int
17973 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
17974 {
17975 uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
17976 | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
17977 uint32_t rxa;
17978 uint16_t scale = 0, lat_enc = 0;
17979 int32_t obff_hwm = 0;
17980 int64_t lat_ns, value;
17981
17982 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17983 device_xname(sc->sc_dev), __func__));
17984
17985 if (link) {
17986 uint16_t max_snoop, max_nosnoop, max_ltr_enc;
17987 uint32_t status;
17988 uint16_t speed;
17989 pcireg_t preg;
17990
17991 status = CSR_READ(sc, WMREG_STATUS);
17992 switch (__SHIFTOUT(status, STATUS_SPEED)) {
17993 case STATUS_SPEED_10:
17994 speed = 10;
17995 break;
17996 case STATUS_SPEED_100:
17997 speed = 100;
17998 break;
17999 case STATUS_SPEED_1000:
18000 speed = 1000;
18001 break;
18002 default:
18003 device_printf(sc->sc_dev, "Unknown speed "
18004 "(status = %08x)\n", status);
18005 return -1;
18006 }
18007
18008 /* Rx Packet Buffer Allocation size (KB) */
18009 rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
18010
18011 /*
18012 * Determine the maximum latency tolerated by the device.
18013 *
18014 * Per the PCIe spec, the tolerated latencies are encoded as
18015 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
18016 * a 10-bit value (0-1023) to provide a range from 1 ns to
18017 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
18018 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
18019 */
18020 lat_ns = ((int64_t)rxa * 1024 -
18021 (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
18022 + ETHER_HDR_LEN))) * 8 * 1000;
18023 if (lat_ns < 0)
18024 lat_ns = 0;
18025 else
18026 lat_ns /= speed;
18027 value = lat_ns;
18028
18029 while (value > LTRV_VALUE) {
18030 scale++;
18031 value = howmany(value, __BIT(5));
18032 }
18033 if (scale > LTRV_SCALE_MAX) {
18034 device_printf(sc->sc_dev,
18035 "Invalid LTR latency scale %d\n", scale);
18036 return -1;
18037 }
18038 lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
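/*
 * Worked example (hypothetical numbers): with rxa = 24KB,
 * MTU = 1500 (so mtu + ETHER_HDR_LEN = 1514) and speed = 100:
 *   lat_ns = (24 * 1024 - 2 * 1514) * 8 * 1000 / 100
 *          = 1723840ns
 * The loop above then yields value = 53 and scale = 3
 * (2^15ns units), so lat_enc encodes roughly 1.74ms.
 */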
18039
18040 /* Determine the maximum latency tolerated by the platform */
18041 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
18042 WM_PCI_LTR_CAP_LPT);
18043 max_snoop = preg & 0xffff;
18044 max_nosnoop = preg >> 16;
18045
18046 max_ltr_enc = MAX(max_snoop, max_nosnoop);
18047
18048 if (lat_enc > max_ltr_enc) {
18049 lat_enc = max_ltr_enc;
18050 lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
18051 * PCI_LTR_SCALETONS(
18052 __SHIFTOUT(lat_enc,
18053 PCI_LTR_MAXSNOOPLAT_SCALE));
18054 }
18055
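/*
 * Convert the (possibly clamped) latency back into the number of
 * KB of Rx buffer that fill at line rate during that time; what
 * remains of rxa becomes the OBFF high water mark.
 */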
18056 if (lat_ns) {
18057 lat_ns *= speed * 1000;
18058 lat_ns /= 8;
18059 lat_ns /= 1000000000;
18060 obff_hwm = (int32_t)(rxa - lat_ns);
18061 }
18062 if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
18063 device_printf(sc->sc_dev, "Invalid high water mark %d "
18064 "(rxa = %d, lat_ns = %d)\n",
18065 obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
18066 return -1;
18067 }
18068 }
18069 /* Snoop and No-Snoop latencies the same */
18070 reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
18071 CSR_WRITE(sc, WMREG_LTRV, reg);
18072
18073 /* Set OBFF high water mark */
18074 reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
18075 reg |= obff_hwm;
18076 CSR_WRITE(sc, WMREG_SVT, reg);
18077
18078 /* Enable OBFF */
18079 reg = CSR_READ(sc, WMREG_SVCR);
18080 reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
18081 CSR_WRITE(sc, WMREG_SVCR, reg);
18082
18083 return 0;
18084 }
18085
18086 /*
18087 * I210 Errata 25 and I211 Errata 10
18088 * Slow System Clock.
18089 *
18090 * Note that this function is called in both the FLASH and iNVM cases on NetBSD.
18091 */
18092 static int
18093 wm_pll_workaround_i210(struct wm_softc *sc)
18094 {
18095 uint32_t mdicnfg, wuc;
18096 uint32_t reg;
18097 pcireg_t pcireg;
18098 uint32_t pmreg;
18099 uint16_t nvmword, tmp_nvmword;
18100 uint16_t phyval;
18101 bool wa_done = false;
18102 int i, rv = 0;
18103
18104 /* Get Power Management cap offset */
18105 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
18106 &pmreg, NULL) == 0)
18107 return -1;
18108
18109 /* Save WUC and MDICNFG registers */
18110 wuc = CSR_READ(sc, WMREG_WUC);
18111 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
18112
18113 reg = mdicnfg & ~MDICNFG_DEST;
18114 CSR_WRITE(sc, WMREG_MDICNFG, reg);
18115
18116 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
18117 /*
18118 * The default value of the Initialization Control Word 1
18119 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
18120 */
18121 nvmword = INVM_DEFAULT_AL;
18122 }
18123 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
18124
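/*
 * Check whether the PHY PLL came up configured; if not, reset the
 * internal PHY and bounce the device through D3/D0 with the
 * workaround autoload value, retrying up to WM_MAX_PLL_TRIES times.
 */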
18125 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
18126 wm_gmii_gs40g_readreg(sc->sc_dev, 1,
18127 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
18128
18129 if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
18130 rv = 0;
18131 break; /* OK */
18132 } else
18133 rv = -1;
18134
18135 wa_done = true;
18136 /* Directly reset the internal PHY */
18137 reg = CSR_READ(sc, WMREG_CTRL);
18138 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
18139
18140 reg = CSR_READ(sc, WMREG_CTRL_EXT);
18141 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
18142 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
18143
18144 CSR_WRITE(sc, WMREG_WUC, 0);
18145 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
18146 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
18147
18148 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
18149 pmreg + PCI_PMCSR);
18150 pcireg |= PCI_PMCSR_STATE_D3;
18151 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
18152 pmreg + PCI_PMCSR, pcireg);
18153 delay(1000);
18154 pcireg &= ~PCI_PMCSR_STATE_D3;
18155 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
18156 pmreg + PCI_PMCSR, pcireg);
18157
18158 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
18159 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
18160
18161 /* Restore WUC register */
18162 CSR_WRITE(sc, WMREG_WUC, wuc);
18163 }
18164
18165 /* Restore MDICNFG setting */
18166 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
18167 if (wa_done)
18168 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
18169 return rv;
18170 }
18171
18172 static void
18173 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
18174 {
18175 uint32_t reg;
18176
18177 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
18178 device_xname(sc->sc_dev), __func__));
18179 KASSERT((sc->sc_type == WM_T_PCH_SPT)
18180 || (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP));
18181
18182 reg = CSR_READ(sc, WMREG_FEXTNVM7);
18183 reg |= FEXTNVM7_SIDE_CLK_UNGATE;
18184 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
18185
18186 reg = CSR_READ(sc, WMREG_FEXTNVM9);
18187 reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
18188 CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
18189 }
18190
18191 /* Sysctl functions */
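/*
 * The TDH/TDT handlers below are read-only: each copies the live
 * hardware register into a stack variable and points node.sysctl_data
 * at it before calling sysctl_lookup(), so every read reports the
 * current descriptor head/tail.
 */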
18192 static int
18193 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
18194 {
18195 struct sysctlnode node = *rnode;
18196 struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
18197 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
18198 struct wm_softc *sc = txq->txq_sc;
18199 uint32_t reg;
18200
18201 reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
18202 node.sysctl_data = &reg;
18203 return sysctl_lookup(SYSCTLFN_CALL(&node));
18204 }
18205
18206 static int
18207 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
18208 {
18209 struct sysctlnode node = *rnode;
18210 struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
18211 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
18212 struct wm_softc *sc = txq->txq_sc;
18213 uint32_t reg;
18214
18215 reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
18216 node.sysctl_data = &reg;
18217 return sysctl_lookup(SYSCTLFN_CALL(&node));
18218 }
18219
18220 #ifdef WM_DEBUG
18221 static int
18222 wm_sysctl_debug(SYSCTLFN_ARGS)
18223 {
18224 struct sysctlnode node = *rnode;
18225 struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
18226 uint32_t dflags;
18227 int error;
18228
18229 dflags = sc->sc_debug;
18230 node.sysctl_data = &dflags;
18231 error = sysctl_lookup(SYSCTLFN_CALL(&node));
18232
18233 if (error || newp == NULL)
18234 return error;
18235
18236 sc->sc_debug = dflags;
18237 device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
18238 device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
18239
18240 return 0;
18241 }
18242 #endif
18243