/*	$NetBSD: if_wm.c,v 1.757 2022/08/08 08:55:42 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy-Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.757 2022/08/08 08:55:42 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define	WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			\
	do {					\
		if ((sc)->sc_debug & (x))	\
			printf y;		\
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
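
/*
 * Illustrative sketch (not part of the original file): each DPRINTF()
 * call site picks one WM_DEBUG_* bit and passes a fully parenthesized
 * printf argument list, so the statement compiles away to __nothing
 * when WM_DEBUG is not defined:
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 */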

#ifdef NET_MPSAFE
#define	WM_MPSAFE		1
#define	WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define	WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define	WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define	WM_CALLOUT_FLAGS	0
#define	WM_SOFTINT_FLAGS	0
#define	WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define	WM_WORKQUEUE_PRI	PRI_SOFTNET

/*
 * The maximum number of interrupts this driver uses.
 */
#define	WM_MAX_NQUEUEINTR	16
#define	WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define	WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
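
/*
 * Since WM_NTXDESC(txq) and WM_TXQUEUELEN(txq) are powers of two, the
 * WM_NEXT*() macros above wrap ring indices with a cheap AND instead
 * of a modulo.  Illustrative arithmetic (values assumed, not from this
 * file): with WM_NTXDESC == 4096,
 *
 *	WM_NEXTTX(txq, 4095) == (4095 + 1) & 4095 == 0
 *	WM_NEXTTX(txq,  100) == ( 100 + 1) & 4095 == 101
 */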

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized packet.
 * We allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
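
/*
 * The "5 Rx buffers" figure above is just cluster arithmetic (a
 * sketch; the 9018-byte frame size is an assumption for a typical
 * jumbo MTU): howmany(9018, MCLBYTES) == howmany(9018, 2048) == 5, so
 * 256 descriptors leave room for about 256 / 5 ~= 51 in-flight jumbo
 * packets, i.e. the 50 quoted above plus a spare.
 */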

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
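
/*
 * WM_CDTXOFF()/WM_CDRXOFF() turn a ring index into a byte offset
 * within the descriptor DMA area.  Illustrative example (assuming the
 * 16-byte legacy descriptor, i.e. sizeof(wiseman_txdesc_t) == 16):
 * index 5 maps to byte offset 16 * 5 == 80.
 */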

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define	WM_EVENT_COUNTERS	1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define	WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define	WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define	WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define	WM_Q_EVCNT_DETACH(qname, evname, q, qnum)			\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
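
/*
 * A sketch of how the macros above expand, with hypothetical
 * arguments: WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * Note that ## does not paste inside a string literal, so the name
 * buffer is simply sized by that 18-byte template, which is long
 * enough for the "%s%02d%s" names such as "txq00txdw" that
 * WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname, EVCNT_TYPE_INTR)
 * snprintf()s into it.
 */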

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t txq_last_hw_cmd;
	uint8_t txq_last_hw_fields;
	uint16_t txq_last_hw_ipcs;
	uint16_t txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums computed */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums computed */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums computed */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
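
/*
 * The op vectors above let chip-independent code follow an
 * acquire/access/release discipline without knowing which semaphore
 * scheme a given MAC needs.  A minimal sketch of the calling pattern
 * (hypothetical helper, not this driver's actual code):
 *
 *	static int
 *	example_nvm_read(struct wm_softc *sc, int off, int cnt,
 *	    uint16_t *data)
 *	{
 *		int rv;
 *
 *		if ((rv = sc->nvm.acquire(sc)) != 0)
 *			return rv;
 *		rv = sc->nvm.read(sc, off, cnt, data);
 *		sc->nvm.release(sc);
 *		return rv;
 *	}
 */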

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and MSI use sc_intrs[0] only;
					 * MSI-X uses sc_intrs[0] to
					 *   sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_sec;		/* Sequence Error */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx Count */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	struct evcnt sc_ev_icrxdmtc;	/* Intr. Cause Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */

#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

#define	WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define	WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define	WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
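
/*
 * Functions that require the core lock can assert it with, e.g.
 * (illustrative):
 *
 *	KASSERT(WM_CORE_LOCKED(sc));
 *
 * WM_CORE_LOCKED() deliberately evaluates true when sc_core_lock has
 * not been allocated, so such assertions also hold early in attach.
 */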

#define	WM_RXCHAIN_RESET(rxq)						\
	do {								\
		(rxq)->rxq_tailp = &(rxq)->rxq_head;			\
		*(rxq)->rxq_tailp = NULL;				\
		(rxq)->rxq_len = 0;					\
	} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
	do {								\
		*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);		\
		(rxq)->rxq_tailp = &(m)->m_next;			\
	} while (/*CONSTCOND*/0)
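
/*
 * rxq_tailp always points at the m_next field of the last mbuf in the
 * chain (or at rxq_head when the chain is empty), so WM_RXCHAIN_LINK()
 * appends in O(1) without walking the chain.  Sketch of the links
 * after two WM_RXCHAIN_LINK() calls, assuming each appended mbuf
 * arrives with m_next == NULL:
 *
 *	rxq_head -> m1 -> m2 -> NULL
 *	rxq_tailp == &m2->m_next
 */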

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define	WM_Q_EVCNT_INCR(qname, evname)					\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define	WM_Q_EVCNT_ADD(qname, evname, val)				\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define	WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define	WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */
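
/*
 * Note on the __HAVE_ATOMIC64_LOADSTORE variants above: they use a
 * relaxed load/store pair rather than a locked read-modify-write.
 * That is sufficient (our reading of the code, not a comment from the
 * original) because each counter is only written from its own queue
 * context; the atomics merely keep readers such as vmstat(1) -e from
 * observing a torn 64-bit value.
 */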

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
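
/*
 * CSR_WRITE_FLUSH() forces posted writes out to the chip by issuing a
 * harmless read of the STATUS register.  Illustrative pattern:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10);
 */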

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
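
/*
 * The _LO/_HI pairs split a ring's bus address for the chip's 64-bit
 * base-address register pairs; with a 32-bit bus_addr_t the high half
 * constant-folds to 0.  Illustrative use when programming a Tx ring
 * base (register names assumed from if_wmreg.h):
 *
 *	CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
 *	CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
 */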

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions:
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int wm_match(device_t, cfdata_t, void *);
static void wm_attach(device_t, device_t, void *);
static int wm_detach(device_t, int);
static bool wm_suspend(device_t, const pmf_qual_t *);
static bool wm_resume(device_t, const pmf_qual_t *);
static void wm_watchdog(struct ifnet *);
static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void wm_tick(void *);
static int wm_ifflags_cb(struct ethercom *);
static int wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
static int wm_rar_count(struct wm_softc *);
static void wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void wm_set_vlan(struct wm_softc *);
static void wm_set_pcie_completion_timeout(struct wm_softc *);
static void wm_get_auto_rd_done(struct wm_softc *);
static void wm_lan_init_done(struct wm_softc *);
static void wm_get_cfg_done(struct wm_softc *);
static int wm_phy_post_reset(struct wm_softc *);
static int wm_write_smbus_addr(struct wm_softc *);
static int wm_init_lcd_from_nvm(struct wm_softc *);
static int wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t wm_rxpbs_adjust_82580(uint32_t);
static int wm_reset_phy(struct wm_softc *);
static void wm_flush_desc_rings(struct wm_softc *);
static void wm_reset(struct wm_softc *);
static int wm_add_rxbuf(struct wm_rxqueue *, int);
static void wm_rxdrain(struct wm_rxqueue *);
static void wm_init_rss(struct wm_softc *);
static void wm_adjust_qnum(struct wm_softc *, int);
static inline bool wm_is_using_msix(struct wm_softc *);
static inline bool wm_is_using_multiqueue(struct wm_softc *);
static int wm_softint_establish_queue(struct wm_softc *, int, int);
static int wm_setup_legacy(struct wm_softc *);
static int wm_setup_msix(struct wm_softc *);
static int wm_init(struct ifnet *);
static int wm_init_locked(struct ifnet *);
static void wm_init_sysctls(struct wm_softc *);
static void wm_unset_stopping_flags(struct wm_softc *);
static void wm_set_stopping_flags(struct wm_softc *);
static void wm_stop(struct ifnet *, int);
static void wm_stop_locked(struct ifnet *, bool, bool);
static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void wm_82547_txfifo_stall(void *);
static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int wm_alloc_txrx_queues(struct wm_softc *);
static void wm_free_txrx_queues(struct wm_softc *);
static int wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int wm_select_txqueue(struct ifnet *, struct mbuf *);
static void wm_start(struct ifnet *);
static void wm_start_locked(struct ifnet *);
static int wm_transmit(struct ifnet *, struct mbuf *);
static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void wm_nq_start(struct ifnet *);
static void wm_nq_start_locked(struct ifnet *);
static int wm_nq_transmit(struct ifnet *, struct mbuf *);
static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void wm_deferred_start_locked(struct wm_txqueue *);
static void wm_handle_queue(void *);
static void wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool wm_txeof(struct wm_txqueue *, u_int);
static bool wm_rxeof(struct wm_rxqueue *, u_int);
static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void wm_linkintr(struct wm_softc *, uint32_t);
static int wm_intr_legacy(void *);
static inline void wm_txrxintr_disable(struct wm_queue *);
static inline void wm_txrxintr_enable(struct wm_queue *);
static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int wm_txrxintr_msix(void *);
static int wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void wm_gmii_reset(struct wm_softc *);
static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int wm_get_phy_id_82575(struct wm_softc *);
static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int wm_gmii_mediachange(struct ifnet *);
static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t wm_i82543_mii_recvbits(struct wm_softc *);
static int wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    bool);
static int wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions access Kumeran-specific registers rather than MII
 * registers.
 */
static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool wm_sgmii_uses_mdio(struct wm_softc *);
static void wm_sgmii_sfp_preconfig(struct wm_softc *);
static int wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int wm_sgmii_writereg(device_t, int, int, uint16_t);
static int wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void wm_tbi_mediainit(struct wm_softc *);
static int wm_tbi_mediachange(struct ifnet *);
static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int wm_check_for_link(struct wm_softc *);
static void wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void wm_serdes_power_up_link_82575(struct wm_softc *);
static int wm_serdes_mediachange(struct ifnet *);
static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (with or without EERD) and Flash.
 */
/* Misc functions */
static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int wm_nvm_ready_spi(struct wm_softc *);
static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t wm_ich8_cycle_init(struct wm_softc *);
static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and read */
static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int wm_nvm_flash_presence_i210(struct wm_softc *);
static int wm_nvm_validate_checksum(struct wm_softc *);
static void wm_nvm_version_invm(struct wm_softc *);
static void wm_nvm_version(struct wm_softc *);
static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int wm_get_null(struct wm_softc *);
static void wm_put_null(struct wm_softc *);
static int wm_get_eecd(struct wm_softc *);
static void wm_put_eecd(struct wm_softc *);
static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void wm_put_swsm_semaphore(struct wm_softc *);
static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int wm_get_nvm_80003(struct wm_softc *);
static void wm_put_nvm_80003(struct wm_softc *);
static int wm_get_nvm_82571(struct wm_softc *);
static void wm_put_nvm_82571(struct wm_softc *);
static int wm_get_phy_82575(struct wm_softc *);
static void wm_put_phy_82575(struct wm_softc *);
static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void wm_put_swfwhw_semaphore(struct wm_softc *);
static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
static void wm_put_swflag_ich8lan(struct wm_softc *);
static int wm_get_nvm_ich8lan(struct wm_softc *);
static void wm_put_nvm_ich8lan(struct wm_softc *);
static int wm_get_hw_semaphore_82573(struct wm_softc *);
static void wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int wm_check_mng_mode(struct wm_softc *);
static int wm_check_mng_mode_ich8lan(struct wm_softc *);
static int wm_check_mng_mode_82574(struct wm_softc *);
static int wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int wm_enable_mng_pass_thru(struct wm_softc *);
static bool wm_phy_resetisblocked(struct wm_softc *);
static void wm_get_hw_control(struct wm_softc *);
static void wm_release_hw_control(struct wm_softc *);
static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void wm_init_manageability(struct wm_softc *);
static void wm_release_manageability(struct wm_softc *);
static void wm_get_wakeup(struct wm_softc *);
static int wm_ulp_disable(struct wm_softc *);
static int wm_enable_phy_wakeup(struct wm_softc *);
static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int wm_resume_workarounds_pchlan(struct wm_softc *);
static void wm_enable_wakeup(struct wm_softc *);
static void wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int wm_set_eee_i350(struct wm_softc *);
static int wm_set_eee_pchlan(struct wm_softc *);
static int wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds are in the PHY drivers themselves.
 */
static int wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int wm_k1_workaround_lv(struct wm_softc *);
static int wm_link_stall_workaround_hv(struct wm_softc *);
static int wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void wm_configure_k1_ich8lan(struct wm_softc *, int);
static void wm_reset_init_script_82575(struct wm_softc *);
static void wm_reset_mdicnfg_82580(struct wm_softc *);
static bool wm_phy_is_accessible_pchlan(struct wm_softc *);
static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int wm_pll_workaround_i210(struct wm_softc *);
static void wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool wm_phy_need_linkdown_discard(struct wm_softc *);
static void wm_set_linkdown_discard(struct wm_softc *);
static void wm_clear_linkdown_discard(struct wm_softc *);

static int wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define	WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1478 "PCH LAN (82578DM) Controller",
1479 WM_T_PCH, WMP_F_COPPER },
1480 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
1481 "PCH LAN (82578DC) Controller",
1482 WM_T_PCH, WMP_F_COPPER },
1483 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM,
1484 "PCH2 LAN (82579LM) Controller",
1485 WM_T_PCH2, WMP_F_COPPER },
1486 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V,
1487 "PCH2 LAN (82579V) Controller",
1488 WM_T_PCH2, WMP_F_COPPER },
1489 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
1490 "82575EB dual-1000baseT Ethernet",
1491 WM_T_82575, WMP_F_COPPER },
1492 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1493 "82575EB dual-1000baseX Ethernet (SERDES)",
1494 WM_T_82575, WMP_F_SERDES },
1495 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1496 "82575GB quad-1000baseT Ethernet",
1497 WM_T_82575, WMP_F_COPPER },
1498 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1499 "82575GB quad-1000baseT Ethernet (PM)",
1500 WM_T_82575, WMP_F_COPPER },
1501 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
1502 "82576 1000BaseT Ethernet",
1503 WM_T_82576, WMP_F_COPPER },
1504 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
1505 "82576 1000BaseX Ethernet",
1506 WM_T_82576, WMP_F_FIBER },
1507
1508 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
1509 "82576 gigabit Ethernet (SERDES)",
1510 WM_T_82576, WMP_F_SERDES },
1511
1512 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1513 "82576 quad-1000BaseT Ethernet",
1514 WM_T_82576, WMP_F_COPPER },
1515
1516 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1517 "82576 Gigabit ET2 Quad Port Server Adapter",
1518 WM_T_82576, WMP_F_COPPER },
1519
1520 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
1521 "82576 gigabit Ethernet",
1522 WM_T_82576, WMP_F_COPPER },
1523
1524 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
1525 "82576 gigabit Ethernet (SERDES)",
1526 WM_T_82576, WMP_F_SERDES },
1527 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1528 "82576 quad-gigabit Ethernet (SERDES)",
1529 WM_T_82576, WMP_F_SERDES },
1530
1531 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
1532 "82580 1000BaseT Ethernet",
1533 WM_T_82580, WMP_F_COPPER },
1534 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
1535 "82580 1000BaseX Ethernet",
1536 WM_T_82580, WMP_F_FIBER },
1537
1538 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
1539 "82580 1000BaseT Ethernet (SERDES)",
1540 WM_T_82580, WMP_F_SERDES },
1541
1542 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
1543 "82580 gigabit Ethernet (SGMII)",
1544 WM_T_82580, WMP_F_COPPER },
1545 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1546 "82580 dual-1000BaseT Ethernet",
1547 WM_T_82580, WMP_F_COPPER },
1548
1549 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1550 "82580 quad-1000BaseX Ethernet",
1551 WM_T_82580, WMP_F_FIBER },
1552
1553 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1554 "DH89XXCC Gigabit Ethernet (SGMII)",
1555 WM_T_82580, WMP_F_COPPER },
1556
1557 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1558 "DH89XXCC Gigabit Ethernet (SERDES)",
1559 WM_T_82580, WMP_F_SERDES },
1560
1561 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1562 "DH89XXCC 1000BASE-KX Ethernet",
1563 WM_T_82580, WMP_F_SERDES },
1564
1565 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1566 "DH89XXCC Gigabit Ethernet (SFP)",
1567 WM_T_82580, WMP_F_SERDES },
1568
1569 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER,
1570 "I350 Gigabit Network Connection",
1571 WM_T_I350, WMP_F_COPPER },
1572
1573 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER,
1574 "I350 Gigabit Fiber Network Connection",
1575 WM_T_I350, WMP_F_FIBER },
1576
1577 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1578 "I350 Gigabit Backplane Connection",
1579 WM_T_I350, WMP_F_SERDES },
1580
1581 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
1582 "I350 Quad Port Gigabit Ethernet",
1583 WM_T_I350, WMP_F_SERDES },
1584
1585 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1586 "I350 Gigabit Connection",
1587 WM_T_I350, WMP_F_COPPER },
1588
1589 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
1590 "I354 Gigabit Ethernet (KX)",
1591 WM_T_I354, WMP_F_SERDES },
1592
1593 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1594 "I354 Gigabit Ethernet (SGMII)",
1595 WM_T_I354, WMP_F_COPPER },
1596
1597 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
1598 "I354 Gigabit Ethernet (2.5G)",
1599 WM_T_I354, WMP_F_COPPER },
1600
1601 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1602 "I210-T1 Ethernet Server Adapter",
1603 WM_T_I210, WMP_F_COPPER },
1604
1605 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1606 "I210 Ethernet (Copper OEM)",
1607 WM_T_I210, WMP_F_COPPER },
1608
1609 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1610 "I210 Ethernet (Copper IT)",
1611 WM_T_I210, WMP_F_COPPER },
1612
1613 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1614 "I210 Ethernet (Copper, FLASH less)",
1615 WM_T_I210, WMP_F_COPPER },
1616
1617 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1618 "I210 Gigabit Ethernet (Fiber)",
1619 WM_T_I210, WMP_F_FIBER },
1620
1621 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1622 "I210 Gigabit Ethernet (SERDES)",
1623 WM_T_I210, WMP_F_SERDES },
1624
1625 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1626 "I210 Gigabit Ethernet (SERDES, FLASH less)",
1627 WM_T_I210, WMP_F_SERDES },
1628
1629 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1630 "I210 Gigabit Ethernet (SGMII)",
1631 WM_T_I210, WMP_F_COPPER },
1632
1633 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1634 "I210 Gigabit Ethernet (SGMII, FLASH less)",
1635 WM_T_I210, WMP_F_COPPER },
1636
1637 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1638 "I211 Ethernet (COPPER)",
1639 WM_T_I211, WMP_F_COPPER },
1640 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1641 "I217 V Ethernet Connection",
1642 WM_T_PCH_LPT, WMP_F_COPPER },
1643 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1644 "I217 LM Ethernet Connection",
1645 WM_T_PCH_LPT, WMP_F_COPPER },
1646 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1647 "I218 V Ethernet Connection",
1648 WM_T_PCH_LPT, WMP_F_COPPER },
1649 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
1650 "I218 V Ethernet Connection",
1651 WM_T_PCH_LPT, WMP_F_COPPER },
1652 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
1653 "I218 V Ethernet Connection",
1654 WM_T_PCH_LPT, WMP_F_COPPER },
1655 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1656 "I218 LM Ethernet Connection",
1657 WM_T_PCH_LPT, WMP_F_COPPER },
1658 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
1659 "I218 LM Ethernet Connection",
1660 WM_T_PCH_LPT, WMP_F_COPPER },
1661 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
1662 "I218 LM Ethernet Connection",
1663 WM_T_PCH_LPT, WMP_F_COPPER },
1664 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM,
1665 "I219 LM Ethernet Connection",
1666 WM_T_PCH_SPT, WMP_F_COPPER },
1667 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2,
1668 "I219 LM (2) Ethernet Connection",
1669 WM_T_PCH_SPT, WMP_F_COPPER },
1670 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3,
1671 "I219 LM (3) Ethernet Connection",
1672 WM_T_PCH_SPT, WMP_F_COPPER },
1673 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4,
1674 "I219 LM (4) Ethernet Connection",
1675 WM_T_PCH_SPT, WMP_F_COPPER },
1676 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5,
1677 "I219 LM (5) Ethernet Connection",
1678 WM_T_PCH_SPT, WMP_F_COPPER },
1679 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM6,
1680 "I219 LM (6) Ethernet Connection",
1681 WM_T_PCH_CNP, WMP_F_COPPER },
1682 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM7,
1683 "I219 LM (7) Ethernet Connection",
1684 WM_T_PCH_CNP, WMP_F_COPPER },
1685 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM8,
1686 "I219 LM (8) Ethernet Connection",
1687 WM_T_PCH_CNP, WMP_F_COPPER },
1688 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM9,
1689 "I219 LM (9) Ethernet Connection",
1690 WM_T_PCH_CNP, WMP_F_COPPER },
1691 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM10,
1692 "I219 LM (10) Ethernet Connection",
1693 WM_T_PCH_CNP, WMP_F_COPPER },
1694 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM11,
1695 "I219 LM (11) Ethernet Connection",
1696 WM_T_PCH_CNP, WMP_F_COPPER },
1697 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM12,
1698 "I219 LM (12) Ethernet Connection",
1699 WM_T_PCH_SPT, WMP_F_COPPER },
1700 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM13,
1701 "I219 LM (13) Ethernet Connection",
1702 WM_T_PCH_CNP, WMP_F_COPPER },
1703 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM14,
1704 "I219 LM (14) Ethernet Connection",
1705 WM_T_PCH_CNP, WMP_F_COPPER },
1706 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM15,
1707 "I219 LM (15) Ethernet Connection",
1708 WM_T_PCH_CNP, WMP_F_COPPER },
1709 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM16,
1710 "I219 LM (16) Ethernet Connection",
1711 WM_T_PCH_CNP, WMP_F_COPPER },
1712 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM17,
1713 "I219 LM (17) Ethernet Connection",
1714 WM_T_PCH_CNP, WMP_F_COPPER },
1715 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM18,
1716 "I219 LM (18) Ethernet Connection",
1717 WM_T_PCH_CNP, WMP_F_COPPER },
1718 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM19,
1719 "I219 LM (19) Ethernet Connection",
1720 WM_T_PCH_CNP, WMP_F_COPPER },
1721 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V,
1722 "I219 V Ethernet Connection",
1723 WM_T_PCH_SPT, WMP_F_COPPER },
1724 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2,
1725 "I219 V (2) Ethernet Connection",
1726 WM_T_PCH_SPT, WMP_F_COPPER },
1727 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4,
1728 "I219 V (4) Ethernet Connection",
1729 WM_T_PCH_SPT, WMP_F_COPPER },
1730 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5,
1731 "I219 V (5) Ethernet Connection",
1732 WM_T_PCH_SPT, WMP_F_COPPER },
1733 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6,
1734 "I219 V (6) Ethernet Connection",
1735 WM_T_PCH_CNP, WMP_F_COPPER },
1736 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7,
1737 "I219 V (7) Ethernet Connection",
1738 WM_T_PCH_CNP, WMP_F_COPPER },
1739 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V8,
1740 "I219 V (8) Ethernet Connection",
1741 WM_T_PCH_CNP, WMP_F_COPPER },
1742 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V9,
1743 "I219 V (9) Ethernet Connection",
1744 WM_T_PCH_CNP, WMP_F_COPPER },
1745 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V10,
1746 "I219 V (10) Ethernet Connection",
1747 WM_T_PCH_CNP, WMP_F_COPPER },
1748 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V11,
1749 "I219 V (11) Ethernet Connection",
1750 WM_T_PCH_CNP, WMP_F_COPPER },
1751 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V12,
1752 "I219 V (12) Ethernet Connection",
1753 WM_T_PCH_SPT, WMP_F_COPPER },
1754 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V13,
1755 "I219 V (13) Ethernet Connection",
1756 WM_T_PCH_CNP, WMP_F_COPPER },
1757 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V14,
1758 "I219 V (14) Ethernet Connection",
1759 WM_T_PCH_CNP, WMP_F_COPPER },
1760 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V15,
1761 "I219 V (15) Ethernet Connection",
1762 WM_T_PCH_CNP, WMP_F_COPPER },
1763 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V16,
1764 "I219 V (16) Ethernet Connection",
1765 WM_T_PCH_CNP, WMP_F_COPPER },
1766 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V17,
1767 "I219 V (17) Ethernet Connection",
1768 WM_T_PCH_CNP, WMP_F_COPPER },
1769 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V18,
1770 "I219 V (18) Ethernet Connection",
1771 WM_T_PCH_CNP, WMP_F_COPPER },
1772 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V19,
1773 "I219 V (19) Ethernet Connection",
1774 WM_T_PCH_CNP, WMP_F_COPPER },
1775 { 0, 0,
1776 NULL,
1777 0, 0 },
1778 };
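/*
 * The table above is terminated by an all-zero sentinel entry;
 * wm_lookup() below walks it until it finds wmp_name == NULL.
 */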
1779
1780 /*
1781 * Register read/write functions.
1782 * Other than CSR_{READ|WRITE}().
1783 */
1784
1785 #if 0 /* Not currently used */
1786 static inline uint32_t
1787 wm_io_read(struct wm_softc *sc, int reg)
1788 {
1789
1790 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1791 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1792 }
1793 #endif
1794
1795 static inline void
1796 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1797 {
1798
1799 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1800 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1801 }
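/*
 * wm_io_read() and wm_io_write() implement the I/O-mapped indirect
 * register access described further below: the target register offset
 * is first written at offset 0 of the I/O BAR, and the data is then
 * read from, or written to, offset 4.
 */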
1802
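/*
 * wm_82575_write_8bit_ctlr_reg() writes one byte to a register of a
 * device behind an 8-bit control interface: the data byte and register
 * offset are packed into a single control-register write, and the loop
 * then polls SCTL_CTL_READY in 5us steps, up to SCTL_CTL_POLL_TIMEOUT
 * iterations, until the hardware has completed the access.  A caller
 * would, for example (arguments illustrative), do:
 *
 *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, off, data);
 */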
1803 static inline void
1804 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1805 uint32_t data)
1806 {
1807 uint32_t regval;
1808 int i;
1809
1810 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1811
1812 CSR_WRITE(sc, reg, regval);
1813
1814 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1815 delay(5);
1816 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1817 break;
1818 }
1819 if (i == SCTL_CTL_POLL_TIMEOUT) {
1820 aprint_error("%s: WARNING:"
1821 " i82575 reg 0x%08x setup did not indicate ready\n",
1822 device_xname(sc->sc_dev), reg);
1823 }
1824 }
1825
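/*
 * wm_set_dma_addr() stores a bus address into the two little-endian
 * 32-bit halves of a descriptor address field.  Illustrative example:
 * v = 0x123456780 yields wa_low = htole32(0x23456780) and
 * wa_high = htole32(0x00000001).
 */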
1826 static inline void
1827 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1828 {
1829 wa->wa_low = htole32(BUS_ADDR_LO32(v));
1830 wa->wa_high = htole32(BUS_ADDR_HI32(v));
1831 }
1832
1833 /*
1834 * Descriptor sync/init functions.
1835 */
1836 static inline void
1837 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1838 {
1839 struct wm_softc *sc = txq->txq_sc;
1840
1841 /* If it will wrap around, sync to the end of the ring. */
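	/*
	 * Illustrative example: with WM_NTXDESC(txq) == 256, start == 250
	 * and num == 10, the first sync below covers descriptors 250..255
	 * and the second sync covers descriptors 0..3.
	 */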
1842 if ((start + num) > WM_NTXDESC(txq)) {
1843 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1844 WM_CDTXOFF(txq, start), txq->txq_descsize *
1845 (WM_NTXDESC(txq) - start), ops);
1846 num -= (WM_NTXDESC(txq) - start);
1847 start = 0;
1848 }
1849
1850 /* Now sync whatever is left. */
1851 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1852 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1853 }
1854
1855 static inline void
1856 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1857 {
1858 struct wm_softc *sc = rxq->rxq_sc;
1859
1860 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1861 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1862 }
1863
1864 static inline void
1865 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1866 {
1867 struct wm_softc *sc = rxq->rxq_sc;
1868 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1869 struct mbuf *m = rxs->rxs_mbuf;
1870
1871 /*
1872 * Note: We scoot the packet forward 2 bytes in the buffer
1873 * so that the payload after the Ethernet header is aligned
1874 	 * to a 4-byte boundary (14-byte Ethernet header + 2 == 16).
1875 	 *
1876 * XXX BRAINDAMAGE ALERT!
1877 * The stupid chip uses the same size for every buffer, which
1878 * is set in the Receive Control register. We are using the 2K
1879 * size option, but what we REALLY want is (2K - 2)! For this
1880 * reason, we can't "scoot" packets longer than the standard
1881 * Ethernet MTU. On strict-alignment platforms, if the total
1882 * size exceeds (2K - 2) we set align_tweak to 0 and let
1883 * the upper layer copy the headers.
1884 */
1885 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1886
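	/*
	 * Three receive descriptor layouts are in use: the 82574 takes
	 * extended descriptors, WM_F_NEWQUEUE chips take the "nq" advanced
	 * descriptors, and everything else takes the legacy wiseman
	 * descriptor, whose status fields are cleared by software here.
	 */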
1887 if (sc->sc_type == WM_T_82574) {
1888 ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1889 rxd->erx_data.erxd_addr =
1890 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1891 rxd->erx_data.erxd_dd = 0;
1892 } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1893 nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1894
1895 rxd->nqrx_data.nrxd_paddr =
1896 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1897 /* Currently, split header is not supported. */
1898 rxd->nqrx_data.nrxd_haddr = 0;
1899 } else {
1900 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1901
1902 wm_set_dma_addr(&rxd->wrx_addr,
1903 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1904 rxd->wrx_len = 0;
1905 rxd->wrx_cksum = 0;
1906 rxd->wrx_status = 0;
1907 rxd->wrx_errors = 0;
1908 rxd->wrx_special = 0;
1909 }
1910 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1911
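	/* Advance the receive descriptor tail to hand the slot to hardware. */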
1912 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1913 }
1914
1915 /*
1916 * Device driver interface functions and commonly used functions.
1917 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1918 */
1919
1920 /* Lookup supported device table */
1921 static const struct wm_product *
1922 wm_lookup(const struct pci_attach_args *pa)
1923 {
1924 const struct wm_product *wmp;
1925
1926 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1927 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1928 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1929 return wmp;
1930 }
1931 return NULL;
1932 }
1933
1934 /* The match function (ca_match) */
1935 static int
1936 wm_match(device_t parent, cfdata_t cf, void *aux)
1937 {
1938 struct pci_attach_args *pa = aux;
1939
1940 if (wm_lookup(pa) != NULL)
1941 return 1;
1942
1943 return 0;
1944 }
1945
1946 /* The attach function (ca_attach) */
1947 static void
1948 wm_attach(device_t parent, device_t self, void *aux)
1949 {
1950 struct wm_softc *sc = device_private(self);
1951 struct pci_attach_args *pa = aux;
1952 prop_dictionary_t dict;
1953 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1954 pci_chipset_tag_t pc = pa->pa_pc;
1955 int counts[PCI_INTR_TYPE_SIZE];
1956 pci_intr_type_t max_type;
1957 const char *eetype, *xname;
1958 bus_space_tag_t memt;
1959 bus_space_handle_t memh;
1960 bus_size_t memsize;
1961 int memh_valid;
1962 int i, error;
1963 const struct wm_product *wmp;
1964 prop_data_t ea;
1965 prop_number_t pn;
1966 uint8_t enaddr[ETHER_ADDR_LEN];
1967 char buf[256];
1968 char wqname[MAXCOMLEN];
1969 uint16_t cfg1, cfg2, swdpin, nvmword;
1970 pcireg_t preg, memtype;
1971 uint16_t eeprom_data, apme_mask;
1972 bool force_clear_smbi;
1973 uint32_t link_mode;
1974 uint32_t reg;
1975
1976 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
1977 sc->sc_debug = WM_DEBUG_DEFAULT;
1978 #endif
1979 sc->sc_dev = self;
1980 callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
1981 callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
1982 sc->sc_core_stopping = false;
1983
1984 wmp = wm_lookup(pa);
1985 #ifdef DIAGNOSTIC
1986 if (wmp == NULL) {
1987 printf("\n");
1988 panic("wm_attach: impossible");
1989 }
1990 #endif
1991 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1992
1993 sc->sc_pc = pa->pa_pc;
1994 sc->sc_pcitag = pa->pa_tag;
1995
1996 if (pci_dma64_available(pa)) {
1997 aprint_verbose(", 64-bit DMA");
1998 sc->sc_dmat = pa->pa_dmat64;
1999 } else {
2000 aprint_verbose(", 32-bit DMA");
2001 sc->sc_dmat = pa->pa_dmat;
2002 }
2003
2004 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
2005 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
2006 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
2007
2008 sc->sc_type = wmp->wmp_type;
2009
2010 /* Set default function pointers */
2011 sc->phy.acquire = sc->nvm.acquire = wm_get_null;
2012 sc->phy.release = sc->nvm.release = wm_put_null;
2013 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
2014
2015 if (sc->sc_type < WM_T_82543) {
2016 if (sc->sc_rev < 2) {
2017 aprint_error_dev(sc->sc_dev,
2018 "i82542 must be at least rev. 2\n");
2019 return;
2020 }
2021 if (sc->sc_rev < 3)
2022 sc->sc_type = WM_T_82542_2_0;
2023 }
2024
2025 /*
2026 * Disable MSI for Errata:
2027 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
2028 *
2029 * 82544: Errata 25
2030 * 82540: Errata 6 (easy to reproduce device timeout)
2031 * 82545: Errata 4 (easy to reproduce device timeout)
2032 * 82546: Errata 26 (easy to reproduce device timeout)
2033 * 82541: Errata 7 (easy to reproduce device timeout)
2034 *
2035 * "Byte Enables 2 and 3 are not set on MSI writes"
2036 *
2037 * 82571 & 82572: Errata 63
2038 */
2039 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
2040 || (sc->sc_type == WM_T_82572))
2041 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
2042
2043 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2044 || (sc->sc_type == WM_T_82580)
2045 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2046 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2047 sc->sc_flags |= WM_F_NEWQUEUE;
2048
2049 /* Set device properties (mactype) */
2050 dict = device_properties(sc->sc_dev);
2051 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
2052
2053 /*
2054 	 * Map the device. All devices support memory-mapped access,
2055 * and it is really required for normal operation.
2056 */
2057 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
2058 switch (memtype) {
2059 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2060 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2061 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
2062 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
2063 break;
2064 default:
2065 memh_valid = 0;
2066 break;
2067 }
2068
2069 if (memh_valid) {
2070 sc->sc_st = memt;
2071 sc->sc_sh = memh;
2072 sc->sc_ss = memsize;
2073 } else {
2074 aprint_error_dev(sc->sc_dev,
2075 "unable to map device registers\n");
2076 return;
2077 }
2078
2079 /*
2080 * In addition, i82544 and later support I/O mapped indirect
2081 * register access. It is not desirable (nor supported in
2082 * this driver) to use it for normal operation, though it is
2083 * required to work around bugs in some chip versions.
2084 */
2085 switch (sc->sc_type) {
2086 case WM_T_82544:
2087 case WM_T_82541:
2088 case WM_T_82541_2:
2089 case WM_T_82547:
2090 case WM_T_82547_2:
2091 /* First we have to find the I/O BAR. */
2092 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
2093 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
2094 if (memtype == PCI_MAPREG_TYPE_IO)
2095 break;
2096 if (PCI_MAPREG_MEM_TYPE(memtype) ==
2097 PCI_MAPREG_MEM_TYPE_64BIT)
2098 i += 4; /* skip high bits, too */
2099 }
2100 if (i < PCI_MAPREG_END) {
2101 /*
2102 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
2103 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
2104 			 * That is not a problem, because those newer chips
2105 			 * don't have this bug.
2106 *
2107 			 * The i8254x apparently doesn't respond when the
2108 			 * I/O BAR is 0, which looks somewhat like it hasn't
2109 			 * been configured.
2110 */
2111 preg = pci_conf_read(pc, pa->pa_tag, i);
2112 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
2113 aprint_error_dev(sc->sc_dev,
2114 "WARNING: I/O BAR at zero.\n");
2115 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
2116 0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
2117 == 0) {
2118 sc->sc_flags |= WM_F_IOH_VALID;
2119 } else
2120 aprint_error_dev(sc->sc_dev,
2121 "WARNING: unable to map I/O space\n");
2122 }
2123 break;
2124 default:
2125 break;
2126 }
2127
2128 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
2129 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2130 preg |= PCI_COMMAND_MASTER_ENABLE;
2131 if (sc->sc_type < WM_T_82542_2_1)
2132 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
2133 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
2134
2135 /* Power up chip */
2136 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
2137 && error != EOPNOTSUPP) {
2138 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
2139 return;
2140 }
2141
2142 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2143 /*
2144 	 * To save interrupt resources, don't use MSI-X when we can use
2145 	 * only one queue.
2146 */
2147 if (sc->sc_nqueues > 1) {
2148 max_type = PCI_INTR_TYPE_MSIX;
2149 /*
2150 		 * The 82583 has an MSI-X capability in its PCI configuration
2151 		 * space, but the chip doesn't actually support it. At least
2152 		 * the documentation doesn't say anything about MSI-X.
2153 */
2154 counts[PCI_INTR_TYPE_MSIX]
2155 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2156 } else {
2157 max_type = PCI_INTR_TYPE_MSI;
2158 counts[PCI_INTR_TYPE_MSIX] = 0;
2159 }
2160
2161 /* Allocation settings */
2162 counts[PCI_INTR_TYPE_MSI] = 1;
2163 counts[PCI_INTR_TYPE_INTX] = 1;
2164 /* overridden by disable flags */
2165 if (wm_disable_msi != 0) {
2166 counts[PCI_INTR_TYPE_MSI] = 0;
2167 if (wm_disable_msix != 0) {
2168 max_type = PCI_INTR_TYPE_INTX;
2169 counts[PCI_INTR_TYPE_MSIX] = 0;
2170 }
2171 } else if (wm_disable_msix != 0) {
2172 max_type = PCI_INTR_TYPE_MSI;
2173 counts[PCI_INTR_TYPE_MSIX] = 0;
2174 }
2175
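	/*
	 * Interrupt allocation falls back step by step: if MSI-X setup
	 * fails, the vectors are released and we retry with MSI; if MSI
	 * setup fails, we retry once more with INTx (see the goto
	 * alloc_retry paths below).
	 */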
2176 alloc_retry:
2177 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2178 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2179 return;
2180 }
2181
2182 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2183 error = wm_setup_msix(sc);
2184 if (error) {
2185 pci_intr_release(pc, sc->sc_intrs,
2186 counts[PCI_INTR_TYPE_MSIX]);
2187
2188 /* Setup for MSI: Disable MSI-X */
2189 max_type = PCI_INTR_TYPE_MSI;
2190 counts[PCI_INTR_TYPE_MSI] = 1;
2191 counts[PCI_INTR_TYPE_INTX] = 1;
2192 goto alloc_retry;
2193 }
2194 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2195 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2196 error = wm_setup_legacy(sc);
2197 if (error) {
2198 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2199 counts[PCI_INTR_TYPE_MSI]);
2200
2201 /* The next try is for INTx: Disable MSI */
2202 max_type = PCI_INTR_TYPE_INTX;
2203 counts[PCI_INTR_TYPE_INTX] = 1;
2204 goto alloc_retry;
2205 }
2206 } else {
2207 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2208 error = wm_setup_legacy(sc);
2209 if (error) {
2210 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2211 counts[PCI_INTR_TYPE_INTX]);
2212 return;
2213 }
2214 }
2215
2216 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2217 error = workqueue_create(&sc->sc_queue_wq, wqname,
2218 wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2219 WM_WORKQUEUE_FLAGS);
2220 if (error) {
2221 aprint_error_dev(sc->sc_dev,
2222 "unable to create workqueue\n");
2223 goto out;
2224 }
2225
2226 /*
2227 * Check the function ID (unit number of the chip).
2228 */
2229 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2230 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2231 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2232 || (sc->sc_type == WM_T_82580)
2233 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2234 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2235 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2236 else
2237 sc->sc_funcid = 0;
2238
2239 /*
2240 * Determine a few things about the bus we're connected to.
2241 */
2242 if (sc->sc_type < WM_T_82543) {
2243 /* We don't really know the bus characteristics here. */
2244 sc->sc_bus_speed = 33;
2245 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2246 /*
2247 * CSA (Communication Streaming Architecture) is about as fast
2248 		 * as a 32-bit 66MHz PCI bus.
2249 */
2250 sc->sc_flags |= WM_F_CSA;
2251 sc->sc_bus_speed = 66;
2252 aprint_verbose_dev(sc->sc_dev,
2253 "Communication Streaming Architecture\n");
2254 if (sc->sc_type == WM_T_82547) {
2255 callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
2256 callout_setfunc(&sc->sc_txfifo_ch,
2257 wm_82547_txfifo_stall, sc);
2258 aprint_verbose_dev(sc->sc_dev,
2259 "using 82547 Tx FIFO stall work-around\n");
2260 }
2261 } else if (sc->sc_type >= WM_T_82571) {
2262 sc->sc_flags |= WM_F_PCIE;
2263 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2264 && (sc->sc_type != WM_T_ICH10)
2265 && (sc->sc_type != WM_T_PCH)
2266 && (sc->sc_type != WM_T_PCH2)
2267 && (sc->sc_type != WM_T_PCH_LPT)
2268 && (sc->sc_type != WM_T_PCH_SPT)
2269 && (sc->sc_type != WM_T_PCH_CNP)) {
2270 /* ICH* and PCH* have no PCIe capability registers */
2271 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2272 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2273 NULL) == 0)
2274 aprint_error_dev(sc->sc_dev,
2275 "unable to find PCIe capability\n");
2276 }
2277 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2278 } else {
2279 reg = CSR_READ(sc, WMREG_STATUS);
2280 if (reg & STATUS_BUS64)
2281 sc->sc_flags |= WM_F_BUS64;
2282 if ((reg & STATUS_PCIX_MODE) != 0) {
2283 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2284
2285 sc->sc_flags |= WM_F_PCIX;
2286 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2287 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2288 aprint_error_dev(sc->sc_dev,
2289 "unable to find PCIX capability\n");
2290 else if (sc->sc_type != WM_T_82545_3 &&
2291 sc->sc_type != WM_T_82546_3) {
2292 /*
2293 * Work around a problem caused by the BIOS
2294 * setting the max memory read byte count
2295 * incorrectly.
2296 */
2297 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2298 sc->sc_pcixe_capoff + PCIX_CMD);
2299 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2300 sc->sc_pcixe_capoff + PCIX_STATUS);
2301
2302 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2303 PCIX_CMD_BYTECNT_SHIFT;
2304 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2305 PCIX_STATUS_MAXB_SHIFT;
2306 if (bytecnt > maxb) {
2307 aprint_verbose_dev(sc->sc_dev,
2308 "resetting PCI-X MMRBC: %d -> %d\n",
2309 512 << bytecnt, 512 << maxb);
2310 pcix_cmd = (pcix_cmd &
2311 ~PCIX_CMD_BYTECNT_MASK) |
2312 (maxb << PCIX_CMD_BYTECNT_SHIFT);
2313 pci_conf_write(pa->pa_pc, pa->pa_tag,
2314 sc->sc_pcixe_capoff + PCIX_CMD,
2315 pcix_cmd);
2316 }
2317 }
2318 }
2319 /*
2320 * The quad port adapter is special; it has a PCIX-PCIX
2321 * bridge on the board, and can run the secondary bus at
2322 * a higher speed.
2323 */
2324 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2325 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2326 : 66;
2327 } else if (sc->sc_flags & WM_F_PCIX) {
2328 switch (reg & STATUS_PCIXSPD_MASK) {
2329 case STATUS_PCIXSPD_50_66:
2330 sc->sc_bus_speed = 66;
2331 break;
2332 case STATUS_PCIXSPD_66_100:
2333 sc->sc_bus_speed = 100;
2334 break;
2335 case STATUS_PCIXSPD_100_133:
2336 sc->sc_bus_speed = 133;
2337 break;
2338 default:
2339 aprint_error_dev(sc->sc_dev,
2340 "unknown PCIXSPD %d; assuming 66MHz\n",
2341 reg & STATUS_PCIXSPD_MASK);
2342 sc->sc_bus_speed = 66;
2343 break;
2344 }
2345 } else
2346 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2347 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2348 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2349 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2350 }
2351
2352 /* clear interesting stat counters */
2353 CSR_READ(sc, WMREG_COLC);
2354 CSR_READ(sc, WMREG_RXERRC);
2355
2356 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2357 || (sc->sc_type >= WM_T_ICH8))
2358 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2359 if (sc->sc_type >= WM_T_ICH8)
2360 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2361
2362 /* Set PHY, NVM mutex related stuff */
2363 switch (sc->sc_type) {
2364 case WM_T_82542_2_0:
2365 case WM_T_82542_2_1:
2366 case WM_T_82543:
2367 case WM_T_82544:
2368 /* Microwire */
2369 sc->nvm.read = wm_nvm_read_uwire;
2370 sc->sc_nvm_wordsize = 64;
2371 sc->sc_nvm_addrbits = 6;
2372 break;
2373 case WM_T_82540:
2374 case WM_T_82545:
2375 case WM_T_82545_3:
2376 case WM_T_82546:
2377 case WM_T_82546_3:
2378 /* Microwire */
2379 sc->nvm.read = wm_nvm_read_uwire;
2380 reg = CSR_READ(sc, WMREG_EECD);
2381 if (reg & EECD_EE_SIZE) {
2382 sc->sc_nvm_wordsize = 256;
2383 sc->sc_nvm_addrbits = 8;
2384 } else {
2385 sc->sc_nvm_wordsize = 64;
2386 sc->sc_nvm_addrbits = 6;
2387 }
2388 sc->sc_flags |= WM_F_LOCK_EECD;
2389 sc->nvm.acquire = wm_get_eecd;
2390 sc->nvm.release = wm_put_eecd;
2391 break;
2392 case WM_T_82541:
2393 case WM_T_82541_2:
2394 case WM_T_82547:
2395 case WM_T_82547_2:
2396 reg = CSR_READ(sc, WMREG_EECD);
2397 /*
2398 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
2399 		 * 8254[17], so set the flags and functions before calling it.
2400 */
2401 sc->sc_flags |= WM_F_LOCK_EECD;
2402 sc->nvm.acquire = wm_get_eecd;
2403 sc->nvm.release = wm_put_eecd;
2404 if (reg & EECD_EE_TYPE) {
2405 /* SPI */
2406 sc->nvm.read = wm_nvm_read_spi;
2407 sc->sc_flags |= WM_F_EEPROM_SPI;
2408 wm_nvm_set_addrbits_size_eecd(sc);
2409 } else {
2410 /* Microwire */
2411 sc->nvm.read = wm_nvm_read_uwire;
2412 if ((reg & EECD_EE_ABITS) != 0) {
2413 sc->sc_nvm_wordsize = 256;
2414 sc->sc_nvm_addrbits = 8;
2415 } else {
2416 sc->sc_nvm_wordsize = 64;
2417 sc->sc_nvm_addrbits = 6;
2418 }
2419 }
2420 break;
2421 case WM_T_82571:
2422 case WM_T_82572:
2423 /* SPI */
2424 sc->nvm.read = wm_nvm_read_eerd;
2425 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2426 sc->sc_flags |= WM_F_EEPROM_SPI;
2427 wm_nvm_set_addrbits_size_eecd(sc);
2428 sc->phy.acquire = wm_get_swsm_semaphore;
2429 sc->phy.release = wm_put_swsm_semaphore;
2430 sc->nvm.acquire = wm_get_nvm_82571;
2431 sc->nvm.release = wm_put_nvm_82571;
2432 break;
2433 case WM_T_82573:
2434 case WM_T_82574:
2435 case WM_T_82583:
2436 sc->nvm.read = wm_nvm_read_eerd;
2437 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2438 if (sc->sc_type == WM_T_82573) {
2439 sc->phy.acquire = wm_get_swsm_semaphore;
2440 sc->phy.release = wm_put_swsm_semaphore;
2441 sc->nvm.acquire = wm_get_nvm_82571;
2442 sc->nvm.release = wm_put_nvm_82571;
2443 } else {
2444 /* Both PHY and NVM use the same semaphore. */
2445 sc->phy.acquire = sc->nvm.acquire
2446 = wm_get_swfwhw_semaphore;
2447 sc->phy.release = sc->nvm.release
2448 = wm_put_swfwhw_semaphore;
2449 }
2450 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2451 sc->sc_flags |= WM_F_EEPROM_FLASH;
2452 sc->sc_nvm_wordsize = 2048;
2453 } else {
2454 /* SPI */
2455 sc->sc_flags |= WM_F_EEPROM_SPI;
2456 wm_nvm_set_addrbits_size_eecd(sc);
2457 }
2458 break;
2459 case WM_T_82575:
2460 case WM_T_82576:
2461 case WM_T_82580:
2462 case WM_T_I350:
2463 case WM_T_I354:
2464 case WM_T_80003:
2465 /* SPI */
2466 sc->sc_flags |= WM_F_EEPROM_SPI;
2467 wm_nvm_set_addrbits_size_eecd(sc);
2468 if ((sc->sc_type == WM_T_80003)
2469 || (sc->sc_nvm_wordsize < (1 << 15))) {
2470 sc->nvm.read = wm_nvm_read_eerd;
2471 /* Don't use WM_F_LOCK_EECD because we use EERD */
2472 } else {
2473 sc->nvm.read = wm_nvm_read_spi;
2474 sc->sc_flags |= WM_F_LOCK_EECD;
2475 }
2476 sc->phy.acquire = wm_get_phy_82575;
2477 sc->phy.release = wm_put_phy_82575;
2478 sc->nvm.acquire = wm_get_nvm_80003;
2479 sc->nvm.release = wm_put_nvm_80003;
2480 break;
2481 case WM_T_ICH8:
2482 case WM_T_ICH9:
2483 case WM_T_ICH10:
2484 case WM_T_PCH:
2485 case WM_T_PCH2:
2486 case WM_T_PCH_LPT:
2487 sc->nvm.read = wm_nvm_read_ich8;
2488 /* FLASH */
2489 sc->sc_flags |= WM_F_EEPROM_FLASH;
2490 sc->sc_nvm_wordsize = 2048;
2491 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2492 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2493 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2494 aprint_error_dev(sc->sc_dev,
2495 "can't map FLASH registers\n");
2496 goto out;
2497 }
2498 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2499 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2500 ICH_FLASH_SECTOR_SIZE;
2501 sc->sc_ich8_flash_bank_size =
2502 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2503 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2504 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2505 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
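		/*
		 * To recap the arithmetic above: GFPREG carries the flash
		 * base in its low bits and the limit in bits 16 and up,
		 * both in sector units.  (limit + 1 - base) sectors are
		 * converted to bytes with ICH_FLASH_SECTOR_SIZE, then
		 * divided by two banks and by sizeof(uint16_t) to give the
		 * per-bank size in 16-bit words.
		 */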
2506 sc->sc_flashreg_offset = 0;
2507 sc->phy.acquire = wm_get_swflag_ich8lan;
2508 sc->phy.release = wm_put_swflag_ich8lan;
2509 sc->nvm.acquire = wm_get_nvm_ich8lan;
2510 sc->nvm.release = wm_put_nvm_ich8lan;
2511 break;
2512 case WM_T_PCH_SPT:
2513 case WM_T_PCH_CNP:
2514 sc->nvm.read = wm_nvm_read_spt;
2515 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2516 sc->sc_flags |= WM_F_EEPROM_FLASH;
2517 sc->sc_flasht = sc->sc_st;
2518 sc->sc_flashh = sc->sc_sh;
2519 sc->sc_ich8_flash_base = 0;
2520 sc->sc_nvm_wordsize =
2521 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2522 * NVM_SIZE_MULTIPLIER;
2523 /* It is size in bytes, we want words */
2524 sc->sc_nvm_wordsize /= 2;
2525 /* Assume 2 banks */
2526 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
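		/*
		 * To recap: bits 5:1 of the STRAP register encode the flash
		 * size in NVM_SIZE_MULTIPLIER-byte units; the byte count is
		 * halved into 16-bit words and halved again under the
		 * two-bank assumption noted above.
		 */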
2527 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2528 sc->phy.acquire = wm_get_swflag_ich8lan;
2529 sc->phy.release = wm_put_swflag_ich8lan;
2530 sc->nvm.acquire = wm_get_nvm_ich8lan;
2531 sc->nvm.release = wm_put_nvm_ich8lan;
2532 break;
2533 case WM_T_I210:
2534 case WM_T_I211:
2535 		/* Allow a single clear of the SW semaphore on I210 and newer */
2536 sc->sc_flags |= WM_F_WA_I210_CLSEM;
2537 if (wm_nvm_flash_presence_i210(sc)) {
2538 sc->nvm.read = wm_nvm_read_eerd;
2539 /* Don't use WM_F_LOCK_EECD because we use EERD */
2540 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2541 wm_nvm_set_addrbits_size_eecd(sc);
2542 } else {
2543 sc->nvm.read = wm_nvm_read_invm;
2544 sc->sc_flags |= WM_F_EEPROM_INVM;
2545 sc->sc_nvm_wordsize = INVM_SIZE;
2546 }
2547 sc->phy.acquire = wm_get_phy_82575;
2548 sc->phy.release = wm_put_phy_82575;
2549 sc->nvm.acquire = wm_get_nvm_80003;
2550 sc->nvm.release = wm_put_nvm_80003;
2551 break;
2552 default:
2553 break;
2554 }
2555
2556 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2557 switch (sc->sc_type) {
2558 case WM_T_82571:
2559 case WM_T_82572:
2560 reg = CSR_READ(sc, WMREG_SWSM2);
2561 if ((reg & SWSM2_LOCK) == 0) {
2562 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2563 force_clear_smbi = true;
2564 } else
2565 force_clear_smbi = false;
2566 break;
2567 case WM_T_82573:
2568 case WM_T_82574:
2569 case WM_T_82583:
2570 force_clear_smbi = true;
2571 break;
2572 default:
2573 force_clear_smbi = false;
2574 break;
2575 }
2576 if (force_clear_smbi) {
2577 reg = CSR_READ(sc, WMREG_SWSM);
2578 if ((reg & SWSM_SMBI) != 0)
2579 aprint_error_dev(sc->sc_dev,
2580 "Please update the Bootagent\n");
2581 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2582 }
2583
2584 /*
2585 	 * Defer printing the EEPROM type until after verifying the checksum.
2586 * This allows the EEPROM type to be printed correctly in the case
2587 * that no EEPROM is attached.
2588 */
2589 /*
2590 * Validate the EEPROM checksum. If the checksum fails, flag
2591 * this for later, so we can fail future reads from the EEPROM.
2592 */
2593 if (wm_nvm_validate_checksum(sc)) {
2594 /*
2595 		 * Validate a second time, because some PCI-e parts fail the
2596 		 * first check due to the link being in a sleep state.
2597 */
2598 if (wm_nvm_validate_checksum(sc))
2599 sc->sc_flags |= WM_F_EEPROM_INVALID;
2600 }
2601
2602 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2603 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2604 else {
2605 aprint_verbose_dev(sc->sc_dev, "%u words ",
2606 sc->sc_nvm_wordsize);
2607 if (sc->sc_flags & WM_F_EEPROM_INVM)
2608 aprint_verbose("iNVM");
2609 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2610 aprint_verbose("FLASH(HW)");
2611 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2612 aprint_verbose("FLASH");
2613 else {
2614 if (sc->sc_flags & WM_F_EEPROM_SPI)
2615 eetype = "SPI";
2616 else
2617 eetype = "MicroWire";
2618 aprint_verbose("(%d address bits) %s EEPROM",
2619 sc->sc_nvm_addrbits, eetype);
2620 }
2621 }
2622 wm_nvm_version(sc);
2623 aprint_verbose("\n");
2624
2625 /*
2626 	 * XXX This is the first call of wm_gmii_setup_phytype. The result
2627 	 * might be incorrect.
2628 */
2629 wm_gmii_setup_phytype(sc, 0, 0);
2630
2631 /* Check for WM_F_WOL on some chips before wm_reset() */
2632 switch (sc->sc_type) {
2633 case WM_T_ICH8:
2634 case WM_T_ICH9:
2635 case WM_T_ICH10:
2636 case WM_T_PCH:
2637 case WM_T_PCH2:
2638 case WM_T_PCH_LPT:
2639 case WM_T_PCH_SPT:
2640 case WM_T_PCH_CNP:
2641 apme_mask = WUC_APME;
2642 eeprom_data = CSR_READ(sc, WMREG_WUC);
2643 if ((eeprom_data & apme_mask) != 0)
2644 sc->sc_flags |= WM_F_WOL;
2645 break;
2646 default:
2647 break;
2648 }
2649
2650 /* Reset the chip to a known state. */
2651 wm_reset(sc);
2652
2653 /*
2654 * Check for I21[01] PLL workaround.
2655 *
2656 * Three cases:
2657 * a) Chip is I211.
2658 * b) Chip is I210 and it uses INVM (not FLASH).
2659 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2660 */
2661 if (sc->sc_type == WM_T_I211)
2662 sc->sc_flags |= WM_F_PLL_WA_I210;
2663 if (sc->sc_type == WM_T_I210) {
2664 if (!wm_nvm_flash_presence_i210(sc))
2665 sc->sc_flags |= WM_F_PLL_WA_I210;
2666 else if ((sc->sc_nvm_ver_major < 3)
2667 || ((sc->sc_nvm_ver_major == 3)
2668 && (sc->sc_nvm_ver_minor < 25))) {
2669 aprint_verbose_dev(sc->sc_dev,
2670 "ROM image version %d.%d is older than 3.25\n",
2671 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2672 sc->sc_flags |= WM_F_PLL_WA_I210;
2673 }
2674 }
2675 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2676 wm_pll_workaround_i210(sc);
2677
2678 wm_get_wakeup(sc);
2679
2680 /* Non-AMT based hardware can now take control from firmware */
2681 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2682 wm_get_hw_control(sc);
2683
2684 /*
2685 	 * Read the Ethernet address from the EEPROM, unless it was
2686 	 * already found in the device properties.
2687 */
2688 ea = prop_dictionary_get(dict, "mac-address");
2689 if (ea != NULL) {
2690 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2691 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2692 memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
2693 } else {
2694 if (wm_read_mac_addr(sc, enaddr) != 0) {
2695 aprint_error_dev(sc->sc_dev,
2696 "unable to read Ethernet address\n");
2697 goto out;
2698 }
2699 }
2700
2701 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2702 ether_sprintf(enaddr));
2703
2704 /*
2705 * Read the config info from the EEPROM, and set up various
2706 * bits in the control registers based on their contents.
2707 */
2708 pn = prop_dictionary_get(dict, "i82543-cfg1");
2709 if (pn != NULL) {
2710 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2711 cfg1 = (uint16_t) prop_number_signed_value(pn);
2712 } else {
2713 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2714 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2715 goto out;
2716 }
2717 }
2718
2719 pn = prop_dictionary_get(dict, "i82543-cfg2");
2720 if (pn != NULL) {
2721 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2722 cfg2 = (uint16_t) prop_number_signed_value(pn);
2723 } else {
2724 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2725 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2726 goto out;
2727 }
2728 }
2729
2730 /* check for WM_F_WOL */
2731 switch (sc->sc_type) {
2732 case WM_T_82542_2_0:
2733 case WM_T_82542_2_1:
2734 case WM_T_82543:
2735 /* dummy? */
2736 eeprom_data = 0;
2737 apme_mask = NVM_CFG3_APME;
2738 break;
2739 case WM_T_82544:
2740 apme_mask = NVM_CFG2_82544_APM_EN;
2741 eeprom_data = cfg2;
2742 break;
2743 case WM_T_82546:
2744 case WM_T_82546_3:
2745 case WM_T_82571:
2746 case WM_T_82572:
2747 case WM_T_82573:
2748 case WM_T_82574:
2749 case WM_T_82583:
2750 case WM_T_80003:
2751 case WM_T_82575:
2752 case WM_T_82576:
2753 apme_mask = NVM_CFG3_APME;
2754 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2755 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2756 break;
2757 case WM_T_82580:
2758 case WM_T_I350:
2759 case WM_T_I354:
2760 case WM_T_I210:
2761 case WM_T_I211:
2762 apme_mask = NVM_CFG3_APME;
2763 wm_nvm_read(sc,
2764 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2765 1, &eeprom_data);
2766 break;
2767 case WM_T_ICH8:
2768 case WM_T_ICH9:
2769 case WM_T_ICH10:
2770 case WM_T_PCH:
2771 case WM_T_PCH2:
2772 case WM_T_PCH_LPT:
2773 case WM_T_PCH_SPT:
2774 case WM_T_PCH_CNP:
2775 		/* Already checked before wm_reset() */
2776 apme_mask = eeprom_data = 0;
2777 break;
2778 default: /* XXX 82540 */
2779 apme_mask = NVM_CFG3_APME;
2780 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2781 break;
2782 }
2783 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
2784 if ((eeprom_data & apme_mask) != 0)
2785 sc->sc_flags |= WM_F_WOL;
2786
2787 /*
2788 	 * We have the EEPROM settings; now apply the special cases
2789 	 * where the EEPROM may be wrong or the board doesn't support
2790 	 * wake-on-LAN on a particular port.
2791 */
2792 switch (sc->sc_pcidevid) {
2793 case PCI_PRODUCT_INTEL_82546GB_PCIE:
2794 sc->sc_flags &= ~WM_F_WOL;
2795 break;
2796 case PCI_PRODUCT_INTEL_82546EB_FIBER:
2797 case PCI_PRODUCT_INTEL_82546GB_FIBER:
2798 /* Wake events only supported on port A for dual fiber
2799 * regardless of eeprom setting */
2800 if (sc->sc_funcid == 1)
2801 sc->sc_flags &= ~WM_F_WOL;
2802 break;
2803 case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2804 /* If quad port adapter, disable WoL on all but port A */
2805 if (sc->sc_funcid != 0)
2806 sc->sc_flags &= ~WM_F_WOL;
2807 break;
2808 case PCI_PRODUCT_INTEL_82571EB_FIBER:
2809 /* Wake events only supported on port A for dual fiber
2810 * regardless of eeprom setting */
2811 if (sc->sc_funcid == 1)
2812 sc->sc_flags &= ~WM_F_WOL;
2813 break;
2814 case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2815 case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2816 case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2817 /* If quad port adapter, disable WoL on all but port A */
2818 if (sc->sc_funcid != 0)
2819 sc->sc_flags &= ~WM_F_WOL;
2820 break;
2821 }
2822
2823 if (sc->sc_type >= WM_T_82575) {
2824 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2825 aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2826 nvmword);
2827 if ((sc->sc_type == WM_T_82575) ||
2828 (sc->sc_type == WM_T_82576)) {
2829 /* Check NVM for autonegotiation */
2830 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2831 != 0)
2832 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2833 }
2834 if ((sc->sc_type == WM_T_82575) ||
2835 (sc->sc_type == WM_T_I350)) {
2836 if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2837 sc->sc_flags |= WM_F_MAS;
2838 }
2839 }
2840 }
2841
2842 /*
2843 	 * XXX need special handling for some multiple-port cards
2844 	 * to disable a particular port.
2845 */
2846
2847 if (sc->sc_type >= WM_T_82544) {
2848 pn = prop_dictionary_get(dict, "i82543-swdpin");
2849 if (pn != NULL) {
2850 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2851 swdpin = (uint16_t) prop_number_signed_value(pn);
2852 } else {
2853 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2854 aprint_error_dev(sc->sc_dev,
2855 "unable to read SWDPIN\n");
2856 goto out;
2857 }
2858 }
2859 }
2860
2861 if (cfg1 & NVM_CFG1_ILOS)
2862 sc->sc_ctrl |= CTRL_ILOS;
2863
2864 /*
2865 * XXX
2866 	 * This code isn't correct, because pins 2 and 3 are located
2867 	 * at different positions on newer chips. Check all datasheets.
2868 	 *
2869 	 * Until this is resolved, only do it for the 82580 and older chips.
2870 */
2871 if (sc->sc_type <= WM_T_82580) {
2872 if (sc->sc_type >= WM_T_82544) {
2873 sc->sc_ctrl |=
2874 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2875 CTRL_SWDPIO_SHIFT;
2876 sc->sc_ctrl |=
2877 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2878 CTRL_SWDPINS_SHIFT;
2879 } else {
2880 sc->sc_ctrl |=
2881 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2882 CTRL_SWDPIO_SHIFT;
2883 }
2884 }
2885
2886 if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2887 wm_nvm_read(sc,
2888 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2889 1, &nvmword);
2890 if (nvmword & NVM_CFG3_ILOS)
2891 sc->sc_ctrl |= CTRL_ILOS;
2892 }
2893
2894 #if 0
2895 if (sc->sc_type >= WM_T_82544) {
2896 if (cfg1 & NVM_CFG1_IPS0)
2897 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2898 if (cfg1 & NVM_CFG1_IPS1)
2899 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2900 sc->sc_ctrl_ext |=
2901 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2902 CTRL_EXT_SWDPIO_SHIFT;
2903 sc->sc_ctrl_ext |=
2904 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2905 CTRL_EXT_SWDPINS_SHIFT;
2906 } else {
2907 sc->sc_ctrl_ext |=
2908 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2909 CTRL_EXT_SWDPIO_SHIFT;
2910 }
2911 #endif
2912
2913 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2914 #if 0
2915 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2916 #endif
2917
2918 if (sc->sc_type == WM_T_PCH) {
2919 uint16_t val;
2920
2921 /* Save the NVM K1 bit setting */
2922 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2923
2924 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2925 sc->sc_nvm_k1_enabled = 1;
2926 else
2927 sc->sc_nvm_k1_enabled = 0;
2928 }
2929
2930 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
2931 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2932 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2933 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2934 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2935 || sc->sc_type == WM_T_82573
2936 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2937 /* Copper only */
2938 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2939 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2940 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2941 	    || (sc->sc_type == WM_T_I211)) {
2942 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2943 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2944 switch (link_mode) {
2945 case CTRL_EXT_LINK_MODE_1000KX:
2946 aprint_normal_dev(sc->sc_dev, "1000KX\n");
2947 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2948 break;
2949 case CTRL_EXT_LINK_MODE_SGMII:
2950 if (wm_sgmii_uses_mdio(sc)) {
2951 aprint_normal_dev(sc->sc_dev,
2952 "SGMII(MDIO)\n");
2953 sc->sc_flags |= WM_F_SGMII;
2954 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2955 break;
2956 }
2957 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2958 /*FALLTHROUGH*/
2959 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2960 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2961 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2962 if (link_mode
2963 == CTRL_EXT_LINK_MODE_SGMII) {
2964 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2965 sc->sc_flags |= WM_F_SGMII;
2966 aprint_verbose_dev(sc->sc_dev,
2967 "SGMII\n");
2968 } else {
2969 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2970 aprint_verbose_dev(sc->sc_dev,
2971 "SERDES\n");
2972 }
2973 break;
2974 }
2975 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2976 aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
2977 else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2978 aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
2979 sc->sc_flags |= WM_F_SGMII;
2980 }
2981 /* Do not change link mode for 100BaseFX */
2982 if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
2983 break;
2984
2985 /* Change current link mode setting */
2986 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2987 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2988 reg |= CTRL_EXT_LINK_MODE_SGMII;
2989 else
2990 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2991 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2992 break;
2993 case CTRL_EXT_LINK_MODE_GMII:
2994 default:
2995 aprint_normal_dev(sc->sc_dev, "Copper\n");
2996 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2997 break;
2998 }
2999
3000 		reg &= ~CTRL_EXT_I2C_ENA;
3001 		if ((sc->sc_flags & WM_F_SGMII) != 0)
3002 			reg |= CTRL_EXT_I2C_ENA;
3005 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3006 if ((sc->sc_flags & WM_F_SGMII) != 0) {
3007 if (!wm_sgmii_uses_mdio(sc))
3008 wm_gmii_setup_phytype(sc, 0, 0);
3009 wm_reset_mdicnfg_82580(sc);
3010 }
3011 } else if (sc->sc_type < WM_T_82543 ||
3012 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
3013 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3014 aprint_error_dev(sc->sc_dev,
3015 "WARNING: TBIMODE set on 1000BASE-T product!\n");
3016 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
3017 }
3018 } else {
3019 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
3020 aprint_error_dev(sc->sc_dev,
3021 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
3022 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3023 }
3024 }
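	/*
	 * Summary of the media selection above: ICH/PCH and 8257[34]/82583
	 * parts are copper only; 82575 and newer parts derive the media
	 * from the CTRL_EXT link mode (1000KX, SGMII, SERDES/SFP or GMII);
	 * for all other chips the STATUS_TBIMODE bit decides between fiber
	 * (TBI) and copper, overriding a mismatched product-table entry.
	 */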
3025
3026 if (sc->sc_type >= WM_T_PCH2)
3027 sc->sc_flags |= WM_F_EEE;
3028 else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
3029 && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
3030 /* XXX: Need special handling for I354. (not yet) */
3031 if (sc->sc_type != WM_T_I354)
3032 sc->sc_flags |= WM_F_EEE;
3033 }
3034
3035 /*
3036 * The I350 has a bug where it always strips the CRC whether
3037 	 * asked to or not. So ask for the stripped CRC here and cope in rxeof.
3038 */
3039 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3040 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3041 sc->sc_flags |= WM_F_CRC_STRIP;
3042
3043 /* Set device properties (macflags) */
3044 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
3045
3046 if (sc->sc_flags != 0) {
3047 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
3048 aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
3049 }
3050
3051 #ifdef WM_MPSAFE
3052 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
3053 #else
3054 sc->sc_core_lock = NULL;
3055 #endif
3056
3057 /* Initialize the media structures accordingly. */
3058 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3059 wm_gmii_mediainit(sc, wmp->wmp_product);
3060 else
3061 wm_tbi_mediainit(sc); /* All others */
3062
3063 ifp = &sc->sc_ethercom.ec_if;
3064 xname = device_xname(sc->sc_dev);
3065 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
3066 ifp->if_softc = sc;
3067 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3068 #ifdef WM_MPSAFE
3069 ifp->if_extflags = IFEF_MPSAFE;
3070 #endif
3071 ifp->if_ioctl = wm_ioctl;
3072 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3073 ifp->if_start = wm_nq_start;
3074 /*
3075 * When the number of CPUs is one and the controller can use
3076 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
3077 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
3078 		 * the other for link status changes.
3079 * In this situation, wm_nq_transmit() is disadvantageous
3080 * because of wm_select_txqueue() and pcq(9) overhead.
3081 */
3082 if (wm_is_using_multiqueue(sc))
3083 ifp->if_transmit = wm_nq_transmit;
3084 } else {
3085 ifp->if_start = wm_start;
3086 /*
3087 * wm_transmit() has the same disadvantages as wm_nq_transmit()
3088 * described above.
3089 */
3090 if (wm_is_using_multiqueue(sc))
3091 ifp->if_transmit = wm_transmit;
3092 }
3093 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
3094 ifp->if_init = wm_init;
3095 ifp->if_stop = wm_stop;
3096 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
3097 IFQ_SET_READY(&ifp->if_snd);
3098
3099 /* Check for jumbo frame support */
3100 switch (sc->sc_type) {
3101 case WM_T_82573:
3102 /* XXX limited to 9234 if ASPM is disabled */
3103 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
3104 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
3105 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3106 break;
3107 case WM_T_82571:
3108 case WM_T_82572:
3109 case WM_T_82574:
3110 case WM_T_82583:
3111 case WM_T_82575:
3112 case WM_T_82576:
3113 case WM_T_82580:
3114 case WM_T_I350:
3115 case WM_T_I354:
3116 case WM_T_I210:
3117 case WM_T_I211:
3118 case WM_T_80003:
3119 case WM_T_ICH9:
3120 case WM_T_ICH10:
3121 case WM_T_PCH2: /* PCH2 supports 9K frame size */
3122 case WM_T_PCH_LPT:
3123 case WM_T_PCH_SPT:
3124 case WM_T_PCH_CNP:
3125 /* XXX limited to 9234 */
3126 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3127 break;
3128 case WM_T_PCH:
3129 /* XXX limited to 4096 */
3130 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3131 break;
3132 case WM_T_82542_2_0:
3133 case WM_T_82542_2_1:
3134 case WM_T_ICH8:
3135 /* No support for jumbo frames */
3136 break;
3137 default:
3138 /* ETHER_MAX_LEN_JUMBO */
3139 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3140 break;
3141 }
3142
3143 /* If we're an i82543 or greater, we can support VLANs. */
3144 if (sc->sc_type >= WM_T_82543) {
3145 sc->sc_ethercom.ec_capabilities |=
3146 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3147 sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3148 }
3149
3150 if ((sc->sc_flags & WM_F_EEE) != 0)
3151 sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
3152
3153 /*
3154 * We can perform TCPv4 and UDPv4 checksums in hardware, both
3155 * Tx and Rx. Only on i82543 and later.
3156 */
3157 if (sc->sc_type >= WM_T_82543) {
3158 ifp->if_capabilities |=
3159 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3160 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3161 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3162 IFCAP_CSUM_TCPv6_Tx |
3163 IFCAP_CSUM_UDPv6_Tx;
3164 }
3165
3166 /*
3167 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
3168 *
3169 * 82541GI (8086:1076) ... no
3170 * 82572EI (8086:10b9) ... yes
3171 */
3172 if (sc->sc_type >= WM_T_82571) {
3173 ifp->if_capabilities |=
3174 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3175 }
3176
3177 /*
3178 * If we're an i82544 or greater (except i82547), we can do
3179 * TCP segmentation offload.
3180 */
3181 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
3182 ifp->if_capabilities |= IFCAP_TSOv4;
3183
3184 if (sc->sc_type >= WM_T_82571)
3185 ifp->if_capabilities |= IFCAP_TSOv6;
3186
3187 sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3188 sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3189 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3190 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3191
3192 /* Attach the interface. */
3193 if_initialize(ifp);
3194 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3195 ether_ifattach(ifp, enaddr);
3196 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3197 if_register(ifp);
3198 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3199 RND_FLAG_DEFAULT);
3200
3201 #ifdef WM_EVENT_COUNTERS
3202 /* Attach event counters. */
3203 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3204 NULL, xname, "linkintr");
3205
3206 if (sc->sc_type >= WM_T_82542_2_1) {
3207 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3208 NULL, xname, "tx_xoff");
3209 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3210 NULL, xname, "tx_xon");
3211 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3212 NULL, xname, "rx_xoff");
3213 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3214 NULL, xname, "rx_xon");
3215 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3216 NULL, xname, "rx_macctl");
3217 }
3218
3219 evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
3220 NULL, xname, "CRC Error");
3221 evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
3222 NULL, xname, "Symbol Error");
3223
3224 if (sc->sc_type >= WM_T_82543) {
3225 evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
3226 NULL, xname, "Alignment Error");
3227 evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
3228 NULL, xname, "Receive Error");
3229 evcnt_attach_dynamic(&sc->sc_ev_cexterr, EVCNT_TYPE_MISC,
3230 NULL, xname, "Carrier Extension Error");
3231 }
3232
3233 evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
3234 NULL, xname, "Missed Packets");
3235 evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
3236 NULL, xname, "Collision");
3237 evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
3238 NULL, xname, "Sequence Error");
3239 evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
3240 NULL, xname, "Receive Length Error");
3241 evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
3242 NULL, xname, "Single Collision");
3243 evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
3244 NULL, xname, "Excessive Collisions");
3245 evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
3246 NULL, xname, "Multiple Collision");
3247 evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
3248 NULL, xname, "Late Collisions");
3249 evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
3250 NULL, xname, "Defer");
3251 evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
3252 NULL, xname, "Good Packets Rx");
3253 evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
3254 NULL, xname, "Broadcast Packets Rx");
3255 evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
3256 NULL, xname, "Multicast Packets Rx");
3257 evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
3258 NULL, xname, "Good Packets Tx");
3259 evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
3260 NULL, xname, "Good Octets Rx");
3261 evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
3262 NULL, xname, "Good Octets Tx");
3263 evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
3264 NULL, xname, "Rx No Buffers");
3265 evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
3266 NULL, xname, "Rx Undersize");
3267 evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
3268 NULL, xname, "Rx Fragment");
3269 evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
3270 NULL, xname, "Rx Oversize");
3271 evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
3272 NULL, xname, "Rx Jabber");
3273 evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
3274 NULL, xname, "Total Octets Rx");
3275 evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
3276 NULL, xname, "Total Octets Tx");
3277 evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
3278 NULL, xname, "Total Packets Rx");
3279 evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
3280 NULL, xname, "Total Packets Tx");
3281 evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
3282 NULL, xname, "Multicast Packets Tx");
3283 evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
3284 NULL, xname, "Broadcast Packets Tx Count");
3285 evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
3286 NULL, xname, "Packets Rx (64 bytes)");
3287 evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
3288 NULL, xname, "Packets Rx (65-127 bytes)");
3289 evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
3290 NULL, xname, "Packets Rx (128-255 bytes)");
3291 evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
3292 NULL, xname, "Packets Rx (255-511 bytes)");
3293 evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
3294 NULL, xname, "Packets Rx (512-1023 bytes)");
3295 evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
3296 NULL, xname, "Packets Rx (1024-1522 bytes)");
3297 evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
3298 NULL, xname, "Packets Tx (64 bytes)");
3299 evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
3300 NULL, xname, "Packets Tx (65-127 bytes)");
3301 evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
3302 NULL, xname, "Packets Tx (128-255 bytes)");
3303 evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
3304 NULL, xname, "Packets Tx (256-511 bytes)");
3305 evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
3306 NULL, xname, "Packets Tx (512-1023 bytes)");
3307 evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
3308 NULL, xname, "Packets Tx (1024-1522 Bytes)");
3309 evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
3310 NULL, xname, "Interrupt Assertion");
3311 evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
3312 NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
3313 evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
3314 NULL, xname, "Intr. Cause Rx Abs Timer Expire");
3315 evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
3316 NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
3317 evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
3318 NULL, xname, "Intr. Cause Tx Abs Timer Expire");
3319 evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
3320 NULL, xname, "Intr. Cause Tx Queue Empty");
3321 evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
3322 NULL, xname, "Intr. Cause Tx Queue Min Thresh");
3323 evcnt_attach_dynamic(&sc->sc_ev_icrxdmtc, EVCNT_TYPE_MISC,
3324 NULL, xname, "Intr. Cause Rx Desc Min Thresh");
3325 evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
3326 NULL, xname, "Interrupt Cause Receiver Overrun");
3327 if (sc->sc_type >= WM_T_82543) {
3328 evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
3329 NULL, xname, "Tx with No CRS");
3330 evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
3331 NULL, xname, "TCP Segmentation Context Tx");
3332 evcnt_attach_dynamic(&sc->sc_ev_tsctfc, EVCNT_TYPE_MISC,
3333 NULL, xname, "TCP Segmentation Context Tx Fail");
3334 }
3335 if (sc->sc_type >= WM_T_82540) {
3336 evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
3337 NULL, xname, "Management Packets RX");
3338 evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
3339 NULL, xname, "Management Packets Dropped");
3340 evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
3341 NULL, xname, "Management Packets TX");
3342 }
3343 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
3344 evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
3345 NULL, xname, "BMC2OS Packets received by host");
3346 evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
3347 NULL, xname, "OS2BMC Packets transmitted by host");
3348 evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
3349 NULL, xname, "BMC2OS Packets sent by BMC");
3350 evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
3351 NULL, xname, "OS2BMC Packets received by BMC");
3352 }
3353 #endif /* WM_EVENT_COUNTERS */
3354
3355 sc->sc_txrx_use_workqueue = false;
3356
3357 if (wm_phy_need_linkdown_discard(sc)) {
3358 DPRINTF(sc, WM_DEBUG_LINK,
3359 ("%s: %s: Set linkdown discard flag\n",
3360 device_xname(sc->sc_dev), __func__));
3361 wm_set_linkdown_discard(sc);
3362 }
3363
3364 wm_init_sysctls(sc);
3365
3366 if (pmf_device_register(self, wm_suspend, wm_resume))
3367 pmf_class_network_register(self, ifp);
3368 else
3369 aprint_error_dev(self, "couldn't establish power handler\n");
3370
3371 sc->sc_flags |= WM_F_ATTACHED;
3372 out:
3373 return;
3374 }
3375
3376 /* The detach function (ca_detach) */
3377 static int
3378 wm_detach(device_t self, int flags __unused)
3379 {
3380 struct wm_softc *sc = device_private(self);
3381 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3382 int i;
3383
3384 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3385 return 0;
3386
3387 /* Stop the interface. The callouts are stopped in wm_stop(). */
3388 wm_stop(ifp, 1);
3389
3390 pmf_device_deregister(self);
3391
3392 sysctl_teardown(&sc->sc_sysctllog);
3393
3394 #ifdef WM_EVENT_COUNTERS
3395 evcnt_detach(&sc->sc_ev_linkintr);
3396
3397 if (sc->sc_type >= WM_T_82542_2_1) {
3398 evcnt_detach(&sc->sc_ev_tx_xoff);
3399 evcnt_detach(&sc->sc_ev_tx_xon);
3400 evcnt_detach(&sc->sc_ev_rx_xoff);
3401 evcnt_detach(&sc->sc_ev_rx_xon);
3402 evcnt_detach(&sc->sc_ev_rx_macctl);
3403 }
3404
3405 evcnt_detach(&sc->sc_ev_crcerrs);
3406 evcnt_detach(&sc->sc_ev_symerrc);
3407
3408 if (sc->sc_type >= WM_T_82543) {
3409 evcnt_detach(&sc->sc_ev_algnerrc);
3410 evcnt_detach(&sc->sc_ev_rxerrc);
3411 evcnt_detach(&sc->sc_ev_cexterr);
3412 }
3413 evcnt_detach(&sc->sc_ev_mpc);
3414 evcnt_detach(&sc->sc_ev_colc);
3415 evcnt_detach(&sc->sc_ev_sec);
3416 evcnt_detach(&sc->sc_ev_rlec);
3417 evcnt_detach(&sc->sc_ev_scc);
3418 evcnt_detach(&sc->sc_ev_ecol);
3419 evcnt_detach(&sc->sc_ev_mcc);
3420 evcnt_detach(&sc->sc_ev_latecol);
3421 evcnt_detach(&sc->sc_ev_dc);
3422 evcnt_detach(&sc->sc_ev_gprc);
3423 evcnt_detach(&sc->sc_ev_bprc);
3424 evcnt_detach(&sc->sc_ev_mprc);
3425 evcnt_detach(&sc->sc_ev_gptc);
3426 evcnt_detach(&sc->sc_ev_gorc);
3427 evcnt_detach(&sc->sc_ev_gotc);
3428 evcnt_detach(&sc->sc_ev_rnbc);
3429 evcnt_detach(&sc->sc_ev_ruc);
3430 evcnt_detach(&sc->sc_ev_rfc);
3431 evcnt_detach(&sc->sc_ev_roc);
3432 evcnt_detach(&sc->sc_ev_rjc);
3433 evcnt_detach(&sc->sc_ev_tor);
3434 evcnt_detach(&sc->sc_ev_tot);
3435 evcnt_detach(&sc->sc_ev_tpr);
3436 evcnt_detach(&sc->sc_ev_tpt);
3437 evcnt_detach(&sc->sc_ev_mptc);
3438 evcnt_detach(&sc->sc_ev_bptc);
3439 evcnt_detach(&sc->sc_ev_prc64);
3440 evcnt_detach(&sc->sc_ev_prc127);
3441 evcnt_detach(&sc->sc_ev_prc255);
3442 evcnt_detach(&sc->sc_ev_prc511);
3443 evcnt_detach(&sc->sc_ev_prc1023);
3444 evcnt_detach(&sc->sc_ev_prc1522);
3445 evcnt_detach(&sc->sc_ev_ptc64);
3446 evcnt_detach(&sc->sc_ev_ptc127);
3447 evcnt_detach(&sc->sc_ev_ptc255);
3448 evcnt_detach(&sc->sc_ev_ptc511);
3449 evcnt_detach(&sc->sc_ev_ptc1023);
3450 evcnt_detach(&sc->sc_ev_ptc1522);
3451 evcnt_detach(&sc->sc_ev_iac);
3452 evcnt_detach(&sc->sc_ev_icrxptc);
3453 evcnt_detach(&sc->sc_ev_icrxatc);
3454 evcnt_detach(&sc->sc_ev_ictxptc);
3455 evcnt_detach(&sc->sc_ev_ictxact);
3456 evcnt_detach(&sc->sc_ev_ictxqec);
3457 evcnt_detach(&sc->sc_ev_ictxqmtc);
3458 evcnt_detach(&sc->sc_ev_icrxdmtc);
3459 evcnt_detach(&sc->sc_ev_icrxoc);
3460 if (sc->sc_type >= WM_T_82543) {
3461 evcnt_detach(&sc->sc_ev_tncrs);
3462 evcnt_detach(&sc->sc_ev_tsctc);
3463 evcnt_detach(&sc->sc_ev_tsctfc);
3464 }
3465 if (sc->sc_type >= WM_T_82540) {
3466 evcnt_detach(&sc->sc_ev_mgtprc);
3467 evcnt_detach(&sc->sc_ev_mgtpdc);
3468 evcnt_detach(&sc->sc_ev_mgtptc);
3469 }
3470 if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
3471 evcnt_detach(&sc->sc_ev_b2ogprc);
3472 evcnt_detach(&sc->sc_ev_o2bspc);
3473 evcnt_detach(&sc->sc_ev_b2ospc);
3474 evcnt_detach(&sc->sc_ev_o2bgptc);
3475 }
3476 #endif /* WM_EVENT_COUNTERS */
3477
3478 rnd_detach_source(&sc->rnd_source);
3479
3480 /* Tell the firmware about the release */
3481 WM_CORE_LOCK(sc);
3482 wm_release_manageability(sc);
3483 wm_release_hw_control(sc);
3484 wm_enable_wakeup(sc);
3485 WM_CORE_UNLOCK(sc);
3486
3487 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3488
3489 ether_ifdetach(ifp);
3490 if_detach(ifp);
3491 if_percpuq_destroy(sc->sc_ipq);
3492
3493 /* Delete all remaining media. */
3494 ifmedia_fini(&sc->sc_mii.mii_media);
3495
3496 /* Unload RX dmamaps and free mbufs */
3497 for (i = 0; i < sc->sc_nqueues; i++) {
3498 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3499 mutex_enter(rxq->rxq_lock);
3500 wm_rxdrain(rxq);
3501 mutex_exit(rxq->rxq_lock);
3502 }
3503 /* Must unlock here */
3504
3505 /* Disestablish the interrupt handlers */
3506 for (i = 0; i < sc->sc_nintrs; i++) {
3507 if (sc->sc_ihs[i] != NULL) {
3508 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3509 sc->sc_ihs[i] = NULL;
3510 }
3511 }
3512 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3513
3514 /* wm_stop() ensured that the workqueue is stopped. */
3515 workqueue_destroy(sc->sc_queue_wq);
3516
3517 for (i = 0; i < sc->sc_nqueues; i++)
3518 softint_disestablish(sc->sc_queue[i].wmq_si);
3519
3520 wm_free_txrx_queues(sc);
3521
3522 /* Unmap the registers */
3523 if (sc->sc_ss) {
3524 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3525 sc->sc_ss = 0;
3526 }
3527 if (sc->sc_ios) {
3528 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3529 sc->sc_ios = 0;
3530 }
3531 if (sc->sc_flashs) {
3532 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3533 sc->sc_flashs = 0;
3534 }
3535
3536 if (sc->sc_core_lock)
3537 mutex_obj_free(sc->sc_core_lock);
3538 if (sc->sc_ich_phymtx)
3539 mutex_obj_free(sc->sc_ich_phymtx);
3540 if (sc->sc_ich_nvmmtx)
3541 mutex_obj_free(sc->sc_ich_nvmmtx);
3542
3543 return 0;
3544 }
3545
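/*
 * wm_suspend / wm_resume: [pmf handlers]
 *
 * On suspend, hand control back to the firmware and arm wakeup.
 * On resume, report the wakeup status and, if the interface is
 * down, reset and reinitialize; an up interface is reinitialized
 * by the pmf network class via if_init().
 */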
3546 static bool
3547 wm_suspend(device_t self, const pmf_qual_t *qual)
3548 {
3549 struct wm_softc *sc = device_private(self);
3550
3551 wm_release_manageability(sc);
3552 wm_release_hw_control(sc);
3553 wm_enable_wakeup(sc);
3554
3555 return true;
3556 }
3557
3558 static bool
3559 wm_resume(device_t self, const pmf_qual_t *qual)
3560 {
3561 struct wm_softc *sc = device_private(self);
3562 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3563 pcireg_t reg;
3564 char buf[256];
3565
3566 reg = CSR_READ(sc, WMREG_WUS);
3567 if (reg != 0) {
3568 snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3569 device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3570 CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3571 }
3572
3573 if (sc->sc_type >= WM_T_PCH2)
3574 wm_resume_workarounds_pchlan(sc);
3575 if ((ifp->if_flags & IFF_UP) == 0) {
3576 /* >= PCH_SPT hardware workaround before reset. */
3577 if (sc->sc_type >= WM_T_PCH_SPT)
3578 wm_flush_desc_rings(sc);
3579
3580 wm_reset(sc);
3581 /* Non-AMT based hardware can now take control from firmware */
3582 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3583 wm_get_hw_control(sc);
3584 wm_init_manageability(sc);
3585 } else {
3586 /*
3587 * We called pmf_class_network_register(), so if_init() is
3588 * automatically called when IFF_UP is set. wm_reset(),
3589 * wm_get_hw_control() and wm_init_manageability() are called
3590 * via wm_init().
3591 */
3592 }
3593
3594 return true;
3595 }
3596
3597 /*
3598 * wm_watchdog: [ifnet interface function]
3599 *
3600 * Watchdog timer handler.
3601 */
3602 static void
3603 wm_watchdog(struct ifnet *ifp)
3604 {
3605 int qid;
3606 struct wm_softc *sc = ifp->if_softc;
3607 uint16_t hang_queue = 0; /* The max number of queues (82576's 16) fits in 16 bits. */
3608
3609 for (qid = 0; qid < sc->sc_nqueues; qid++) {
3610 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3611
3612 wm_watchdog_txq(ifp, txq, &hang_queue);
3613 }
3614
3615 /* If any queue hung up, reset the interface. */
3616 if (hang_queue != 0) {
3617 (void)wm_init(ifp);
3618
3619 /*
3620 * There is still some upper layer processing which calls
3621 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
3622 */
3623 /* Try to get more packets going. */
3624 ifp->if_start(ifp);
3625 }
3626 }
3627
3628
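/*
 * wm_watchdog_txq:
 *
 * Per-queue watchdog check. If the queue has been sending longer
 * than wm_watchdog_timeout, inspect it via wm_watchdog_txq_locked().
 */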
3629 static void
3630 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3631 {
3632
3633 mutex_enter(txq->txq_lock);
3634 if (txq->txq_sending &&
3635 time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3636 wm_watchdog_txq_locked(ifp, txq, hang);
3637
3638 mutex_exit(txq->txq_lock);
3639 }
3640
3641 static void
3642 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3643 uint16_t *hang)
3644 {
3645 struct wm_softc *sc = ifp->if_softc;
3646 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3647
3648 KASSERT(mutex_owned(txq->txq_lock));
3649
3650 /*
3651 * Since we're using delayed interrupts, sweep up
3652 * before we report an error.
3653 */
3654 wm_txeof(txq, UINT_MAX);
3655
3656 if (txq->txq_sending)
3657 *hang |= __BIT(wmq->wmq_id);
3658
3659 if (txq->txq_free == WM_NTXDESC(txq)) {
3660 log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3661 device_xname(sc->sc_dev));
3662 } else {
3663 #ifdef WM_DEBUG
3664 int i, j;
3665 struct wm_txsoft *txs;
3666 #endif
3667 log(LOG_ERR,
3668 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3669 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3670 txq->txq_next);
3671 if_statinc(ifp, if_oerrors);
3672 #ifdef WM_DEBUG
3673 for (i = txq->txq_sdirty; i != txq->txq_snext;
3674 i = WM_NEXTTXS(txq, i)) {
3675 txs = &txq->txq_soft[i];
3676 printf("txs %d tx %d -> %d\n",
3677 i, txs->txs_firstdesc, txs->txs_lastdesc);
3678 for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3679 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3680 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3681 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3682 printf("\t %#08x%08x\n",
3683 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3684 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3685 } else {
3686 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3687 (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3688 txq->txq_descs[j].wtx_addr.wa_low);
3689 printf("\t %#04x%02x%02x%08x\n",
3690 txq->txq_descs[j].wtx_fields.wtxu_vlan,
3691 txq->txq_descs[j].wtx_fields.wtxu_options,
3692 txq->txq_descs[j].wtx_fields.wtxu_status,
3693 txq->txq_descs[j].wtx_cmdlen);
3694 }
3695 if (j == txs->txs_lastdesc)
3696 break;
3697 }
3698 }
3699 #endif
3700 }
3701 }
3702
3703 /*
3704 * wm_tick:
3705 *
3706 * One second timer, used to check link status, sweep up
3707 * completed transmit jobs, etc.
3708 */
3709 static void
3710 wm_tick(void *arg)
3711 {
3712 struct wm_softc *sc = arg;
3713 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3714 uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
3715 cexterr;
3716 #ifndef WM_MPSAFE
3717 int s = splnet();
3718 #endif
3719
3720 WM_CORE_LOCK(sc);
3721
3722 if (sc->sc_core_stopping) {
3723 WM_CORE_UNLOCK(sc);
3724 #ifndef WM_MPSAFE
3725 splx(s);
3726 #endif
3727 return;
3728 }
3729
3730 crcerrs = CSR_READ(sc, WMREG_CRCERRS);
3731 symerrc = CSR_READ(sc, WMREG_SYMERRC);
3732 mpc = CSR_READ(sc, WMREG_MPC);
3733 colc = CSR_READ(sc, WMREG_COLC);
3734 sec = CSR_READ(sc, WMREG_SEC);
3735 rlec = CSR_READ(sc, WMREG_RLEC);
3736
3737 WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
3738 WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
3739 WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
3740 WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
3741 WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
3742 WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
3743
3744 if (sc->sc_type >= WM_T_82542_2_1) {
3745 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3746 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3747 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3748 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3749 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3750 }
3751 WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
3752 WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
3753 WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
3754 WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
3755 WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
3756 WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
3757 WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
3758 WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
3759 WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
3760
3761 WM_EVCNT_ADD(&sc->sc_ev_gorc,
3762 CSR_READ(sc, WMREG_GORCL) + CSR_READ(sc, WMREG_GORCH));
3763 WM_EVCNT_ADD(&sc->sc_ev_gotc,
3764 CSR_READ(sc, WMREG_GOTCL) + CSR_READ(sc, WMREG_GOTCH));
3765
3766 WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
3767 WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
3768 WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
3769 WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
3770 WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
3771
3772 WM_EVCNT_ADD(&sc->sc_ev_tor,
3773 CSR_READ(sc, WMREG_TORL) + CSR_READ(sc, WMREG_TORH));
3774 WM_EVCNT_ADD(&sc->sc_ev_tot,
3775 CSR_READ(sc, WMREG_TOTL) + CSR_READ(sc, WMREG_TOTH));
3776
3777 WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
3778 WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
3779 WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
3780 WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
3781 WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
3782 WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
3783 WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
3784 WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
3785 WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
3786 WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
3787 WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
3788 WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
3789 WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
3790 WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
3791 WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
3792 WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
3793 WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
3794 WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
3795 WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
3796 WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
3797 WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
3798 WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
3799 WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc, CSR_READ(sc, WMREG_ICTXQMTC));
3800 WM_EVCNT_ADD(&sc->sc_ev_icrxdmtc, CSR_READ(sc, WMREG_ICRXDMTC));
3801 WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
3802
3803 if (sc->sc_type >= WM_T_82543) {
3804 algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
3805 rxerrc = CSR_READ(sc, WMREG_RXERRC);
3806 cexterr = CSR_READ(sc, WMREG_CEXTERR);
3807 WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
3808 WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
3809 WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
3810
3811 WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
3812 WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
3813 WM_EVCNT_ADD(&sc->sc_ev_tsctfc, CSR_READ(sc, WMREG_TSCTFC));
3814 } else
3815 algnerrc = rxerrc = cexterr = 0;
3816
3817 if (sc->sc_type >= WM_T_82540) {
3818 WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
3819 WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
3820 WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
3821 }
3822 if (((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003))
3823 && ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0)) {
3824 WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, CSR_READ(sc, WMREG_B2OGPRC));
3825 WM_EVCNT_ADD(&sc->sc_ev_o2bspc, CSR_READ(sc, WMREG_O2BSPC));
3826 WM_EVCNT_ADD(&sc->sc_ev_b2ospc, CSR_READ(sc, WMREG_B2OSPC));
3827 WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, CSR_READ(sc, WMREG_O2BGPTC));
3828 }
3829 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3830 if_statadd_ref(nsr, if_collisions, colc);
3831 if_statadd_ref(nsr, if_ierrors,
3832 crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
3833 /*
3834 * WMREG_RNBC is incremented when there are no available buffers in host
3835 * memory. It does not mean the number of dropped packets, because an
3836 * Ethernet controller can still receive packets in that case if there
3837 * is space in the PHY's FIFO.
3838 *
3839 * If you want to know the number of WMREG_RNBC events, you should use
3840 * a dedicated EVCNT instead of if_iqdrops.
3841 */
3842 if_statadd_ref(nsr, if_iqdrops, mpc);
3843 IF_STAT_PUTREF(ifp);
3844
3845 if (sc->sc_flags & WM_F_HAS_MII)
3846 mii_tick(&sc->sc_mii);
3847 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3848 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3849 wm_serdes_tick(sc);
3850 else
3851 wm_tbi_tick(sc);
3852
3853 WM_CORE_UNLOCK(sc);
3854 #ifndef WM_MPSAFE
3855 splx(s);
3856 #endif
3857
3858 wm_watchdog(ifp);
3859
3860 callout_schedule(&sc->sc_tick_ch, hz);
3861 }
3862
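/*
 * wm_ifflags_cb:
 *
 * Callback for changes to if_flags and ec_capenable made while the
 * interface is running. Applies what it can directly and returns
 * ENETRESET when a full reinitialization is required.
 */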
3863 static int
3864 wm_ifflags_cb(struct ethercom *ec)
3865 {
3866 struct ifnet *ifp = &ec->ec_if;
3867 struct wm_softc *sc = ifp->if_softc;
3868 u_short iffchange;
3869 int ecchange;
3870 bool needreset = false;
3871 int rc = 0;
3872
3873 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3874 device_xname(sc->sc_dev), __func__));
3875
3876 KASSERT(IFNET_LOCKED(ifp));
3877 WM_CORE_LOCK(sc);
3878
3879 /*
3880 * Check for if_flags.
3881 * Main usage is to prevent linkdown when opening bpf.
3882 */
3883 iffchange = ifp->if_flags ^ sc->sc_if_flags;
3884 sc->sc_if_flags = ifp->if_flags;
3885 if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3886 needreset = true;
3887 goto ec;
3888 }
3889
3890 /* iff related updates */
3891 if ((iffchange & IFF_PROMISC) != 0)
3892 wm_set_filter(sc);
3893
3894 wm_set_vlan(sc);
3895
3896 ec:
3897 /* Check for ec_capenable. */
3898 ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
3899 sc->sc_ec_capenable = ec->ec_capenable;
3900 if ((ecchange & ~ETHERCAP_EEE) != 0) {
3901 needreset = true;
3902 goto out;
3903 }
3904
3905 /* ec related updates */
3906 wm_set_eee(sc);
3907
3908 out:
3909 if (needreset)
3910 rc = ENETRESET;
3911 WM_CORE_UNLOCK(sc);
3912
3913 return rc;
3914 }
3915
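/*
 * wm_phy_need_linkdown_discard:
 *
 * Return true if the PHY type requires discarding Tx packets
 * while the link is down.
 */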
3916 static bool
3917 wm_phy_need_linkdown_discard(struct wm_softc *sc)
3918 {
3919
3920 switch (sc->sc_phytype) {
3921 case WMPHY_82577: /* ihphy */
3922 case WMPHY_82578: /* atphy */
3923 case WMPHY_82579: /* ihphy */
3924 case WMPHY_I217: /* ihphy */
3925 case WMPHY_82580: /* ihphy */
3926 case WMPHY_I350: /* ihphy */
3927 return true;
3928 default:
3929 return false;
3930 }
3931 }
3932
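/*
 * wm_set_linkdown_discard:
 *
 * Mark all Tx queues to discard packets while the link is down.
 */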
3933 static void
3934 wm_set_linkdown_discard(struct wm_softc *sc)
3935 {
3936
3937 for (int i = 0; i < sc->sc_nqueues; i++) {
3938 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3939
3940 mutex_enter(txq->txq_lock);
3941 txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
3942 mutex_exit(txq->txq_lock);
3943 }
3944 }
3945
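/*
 * wm_clear_linkdown_discard:
 *
 * Clear the linkdown-discard flag on all Tx queues so normal
 * transmission resumes.
 */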
3946 static void
3947 wm_clear_linkdown_discard(struct wm_softc *sc)
3948 {
3949
3950 for (int i = 0; i < sc->sc_nqueues; i++) {
3951 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3952
3953 mutex_enter(txq->txq_lock);
3954 txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
3955 mutex_exit(txq->txq_lock);
3956 }
3957 }
3958
3959 /*
3960 * wm_ioctl: [ifnet interface function]
3961 *
3962 * Handle control requests from the operator.
3963 */
3964 static int
3965 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3966 {
3967 struct wm_softc *sc = ifp->if_softc;
3968 struct ifreq *ifr = (struct ifreq *)data;
3969 struct ifaddr *ifa = (struct ifaddr *)data;
3970 struct sockaddr_dl *sdl;
3971 int error;
3972
3973 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3974 device_xname(sc->sc_dev), __func__));
3975
3976 switch (cmd) {
3977 case SIOCADDMULTI:
3978 case SIOCDELMULTI:
3979 break;
3980 default:
3981 KASSERT(IFNET_LOCKED(ifp));
3982 }
3983
3984 #ifndef WM_MPSAFE
3985 const int s = splnet();
3986 #endif
3987 switch (cmd) {
3988 case SIOCSIFMEDIA:
3989 WM_CORE_LOCK(sc);
3990 /* Flow control requires full-duplex mode. */
3991 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3992 (ifr->ifr_media & IFM_FDX) == 0)
3993 ifr->ifr_media &= ~IFM_ETH_FMASK;
3994 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3995 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3996 /* We can do both TXPAUSE and RXPAUSE. */
3997 ifr->ifr_media |=
3998 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3999 }
4000 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4001 }
4002 WM_CORE_UNLOCK(sc);
4003 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
4004 if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
4005 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
4006 DPRINTF(sc, WM_DEBUG_LINK,
4007 ("%s: %s: Set linkdown discard flag\n",
4008 device_xname(sc->sc_dev), __func__));
4009 wm_set_linkdown_discard(sc);
4010 }
4011 }
4012 break;
4013 case SIOCINITIFADDR:
4014 WM_CORE_LOCK(sc);
4015 if (ifa->ifa_addr->sa_family == AF_LINK) {
4016 sdl = satosdl(ifp->if_dl->ifa_addr);
4017 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
4018 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
4019 /* Unicast address is the first multicast entry */
4020 wm_set_filter(sc);
4021 error = 0;
4022 WM_CORE_UNLOCK(sc);
4023 break;
4024 }
4025 WM_CORE_UNLOCK(sc);
4026 /*FALLTHROUGH*/
4027 default:
4028 if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
4029 if (((ifp->if_flags & IFF_UP) != 0) &&
4030 ((ifr->ifr_flags & IFF_UP) == 0)) {
4031 DPRINTF(sc, WM_DEBUG_LINK,
4032 ("%s: %s: Set linkdown discard flag\n",
4033 device_xname(sc->sc_dev), __func__));
4034 wm_set_linkdown_discard(sc);
4035 }
4036 }
4037 #ifdef WM_MPSAFE
4038 const int s = splnet();
4039 #endif
4040 /* It may call wm_start, so unlock here */
4041 error = ether_ioctl(ifp, cmd, data);
4042 #ifdef WM_MPSAFE
4043 splx(s);
4044 #endif
4045 if (error != ENETRESET)
4046 break;
4047
4048 error = 0;
4049
4050 if (cmd == SIOCSIFCAP)
4051 error = if_init(ifp);
4052 else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
4053 WM_CORE_LOCK(sc);
4054 if (sc->sc_if_flags & IFF_RUNNING) {
4055 /*
4056 * Multicast list has changed; set the hardware filter
4057 * accordingly.
4058 */
4059 wm_set_filter(sc);
4060 }
4061 WM_CORE_UNLOCK(sc);
4062 }
4063 break;
4064 }
4065
4066 #ifndef WM_MPSAFE
4067 splx(s);
4068 #endif
4069 return error;
4070 }
4071
4072 /* MAC address related */
4073
4074 /*
4075 * Get the offset of the MAC address and return it.
4076 * If an error occurs, use offset 0.
4077 */
4078 static uint16_t
4079 wm_check_alt_mac_addr(struct wm_softc *sc)
4080 {
4081 uint16_t myea[ETHER_ADDR_LEN / 2];
4082 uint16_t offset = NVM_OFF_MACADDR;
4083
4084 /* Try to read alternative MAC address pointer */
4085 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
4086 return 0;
4087
4088 /* Check whether the pointer is valid. */
4089 if ((offset == 0x0000) || (offset == 0xffff))
4090 return 0;
4091
4092 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
4093 /*
4094 * Check whether the alternative MAC address is valid or not.
4095 * Some cards have a non-0xffff pointer but don't actually use
4096 * an alternative MAC address.
4097 *
4098 * Check that the multicast (group) bit is clear.
4099 */
4100 if (wm_nvm_read(sc, offset, 1, myea) == 0)
4101 if ((myea[0] & 0x01) == 0)
4102 return offset; /* Found */
4103
4104 /* Not found */
4105 return 0;
4106 }
4107
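/*
 * wm_read_mac_addr:
 *
 * Read the station address from the NVM into enaddr[].
 * Returns 0 on success and -1 if the NVM read fails.
 */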
4108 static int
4109 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4110 {
4111 uint16_t myea[ETHER_ADDR_LEN / 2];
4112 uint16_t offset = NVM_OFF_MACADDR;
4113 int do_invert = 0;
4114
4115 switch (sc->sc_type) {
4116 case WM_T_82580:
4117 case WM_T_I350:
4118 case WM_T_I354:
4119 /* EEPROM Top Level Partitioning */
4120 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
4121 break;
4122 case WM_T_82571:
4123 case WM_T_82575:
4124 case WM_T_82576:
4125 case WM_T_80003:
4126 case WM_T_I210:
4127 case WM_T_I211:
4128 offset = wm_check_alt_mac_addr(sc);
4129 if (offset == 0)
4130 if ((sc->sc_funcid & 0x01) == 1)
4131 do_invert = 1;
4132 break;
4133 default:
4134 if ((sc->sc_funcid & 0x01) == 1)
4135 do_invert = 1;
4136 break;
4137 }
4138
4139 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
4140 goto bad;
4141
4142 enaddr[0] = myea[0] & 0xff;
4143 enaddr[1] = myea[0] >> 8;
4144 enaddr[2] = myea[1] & 0xff;
4145 enaddr[3] = myea[1] >> 8;
4146 enaddr[4] = myea[2] & 0xff;
4147 enaddr[5] = myea[2] >> 8;
4148
4149 /*
4150 * Toggle the LSB of the MAC address on the second port
4151 * of some dual port cards.
4152 */
4153 if (do_invert != 0)
4154 enaddr[5] ^= 1;
4155
4156 return 0;
4157
4158 bad:
4159 return -1;
4160 }
4161
4162 /*
4163 * wm_set_ral:
4164 *
4165 * Set an entry in the receive address list.
4166 */
4167 static void
4168 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4169 {
4170 uint32_t ral_lo, ral_hi, addrl, addrh;
4171 uint32_t wlock_mac;
4172 int rv;
4173
4174 if (enaddr != NULL) {
4175 ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
4176 ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
4177 ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
4178 ral_hi |= RAL_AV;
4179 } else {
4180 ral_lo = 0;
4181 ral_hi = 0;
4182 }
4183
4184 switch (sc->sc_type) {
4185 case WM_T_82542_2_0:
4186 case WM_T_82542_2_1:
4187 case WM_T_82543:
4188 CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
4189 CSR_WRITE_FLUSH(sc);
4190 CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
4191 CSR_WRITE_FLUSH(sc);
4192 break;
4193 case WM_T_PCH2:
4194 case WM_T_PCH_LPT:
4195 case WM_T_PCH_SPT:
4196 case WM_T_PCH_CNP:
4197 if (idx == 0) {
4198 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4199 CSR_WRITE_FLUSH(sc);
4200 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4201 CSR_WRITE_FLUSH(sc);
4202 return;
4203 }
4204 if (sc->sc_type != WM_T_PCH2) {
4205 wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
4206 FWSM_WLOCK_MAC);
4207 addrl = WMREG_SHRAL(idx - 1);
4208 addrh = WMREG_SHRAH(idx - 1);
4209 } else {
4210 wlock_mac = 0;
4211 addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
4212 addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
4213 }
4214
4215 if ((wlock_mac == 0) || (idx <= wlock_mac)) {
4216 rv = wm_get_swflag_ich8lan(sc);
4217 if (rv != 0)
4218 return;
4219 CSR_WRITE(sc, addrl, ral_lo);
4220 CSR_WRITE_FLUSH(sc);
4221 CSR_WRITE(sc, addrh, ral_hi);
4222 CSR_WRITE_FLUSH(sc);
4223 wm_put_swflag_ich8lan(sc);
4224 }
4225
4226 break;
4227 default:
4228 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4229 CSR_WRITE_FLUSH(sc);
4230 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4231 CSR_WRITE_FLUSH(sc);
4232 break;
4233 }
4234 }
4235
4236 /*
4237 * wm_mchash:
4238 *
4239 * Compute the hash of the multicast address for the 4096-bit
4240 * multicast filter.
4241 */
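/*
 * For example, with sc_mchash_type == 0 on non-ICH/PCH parts, the
 * 12-bit hash is taken as
 * hash = ((enaddr[4] >> 4) | ((uint16_t)enaddr[5] << 4)) & 0xfff;
 */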
4242 static uint32_t
4243 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4244 {
4245 static const int lo_shift[4] = { 4, 3, 2, 0 };
4246 static const int hi_shift[4] = { 4, 5, 6, 8 };
4247 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4248 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4249 uint32_t hash;
4250
4251 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4252 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4253 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4254 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) {
4255 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4256 (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4257 return (hash & 0x3ff);
4258 }
4259 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4260 (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4261
4262 return (hash & 0xfff);
4263 }
4264
4265 /*
4266 * wm_rar_count:
4267 * Return the number of entries in the receive address list.
4268 */
4269 static int
4270 wm_rar_count(struct wm_softc *sc)
4271 {
4272 int size;
4273
4274 switch (sc->sc_type) {
4275 case WM_T_ICH8:
4276 size = WM_RAL_TABSIZE_ICH8 - 1;
4277 break;
4278 case WM_T_ICH9:
4279 case WM_T_ICH10:
4280 case WM_T_PCH:
4281 size = WM_RAL_TABSIZE_ICH8;
4282 break;
4283 case WM_T_PCH2:
4284 size = WM_RAL_TABSIZE_PCH2;
4285 break;
4286 case WM_T_PCH_LPT:
4287 case WM_T_PCH_SPT:
4288 case WM_T_PCH_CNP:
4289 size = WM_RAL_TABSIZE_PCH_LPT;
4290 break;
4291 case WM_T_82575:
4292 case WM_T_I210:
4293 case WM_T_I211:
4294 size = WM_RAL_TABSIZE_82575;
4295 break;
4296 case WM_T_82576:
4297 case WM_T_82580:
4298 size = WM_RAL_TABSIZE_82576;
4299 break;
4300 case WM_T_I350:
4301 case WM_T_I354:
4302 size = WM_RAL_TABSIZE_I350;
4303 break;
4304 default:
4305 size = WM_RAL_TABSIZE;
4306 }
4307
4308 return size;
4309 }
4310
4311 /*
4312 * wm_set_filter:
4313 *
4314 * Set up the receive filter.
4315 */
4316 static void
4317 wm_set_filter(struct wm_softc *sc)
4318 {
4319 struct ethercom *ec = &sc->sc_ethercom;
4320 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4321 struct ether_multi *enm;
4322 struct ether_multistep step;
4323 bus_addr_t mta_reg;
4324 uint32_t hash, reg, bit;
4325 int i, size, ralmax, rv;
4326
4327 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4328 device_xname(sc->sc_dev), __func__));
4329
4330 if (sc->sc_type >= WM_T_82544)
4331 mta_reg = WMREG_CORDOVA_MTA;
4332 else
4333 mta_reg = WMREG_MTA;
4334
4335 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4336
4337 if (ifp->if_flags & IFF_BROADCAST)
4338 sc->sc_rctl |= RCTL_BAM;
4339 if (ifp->if_flags & IFF_PROMISC) {
4340 sc->sc_rctl |= RCTL_UPE;
4341 ETHER_LOCK(ec);
4342 ec->ec_flags |= ETHER_F_ALLMULTI;
4343 ETHER_UNLOCK(ec);
4344 goto allmulti;
4345 }
4346
4347 /*
4348 * Set the station address in the first RAL slot, and
4349 * clear the remaining slots.
4350 */
4351 size = wm_rar_count(sc);
4352 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4353
4354 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
4355 || (sc->sc_type == WM_T_PCH_CNP)) {
4356 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
4357 switch (i) {
4358 case 0:
4359 /* We can use all entries */
4360 ralmax = size;
4361 break;
4362 case 1:
4363 /* Only RAR[0] */
4364 ralmax = 1;
4365 break;
4366 default:
4367 /* Available SHRA + RAR[0] */
4368 ralmax = i + 1;
4369 }
4370 } else
4371 ralmax = size;
4372 for (i = 1; i < size; i++) {
4373 if (i < ralmax)
4374 wm_set_ral(sc, NULL, i);
4375 }
4376
4377 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4378 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4379 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4380 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
4381 size = WM_ICH8_MC_TABSIZE;
4382 else
4383 size = WM_MC_TABSIZE;
4384 /* Clear out the multicast table. */
4385 for (i = 0; i < size; i++) {
4386 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4387 CSR_WRITE_FLUSH(sc);
4388 }
4389
4390 ETHER_LOCK(ec);
4391 ETHER_FIRST_MULTI(step, ec, enm);
4392 while (enm != NULL) {
4393 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4394 ec->ec_flags |= ETHER_F_ALLMULTI;
4395 ETHER_UNLOCK(ec);
4396 /*
4397 * We must listen to a range of multicast addresses.
4398 * For now, just accept all multicasts, rather than
4399 * trying to set only those filter bits needed to match
4400 * the range. (At this time, the only use of address
4401 * ranges is for IP multicast routing, for which the
4402 * range is big enough to require all bits set.)
4403 */
4404 goto allmulti;
4405 }
4406
4407 hash = wm_mchash(sc, enm->enm_addrlo);
4408
4409 reg = (hash >> 5);
4410 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4411 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4412 || (sc->sc_type == WM_T_PCH2)
4413 || (sc->sc_type == WM_T_PCH_LPT)
4414 || (sc->sc_type == WM_T_PCH_SPT)
4415 || (sc->sc_type == WM_T_PCH_CNP))
4416 reg &= 0x1f;
4417 else
4418 reg &= 0x7f;
4419 bit = hash & 0x1f;
4420
4421 hash = CSR_READ(sc, mta_reg + (reg << 2));
4422 hash |= 1U << bit;
4423
4424 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
4425 /*
4426 * 82544 Errata 9: Certain registers cannot be written
4427 * with particular alignments in PCI-X bus operation
4428 * (FCAH, MTA and VFTA).
4429 */
4430 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4431 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4432 CSR_WRITE_FLUSH(sc);
4433 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4434 CSR_WRITE_FLUSH(sc);
4435 } else {
4436 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4437 CSR_WRITE_FLUSH(sc);
4438 }
4439
4440 ETHER_NEXT_MULTI(step, enm);
4441 }
4442 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4443 ETHER_UNLOCK(ec);
4444
4445 goto setit;
4446
4447 allmulti:
4448 sc->sc_rctl |= RCTL_MPE;
4449
4450 setit:
4451 if (sc->sc_type >= WM_T_PCH2) {
4452 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4453 && (ifp->if_mtu > ETHERMTU))
4454 rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
4455 else
4456 rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
4457 if (rv != 0)
4458 device_printf(sc->sc_dev,
4459 "Failed to do workaround for jumbo frame.\n");
4460 }
4461
4462 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4463 }
4464
4465 /* Reset and init related */
4466
4467 static void
4468 wm_set_vlan(struct wm_softc *sc)
4469 {
4470
4471 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4472 device_xname(sc->sc_dev), __func__));
4473
4474 /* Deal with VLAN enables. */
4475 if (VLAN_ATTACHED(&sc->sc_ethercom))
4476 sc->sc_ctrl |= CTRL_VME;
4477 else
4478 sc->sc_ctrl &= ~CTRL_VME;
4479
4480 /* Write the control register. */
4481 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4482 }
4483
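/*
 * wm_set_pcie_completion_timeout:
 *
 * If the PCIe completion timeout is still at the default of 0,
 * set it to 10ms in GCR, or to 16ms via the PCIe DCSR2 register
 * on capability version 2 devices. Completion timeout resend is
 * disabled in all cases.
 */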
4484 static void
4485 wm_set_pcie_completion_timeout(struct wm_softc *sc)
4486 {
4487 uint32_t gcr;
4488 pcireg_t ctrl2;
4489
4490 gcr = CSR_READ(sc, WMREG_GCR);
4491
4492 /* Only take action if the timeout value is still at the default of 0 */
4493 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
4494 goto out;
4495
4496 if ((gcr & GCR_CAP_VER2) == 0) {
4497 gcr |= GCR_CMPL_TMOUT_10MS;
4498 goto out;
4499 }
4500
4501 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4502 sc->sc_pcixe_capoff + PCIE_DCSR2);
4503 ctrl2 |= WM_PCIE_DCSR2_16MS;
4504 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4505 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
4506
4507 out:
4508 /* Disable completion timeout resend */
4509 gcr &= ~GCR_CMPL_TMOUT_RESEND;
4510
4511 CSR_WRITE(sc, WMREG_GCR, gcr);
4512 }
4513
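/*
 * wm_get_auto_rd_done:
 *
 * Wait (up to 10ms) for the EEPROM auto read to complete on the
 * chip types that report it in EECD.
 */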
4514 void
4515 wm_get_auto_rd_done(struct wm_softc *sc)
4516 {
4517 int i;
4518
4519 /* Wait for eeprom to reload */
4520 switch (sc->sc_type) {
4521 case WM_T_82571:
4522 case WM_T_82572:
4523 case WM_T_82573:
4524 case WM_T_82574:
4525 case WM_T_82583:
4526 case WM_T_82575:
4527 case WM_T_82576:
4528 case WM_T_82580:
4529 case WM_T_I350:
4530 case WM_T_I354:
4531 case WM_T_I210:
4532 case WM_T_I211:
4533 case WM_T_80003:
4534 case WM_T_ICH8:
4535 case WM_T_ICH9:
4536 for (i = 0; i < 10; i++) {
4537 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4538 break;
4539 delay(1000);
4540 }
4541 if (i == 10) {
4542 log(LOG_ERR, "%s: auto read from eeprom failed to "
4543 "complete\n", device_xname(sc->sc_dev));
4544 }
4545 break;
4546 default:
4547 break;
4548 }
4549 }
4550
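/*
 * wm_lan_init_done:
 *
 * Wait for the STATUS_LAN_INIT_DONE bit to be set after an NVM
 * reload, then clear it. Only for ICH10 and the PCH variants.
 */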
4551 void
4552 wm_lan_init_done(struct wm_softc *sc)
4553 {
4554 uint32_t reg = 0;
4555 int i;
4556
4557 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4558 device_xname(sc->sc_dev), __func__));
4559
4560 /* Wait for eeprom to reload */
4561 switch (sc->sc_type) {
4562 case WM_T_ICH10:
4563 case WM_T_PCH:
4564 case WM_T_PCH2:
4565 case WM_T_PCH_LPT:
4566 case WM_T_PCH_SPT:
4567 case WM_T_PCH_CNP:
4568 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4569 reg = CSR_READ(sc, WMREG_STATUS);
4570 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4571 break;
4572 delay(100);
4573 }
4574 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4575 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4576 "complete\n", device_xname(sc->sc_dev), __func__);
4577 }
4578 break;
4579 default:
4580 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4581 __func__);
4582 break;
4583 }
4584
4585 reg &= ~STATUS_LAN_INIT_DONE;
4586 CSR_WRITE(sc, WMREG_STATUS, reg);
4587 }
4588
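/*
 * wm_get_cfg_done:
 *
 * Wait for the post-reset hardware configuration cycle to
 * complete. The mechanism is chip dependent: a fixed delay, the
 * EEMNGCTL CFGDONE bit, or the ICH/PCH auto-read/LAN-init
 * handshake.
 */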
4589 void
4590 wm_get_cfg_done(struct wm_softc *sc)
4591 {
4592 int mask;
4593 uint32_t reg;
4594 int i;
4595
4596 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4597 device_xname(sc->sc_dev), __func__));
4598
4599 /* Wait for eeprom to reload */
4600 switch (sc->sc_type) {
4601 case WM_T_82542_2_0:
4602 case WM_T_82542_2_1:
4603 /* null */
4604 break;
4605 case WM_T_82543:
4606 case WM_T_82544:
4607 case WM_T_82540:
4608 case WM_T_82545:
4609 case WM_T_82545_3:
4610 case WM_T_82546:
4611 case WM_T_82546_3:
4612 case WM_T_82541:
4613 case WM_T_82541_2:
4614 case WM_T_82547:
4615 case WM_T_82547_2:
4616 case WM_T_82573:
4617 case WM_T_82574:
4618 case WM_T_82583:
4619 /* generic */
4620 delay(10*1000);
4621 break;
4622 case WM_T_80003:
4623 case WM_T_82571:
4624 case WM_T_82572:
4625 case WM_T_82575:
4626 case WM_T_82576:
4627 case WM_T_82580:
4628 case WM_T_I350:
4629 case WM_T_I354:
4630 case WM_T_I210:
4631 case WM_T_I211:
4632 if (sc->sc_type == WM_T_82571) {
4633 /* Only 82571 shares port 0 */
4634 mask = EEMNGCTL_CFGDONE_0;
4635 } else
4636 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4637 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4638 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4639 break;
4640 delay(1000);
4641 }
4642 if (i >= WM_PHY_CFG_TIMEOUT)
4643 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
4644 device_xname(sc->sc_dev), __func__));
4645 break;
4646 case WM_T_ICH8:
4647 case WM_T_ICH9:
4648 case WM_T_ICH10:
4649 case WM_T_PCH:
4650 case WM_T_PCH2:
4651 case WM_T_PCH_LPT:
4652 case WM_T_PCH_SPT:
4653 case WM_T_PCH_CNP:
4654 delay(10*1000);
4655 if (sc->sc_type >= WM_T_ICH10)
4656 wm_lan_init_done(sc);
4657 else
4658 wm_get_auto_rd_done(sc);
4659
4660 /* Clear PHY Reset Asserted bit */
4661 reg = CSR_READ(sc, WMREG_STATUS);
4662 if ((reg & STATUS_PHYRA) != 0)
4663 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4664 break;
4665 default:
4666 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4667 __func__);
4668 break;
4669 }
4670 }
4671
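/*
 * wm_phy_post_reset:
 *
 * Perform the post-PHY-reset workarounds and LCD configuration.
 * Only for ICH8 and newer; fails if the PHY reset is blocked.
 */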
4672 int
4673 wm_phy_post_reset(struct wm_softc *sc)
4674 {
4675 device_t dev = sc->sc_dev;
4676 uint16_t reg;
4677 int rv = 0;
4678
4679 /* This function is only for ICH8 and newer. */
4680 if (sc->sc_type < WM_T_ICH8)
4681 return 0;
4682
4683 if (wm_phy_resetisblocked(sc)) {
4684 /* XXX */
4685 device_printf(dev, "PHY is blocked\n");
4686 return -1;
4687 }
4688
4689 /* Allow time for h/w to get to quiescent state after reset */
4690 delay(10*1000);
4691
4692 /* Perform any necessary post-reset workarounds */
4693 if (sc->sc_type == WM_T_PCH)
4694 rv = wm_hv_phy_workarounds_ich8lan(sc);
4695 else if (sc->sc_type == WM_T_PCH2)
4696 rv = wm_lv_phy_workarounds_ich8lan(sc);
4697 if (rv != 0)
4698 return rv;
4699
4700 /* Clear the host wakeup bit after LCD reset */
4701 if (sc->sc_type >= WM_T_PCH) {
4702 wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4703 reg &= ~BM_WUC_HOST_WU_BIT;
4704 wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4705 }
4706
4707 /* Configure the LCD with the extended configuration region in NVM */
4708 if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4709 return rv;
4710
4711 /* Configure the LCD with the OEM bits in NVM */
4712 rv = wm_oem_bits_config_ich8lan(sc, true);
4713
4714 if (sc->sc_type == WM_T_PCH2) {
4715 /* Ungate automatic PHY configuration on non-managed 82579 */
4716 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4717 delay(10 * 1000);
4718 wm_gate_hw_phy_config_ich8lan(sc, false);
4719 }
4720 /* Set EEE LPI Update Timer to 200usec */
4721 rv = sc->phy.acquire(sc);
4722 if (rv)
4723 return rv;
4724 rv = wm_write_emi_reg_locked(dev,
4725 I82579_LPI_UPDATE_TIMER, 0x1387);
4726 sc->phy.release(sc);
4727 }
4728
4729 return rv;
4730 }
4731
4732 /* Only for PCH and newer */
4733 static int
4734 wm_write_smbus_addr(struct wm_softc *sc)
4735 {
4736 uint32_t strap, freq;
4737 uint16_t phy_data;
4738 int rv;
4739
4740 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4741 device_xname(sc->sc_dev), __func__));
4742 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4743
4744 strap = CSR_READ(sc, WMREG_STRAP);
4745 freq = __SHIFTOUT(strap, STRAP_FREQ);
4746
4747 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4748 if (rv != 0)
4749 return rv;
4750
4751 phy_data &= ~HV_SMB_ADDR_ADDR;
4752 phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4753 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4754
4755 if (sc->sc_phytype == WMPHY_I217) {
4756 /* Restore SMBus frequency */
4757 if (freq--) {
4758 phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4759 | HV_SMB_ADDR_FREQ_HIGH);
4760 phy_data |= __SHIFTIN((freq & 0x01) != 0,
4761 HV_SMB_ADDR_FREQ_LOW);
4762 phy_data |= __SHIFTIN((freq & 0x02) != 0,
4763 HV_SMB_ADDR_FREQ_HIGH);
4764 } else
4765 DPRINTF(sc, WM_DEBUG_INIT,
4766 ("%s: %s Unsupported SMB frequency in PHY\n",
4767 device_xname(sc->sc_dev), __func__));
4768 }
4769
4770 return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4771 phy_data);
4772 }
4773
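/*
 * wm_init_lcd_from_nvm:
 *
 * Configure the LCD (internal PHY) from the extended
 * configuration region in the NVM when software configuration
 * is selected.
 */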
4774 static int
4775 wm_init_lcd_from_nvm(struct wm_softc *sc)
4776 {
4777 uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4778 uint16_t phy_page = 0;
4779 int rv = 0;
4780
4781 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4782 device_xname(sc->sc_dev), __func__));
4783
4784 switch (sc->sc_type) {
4785 case WM_T_ICH8:
4786 /* Only the IGP3 PHY needs this (an unknown PHY type also bails out). */
4787 if (sc->sc_phytype != WMPHY_IGP_3)
4788 return 0;
4789
4790 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4791 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4792 sw_cfg_mask = FEXTNVM_SW_CONFIG;
4793 break;
4794 }
4795 /* FALLTHROUGH */
4796 case WM_T_PCH:
4797 case WM_T_PCH2:
4798 case WM_T_PCH_LPT:
4799 case WM_T_PCH_SPT:
4800 case WM_T_PCH_CNP:
4801 sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4802 break;
4803 default:
4804 return 0;
4805 }
4806
4807 if ((rv = sc->phy.acquire(sc)) != 0)
4808 return rv;
4809
4810 reg = CSR_READ(sc, WMREG_FEXTNVM);
4811 if ((reg & sw_cfg_mask) == 0)
4812 goto release;
4813
4814 /*
4815 * Make sure HW does not configure LCD from PHY extended configuration
4816 * before SW configuration
4817 */
4818 extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4819 if ((sc->sc_type < WM_T_PCH2)
4820 && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4821 goto release;
4822
4823 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4824 device_xname(sc->sc_dev), __func__));
4825 /* word_addr is in DWORD */
4826 word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4827
4828 reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4829 cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4830 if (cnf_size == 0)
4831 goto release;
4832
4833 if (((sc->sc_type == WM_T_PCH)
4834 && ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4835 || (sc->sc_type > WM_T_PCH)) {
4836 /*
4837 * HW configures the SMBus address and LEDs when the OEM and
4838 * LCD Write Enable bits are set in the NVM. When both NVM bits
4839 * are cleared, SW will configure them instead.
4840 */
4841 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
4842 device_xname(sc->sc_dev), __func__));
4843 if ((rv = wm_write_smbus_addr(sc)) != 0)
4844 goto release;
4845
4846 reg = CSR_READ(sc, WMREG_LEDCTL);
4847 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4848 (uint16_t)reg);
4849 if (rv != 0)
4850 goto release;
4851 }
4852
4853 /* Configure LCD from extended configuration region. */
4854 for (i = 0; i < cnf_size; i++) {
4855 uint16_t reg_data, reg_addr;
4856
4857 if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
4858 goto release;
4859
4860 if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
4861 goto release;
4862
4863 if (reg_addr == IGPHY_PAGE_SELECT)
4864 phy_page = reg_data;
4865
4866 reg_addr &= IGPHY_MAXREGADDR;
4867 reg_addr |= phy_page;
4868
4869 KASSERT(sc->phy.writereg_locked != NULL);
4870 rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4871 reg_data);
4872 }
4873
4874 release:
4875 sc->phy.release(sc);
4876 return rv;
4877 }
4878
4879 /*
4880 * wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4881 * @sc: pointer to the HW structure
4882 * @d0_state: boolean if entering d0 or d3 device state
4883 *
4884 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
4885 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
4886 * in NVM determine whether HW should configure LPLU and Gbe Disable.
4887 */
4888 int
4889 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
4890 {
4891 uint32_t mac_reg;
4892 uint16_t oem_reg;
4893 int rv;
4894
4895 if (sc->sc_type < WM_T_PCH)
4896 return 0;
4897
4898 rv = sc->phy.acquire(sc);
4899 if (rv != 0)
4900 return rv;
4901
4902 if (sc->sc_type == WM_T_PCH) {
4903 mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4904 if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
4905 goto release;
4906 }
4907
4908 mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
4909 if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
4910 goto release;
4911
4912 mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
4913
4914 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
4915 if (rv != 0)
4916 goto release;
4917 oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
4918
4919 if (d0_state) {
4920 if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
4921 oem_reg |= HV_OEM_BITS_A1KDIS;
4922 if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
4923 oem_reg |= HV_OEM_BITS_LPLU;
4924 } else {
4925 if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
4926 != 0)
4927 oem_reg |= HV_OEM_BITS_A1KDIS;
4928 if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
4929 != 0)
4930 oem_reg |= HV_OEM_BITS_LPLU;
4931 }
4932
4933 /* Set Restart auto-neg to activate the bits */
4934 if ((d0_state || (sc->sc_type != WM_T_PCH))
4935 && (wm_phy_resetisblocked(sc) == false))
4936 oem_reg |= HV_OEM_BITS_ANEGNOW;
4937
4938 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
4939
4940 release:
4941 sc->phy.release(sc);
4942
4943 return rv;
4944 }
4945
4946 /* Init hardware bits */
4947 void
4948 wm_initialize_hardware_bits(struct wm_softc *sc)
4949 {
4950 uint32_t tarc0, tarc1, reg;
4951
4952 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4953 device_xname(sc->sc_dev), __func__));
4954
4955 /* For 82571 variants, 80003 and ICHs */
4956 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4957 || (sc->sc_type >= WM_T_80003)) {
4958
4959 /* Transmit Descriptor Control 0 */
4960 reg = CSR_READ(sc, WMREG_TXDCTL(0));
4961 reg |= TXDCTL_COUNT_DESC;
4962 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
4963
4964 /* Transmit Descriptor Control 1 */
4965 reg = CSR_READ(sc, WMREG_TXDCTL(1));
4966 reg |= TXDCTL_COUNT_DESC;
4967 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
4968
4969 /* TARC0 */
4970 tarc0 = CSR_READ(sc, WMREG_TARC0);
4971 switch (sc->sc_type) {
4972 case WM_T_82571:
4973 case WM_T_82572:
4974 case WM_T_82573:
4975 case WM_T_82574:
4976 case WM_T_82583:
4977 case WM_T_80003:
4978 /* Clear bits 30..27 */
4979 tarc0 &= ~__BITS(30, 27);
4980 break;
4981 default:
4982 break;
4983 }
4984
4985 switch (sc->sc_type) {
4986 case WM_T_82571:
4987 case WM_T_82572:
4988 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
4989
4990 tarc1 = CSR_READ(sc, WMREG_TARC1);
4991 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
4992 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
4993 /* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bit 22 */
4995
4996 /* TARC1 bit 28 */
4997 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4998 tarc1 &= ~__BIT(28);
4999 else
5000 tarc1 |= __BIT(28);
5001 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5002
5003 /*
5004 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
5006 */
5007 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5008 reg &= ~CTRL_EXT_DMA_DYN_CLK;
5009 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5010 break;
5011 case WM_T_82573:
5012 case WM_T_82574:
5013 case WM_T_82583:
5014 if ((sc->sc_type == WM_T_82574)
5015 || (sc->sc_type == WM_T_82583))
5016 tarc0 |= __BIT(26); /* TARC0 bit 26 */
5017
5018 /* Extended Device Control */
5019 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5020 reg &= ~__BIT(23); /* Clear bit 23 */
5021 reg |= __BIT(22); /* Set bit 22 */
5022 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5023
5024 /* Device Control */
5025 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
5026 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5027
5028 /* PCIe Control Register */
5029 /*
5030 * 82573 Errata (unknown).
5031 *
5032 * 82574 Errata 25 and 82583 Errata 12
5033 * "Dropped Rx Packets":
			 * NVM Image Version 2.1.4 and newer does not have
			 * this bug.
5035 */
5036 reg = CSR_READ(sc, WMREG_GCR);
5037 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
5038 CSR_WRITE(sc, WMREG_GCR, reg);
5039
5040 if ((sc->sc_type == WM_T_82574)
5041 || (sc->sc_type == WM_T_82583)) {
5042 /*
5043 * Document says this bit must be set for
5044 * proper operation.
5045 */
5046 reg = CSR_READ(sc, WMREG_GCR);
5047 reg |= __BIT(22);
5048 CSR_WRITE(sc, WMREG_GCR, reg);
5049
				/*
				 * Apply a workaround for a hardware erratum
				 * documented in the errata sheets: it fixes
				 * an issue where error-prone or unreliable
				 * PCIe completions occur, particularly with
				 * ASPM enabled. Without the fix, the issue
				 * can cause Tx timeouts.
				 */
5058 reg = CSR_READ(sc, WMREG_GCR2);
5059 reg |= __BIT(0);
5060 CSR_WRITE(sc, WMREG_GCR2, reg);
5061 }
5062 break;
5063 case WM_T_80003:
5064 /* TARC0 */
5065 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
5066 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
			tarc0 &= ~__BIT(20); /* Clear bit 20 */
5068
5069 /* TARC1 bit 28 */
5070 tarc1 = CSR_READ(sc, WMREG_TARC1);
5071 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5072 tarc1 &= ~__BIT(28);
5073 else
5074 tarc1 |= __BIT(28);
5075 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5076 break;
5077 case WM_T_ICH8:
5078 case WM_T_ICH9:
5079 case WM_T_ICH10:
5080 case WM_T_PCH:
5081 case WM_T_PCH2:
5082 case WM_T_PCH_LPT:
5083 case WM_T_PCH_SPT:
5084 case WM_T_PCH_CNP:
5085 /* TARC0 */
5086 if (sc->sc_type == WM_T_ICH8) {
5087 /* Set TARC0 bits 29 and 28 */
5088 tarc0 |= __BITS(29, 28);
5089 } else if (sc->sc_type == WM_T_PCH_SPT) {
5090 tarc0 |= __BIT(29);
5091 /*
5092 * Drop bit 28. From Linux.
5093 * See I218/I219 spec update
5094 * "5. Buffer Overrun While the I219 is
5095 * Processing DMA Transactions"
5096 */
5097 tarc0 &= ~__BIT(28);
5098 }
5099 /* Set TARC0 bits 23,24,26,27 */
5100 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
5101
5102 /* CTRL_EXT */
5103 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5104 reg |= __BIT(22); /* Set bit 22 */
5105 /*
5106 * Enable PHY low-power state when MAC is at D3
5107 * w/o WoL
5108 */
5109 if (sc->sc_type >= WM_T_PCH)
5110 reg |= CTRL_EXT_PHYPDEN;
5111 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5112
5113 /* TARC1 */
5114 tarc1 = CSR_READ(sc, WMREG_TARC1);
5115 /* bit 28 */
5116 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5117 tarc1 &= ~__BIT(28);
5118 else
5119 tarc1 |= __BIT(28);
5120 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
5121 CSR_WRITE(sc, WMREG_TARC1, tarc1);
5122
5123 /* Device Status */
5124 if (sc->sc_type == WM_T_ICH8) {
5125 reg = CSR_READ(sc, WMREG_STATUS);
5126 reg &= ~__BIT(31);
5127 CSR_WRITE(sc, WMREG_STATUS, reg);
			}
5130
5131 /* IOSFPC */
5132 if (sc->sc_type == WM_T_PCH_SPT) {
5133 reg = CSR_READ(sc, WMREG_IOSFPC);
				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
5135 CSR_WRITE(sc, WMREG_IOSFPC, reg);
5136 }
5137 /*
5138 * Work-around descriptor data corruption issue during
5139 * NFS v2 UDP traffic, just disable the NFS filtering
5140 * capability.
5141 */
5142 reg = CSR_READ(sc, WMREG_RFCTL);
5143 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
5144 CSR_WRITE(sc, WMREG_RFCTL, reg);
5145 break;
5146 default:
5147 break;
5148 }
5149 CSR_WRITE(sc, WMREG_TARC0, tarc0);
5150
5151 switch (sc->sc_type) {
5152 /*
5153 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
5154 * Avoid RSS Hash Value bug.
5155 */
5156 case WM_T_82571:
5157 case WM_T_82572:
5158 case WM_T_82573:
5159 case WM_T_80003:
5160 case WM_T_ICH8:
5161 reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NEWIPV6EXDIS |
			    WMREG_RFCTL_IPV6EXDIS;
5163 CSR_WRITE(sc, WMREG_RFCTL, reg);
5164 break;
5165 case WM_T_82574:
			/* Use the extended Rx descriptor format. */
5167 reg = CSR_READ(sc, WMREG_RFCTL);
5168 reg |= WMREG_RFCTL_EXSTEN;
5169 CSR_WRITE(sc, WMREG_RFCTL, reg);
5170 break;
5171 default:
5172 break;
5173 }
5174 } else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
5175 /*
5176 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
5177 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
5178 * "Certain Malformed IPv6 Extension Headers are Not Processed
5179 * Correctly by the Device"
5180 *
5181 * I354(C2000) Errata AVR53:
5182 * "Malformed IPv6 Extension Headers May Result in LAN Device
5183 * Hang"
5184 */
5185 reg = CSR_READ(sc, WMREG_RFCTL);
5186 reg |= WMREG_RFCTL_IPV6EXDIS;
5187 CSR_WRITE(sc, WMREG_RFCTL, reg);
5188 }
5189 }
5190
5191 static uint32_t
5192 wm_rxpbs_adjust_82580(uint32_t val)
5193 {
5194 uint32_t rv = 0;
5195
5196 if (val < __arraycount(wm_82580_rxpbs_table))
5197 rv = wm_82580_rxpbs_table[val];
5198
5199 return rv;
5200 }
5201
5202 /*
5203 * wm_reset_phy:
5204 *
5205 * generic PHY reset function.
5206 * Same as e1000_phy_hw_reset_generic()
5207 */
5208 static int
5209 wm_reset_phy(struct wm_softc *sc)
5210 {
5211 uint32_t reg;
5212
5213 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5214 device_xname(sc->sc_dev), __func__));
5215 if (wm_phy_resetisblocked(sc))
5216 return -1;
5217
5218 sc->phy.acquire(sc);
5219
5220 reg = CSR_READ(sc, WMREG_CTRL);
5221 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
5222 CSR_WRITE_FLUSH(sc);
5223
5224 delay(sc->phy.reset_delay_us);
5225
5226 CSR_WRITE(sc, WMREG_CTRL, reg);
5227 CSR_WRITE_FLUSH(sc);
5228
5229 delay(150);
5230
5231 sc->phy.release(sc);
5232
5233 wm_get_cfg_done(sc);
5234 wm_phy_post_reset(sc);
5235
5236 return 0;
5237 }
5238
5239 /*
5240 * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
5241 *
5242 * In i219, the descriptor rings must be emptied before resetting the HW
5243 * or before changing the device state to D3 during runtime (runtime PM).
5244 *
5245 * Failure to do this will cause the HW to enter a unit hang state which can
5246 * only be released by PCI reset on the device.
5247 *
5248 * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
5249 */
5250 static void
5251 wm_flush_desc_rings(struct wm_softc *sc)
5252 {
5253 pcireg_t preg;
5254 uint32_t reg;
5255 struct wm_txqueue *txq;
5256 wiseman_txdesc_t *txd;
5257 int nexttx;
5258 uint32_t rctl;
5259
5260 /* First, disable MULR fix in FEXTNVM11 */
5261 reg = CSR_READ(sc, WMREG_FEXTNVM11);
5262 reg |= FEXTNVM11_DIS_MULRFIX;
5263 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5264
5265 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5266 reg = CSR_READ(sc, WMREG_TDLEN(0));
5267 if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
5268 return;
5269
5270 /*
5271 * Remove all descriptors from the tx_ring.
5272 *
5273 * We want to clear all pending descriptors from the TX ring. Zeroing
5274 * happens when the HW reads the regs. We assign the ring itself as
	 * the data of the next descriptor. We don't care about the data
	 * since we are about to reset the HW.
5277 */
5278 #ifdef WM_DEBUG
5279 device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
5280 #endif
5281 reg = CSR_READ(sc, WMREG_TCTL);
5282 CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
5283
5284 txq = &sc->sc_queue[0].wmq_txq;
5285 nexttx = txq->txq_next;
5286 txd = &txq->txq_descs[nexttx];
5287 wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
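	/*
	 * The 512 byte length below is a dummy value; as noted above, the
	 * contents do not matter because the HW is about to be reset.
	 */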
5288 txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
5289 txd->wtx_fields.wtxu_status = 0;
5290 txd->wtx_fields.wtxu_options = 0;
5291 txd->wtx_fields.wtxu_vlan = 0;
5292
5293 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5294 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5295
5296 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
5297 CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
5298 CSR_WRITE_FLUSH(sc);
5299 delay(250);
5300
5301 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5302 if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
5303 return;
5304
5305 /*
5306 * Mark all descriptors in the RX ring as consumed and disable the
5307 * rx ring.
5308 */
5309 #ifdef WM_DEBUG
5310 device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
5311 #endif
5312 rctl = CSR_READ(sc, WMREG_RCTL);
5313 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5314 CSR_WRITE_FLUSH(sc);
5315 delay(150);
5316
5317 reg = CSR_READ(sc, WMREG_RXDCTL(0));
5318 /* Zero the lower 14 bits (prefetch and host thresholds) */
5319 reg &= 0xffffc000;
5320 /*
5321 * Update thresholds: prefetch threshold to 31, host threshold
5322 * to 1 and make sure the granularity is "descriptors" and not
5323 * "cache lines"
5324 */
5325 reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
5326 CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
5327
5328 /* Momentarily enable the RX ring for the changes to take effect */
5329 CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
5330 CSR_WRITE_FLUSH(sc);
5331 delay(150);
5332 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5333 }
5334
5335 /*
5336 * wm_reset:
5337 *
5338 * Reset the i82542 chip.
5339 */
5340 static void
5341 wm_reset(struct wm_softc *sc)
5342 {
5343 int phy_reset = 0;
5344 int i, error = 0;
5345 uint32_t reg;
5346 uint16_t kmreg;
5347 int rv;
5348
5349 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5350 device_xname(sc->sc_dev), __func__));
5351 KASSERT(sc->sc_type != 0);
5352
5353 /*
5354 * Allocate on-chip memory according to the MTU size.
5355 * The Packet Buffer Allocation register must be written
5356 * before the chip is reset.
5357 */
5358 switch (sc->sc_type) {
5359 case WM_T_82547:
5360 case WM_T_82547_2:
5361 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5362 PBA_22K : PBA_30K;
5363 for (i = 0; i < sc->sc_nqueues; i++) {
5364 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5365 txq->txq_fifo_head = 0;
5366 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
5367 txq->txq_fifo_size =
5368 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
5369 txq->txq_fifo_stall = 0;
5370 }
5371 break;
5372 case WM_T_82571:
5373 case WM_T_82572:
	case WM_T_82575: /* XXX need special handling for jumbo frames */
5375 case WM_T_80003:
5376 sc->sc_pba = PBA_32K;
5377 break;
5378 case WM_T_82573:
5379 sc->sc_pba = PBA_12K;
5380 break;
5381 case WM_T_82574:
5382 case WM_T_82583:
5383 sc->sc_pba = PBA_20K;
5384 break;
5385 case WM_T_82576:
5386 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
5387 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
5388 break;
5389 case WM_T_82580:
5390 case WM_T_I350:
5391 case WM_T_I354:
5392 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
5393 break;
5394 case WM_T_I210:
5395 case WM_T_I211:
5396 sc->sc_pba = PBA_34K;
5397 break;
5398 case WM_T_ICH8:
5399 /* Workaround for a bit corruption issue in FIFO memory */
5400 sc->sc_pba = PBA_8K;
5401 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
5402 break;
5403 case WM_T_ICH9:
5404 case WM_T_ICH10:
5405 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
5406 PBA_14K : PBA_10K;
5407 break;
5408 case WM_T_PCH:
5409 case WM_T_PCH2: /* XXX 14K? */
5410 case WM_T_PCH_LPT:
5411 case WM_T_PCH_SPT:
5412 case WM_T_PCH_CNP:
5413 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
5414 PBA_12K : PBA_26K;
5415 break;
5416 default:
5417 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5418 PBA_40K : PBA_48K;
5419 break;
5420 }
5421 /*
5422 * Only old or non-multiqueue devices have the PBA register
5423 * XXX Need special handling for 82575.
5424 */
5425 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5426 || (sc->sc_type == WM_T_82575))
5427 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
5428
5429 /* Prevent the PCI-E bus from sticking */
5430 if (sc->sc_flags & WM_F_PCIE) {
5431 int timeout = 800;
5432
5433 sc->sc_ctrl |= CTRL_GIO_M_DIS;
5434 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5435
5436 while (timeout--) {
5437 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
5438 == 0)
5439 break;
5440 delay(100);
5441 }
5442 if (timeout == 0)
5443 device_printf(sc->sc_dev,
5444 "failed to disable bus mastering\n");
5445 }
5446
5447 /* Set the completion timeout for interface */
5448 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5449 || (sc->sc_type == WM_T_82580)
5450 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5451 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
5452 wm_set_pcie_completion_timeout(sc);
5453
5454 /* Clear interrupt */
5455 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5456 if (wm_is_using_msix(sc)) {
5457 if (sc->sc_type != WM_T_82574) {
5458 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5459 CSR_WRITE(sc, WMREG_EIAC, 0);
5460 } else
5461 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5462 }
5463
5464 /* Stop the transmit and receive processes. */
5465 CSR_WRITE(sc, WMREG_RCTL, 0);
5466 sc->sc_rctl &= ~RCTL_EN;
5467 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
5468 CSR_WRITE_FLUSH(sc);
5469
5470 /* XXX set_tbi_sbp_82543() */
5471
5472 delay(10*1000);
5473
5474 /* Must acquire the MDIO ownership before MAC reset */
5475 switch (sc->sc_type) {
5476 case WM_T_82573:
5477 case WM_T_82574:
5478 case WM_T_82583:
5479 error = wm_get_hw_semaphore_82573(sc);
5480 break;
5481 default:
5482 break;
5483 }
5484
5485 /*
5486 * 82541 Errata 29? & 82547 Errata 28?
5487 * See also the description about PHY_RST bit in CTRL register
5488 * in 8254x_GBe_SDM.pdf.
5489 */
5490 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
5491 CSR_WRITE(sc, WMREG_CTRL,
5492 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
5493 CSR_WRITE_FLUSH(sc);
5494 delay(5000);
5495 }
5496
5497 switch (sc->sc_type) {
5498 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
5499 case WM_T_82541:
5500 case WM_T_82541_2:
5501 case WM_T_82547:
5502 case WM_T_82547_2:
5503 /*
5504 * On some chipsets, a reset through a memory-mapped write
5505 * cycle can cause the chip to reset before completing the
5506 * write cycle. This causes major headache that can be avoided
5507 * by issuing the reset via indirect register writes through
5508 * I/O space.
5509 *
5510 * So, if we successfully mapped the I/O BAR at attach time,
5511 * use that. Otherwise, try our luck with a memory-mapped
5512 * reset.
5513 */
5514 if (sc->sc_flags & WM_F_IOH_VALID)
5515 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
5516 else
5517 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
5518 break;
5519 case WM_T_82545_3:
5520 case WM_T_82546_3:
5521 /* Use the shadow control register on these chips. */
5522 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
5523 break;
5524 case WM_T_80003:
5525 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5526 sc->phy.acquire(sc);
5527 CSR_WRITE(sc, WMREG_CTRL, reg);
5528 sc->phy.release(sc);
5529 break;
5530 case WM_T_ICH8:
5531 case WM_T_ICH9:
5532 case WM_T_ICH10:
5533 case WM_T_PCH:
5534 case WM_T_PCH2:
5535 case WM_T_PCH_LPT:
5536 case WM_T_PCH_SPT:
5537 case WM_T_PCH_CNP:
5538 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5539 if (wm_phy_resetisblocked(sc) == false) {
5540 /*
5541 * Gate automatic PHY configuration by hardware on
5542 * non-managed 82579
5543 */
5544 if ((sc->sc_type == WM_T_PCH2)
5545 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5546 == 0))
5547 wm_gate_hw_phy_config_ich8lan(sc, true);
5548
5549 reg |= CTRL_PHY_RESET;
5550 phy_reset = 1;
5551 } else
5552 device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5553 sc->phy.acquire(sc);
5554 CSR_WRITE(sc, WMREG_CTRL, reg);
5555 /* Don't insert a completion barrier when reset */
5556 delay(20*1000);
5557 mutex_exit(sc->sc_ich_phymtx);
5558 break;
5559 case WM_T_82580:
5560 case WM_T_I350:
5561 case WM_T_I354:
5562 case WM_T_I210:
5563 case WM_T_I211:
5564 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5565 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5566 CSR_WRITE_FLUSH(sc);
5567 delay(5000);
5568 break;
5569 case WM_T_82542_2_0:
5570 case WM_T_82542_2_1:
5571 case WM_T_82543:
5572 case WM_T_82540:
5573 case WM_T_82545:
5574 case WM_T_82546:
5575 case WM_T_82571:
5576 case WM_T_82572:
5577 case WM_T_82573:
5578 case WM_T_82574:
5579 case WM_T_82575:
5580 case WM_T_82576:
5581 case WM_T_82583:
5582 default:
5583 /* Everything else can safely use the documented method. */
5584 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5585 break;
5586 }
5587
5588 /* Must release the MDIO ownership after MAC reset */
5589 switch (sc->sc_type) {
5590 case WM_T_82573:
5591 case WM_T_82574:
5592 case WM_T_82583:
5593 if (error == 0)
5594 wm_put_hw_semaphore_82573(sc);
5595 break;
5596 default:
5597 break;
5598 }
5599
5600 /* Set Phy Config Counter to 50msec */
5601 if (sc->sc_type == WM_T_PCH2) {
5602 reg = CSR_READ(sc, WMREG_FEXTNVM3);
5603 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5604 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5605 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5606 }
5607
5608 if (phy_reset != 0)
5609 wm_get_cfg_done(sc);
5610
5611 /* Reload EEPROM */
5612 switch (sc->sc_type) {
5613 case WM_T_82542_2_0:
5614 case WM_T_82542_2_1:
5615 case WM_T_82543:
5616 case WM_T_82544:
5617 delay(10);
5618 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5619 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5620 CSR_WRITE_FLUSH(sc);
5621 delay(2000);
5622 break;
5623 case WM_T_82540:
5624 case WM_T_82545:
5625 case WM_T_82545_3:
5626 case WM_T_82546:
5627 case WM_T_82546_3:
5628 delay(5*1000);
5629 /* XXX Disable HW ARPs on ASF enabled adapters */
5630 break;
5631 case WM_T_82541:
5632 case WM_T_82541_2:
5633 case WM_T_82547:
5634 case WM_T_82547_2:
5635 delay(20000);
5636 /* XXX Disable HW ARPs on ASF enabled adapters */
5637 break;
5638 case WM_T_82571:
5639 case WM_T_82572:
5640 case WM_T_82573:
5641 case WM_T_82574:
5642 case WM_T_82583:
5643 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5644 delay(10);
5645 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5646 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5647 CSR_WRITE_FLUSH(sc);
5648 }
5649 /* check EECD_EE_AUTORD */
5650 wm_get_auto_rd_done(sc);
5651 /*
5652 * Phy configuration from NVM just starts after EECD_AUTO_RD
5653 * is set.
5654 */
5655 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5656 || (sc->sc_type == WM_T_82583))
5657 delay(25*1000);
5658 break;
5659 case WM_T_82575:
5660 case WM_T_82576:
5661 case WM_T_82580:
5662 case WM_T_I350:
5663 case WM_T_I354:
5664 case WM_T_I210:
5665 case WM_T_I211:
5666 case WM_T_80003:
5667 /* check EECD_EE_AUTORD */
5668 wm_get_auto_rd_done(sc);
5669 break;
5670 case WM_T_ICH8:
5671 case WM_T_ICH9:
5672 case WM_T_ICH10:
5673 case WM_T_PCH:
5674 case WM_T_PCH2:
5675 case WM_T_PCH_LPT:
5676 case WM_T_PCH_SPT:
5677 case WM_T_PCH_CNP:
5678 break;
5679 default:
5680 panic("%s: unknown type\n", __func__);
5681 }
5682
5683 /* Check whether EEPROM is present or not */
5684 switch (sc->sc_type) {
5685 case WM_T_82575:
5686 case WM_T_82576:
5687 case WM_T_82580:
5688 case WM_T_I350:
5689 case WM_T_I354:
5690 case WM_T_ICH8:
5691 case WM_T_ICH9:
5692 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5693 /* Not found */
5694 sc->sc_flags |= WM_F_EEPROM_INVALID;
5695 if (sc->sc_type == WM_T_82575)
5696 wm_reset_init_script_82575(sc);
5697 }
5698 break;
5699 default:
5700 break;
5701 }
5702
5703 if (phy_reset != 0)
5704 wm_phy_post_reset(sc);
5705
5706 if ((sc->sc_type == WM_T_82580)
5707 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5708 /* Clear global device reset status bit */
5709 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5710 }
5711
5712 /* Clear any pending interrupt events. */
5713 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5714 reg = CSR_READ(sc, WMREG_ICR);
5715 if (wm_is_using_msix(sc)) {
5716 if (sc->sc_type != WM_T_82574) {
5717 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5718 CSR_WRITE(sc, WMREG_EIAC, 0);
5719 } else
5720 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5721 }
5722
5723 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5724 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5725 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5726 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
5727 reg = CSR_READ(sc, WMREG_KABGTXD);
5728 reg |= KABGTXD_BGSQLBIAS;
5729 CSR_WRITE(sc, WMREG_KABGTXD, reg);
5730 }
5731
5732 /* Reload sc_ctrl */
5733 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5734
5735 wm_set_eee(sc);
5736
5737 /*
5738 * For PCH, this write will make sure that any noise will be detected
5739 * as a CRC error and be dropped rather than show up as a bad packet
5740 * to the DMA engine
5741 */
5742 if (sc->sc_type == WM_T_PCH)
5743 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5744
5745 if (sc->sc_type >= WM_T_82544)
5746 CSR_WRITE(sc, WMREG_WUC, 0);
5747
5748 if (sc->sc_type < WM_T_82575)
5749 wm_disable_aspm(sc); /* Workaround for some chips */
5750
5751 wm_reset_mdicnfg_82580(sc);
5752
5753 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5754 wm_pll_workaround_i210(sc);
5755
5756 if (sc->sc_type == WM_T_80003) {
5757 /* Default to TRUE to enable the MDIC W/A */
5758 sc->sc_flags |= WM_F_80003_MDIC_WA;
5759
5760 rv = wm_kmrn_readreg(sc,
5761 KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5762 if (rv == 0) {
5763 if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5764 == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5765 sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5766 else
5767 sc->sc_flags |= WM_F_80003_MDIC_WA;
5768 }
5769 }
5770 }
5771
5772 /*
5773 * wm_add_rxbuf:
5774 *
 * Add a receive buffer to the indicated descriptor.
5776 */
5777 static int
5778 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5779 {
5780 struct wm_softc *sc = rxq->rxq_sc;
5781 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5782 struct mbuf *m;
5783 int error;
5784
5785 KASSERT(mutex_owned(rxq->rxq_lock));
5786
5787 MGETHDR(m, M_DONTWAIT, MT_DATA);
5788 if (m == NULL)
5789 return ENOBUFS;
5790
5791 MCLGET(m, M_DONTWAIT);
5792 if ((m->m_flags & M_EXT) == 0) {
5793 m_freem(m);
5794 return ENOBUFS;
5795 }
5796
5797 if (rxs->rxs_mbuf != NULL)
5798 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5799
5800 rxs->rxs_mbuf = m;
5801
5802 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5803 /*
5804 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5805 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5806 */
5807 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5808 m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5809 if (error) {
5810 /* XXX XXX XXX */
5811 aprint_error_dev(sc->sc_dev,
5812 "unable to load rx DMA map %d, error = %d\n", idx, error);
5813 panic("wm_add_rxbuf");
5814 }
5815
5816 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5817 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5818
5819 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5820 if ((sc->sc_rctl & RCTL_EN) != 0)
5821 wm_init_rxdesc(rxq, idx);
5822 } else
5823 wm_init_rxdesc(rxq, idx);
5824
5825 return 0;
5826 }
5827
5828 /*
5829 * wm_rxdrain:
5830 *
5831 * Drain the receive queue.
5832 */
5833 static void
5834 wm_rxdrain(struct wm_rxqueue *rxq)
5835 {
5836 struct wm_softc *sc = rxq->rxq_sc;
5837 struct wm_rxsoft *rxs;
5838 int i;
5839
5840 KASSERT(mutex_owned(rxq->rxq_lock));
5841
5842 for (i = 0; i < WM_NRXDESC; i++) {
5843 rxs = &rxq->rxq_soft[i];
5844 if (rxs->rxs_mbuf != NULL) {
5845 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5846 m_freem(rxs->rxs_mbuf);
5847 rxs->rxs_mbuf = NULL;
5848 }
5849 }
5850 }
5851
5852 /*
5853 * Setup registers for RSS.
5854 *
 * XXX No VMDq support yet.
5856 */
5857 static void
5858 wm_init_rss(struct wm_softc *sc)
5859 {
5860 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5861 int i;
5862
5863 CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5864
5865 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5866 unsigned int qid, reta_ent;
5867
5868 qid = i % sc->sc_nqueues;
5869 switch (sc->sc_type) {
5870 case WM_T_82574:
5871 reta_ent = __SHIFTIN(qid,
5872 RETA_ENT_QINDEX_MASK_82574);
5873 break;
5874 case WM_T_82575:
5875 reta_ent = __SHIFTIN(qid,
5876 RETA_ENT_QINDEX1_MASK_82575);
5877 break;
5878 default:
5879 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5880 break;
5881 }
5882
5883 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5884 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5885 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5886 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5887 }
5888
5889 rss_getkey((uint8_t *)rss_key);
5890 for (i = 0; i < RSSRK_NUM_REGS; i++)
5891 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5892
5893 if (sc->sc_type == WM_T_82574)
5894 mrqc = MRQC_ENABLE_RSS_MQ_82574;
5895 else
5896 mrqc = MRQC_ENABLE_RSS_MQ;
5897
5898 /*
5899 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
5900 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5901 */
5902 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5903 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5904 #if 0
5905 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5906 mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5907 #endif
5908 mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5909
5910 CSR_WRITE(sc, WMREG_MRQC, mrqc);
5911 }
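
/*
 * Illustrative example of the setup above: with sc_nqueues == 4, the
 * RETA entries cycle through queue indices 0, 1, 2, 3, 0, 1, ... so
 * hashed flows are spread evenly across the four queues, and the RSS
 * key is taken from the kernel's rss_getkey().
 */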
5912
5913 /*
 * Adjust the TX and RX queue numbers which the system actually uses.
 *
 * The numbers are affected by the parameters below:
 * - The number of hardware queues
5918 * - The number of MSI-X vectors (= "nvectors" argument)
5919 * - ncpu
5920 */
5921 static void
5922 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5923 {
5924 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5925
5926 if (nvectors < 2) {
5927 sc->sc_nqueues = 1;
5928 return;
5929 }
5930
5931 switch (sc->sc_type) {
5932 case WM_T_82572:
5933 hw_ntxqueues = 2;
5934 hw_nrxqueues = 2;
5935 break;
5936 case WM_T_82574:
5937 hw_ntxqueues = 2;
5938 hw_nrxqueues = 2;
5939 break;
5940 case WM_T_82575:
5941 hw_ntxqueues = 4;
5942 hw_nrxqueues = 4;
5943 break;
5944 case WM_T_82576:
5945 hw_ntxqueues = 16;
5946 hw_nrxqueues = 16;
5947 break;
5948 case WM_T_82580:
5949 case WM_T_I350:
5950 case WM_T_I354:
5951 hw_ntxqueues = 8;
5952 hw_nrxqueues = 8;
5953 break;
5954 case WM_T_I210:
5955 hw_ntxqueues = 4;
5956 hw_nrxqueues = 4;
5957 break;
5958 case WM_T_I211:
5959 hw_ntxqueues = 2;
5960 hw_nrxqueues = 2;
5961 break;
5962 /*
	 * The Ethernet controllers below do not support MSI-X, so this
	 * driver does not let them use multiple queues.
5965 * - WM_T_80003
5966 * - WM_T_ICH8
5967 * - WM_T_ICH9
5968 * - WM_T_ICH10
5969 * - WM_T_PCH
5970 * - WM_T_PCH2
5971 * - WM_T_PCH_LPT
5972 */
5973 default:
5974 hw_ntxqueues = 1;
5975 hw_nrxqueues = 1;
5976 break;
5977 }
5978
5979 hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
5980
5981 /*
	 * Since more queues than MSI-X vectors cannot improve scaling, we
	 * limit the number of queues actually used.
5984 */
5985 if (nvectors < hw_nqueues + 1)
5986 sc->sc_nqueues = nvectors - 1;
5987 else
5988 sc->sc_nqueues = hw_nqueues;
5989
5990 /*
	 * Since more queues than CPUs cannot improve scaling, we limit
	 * the number of queues actually used.
5993 */
5994 if (ncpu < sc->sc_nqueues)
5995 sc->sc_nqueues = ncpu;
5996 }
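
/*
 * Worked example (illustrative) of the clamping above: an 82576 has 16
 * hardware queues, so with nvectors == 5 and ncpu == 8, hw_nqueues is
 * 16 and sc_nqueues is first clamped to nvectors - 1 == 4; ncpu does
 * not reduce it further. The result is four Tx/Rx queue pairs plus one
 * link vector.
 */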
5997
5998 static inline bool
5999 wm_is_using_msix(struct wm_softc *sc)
6000 {
6001
6002 return (sc->sc_nintrs > 1);
6003 }
6004
6005 static inline bool
6006 wm_is_using_multiqueue(struct wm_softc *sc)
6007 {
6008
6009 return (sc->sc_nqueues > 1);
6010 }
6011
6012 static int
6013 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
6014 {
6015 struct wm_queue *wmq = &sc->sc_queue[qidx];
6016
6017 wmq->wmq_id = qidx;
6018 wmq->wmq_intr_idx = intr_idx;
6019 wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
6020 wm_handle_queue, wmq);
6021 if (wmq->wmq_si != NULL)
6022 return 0;
6023
6024 aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
6025 wmq->wmq_id);
6026 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6027 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6028 return ENOMEM;
6029 }
6030
6031 /*
6032 * Both single interrupt MSI and INTx can use this function.
6033 */
6034 static int
6035 wm_setup_legacy(struct wm_softc *sc)
6036 {
6037 pci_chipset_tag_t pc = sc->sc_pc;
6038 const char *intrstr = NULL;
6039 char intrbuf[PCI_INTRSTR_LEN];
6040 int error;
6041
6042 error = wm_alloc_txrx_queues(sc);
6043 if (error) {
6044 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6045 error);
6046 return ENOMEM;
6047 }
6048 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
6049 sizeof(intrbuf));
6050 #ifdef WM_MPSAFE
6051 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
6052 #endif
6053 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
6054 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
6055 if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
6057 (pci_intr_type(pc, sc->sc_intrs[0])
6058 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6059 return ENOMEM;
6060 }
6061
6062 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
6063 sc->sc_nintrs = 1;
6064
6065 return wm_softint_establish_queue(sc, 0, 0);
6066 }
6067
6068 static int
6069 wm_setup_msix(struct wm_softc *sc)
6070 {
6071 void *vih;
6072 kcpuset_t *affinity;
6073 int qidx, error, intr_idx, txrx_established;
6074 pci_chipset_tag_t pc = sc->sc_pc;
6075 const char *intrstr = NULL;
6076 char intrbuf[PCI_INTRSTR_LEN];
6077 char intr_xname[INTRDEVNAMEBUF];
6078
6079 if (sc->sc_nqueues < ncpu) {
6080 /*
		 * To avoid contending with other devices' interrupts, the
		 * affinity of Tx/Rx interrupts starts from CPU#1.
6083 */
6084 sc->sc_affinity_offset = 1;
6085 } else {
6086 /*
		 * In this case, this device uses all CPUs, so we unify the
		 * affinity cpu_index with the MSI-X vector number for
		 * readability.
6089 */
6090 sc->sc_affinity_offset = 0;
6091 }
6092
6093 error = wm_alloc_txrx_queues(sc);
6094 if (error) {
6095 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6096 error);
6097 return ENOMEM;
6098 }
6099
6100 kcpuset_create(&affinity, false);
6101 intr_idx = 0;
6102
6103 /*
6104 * TX and RX
6105 */
6106 txrx_established = 0;
6107 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6108 struct wm_queue *wmq = &sc->sc_queue[qidx];
6109 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
6110
6111 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6112 sizeof(intrbuf));
6113 #ifdef WM_MPSAFE
6114 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
6115 PCI_INTR_MPSAFE, true);
6116 #endif
6117 memset(intr_xname, 0, sizeof(intr_xname));
6118 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
6119 device_xname(sc->sc_dev), qidx);
6120 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6121 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
6122 if (vih == NULL) {
6123 aprint_error_dev(sc->sc_dev,
			    "unable to establish MSI-X (for TX and RX)%s%s\n",
6125 intrstr ? " at " : "",
6126 intrstr ? intrstr : "");
6127
6128 goto fail;
6129 }
6130 kcpuset_zero(affinity);
6131 /* Round-robin affinity */
6132 kcpuset_set(affinity, affinity_to);
6133 error = interrupt_distribute(vih, affinity, NULL);
6134 if (error == 0) {
6135 aprint_normal_dev(sc->sc_dev,
6136 "for TX and RX interrupting at %s affinity to %u\n",
6137 intrstr, affinity_to);
6138 } else {
6139 aprint_normal_dev(sc->sc_dev,
6140 "for TX and RX interrupting at %s\n", intrstr);
6141 }
6142 sc->sc_ihs[intr_idx] = vih;
6143 if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
6144 goto fail;
6145 txrx_established++;
6146 intr_idx++;
6147 }
6148
6149 /* LINK */
6150 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6151 sizeof(intrbuf));
6152 #ifdef WM_MPSAFE
6153 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
6154 #endif
6155 memset(intr_xname, 0, sizeof(intr_xname));
6156 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
6157 device_xname(sc->sc_dev));
6158 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6159 IPL_NET, wm_linkintr_msix, sc, intr_xname);
6160 if (vih == NULL) {
6161 aprint_error_dev(sc->sc_dev,
		    "unable to establish MSI-X (for LINK)%s%s\n",
6163 intrstr ? " at " : "",
6164 intrstr ? intrstr : "");
6165
6166 goto fail;
6167 }
6168 /* Keep default affinity to LINK interrupt */
6169 aprint_normal_dev(sc->sc_dev,
6170 "for LINK interrupting at %s\n", intrstr);
6171 sc->sc_ihs[intr_idx] = vih;
6172 sc->sc_link_intr_idx = intr_idx;
6173
6174 sc->sc_nintrs = sc->sc_nqueues + 1;
6175 kcpuset_destroy(affinity);
6176 return 0;
6177
6178 fail:
6179 for (qidx = 0; qidx < txrx_established; qidx++) {
6180 struct wm_queue *wmq = &sc->sc_queue[qidx];
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6182 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6183 }
6184
6185 kcpuset_destroy(affinity);
6186 return ENOMEM;
6187 }
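
/*
 * Resulting vector layout (illustrative): with sc_nqueues == 4, vectors
 * 0..3 service TXRX0..TXRX3 (bound round-robin starting at CPU#1 when
 * sc_affinity_offset == 1) and vector 4 services LINK, so sc_nintrs
 * becomes 5.
 */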
6188
6189 static void
6190 wm_unset_stopping_flags(struct wm_softc *sc)
6191 {
6192 int i;
6193
6194 KASSERT(WM_CORE_LOCKED(sc));
6195
6196 /* Must unset stopping flags in ascending order. */
6197 for (i = 0; i < sc->sc_nqueues; i++) {
6198 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6199 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6200
6201 mutex_enter(txq->txq_lock);
6202 txq->txq_stopping = false;
6203 mutex_exit(txq->txq_lock);
6204
6205 mutex_enter(rxq->rxq_lock);
6206 rxq->rxq_stopping = false;
6207 mutex_exit(rxq->rxq_lock);
6208 }
6209
6210 sc->sc_core_stopping = false;
6211 }
6212
6213 static void
6214 wm_set_stopping_flags(struct wm_softc *sc)
6215 {
6216 int i;
6217
6218 KASSERT(WM_CORE_LOCKED(sc));
6219
6220 sc->sc_core_stopping = true;
6221
6222 /* Must set stopping flags in ascending order. */
6223 for (i = 0; i < sc->sc_nqueues; i++) {
6224 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6225 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6226
6227 mutex_enter(rxq->rxq_lock);
6228 rxq->rxq_stopping = true;
6229 mutex_exit(rxq->rxq_lock);
6230
6231 mutex_enter(txq->txq_lock);
6232 txq->txq_stopping = true;
6233 mutex_exit(txq->txq_lock);
6234 }
6235 }
6236
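
/*
 * A minimal sketch (illustrative, not compiled) of how per-queue code
 * is expected to consume these flags: re-check under the queue lock
 * before touching the ring.
 */
#if 0
	mutex_enter(txq->txq_lock);
	if (!txq->txq_stopping) {
		/* ... safe to process the TX ring here ... */
	}
	mutex_exit(txq->txq_lock);
#endif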
6237 /*
6238 * Write interrupt interval value to ITR or EITR
6239 */
6240 static void
6241 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
6242 {
6243
6244 if (!wmq->wmq_set_itr)
6245 return;
6246
6247 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6248 uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
6249
6250 /*
		 * The 82575 doesn't have the CNT_INGR field, so overwrite
		 * the counter field in software.
6253 */
6254 if (sc->sc_type == WM_T_82575)
6255 eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
6256 else
6257 eitr |= EITR_CNT_INGR;
6258
6259 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
6260 } else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
6261 /*
		 * The 82574 has both ITR and EITR. Set EITR when we use
		 * the multiqueue function with MSI-X.
6264 */
6265 CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
6266 wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
6267 } else {
6268 KASSERT(wmq->wmq_id == 0);
6269 CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
6270 }
6271
6272 wmq->wmq_set_itr = false;
6273 }
6274
6275 /*
6276 * TODO
 * The dynamic itr calculation below is almost the same as Linux igb's,
 * but it does not fit wm(4) well, so AIM is disabled until we find an
 * appropriate itr calculation for wm(4).
6280 */
6281 /*
 * Calculate the interrupt interval value to be written to the register
 * by wm_itrs_writereg(). This function does not write the ITR/EITR
 * register itself.
6284 */
6285 static void
6286 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
6287 {
6288 #ifdef NOTYET
6289 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6290 struct wm_txqueue *txq = &wmq->wmq_txq;
6291 uint32_t avg_size = 0;
6292 uint32_t new_itr;
6293
6294 if (rxq->rxq_packets)
6295 avg_size = rxq->rxq_bytes / rxq->rxq_packets;
6296 if (txq->txq_packets)
6297 avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
6298
6299 if (avg_size == 0) {
6300 new_itr = 450; /* restore default value */
6301 goto out;
6302 }
6303
6304 /* Add 24 bytes to size to account for CRC, preamble, and gap */
6305 avg_size += 24;
6306
6307 /* Don't starve jumbo frames */
6308 avg_size = uimin(avg_size, 3000);
6309
6310 /* Give a little boost to mid-size frames */
6311 if ((avg_size > 300) && (avg_size < 1200))
6312 new_itr = avg_size / 3;
6313 else
6314 new_itr = avg_size / 2;
6315
6316 out:
6317 /*
	 * The usage of the 82574 and 82575 EITR is different from other
	 * NEWQUEUE controllers'. See the sc->sc_itr_init setting in
	 * wm_init_locked().
6320 */
6321 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
6322 new_itr *= 4;
6323
6324 if (new_itr != wmq->wmq_itr) {
6325 wmq->wmq_itr = new_itr;
6326 wmq->wmq_set_itr = true;
6327 } else
6328 wmq->wmq_set_itr = false;
6329
6330 rxq->rxq_packets = 0;
6331 rxq->rxq_bytes = 0;
6332 txq->txq_packets = 0;
6333 txq->txq_bytes = 0;
6334 #endif
6335 }
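
/*
 * Numeric sketch of the disabled heuristic above: an average frame of
 * 576 bytes becomes 600 after the 24 byte CRC/preamble/gap adjustment,
 * which falls in the 300-1200 "mid-size" window, so new_itr would be
 * 600 / 3 == 200 before the final *4 scaling that applies to every
 * controller except the 82575.
 */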
6336
6337 static void
6338 wm_init_sysctls(struct wm_softc *sc)
6339 {
6340 struct sysctllog **log;
6341 const struct sysctlnode *rnode, *qnode, *cnode;
6342 int i, rv;
6343 const char *dvname;
6344
6345 log = &sc->sc_sysctllog;
6346 dvname = device_xname(sc->sc_dev);
6347
6348 rv = sysctl_createv(log, 0, NULL, &rnode,
6349 0, CTLTYPE_NODE, dvname,
6350 SYSCTL_DESCR("wm information and settings"),
6351 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6352 if (rv != 0)
6353 goto err;
6354
	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue",
	    SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
6358 if (rv != 0)
6359 goto teardown;
6360
6361 for (i = 0; i < sc->sc_nqueues; i++) {
6362 struct wm_queue *wmq = &sc->sc_queue[i];
6363 struct wm_txqueue *txq = &wmq->wmq_txq;
6364 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6365
6366 snprintf(sc->sc_queue[i].sysctlname,
6367 sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
6368
6369 if (sysctl_createv(log, 0, &rnode, &qnode,
6370 0, CTLTYPE_NODE,
6371 sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
6372 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
6373 break;
6374
6375 if (sysctl_createv(log, 0, &qnode, &cnode,
6376 CTLFLAG_READONLY, CTLTYPE_INT,
6377 "txq_free", SYSCTL_DESCR("TX queue free"),
6378 NULL, 0, &txq->txq_free,
6379 0, CTL_CREATE, CTL_EOL) != 0)
6380 break;
6381 if (sysctl_createv(log, 0, &qnode, &cnode,
6382 CTLFLAG_READONLY, CTLTYPE_INT,
6383 "txd_head", SYSCTL_DESCR("TX descriptor head"),
6384 wm_sysctl_tdh_handler, 0, (void *)txq,
6385 0, CTL_CREATE, CTL_EOL) != 0)
6386 break;
6387 if (sysctl_createv(log, 0, &qnode, &cnode,
6388 CTLFLAG_READONLY, CTLTYPE_INT,
6389 "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
6390 wm_sysctl_tdt_handler, 0, (void *)txq,
6391 0, CTL_CREATE, CTL_EOL) != 0)
6392 break;
6393 if (sysctl_createv(log, 0, &qnode, &cnode,
6394 CTLFLAG_READONLY, CTLTYPE_INT,
6395 "txq_next", SYSCTL_DESCR("TX queue next"),
6396 NULL, 0, &txq->txq_next,
6397 0, CTL_CREATE, CTL_EOL) != 0)
6398 break;
6399 if (sysctl_createv(log, 0, &qnode, &cnode,
6400 CTLFLAG_READONLY, CTLTYPE_INT,
6401 "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
6402 NULL, 0, &txq->txq_sfree,
6403 0, CTL_CREATE, CTL_EOL) != 0)
6404 break;
6405 if (sysctl_createv(log, 0, &qnode, &cnode,
6406 CTLFLAG_READONLY, CTLTYPE_INT,
6407 "txq_snext", SYSCTL_DESCR("TX queue snext"),
6408 NULL, 0, &txq->txq_snext,
6409 0, CTL_CREATE, CTL_EOL) != 0)
6410 break;
6411 if (sysctl_createv(log, 0, &qnode, &cnode,
6412 CTLFLAG_READONLY, CTLTYPE_INT,
6413 "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
6414 NULL, 0, &txq->txq_sdirty,
6415 0, CTL_CREATE, CTL_EOL) != 0)
6416 break;
6417 if (sysctl_createv(log, 0, &qnode, &cnode,
6418 CTLFLAG_READONLY, CTLTYPE_INT,
6419 "txq_flags", SYSCTL_DESCR("TX queue flags"),
6420 NULL, 0, &txq->txq_flags,
6421 0, CTL_CREATE, CTL_EOL) != 0)
6422 break;
6423 if (sysctl_createv(log, 0, &qnode, &cnode,
6424 CTLFLAG_READONLY, CTLTYPE_BOOL,
6425 "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
6426 NULL, 0, &txq->txq_stopping,
6427 0, CTL_CREATE, CTL_EOL) != 0)
6428 break;
6429 if (sysctl_createv(log, 0, &qnode, &cnode,
6430 CTLFLAG_READONLY, CTLTYPE_BOOL,
6431 "txq_sending", SYSCTL_DESCR("TX queue sending"),
6432 NULL, 0, &txq->txq_sending,
6433 0, CTL_CREATE, CTL_EOL) != 0)
6434 break;
6435
6436 if (sysctl_createv(log, 0, &qnode, &cnode,
6437 CTLFLAG_READONLY, CTLTYPE_INT,
6438 "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
6439 NULL, 0, &rxq->rxq_ptr,
6440 0, CTL_CREATE, CTL_EOL) != 0)
6441 break;
6442 }
6443
6444 #ifdef WM_DEBUG
6445 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6446 CTLTYPE_INT, "debug_flags",
6447 SYSCTL_DESCR(
6448 "Debug flags:\n" \
6449 "\t0x01 LINK\n" \
6450 "\t0x02 TX\n" \
6451 "\t0x04 RX\n" \
6452 "\t0x08 GMII\n" \
6453 "\t0x10 MANAGE\n" \
6454 "\t0x20 NVM\n" \
6455 "\t0x40 INIT\n" \
6456 "\t0x80 LOCK"),
6457 wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
6458 if (rv != 0)
6459 goto teardown;
6460 #endif
6461
6462 return;
6463
6464 teardown:
6465 sysctl_teardown(log);
6466 err:
6467 sc->sc_sysctllog = NULL;
6468 device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
6469 __func__, rv);
6470 }
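
/*
 * The nodes created above live under hw.<devname>; for example
 * (illustrative, assuming the first instance attaches as wm0):
 *	sysctl hw.wm0.txrx_workqueue
 *	sysctl hw.wm0.q0.txd_head
 */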
6471
6472 /*
6473 * wm_init: [ifnet interface function]
6474 *
6475 * Initialize the interface.
6476 */
6477 static int
6478 wm_init(struct ifnet *ifp)
6479 {
6480 struct wm_softc *sc = ifp->if_softc;
6481 int ret;
6482
6483 KASSERT(IFNET_LOCKED(ifp));
6484
6485 WM_CORE_LOCK(sc);
6486 ret = wm_init_locked(ifp);
6487 WM_CORE_UNLOCK(sc);
6488
6489 return ret;
6490 }
6491
6492 static int
6493 wm_init_locked(struct ifnet *ifp)
6494 {
6495 struct wm_softc *sc = ifp->if_softc;
6496 struct ethercom *ec = &sc->sc_ethercom;
6497 int i, j, trynum, error = 0;
6498 uint32_t reg, sfp_mask = 0;
6499
6500 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6501 device_xname(sc->sc_dev), __func__));
6502 KASSERT(IFNET_LOCKED(ifp));
6503 KASSERT(WM_CORE_LOCKED(sc));
6504
6505 /*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
6508 * of the descriptor so that the headers are aligned, for normal mtu,
6509 * on such platforms. One possibility is that the DMA itself is
6510 * slightly more efficient if the front of the entire packet (instead
6511 * of the front of the headers) is aligned.
6512 *
6513 * Note we must always set align_tweak to 0 if we are using
6514 * jumbo frames.
6515 */
6516 #ifdef __NO_STRICT_ALIGNMENT
6517 sc->sc_align_tweak = 0;
6518 #else
6519 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
6520 sc->sc_align_tweak = 0;
6521 else
6522 sc->sc_align_tweak = 2;
6523 #endif /* __NO_STRICT_ALIGNMENT */
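	/*
	 * Example (illustrative): with the standard MTU of 1500,
	 * 1500 + ETHER_HDR_LEN + ETHER_CRC_LEN == 1518 fits in
	 * MCLBYTES - 2 with 2048 byte clusters, so align_tweak is 2 and
	 * the 14 byte Ethernet header leaves the IP header 4-byte
	 * aligned.
	 */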
6524
6525 /* Cancel any pending I/O. */
6526 wm_stop_locked(ifp, false, false);
6527
6528 /* Update statistics before reset */
6529 if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
6530 if_ierrors, CSR_READ(sc, WMREG_RXERRC));
6531
6532 /* >= PCH_SPT hardware workaround before reset. */
6533 if (sc->sc_type >= WM_T_PCH_SPT)
6534 wm_flush_desc_rings(sc);
6535
6536 /* Reset the chip to a known state. */
6537 wm_reset(sc);
6538
6539 /*
6540 * AMT based hardware can now take control from firmware
6541 * Do this after reset.
6542 */
6543 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
6544 wm_get_hw_control(sc);
6545
6546 if ((sc->sc_type >= WM_T_PCH_SPT) &&
6547 pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
6548 wm_legacy_irq_quirk_spt(sc);
6549
6550 /* Init hardware bits */
6551 wm_initialize_hardware_bits(sc);
6552
6553 /* Reset the PHY. */
6554 if (sc->sc_flags & WM_F_HAS_MII)
6555 wm_gmii_reset(sc);
6556
6557 if (sc->sc_type >= WM_T_ICH8) {
6558 reg = CSR_READ(sc, WMREG_GCR);
6559 /*
		 * The ICH8 no-snoop bits have the opposite polarity. Set
		 * to snoop by default after reset.
6562 */
6563 if (sc->sc_type == WM_T_ICH8)
6564 reg |= GCR_NO_SNOOP_ALL;
6565 else
6566 reg &= ~GCR_NO_SNOOP_ALL;
6567 CSR_WRITE(sc, WMREG_GCR, reg);
6568 }
6569
6570 if ((sc->sc_type >= WM_T_ICH8)
6571 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
6572 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
6573
6574 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6575 reg |= CTRL_EXT_RO_DIS;
6576 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6577 }
6578
6579 /* Calculate (E)ITR value */
6580 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
6581 /*
6582 * For NEWQUEUE's EITR (except for 82575).
6583 * 82575's EITR should be set same throttling value as other
6584 * old controllers' ITR because the interrupt/sec calculation
6585 * is the same, that is, 1,000,000,000 / (N * 256).
6586 *
6587 * 82574's EITR should be set same throttling value as ITR.
6588 *
6589 * For N interrupts/sec, set this value to:
6590 * 1,000,000 / N in contrast to ITR throttling value.
6591 */
6592 sc->sc_itr_init = 450;
6593 } else if (sc->sc_type >= WM_T_82543) {
6594 /*
6595 * Set up the interrupt throttling register (units of 256ns)
6596 * Note that a footnote in Intel's documentation says this
6597 * ticker runs at 1/4 the rate when the chip is in 100Mbit
6598 * or 10Mbit mode. Empirically, it appears to be the case
6599 * that that is also true for the 1024ns units of the other
6600 * interrupt-related timer registers -- so, really, we ought
6601 * to divide this value by 4 when the link speed is low.
6602 *
6603 * XXX implement this division at link speed change!
6604 */
6605
6606 /*
6607 * For N interrupts/sec, set this value to:
6608 * 1,000,000,000 / (N * 256). Note that we set the
6609 * absolute and packet timer values to this value
6610 * divided by 4 to get "simple timer" behavior.
6611 */
6612 sc->sc_itr_init = 1500; /* 2604 ints/sec */
6613 }
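	/*
	 * Sanity check of the formulas above (illustrative): the EITR
	 * default of 450 gives roughly 1,000,000 / 450 == ~2222
	 * interrupts/sec, while the ITR default of 1500 (256ns units)
	 * gives 1,000,000,000 / (1500 * 256) == ~2604 interrupts/sec,
	 * matching the "2604 ints/sec" note above.
	 */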
6614
6615 error = wm_init_txrx_queues(sc);
6616 if (error)
6617 goto out;
6618
6619 if (((sc->sc_flags & WM_F_SGMII) == 0) &&
6620 (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
6621 (sc->sc_type >= WM_T_82575))
6622 wm_serdes_power_up_link_82575(sc);
6623
6624 /* Clear out the VLAN table -- we don't use it (yet). */
6625 CSR_WRITE(sc, WMREG_VET, 0);
6626 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
6627 trynum = 10; /* Due to hw errata */
6628 else
6629 trynum = 1;
6630 for (i = 0; i < WM_VLAN_TABSIZE; i++)
6631 for (j = 0; j < trynum; j++)
6632 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
6633
6634 /*
6635 * Set up flow-control parameters.
6636 *
6637 * XXX Values could probably stand some tuning.
6638 */
6639 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
6640 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
6641 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
6642 && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
6643 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
6644 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
6645 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
6646 }
6647
6648 sc->sc_fcrtl = FCRTL_DFLT;
6649 if (sc->sc_type < WM_T_82543) {
6650 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
6651 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
6652 } else {
6653 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
6654 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
6655 }
6656
6657 if (sc->sc_type == WM_T_80003)
6658 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
6659 else
6660 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
6661
6662 /* Writes the control register. */
6663 wm_set_vlan(sc);
6664
6665 if (sc->sc_flags & WM_F_HAS_MII) {
6666 uint16_t kmreg;
6667
6668 switch (sc->sc_type) {
6669 case WM_T_80003:
6670 case WM_T_ICH8:
6671 case WM_T_ICH9:
6672 case WM_T_ICH10:
6673 case WM_T_PCH:
6674 case WM_T_PCH2:
6675 case WM_T_PCH_LPT:
6676 case WM_T_PCH_SPT:
6677 case WM_T_PCH_CNP:
6678 /*
6679 * Set the mac to wait the maximum time between each
6680 * iteration and increase the max iterations when
6681 * polling the phy; this fixes erroneous timeouts at
6682 * 10Mbps.
6683 */
6684 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
6685 0xFFFF);
6686 wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6687 &kmreg);
6688 kmreg |= 0x3F;
6689 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6690 kmreg);
6691 break;
6692 default:
6693 break;
6694 }
6695
6696 if (sc->sc_type == WM_T_80003) {
6697 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6698 reg &= ~CTRL_EXT_LINK_MODE_MASK;
6699 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6700
6701 /* Bypass RX and TX FIFOs */
6702 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
6703 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
6704 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
6705 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
6706 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
6707 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
6708 }
6709 }
6710 #if 0
6711 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
6712 #endif
6713
6714 /* Set up checksum offload parameters. */
6715 reg = CSR_READ(sc, WMREG_RXCSUM);
6716 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
6717 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6718 reg |= RXCSUM_IPOFL;
6719 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
6720 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
6721 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
6722 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
6723 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6724
6725 /* Set registers about MSI-X */
6726 if (wm_is_using_msix(sc)) {
6727 uint32_t ivar, qintr_idx;
6728 struct wm_queue *wmq;
6729 unsigned int qid;
6730
6731 if (sc->sc_type == WM_T_82575) {
6732 /* Interrupt control */
6733 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6734 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
6735 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6736
6737 /* TX and RX */
6738 for (i = 0; i < sc->sc_nqueues; i++) {
6739 wmq = &sc->sc_queue[i];
6740 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
6741 EITR_TX_QUEUE(wmq->wmq_id)
6742 | EITR_RX_QUEUE(wmq->wmq_id));
6743 }
6744 /* Link status */
6745 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
6746 EITR_OTHER);
6747 } else if (sc->sc_type == WM_T_82574) {
6748 /* Interrupt control */
6749 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6750 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
6751 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6752
6753 /*
			 * Work around an issue with spurious interrupts
			 * in MSI-X mode.
			 * At wm_initialize_hardware_bits(), sc_nintrs has
			 * not been initialized yet, so re-initialize
			 * WMREG_RFCTL here.
6758 */
6759 reg = CSR_READ(sc, WMREG_RFCTL);
6760 reg |= WMREG_RFCTL_ACKDIS;
6761 CSR_WRITE(sc, WMREG_RFCTL, reg);
6762
6763 ivar = 0;
6764 /* TX and RX */
6765 for (i = 0; i < sc->sc_nqueues; i++) {
6766 wmq = &sc->sc_queue[i];
6767 qid = wmq->wmq_id;
6768 qintr_idx = wmq->wmq_intr_idx;
6769
6770 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6771 IVAR_TX_MASK_Q_82574(qid));
6772 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6773 IVAR_RX_MASK_Q_82574(qid));
6774 }
6775 /* Link status */
6776 ivar |= __SHIFTIN((IVAR_VALID_82574
6777 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
6778 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
6779 } else {
6780 /* Interrupt control */
6781 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
6782 | GPIE_EIAME | GPIE_PBA);
6783
6784 switch (sc->sc_type) {
6785 case WM_T_82580:
6786 case WM_T_I350:
6787 case WM_T_I354:
6788 case WM_T_I210:
6789 case WM_T_I211:
6790 /* TX and RX */
6791 for (i = 0; i < sc->sc_nqueues; i++) {
6792 wmq = &sc->sc_queue[i];
6793 qid = wmq->wmq_id;
6794 qintr_idx = wmq->wmq_intr_idx;
6795
6796 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
6797 ivar &= ~IVAR_TX_MASK_Q(qid);
6798 ivar |= __SHIFTIN((qintr_idx
6799 | IVAR_VALID),
6800 IVAR_TX_MASK_Q(qid));
6801 ivar &= ~IVAR_RX_MASK_Q(qid);
6802 ivar |= __SHIFTIN((qintr_idx
6803 | IVAR_VALID),
6804 IVAR_RX_MASK_Q(qid));
6805 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
6806 }
6807 break;
6808 case WM_T_82576:
6809 /* TX and RX */
6810 for (i = 0; i < sc->sc_nqueues; i++) {
6811 wmq = &sc->sc_queue[i];
6812 qid = wmq->wmq_id;
6813 qintr_idx = wmq->wmq_intr_idx;
6814
6815 ivar = CSR_READ(sc,
6816 WMREG_IVAR_Q_82576(qid));
6817 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
6818 ivar |= __SHIFTIN((qintr_idx
6819 | IVAR_VALID),
6820 IVAR_TX_MASK_Q_82576(qid));
6821 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
6822 ivar |= __SHIFTIN((qintr_idx
6823 | IVAR_VALID),
6824 IVAR_RX_MASK_Q_82576(qid));
6825 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
6826 ivar);
6827 }
6828 break;
6829 default:
6830 break;
6831 }
6832
6833 /* Link status */
6834 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
6835 IVAR_MISC_OTHER);
6836 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
6837 }
6838
6839 if (wm_is_using_multiqueue(sc)) {
6840 wm_init_rss(sc);
6841
			/*
			 * NOTE: Receive Full-Packet Checksum Offload is
			 * mutually exclusive with Multiqueue. However, this
			 * is not the same as TCP/IP checksums, which still
			 * work.
			 */
6848 reg = CSR_READ(sc, WMREG_RXCSUM);
6849 reg |= RXCSUM_PCSD;
6850 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6851 }
6852 }
6853
6854 /* Set up the interrupt registers. */
6855 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6856
6857 /* Enable SFP module insertion interrupt if it's required */
6858 if ((sc->sc_flags & WM_F_SFP) != 0) {
6859 sc->sc_ctrl |= CTRL_EXTLINK_EN;
6860 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6861 sfp_mask = ICR_GPI(0);
6862 }
6863
6864 if (wm_is_using_msix(sc)) {
6865 uint32_t mask;
6866 struct wm_queue *wmq;
6867
6868 switch (sc->sc_type) {
6869 case WM_T_82574:
6870 mask = 0;
6871 for (i = 0; i < sc->sc_nqueues; i++) {
6872 wmq = &sc->sc_queue[i];
6873 mask |= ICR_TXQ(wmq->wmq_id);
6874 mask |= ICR_RXQ(wmq->wmq_id);
6875 }
6876 mask |= ICR_OTHER;
6877 CSR_WRITE(sc, WMREG_EIAC_82574, mask);
6878 CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
6879 break;
6880 default:
6881 if (sc->sc_type == WM_T_82575) {
6882 mask = 0;
6883 for (i = 0; i < sc->sc_nqueues; i++) {
6884 wmq = &sc->sc_queue[i];
6885 mask |= EITR_TX_QUEUE(wmq->wmq_id);
6886 mask |= EITR_RX_QUEUE(wmq->wmq_id);
6887 }
6888 mask |= EITR_OTHER;
6889 } else {
6890 mask = 0;
6891 for (i = 0; i < sc->sc_nqueues; i++) {
6892 wmq = &sc->sc_queue[i];
6893 mask |= 1 << wmq->wmq_intr_idx;
6894 }
6895 mask |= 1 << sc->sc_link_intr_idx;
6896 }
6897 CSR_WRITE(sc, WMREG_EIAC, mask);
6898 CSR_WRITE(sc, WMREG_EIAM, mask);
6899 CSR_WRITE(sc, WMREG_EIMS, mask);
6900
6901 /* For other interrupts */
6902 CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
6903 break;
6904 }
6905 } else {
6906 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
6907 ICR_RXO | ICR_RXT0 | sfp_mask;
6908 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
6909 }
6910
6911 /* Set up the inter-packet gap. */
6912 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6913
6914 if (sc->sc_type >= WM_T_82543) {
6915 for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6916 struct wm_queue *wmq = &sc->sc_queue[qidx];
6917 wm_itrs_writereg(sc, wmq);
6918 }
		/*
		 * Link interrupts occur much less frequently than TX and
		 * RX interrupts, so we don't tune the
		 * EITR(WM_MSIX_LINKINTR_IDX) value like FreeBSD's if_igb
		 * does.
		 */
6925 }
6926
6927 /* Set the VLAN EtherType. */
6928 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
6929
6930 /*
6931 * Set up the transmit control register; we start out with
6932 * a collision distance suitable for FDX, but update it when
6933 * we resolve the media type.
6934 */
6935 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
6936 | TCTL_CT(TX_COLLISION_THRESHOLD)
6937 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6938 if (sc->sc_type >= WM_T_82571)
6939 sc->sc_tctl |= TCTL_MULR;
6940 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6941
6942 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6943 /* Write TDT after TCTL.EN is set. See the document. */
6944 CSR_WRITE(sc, WMREG_TDT(0), 0);
6945 }
6946
6947 if (sc->sc_type == WM_T_80003) {
6948 reg = CSR_READ(sc, WMREG_TCTL_EXT);
6949 reg &= ~TCTL_EXT_GCEX_MASK;
6950 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
6951 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
6952 }
6953
6954 /* Set the media. */
6955 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
6956 goto out;
6957
6958 /* Configure for OS presence */
6959 wm_init_manageability(sc);
6960
6961 /*
6962 * Set up the receive control register; we actually program the
6963 * register when we set the receive filter. Use multicast address
6964 * offset type 0.
6965 *
6966 * Only the i82544 has the ability to strip the incoming CRC, so we
6967 * don't enable that feature.
6968 */
6969 sc->sc_mchash_type = 0;
6970 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
6971 | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
6972
6973 	/* The 82574 uses one-buffer extended Rx descriptors. */
6974 if (sc->sc_type == WM_T_82574)
6975 sc->sc_rctl |= RCTL_DTYP_ONEBUF;
6976
6977 if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
6978 sc->sc_rctl |= RCTL_SECRC;
6979
6980 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
6981 && (ifp->if_mtu > ETHERMTU)) {
6982 sc->sc_rctl |= RCTL_LPE;
6983 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6984 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
6985 }
6986
6987 if (MCLBYTES == 2048)
6988 sc->sc_rctl |= RCTL_2k;
6989 else {
6990 if (sc->sc_type >= WM_T_82543) {
6991 switch (MCLBYTES) {
6992 case 4096:
6993 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
6994 break;
6995 case 8192:
6996 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
6997 break;
6998 case 16384:
6999 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
7000 break;
7001 default:
7002 panic("wm_init: MCLBYTES %d unsupported",
7003 MCLBYTES);
7004 break;
7005 }
7006 } else
7007 panic("wm_init: i82542 requires MCLBYTES = 2048");
7008 }
7009
7010 /* Enable ECC */
7011 switch (sc->sc_type) {
7012 case WM_T_82571:
7013 reg = CSR_READ(sc, WMREG_PBA_ECC);
7014 reg |= PBA_ECC_CORR_EN;
7015 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
7016 break;
7017 case WM_T_PCH_LPT:
7018 case WM_T_PCH_SPT:
7019 case WM_T_PCH_CNP:
7020 reg = CSR_READ(sc, WMREG_PBECCSTS);
7021 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
7022 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
7023
7024 sc->sc_ctrl |= CTRL_MEHE;
7025 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7026 break;
7027 default:
7028 break;
7029 }
7030
7031 /*
7032 * Set the receive filter.
7033 *
7034 * For 82575 and 82576, the RX descriptors must be initialized after
7035 * the setting of RCTL.EN in wm_set_filter()
7036 */
7037 wm_set_filter(sc);
7038
7039 	/* On 82575 and later, set RDT only if RX is enabled */
7040 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7041 int qidx;
7042 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7043 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
7044 for (i = 0; i < WM_NRXDESC; i++) {
7045 mutex_enter(rxq->rxq_lock);
7046 wm_init_rxdesc(rxq, i);
7047 mutex_exit(rxq->rxq_lock);
7049 }
7050 }
7051 }
7052
7053 wm_unset_stopping_flags(sc);
7054
7055 /* Start the one second link check clock. */
7056 callout_schedule(&sc->sc_tick_ch, hz);
7057
7058 /*
7059 * ...all done! (IFNET_LOCKED asserted above.)
7060 */
7061 ifp->if_flags |= IFF_RUNNING;
7062
7063 out:
7064 /* Save last flags for the callback */
7065 sc->sc_if_flags = ifp->if_flags;
7066 sc->sc_ec_capenable = ec->ec_capenable;
7067 if (error)
7068 log(LOG_ERR, "%s: interface not running\n",
7069 device_xname(sc->sc_dev));
7070 return error;
7071 }
7072
7073 /*
7074 * wm_stop: [ifnet interface function]
7075 *
7076 * Stop transmission on the interface.
7077 */
7078 static void
7079 wm_stop(struct ifnet *ifp, int disable)
7080 {
7081 struct wm_softc *sc = ifp->if_softc;
7082
7083 ASSERT_SLEEPABLE();
7084
7085 WM_CORE_LOCK(sc);
7086 wm_stop_locked(ifp, disable ? true : false, true);
7087 WM_CORE_UNLOCK(sc);
7088
7089 /*
7090 * After wm_set_stopping_flags(), it is guaranteed that
7091 * wm_handle_queue_work() does not call workqueue_enqueue().
7092 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
7093 	 * because it can sleep, so call workqueue_wait() here instead,
7094 	 * after the core lock has been released.
7095 */
7096 for (int i = 0; i < sc->sc_nqueues; i++)
7097 workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
7098 }
7099
7100 static void
7101 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
7102 {
7103 struct wm_softc *sc = ifp->if_softc;
7104 struct wm_txsoft *txs;
7105 int i, qidx;
7106
7107 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7108 device_xname(sc->sc_dev), __func__));
7109 KASSERT(WM_CORE_LOCKED(sc));
7110
7111 wm_set_stopping_flags(sc);
7112
7113 if (sc->sc_flags & WM_F_HAS_MII) {
7114 /* Down the MII. */
7115 mii_down(&sc->sc_mii);
7116 } else {
7117 #if 0
7118 /* Should we clear PHY's status properly? */
7119 wm_reset(sc);
7120 #endif
7121 }
7122
7123 /* Stop the transmit and receive processes. */
7124 CSR_WRITE(sc, WMREG_TCTL, 0);
7125 CSR_WRITE(sc, WMREG_RCTL, 0);
7126 sc->sc_rctl &= ~RCTL_EN;
7127
7128 /*
7129 * Clear the interrupt mask to ensure the device cannot assert its
7130 * interrupt line.
7131 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
7132 * service any currently pending or shared interrupt.
7133 */
7134 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7135 sc->sc_icr = 0;
7136 if (wm_is_using_msix(sc)) {
7137 if (sc->sc_type != WM_T_82574) {
7138 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
7139 CSR_WRITE(sc, WMREG_EIAC, 0);
7140 } else
7141 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
7142 }
7143
7144 /*
7145 * Stop callouts after interrupts are disabled; if we have
7146 * to wait for them, we will be releasing the CORE_LOCK
7147 * briefly, which will unblock interrupts on the current CPU.
7148 */
7149
7150 /* Stop the one second clock. */
7151 if (wait)
7152 callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
7153 else
7154 callout_stop(&sc->sc_tick_ch);
7155
7156 /* Stop the 82547 Tx FIFO stall check timer. */
7157 if (sc->sc_type == WM_T_82547) {
7158 if (wait)
7159 callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
7160 else
7161 callout_stop(&sc->sc_txfifo_ch);
7162 }
7163
7164 /* Release any queued transmit buffers. */
7165 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7166 struct wm_queue *wmq = &sc->sc_queue[qidx];
7167 struct wm_txqueue *txq = &wmq->wmq_txq;
7168 struct mbuf *m;
7169
7170 mutex_enter(txq->txq_lock);
7171 txq->txq_sending = false; /* Ensure watchdog disabled */
7172 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7173 txs = &txq->txq_soft[i];
7174 if (txs->txs_mbuf != NULL) {
7175 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7176 m_freem(txs->txs_mbuf);
7177 txs->txs_mbuf = NULL;
7178 }
7179 }
7180 /* Drain txq_interq */
7181 while ((m = pcq_get(txq->txq_interq)) != NULL)
7182 m_freem(m);
7183 mutex_exit(txq->txq_lock);
7184 }
7185
7186 /* Mark the interface as down and cancel the watchdog timer. */
7187 ifp->if_flags &= ~IFF_RUNNING;
7188
7189 if (disable) {
7190 for (i = 0; i < sc->sc_nqueues; i++) {
7191 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7192 mutex_enter(rxq->rxq_lock);
7193 wm_rxdrain(rxq);
7194 mutex_exit(rxq->rxq_lock);
7195 }
7196 }
7197
7198 #if 0 /* notyet */
7199 if (sc->sc_type >= WM_T_82544)
7200 CSR_WRITE(sc, WMREG_WUC, 0);
7201 #endif
7202 }
7203
7204 static void
7205 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
7206 {
7207 struct mbuf *m;
7208 int i;
7209
7210 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
7211 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
7212 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
7213 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
7214 m->m_data, m->m_len, m->m_flags);
7215 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
7216 i, i == 1 ? "" : "s");
7217 }
7218
7219 /*
7220 * wm_82547_txfifo_stall:
7221 *
7222 * Callout used to wait for the 82547 Tx FIFO to drain,
7223 * reset the FIFO pointers, and restart packet transmission.
7224 */
7225 static void
7226 wm_82547_txfifo_stall(void *arg)
7227 {
7228 struct wm_softc *sc = arg;
7229 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7230
7231 mutex_enter(txq->txq_lock);
7232
7233 if (txq->txq_stopping)
7234 goto out;
7235
7236 if (txq->txq_fifo_stall) {
7237 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
7238 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
7239 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
7240 /*
7241 * Packets have drained. Stop transmitter, reset
7242 * FIFO pointers, restart transmitter, and kick
7243 * the packet queue.
7244 */
7245 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
7246 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
7247 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
7248 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
7249 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
7250 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
7251 CSR_WRITE(sc, WMREG_TCTL, tctl);
7252 CSR_WRITE_FLUSH(sc);
7253
7254 txq->txq_fifo_head = 0;
7255 txq->txq_fifo_stall = 0;
7256 wm_start_locked(&sc->sc_ethercom.ec_if);
7257 } else {
7258 /*
7259 * Still waiting for packets to drain; try again in
7260 * another tick.
7261 */
7262 callout_schedule(&sc->sc_txfifo_ch, 1);
7263 }
7264 }
7265
7266 out:
7267 mutex_exit(txq->txq_lock);
7268 }
7269
7270 /*
7271 * wm_82547_txfifo_bugchk:
7272 *
7273 * Check for bug condition in the 82547 Tx FIFO. We need to
7274 * prevent enqueueing a packet that would wrap around the end
7275  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
7276 *
7277 * We do this by checking the amount of space before the end
7278 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
7279 * the Tx FIFO, wait for all remaining packets to drain, reset
7280 * the internal FIFO pointers to the beginning, and restart
7281 * transmission on the interface.
7282 */
7283 #define WM_FIFO_HDR 0x10
7284 #define WM_82547_PAD_LEN 0x3e0
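     /*
      * A packet occupies roundup(len + WM_FIFO_HDR, WM_FIFO_HDR) bytes of
      * Tx FIFO.  For example, with space = 0x100 bytes left before the end
      * of the FIFO, any packet whose rounded length is at least
      * 0x100 + WM_82547_PAD_LEN = 0x4e0 bytes takes the stall path (in
      * half-duplex mode only).
      */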
7285 static int
7286 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
7287 {
7288 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7289 int space = txq->txq_fifo_size - txq->txq_fifo_head;
7290 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
7291
7292 /* Just return if already stalled. */
7293 if (txq->txq_fifo_stall)
7294 return 1;
7295
7296 if (sc->sc_mii.mii_media_active & IFM_FDX) {
7297 /* Stall only occurs in half-duplex mode. */
7298 goto send_packet;
7299 }
7300
7301 if (len >= WM_82547_PAD_LEN + space) {
7302 txq->txq_fifo_stall = 1;
7303 callout_schedule(&sc->sc_txfifo_ch, 1);
7304 return 1;
7305 }
7306
7307 send_packet:
7308 txq->txq_fifo_head += len;
7309 if (txq->txq_fifo_head >= txq->txq_fifo_size)
7310 txq->txq_fifo_head -= txq->txq_fifo_size;
7311
7312 return 0;
7313 }
7314
7315 static int
7316 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7317 {
7318 int error;
7319
7320 /*
7321 * Allocate the control data structures, and create and load the
7322 * DMA map for it.
7323 *
7324 * NOTE: All Tx descriptors must be in the same 4G segment of
7325 * memory. So must Rx descriptors. We simplify by allocating
7326 * both sets within the same 4G segment.
7327 */
7328 if (sc->sc_type < WM_T_82544)
7329 WM_NTXDESC(txq) = WM_NTXDESC_82542;
7330 else
7331 WM_NTXDESC(txq) = WM_NTXDESC_82544;
7332 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7333 txq->txq_descsize = sizeof(nq_txdesc_t);
7334 else
7335 txq->txq_descsize = sizeof(wiseman_txdesc_t);
7336
7337 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
7338 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
7339 1, &txq->txq_desc_rseg, 0)) != 0) {
7340 aprint_error_dev(sc->sc_dev,
7341 "unable to allocate TX control data, error = %d\n",
7342 error);
7343 goto fail_0;
7344 }
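     	/*
     	 * The (bus_size_t)0x100000000ULL boundary argument to
     	 * bus_dmamem_alloc() above is what guarantees that the
     	 * descriptor ring never crosses a 4G boundary, satisfying the
     	 * constraint described in the NOTE.
     	 */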
7345
7346 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
7347 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
7348 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
7349 aprint_error_dev(sc->sc_dev,
7350 "unable to map TX control data, error = %d\n", error);
7351 goto fail_1;
7352 }
7353
7354 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
7355 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
7356 aprint_error_dev(sc->sc_dev,
7357 "unable to create TX control data DMA map, error = %d\n",
7358 error);
7359 goto fail_2;
7360 }
7361
7362 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
7363 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
7364 aprint_error_dev(sc->sc_dev,
7365 "unable to load TX control data DMA map, error = %d\n",
7366 error);
7367 goto fail_3;
7368 }
7369
7370 return 0;
7371
7372 fail_3:
7373 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7374 fail_2:
7375 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7376 WM_TXDESCS_SIZE(txq));
7377 fail_1:
7378 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7379 fail_0:
7380 return error;
7381 }
7382
7383 static void
7384 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7385 {
7386
7387 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
7388 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7389 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7390 WM_TXDESCS_SIZE(txq));
7391 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7392 }
7393
7394 static int
7395 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7396 {
7397 int error;
7398 size_t rxq_descs_size;
7399
7400 /*
7401 * Allocate the control data structures, and create and load the
7402 * DMA map for it.
7403 *
7404 * NOTE: All Tx descriptors must be in the same 4G segment of
7405 * memory. So must Rx descriptors. We simplify by allocating
7406 * both sets within the same 4G segment.
7407 */
7408 rxq->rxq_ndesc = WM_NRXDESC;
7409 if (sc->sc_type == WM_T_82574)
7410 rxq->rxq_descsize = sizeof(ext_rxdesc_t);
7411 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7412 rxq->rxq_descsize = sizeof(nq_rxdesc_t);
7413 else
7414 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
7415 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
7416
7417 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
7418 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
7419 1, &rxq->rxq_desc_rseg, 0)) != 0) {
7420 aprint_error_dev(sc->sc_dev,
7421 "unable to allocate RX control data, error = %d\n",
7422 error);
7423 goto fail_0;
7424 }
7425
7426 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
7427 rxq->rxq_desc_rseg, rxq_descs_size,
7428 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
7429 aprint_error_dev(sc->sc_dev,
7430 "unable to map RX control data, error = %d\n", error);
7431 goto fail_1;
7432 }
7433
7434 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
7435 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
7436 aprint_error_dev(sc->sc_dev,
7437 "unable to create RX control data DMA map, error = %d\n",
7438 error);
7439 goto fail_2;
7440 }
7441
7442 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
7443 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
7444 aprint_error_dev(sc->sc_dev,
7445 "unable to load RX control data DMA map, error = %d\n",
7446 error);
7447 goto fail_3;
7448 }
7449
7450 return 0;
7451
7452 fail_3:
7453 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7454 fail_2:
7455 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7456 rxq_descs_size);
7457 fail_1:
7458 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7459 fail_0:
7460 return error;
7461 }
7462
7463 static void
7464 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7465 {
7466
7467 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
7468 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7469 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7470 rxq->rxq_descsize * rxq->rxq_ndesc);
7471 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7472 }
7473
7474
7475 static int
7476 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7477 {
7478 int i, error;
7479
7480 /* Create the transmit buffer DMA maps. */
7481 WM_TXQUEUELEN(txq) =
7482 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
7483 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
7484 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7485 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
7486 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
7487 &txq->txq_soft[i].txs_dmamap)) != 0) {
7488 aprint_error_dev(sc->sc_dev,
7489 "unable to create Tx DMA map %d, error = %d\n",
7490 i, error);
7491 goto fail;
7492 }
7493 }
7494
7495 return 0;
7496
7497 fail:
7498 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7499 if (txq->txq_soft[i].txs_dmamap != NULL)
7500 bus_dmamap_destroy(sc->sc_dmat,
7501 txq->txq_soft[i].txs_dmamap);
7502 }
7503 return error;
7504 }
7505
7506 static void
7507 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7508 {
7509 int i;
7510
7511 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7512 if (txq->txq_soft[i].txs_dmamap != NULL)
7513 bus_dmamap_destroy(sc->sc_dmat,
7514 txq->txq_soft[i].txs_dmamap);
7515 }
7516 }
7517
7518 static int
7519 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7520 {
7521 int i, error;
7522
7523 /* Create the receive buffer DMA maps. */
7524 for (i = 0; i < rxq->rxq_ndesc; i++) {
7525 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
7526 MCLBYTES, 0, 0,
7527 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
7528 aprint_error_dev(sc->sc_dev,
7529 "unable to create Rx DMA map %d error = %d\n",
7530 i, error);
7531 goto fail;
7532 }
7533 rxq->rxq_soft[i].rxs_mbuf = NULL;
7534 }
7535
7536 return 0;
7537
7538 fail:
7539 for (i = 0; i < rxq->rxq_ndesc; i++) {
7540 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7541 bus_dmamap_destroy(sc->sc_dmat,
7542 rxq->rxq_soft[i].rxs_dmamap);
7543 }
7544 return error;
7545 }
7546
7547 static void
7548 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7549 {
7550 int i;
7551
7552 for (i = 0; i < rxq->rxq_ndesc; i++) {
7553 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7554 bus_dmamap_destroy(sc->sc_dmat,
7555 rxq->rxq_soft[i].rxs_dmamap);
7556 }
7557 }
7558
7559 /*
7560  * wm_alloc_txrx_queues:
7561  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
7562 */
7563 static int
7564 wm_alloc_txrx_queues(struct wm_softc *sc)
7565 {
7566 int i, error, tx_done, rx_done;
7567
7568 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
7569 KM_SLEEP);
7570 if (sc->sc_queue == NULL) {
7571 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
7572 error = ENOMEM;
7573 goto fail_0;
7574 }
7575
7576 /* For transmission */
7577 error = 0;
7578 tx_done = 0;
7579 for (i = 0; i < sc->sc_nqueues; i++) {
7580 #ifdef WM_EVENT_COUNTERS
7581 int j;
7582 const char *xname;
7583 #endif
7584 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7585 txq->txq_sc = sc;
7586 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7587
7588 error = wm_alloc_tx_descs(sc, txq);
7589 if (error)
7590 break;
7591 error = wm_alloc_tx_buffer(sc, txq);
7592 if (error) {
7593 wm_free_tx_descs(sc, txq);
7594 break;
7595 }
7596 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
7597 if (txq->txq_interq == NULL) {
7598 wm_free_tx_descs(sc, txq);
7599 wm_free_tx_buffer(sc, txq);
7600 error = ENOMEM;
7601 break;
7602 }
7603
7604 #ifdef WM_EVENT_COUNTERS
7605 xname = device_xname(sc->sc_dev);
7606
7607 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
7608 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
7609 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
7610 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
7611 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
7612 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
7613 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
7614 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
7615 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
7616 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
7617 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
7618
7619 for (j = 0; j < WM_NTXSEGS; j++) {
7620 snprintf(txq->txq_txseg_evcnt_names[j],
7621 sizeof(txq->txq_txseg_evcnt_names[j]),
7622 "txq%02dtxseg%d", i, j);
7623 evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
7624 EVCNT_TYPE_MISC,
7625 NULL, xname, txq->txq_txseg_evcnt_names[j]);
7626 }
7627
7628 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
7629 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
7630 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
7631 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
7632 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
7633 WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
7634 #endif /* WM_EVENT_COUNTERS */
7635
7636 tx_done++;
7637 }
7638 if (error)
7639 goto fail_1;
7640
7641 /* For receive */
7642 error = 0;
7643 rx_done = 0;
7644 for (i = 0; i < sc->sc_nqueues; i++) {
7645 #ifdef WM_EVENT_COUNTERS
7646 const char *xname;
7647 #endif
7648 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7649 rxq->rxq_sc = sc;
7650 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7651
7652 error = wm_alloc_rx_descs(sc, rxq);
7653 if (error)
7654 break;
7655
7656 error = wm_alloc_rx_buffer(sc, rxq);
7657 if (error) {
7658 wm_free_rx_descs(sc, rxq);
7659 break;
7660 }
7661
7662 #ifdef WM_EVENT_COUNTERS
7663 xname = device_xname(sc->sc_dev);
7664
7665 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
7666 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
7667
7668 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
7669 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
7670 #endif /* WM_EVENT_COUNTERS */
7671
7672 rx_done++;
7673 }
7674 if (error)
7675 goto fail_2;
7676
7677 return 0;
7678
7679 fail_2:
7680 for (i = 0; i < rx_done; i++) {
7681 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7682 wm_free_rx_buffer(sc, rxq);
7683 wm_free_rx_descs(sc, rxq);
7684 if (rxq->rxq_lock)
7685 mutex_obj_free(rxq->rxq_lock);
7686 }
7687 fail_1:
7688 for (i = 0; i < tx_done; i++) {
7689 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7690 pcq_destroy(txq->txq_interq);
7691 wm_free_tx_buffer(sc, txq);
7692 wm_free_tx_descs(sc, txq);
7693 if (txq->txq_lock)
7694 mutex_obj_free(txq->txq_lock);
7695 }
7696
7697 kmem_free(sc->sc_queue,
7698 sizeof(struct wm_queue) * sc->sc_nqueues);
7699 fail_0:
7700 return error;
7701 }
7702
7703 /*
7704  * wm_free_txrx_queues:
7705  *	Free {tx,rx} descriptors and {tx,rx} buffers.
7706 */
7707 static void
7708 wm_free_txrx_queues(struct wm_softc *sc)
7709 {
7710 int i;
7711
7712 for (i = 0; i < sc->sc_nqueues; i++) {
7713 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7714
7715 #ifdef WM_EVENT_COUNTERS
7716 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
7717 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
7718 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
7719 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
7720 #endif /* WM_EVENT_COUNTERS */
7721
7722 wm_free_rx_buffer(sc, rxq);
7723 wm_free_rx_descs(sc, rxq);
7724 if (rxq->rxq_lock)
7725 mutex_obj_free(rxq->rxq_lock);
7726 }
7727
7728 for (i = 0; i < sc->sc_nqueues; i++) {
7729 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7730 struct mbuf *m;
7731 #ifdef WM_EVENT_COUNTERS
7732 int j;
7733
7734 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
7735 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
7736 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
7737 WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
7738 WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
7739 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
7740 WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
7741 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
7742 WM_Q_EVCNT_DETACH(txq, tso, txq, i);
7743 WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
7744 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
7745
7746 for (j = 0; j < WM_NTXSEGS; j++)
7747 evcnt_detach(&txq->txq_ev_txseg[j]);
7748
7749 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
7750 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
7751 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
7752 WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
7753 WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
7754 WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
7755 #endif /* WM_EVENT_COUNTERS */
7756
7757 /* Drain txq_interq */
7758 while ((m = pcq_get(txq->txq_interq)) != NULL)
7759 m_freem(m);
7760 pcq_destroy(txq->txq_interq);
7761
7762 wm_free_tx_buffer(sc, txq);
7763 wm_free_tx_descs(sc, txq);
7764 if (txq->txq_lock)
7765 mutex_obj_free(txq->txq_lock);
7766 }
7767
7768 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
7769 }
7770
7771 static void
7772 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7773 {
7774
7775 KASSERT(mutex_owned(txq->txq_lock));
7776
7777 /* Initialize the transmit descriptor ring. */
7778 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
7779 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
7780 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7781 txq->txq_free = WM_NTXDESC(txq);
7782 txq->txq_next = 0;
7783 }
7784
7785 static void
7786 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7787 struct wm_txqueue *txq)
7788 {
7789
7790 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7791 device_xname(sc->sc_dev), __func__));
7792 KASSERT(mutex_owned(txq->txq_lock));
7793
7794 if (sc->sc_type < WM_T_82543) {
7795 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
7796 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
7797 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
7798 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
7799 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
7800 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
7801 } else {
7802 int qid = wmq->wmq_id;
7803
7804 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
7805 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
7806 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
7807 CSR_WRITE(sc, WMREG_TDH(qid), 0);
7808
7809 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7810 /*
7811 * Don't write TDT before TCTL.EN is set.
7812 * See the document.
7813 */
7814 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
7815 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
7816 | TXDCTL_WTHRESH(0));
7817 else {
7818 /* XXX should update with AIM? */
7819 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
7820 if (sc->sc_type >= WM_T_82540) {
7821 /* Should be the same */
7822 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
7823 }
7824
7825 CSR_WRITE(sc, WMREG_TDT(qid), 0);
7826 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
7827 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
7828 }
7829 }
7830 }
7831
7832 static void
7833 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7834 {
7835 int i;
7836
7837 KASSERT(mutex_owned(txq->txq_lock));
7838
7839 /* Initialize the transmit job descriptors. */
7840 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
7841 txq->txq_soft[i].txs_mbuf = NULL;
7842 txq->txq_sfree = WM_TXQUEUELEN(txq);
7843 txq->txq_snext = 0;
7844 txq->txq_sdirty = 0;
7845 }
7846
7847 static void
7848 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7849 struct wm_txqueue *txq)
7850 {
7851
7852 KASSERT(mutex_owned(txq->txq_lock));
7853
7854 /*
7855 * Set up some register offsets that are different between
7856 * the i82542 and the i82543 and later chips.
7857 */
7858 if (sc->sc_type < WM_T_82543)
7859 txq->txq_tdt_reg = WMREG_OLD_TDT;
7860 else
7861 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
7862
7863 wm_init_tx_descs(sc, txq);
7864 wm_init_tx_regs(sc, wmq, txq);
7865 wm_init_tx_buffer(sc, txq);
7866
7867 /* Clear other than WM_TXQ_LINKDOWN_DISCARD */
7868 txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
7869
7870 txq->txq_sending = false;
7871 }
7872
7873 static void
7874 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7875 struct wm_rxqueue *rxq)
7876 {
7877
7878 KASSERT(mutex_owned(rxq->rxq_lock));
7879
7880 /*
7881 * Initialize the receive descriptor and receive job
7882 * descriptor rings.
7883 */
7884 if (sc->sc_type < WM_T_82543) {
7885 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
7886 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
7887 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
7888 rxq->rxq_descsize * rxq->rxq_ndesc);
7889 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
7890 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
7891 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
7892
7893 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
7894 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
7895 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
7896 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
7897 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
7898 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
7899 } else {
7900 int qid = wmq->wmq_id;
7901
7902 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
7903 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
7904 CSR_WRITE(sc, WMREG_RDLEN(qid),
7905 rxq->rxq_descsize * rxq->rxq_ndesc);
7906
7907 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7908 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
7909 panic("%s: MCLBYTES %d unsupported for 82575 "
7910 "or higher\n", __func__, MCLBYTES);
7911
7912 /*
7913 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
7914 * only.
7915 */
7916 CSR_WRITE(sc, WMREG_SRRCTL(qid),
7917 SRRCTL_DESCTYPE_ADV_ONEBUF
7918 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
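     			/*
     			 * SRRCTL.BSIZEPKT is expressed in units of
     			 * 1 KB, hence the shift; e.g. MCLBYTES == 2048
     			 * programs a buffer size of 2.
     			 */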
7919 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
7920 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
7921 | RXDCTL_WTHRESH(1));
7922 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7923 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7924 } else {
7925 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7926 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7927 /* XXX should update with AIM? */
7928 CSR_WRITE(sc, WMREG_RDTR,
7929 (wmq->wmq_itr / 4) | RDTR_FPD);
7930 /* MUST be same */
7931 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
7932 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
7933 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
7934 }
7935 }
7936 }
7937
7938 static int
7939 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7940 {
7941 struct wm_rxsoft *rxs;
7942 int error, i;
7943
7944 KASSERT(mutex_owned(rxq->rxq_lock));
7945
7946 for (i = 0; i < rxq->rxq_ndesc; i++) {
7947 rxs = &rxq->rxq_soft[i];
7948 if (rxs->rxs_mbuf == NULL) {
7949 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
7950 log(LOG_ERR, "%s: unable to allocate or map "
7951 "rx buffer %d, error = %d\n",
7952 device_xname(sc->sc_dev), i, error);
7953 /*
7954 * XXX Should attempt to run with fewer receive
7955 * XXX buffers instead of just failing.
7956 */
7957 wm_rxdrain(rxq);
7958 return ENOMEM;
7959 }
7960 } else {
7961 /*
7962 * For 82575 and 82576, the RX descriptors must be
7963 * initialized after the setting of RCTL.EN in
7964 * wm_set_filter()
7965 */
7966 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
7967 wm_init_rxdesc(rxq, i);
7968 }
7969 }
7970 rxq->rxq_ptr = 0;
7971 rxq->rxq_discard = 0;
7972 WM_RXCHAIN_RESET(rxq);
7973
7974 return 0;
7975 }
7976
7977 static int
7978 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7979 struct wm_rxqueue *rxq)
7980 {
7981
7982 KASSERT(mutex_owned(rxq->rxq_lock));
7983
7984 /*
7985 * Set up some register offsets that are different between
7986 * the i82542 and the i82543 and later chips.
7987 */
7988 if (sc->sc_type < WM_T_82543)
7989 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
7990 else
7991 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
7992
7993 wm_init_rx_regs(sc, wmq, rxq);
7994 return wm_init_rx_buffer(sc, rxq);
7995 }
7996
7997 /*
7998  * wm_init_txrx_queues:
7999  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
8000 */
8001 static int
8002 wm_init_txrx_queues(struct wm_softc *sc)
8003 {
8004 int i, error = 0;
8005
8006 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8007 device_xname(sc->sc_dev), __func__));
8008
8009 for (i = 0; i < sc->sc_nqueues; i++) {
8010 struct wm_queue *wmq = &sc->sc_queue[i];
8011 struct wm_txqueue *txq = &wmq->wmq_txq;
8012 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8013
8014 /*
8015 * TODO
8016 		 * Currently, a constant interval is used instead of AIM.
8017 		 * Furthermore, the interrupt interval of a multiqueue
8018 		 * configuration (which uses polling mode) is lower than
8019 		 * the default value. More tuning and AIM are required.
8020 */
8021 if (wm_is_using_multiqueue(sc))
8022 wmq->wmq_itr = 50;
8023 else
8024 wmq->wmq_itr = sc->sc_itr_init;
8025 wmq->wmq_set_itr = true;
8026
8027 mutex_enter(txq->txq_lock);
8028 wm_init_tx_queue(sc, wmq, txq);
8029 mutex_exit(txq->txq_lock);
8030
8031 mutex_enter(rxq->rxq_lock);
8032 error = wm_init_rx_queue(sc, wmq, rxq);
8033 mutex_exit(rxq->rxq_lock);
8034 if (error)
8035 break;
8036 }
8037
8038 return error;
8039 }
8040
8041 /*
8042 * wm_tx_offload:
8043 *
8044 * Set up TCP/IP checksumming parameters for the
8045 * specified packet.
8046 */
8047 static void
8048 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8049 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
8050 {
8051 struct mbuf *m0 = txs->txs_mbuf;
8052 struct livengood_tcpip_ctxdesc *t;
8053 uint32_t ipcs, tucs, cmd, cmdlen, seg;
8054 uint32_t ipcse;
8055 struct ether_header *eh;
8056 int offset, iphl;
8057 uint8_t fields;
8058
8059 /*
8060 * XXX It would be nice if the mbuf pkthdr had offset
8061 * fields for the protocol headers.
8062 */
8063
8064 eh = mtod(m0, struct ether_header *);
8065 switch (htons(eh->ether_type)) {
8066 case ETHERTYPE_IP:
8067 case ETHERTYPE_IPV6:
8068 offset = ETHER_HDR_LEN;
8069 break;
8070
8071 case ETHERTYPE_VLAN:
8072 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8073 break;
8074
8075 default:
8076 /* Don't support this protocol or encapsulation. */
8077 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8078 txq->txq_last_hw_ipcs = 0;
8079 txq->txq_last_hw_tucs = 0;
8080 *fieldsp = 0;
8081 *cmdp = 0;
8082 return;
8083 }
8084
8085 if ((m0->m_pkthdr.csum_flags &
8086 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8087 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8088 } else
8089 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8090
8091 ipcse = offset + iphl - 1;
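     	/*
     	 * IPCSE holds the inclusive offset of the last byte covered by
     	 * the IP checksum, hence the "- 1".
     	 */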
8092
8093 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
8094 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
8095 seg = 0;
8096 fields = 0;
8097
8098 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8099 int hlen = offset + iphl;
8100 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8101
8102 if (__predict_false(m0->m_len <
8103 (hlen + sizeof(struct tcphdr)))) {
8104 /*
8105 * TCP/IP headers are not in the first mbuf; we need
8106 * to do this the slow and painful way. Let's just
8107 * hope this doesn't happen very often.
8108 */
8109 struct tcphdr th;
8110
8111 WM_Q_EVCNT_INCR(txq, tsopain);
8112
8113 m_copydata(m0, hlen, sizeof(th), &th);
8114 if (v4) {
8115 struct ip ip;
8116
8117 m_copydata(m0, offset, sizeof(ip), &ip);
8118 ip.ip_len = 0;
8119 m_copyback(m0,
8120 offset + offsetof(struct ip, ip_len),
8121 sizeof(ip.ip_len), &ip.ip_len);
8122 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8123 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8124 } else {
8125 struct ip6_hdr ip6;
8126
8127 m_copydata(m0, offset, sizeof(ip6), &ip6);
8128 ip6.ip6_plen = 0;
8129 m_copyback(m0,
8130 offset + offsetof(struct ip6_hdr, ip6_plen),
8131 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8132 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8133 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8134 }
8135 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8136 sizeof(th.th_sum), &th.th_sum);
8137
8138 hlen += th.th_off << 2;
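     			/*
     			 * The checksum seeded above is the pseudo-header
     			 * sum computed with a zero length field; the
     			 * hardware fills in the real per-segment lengths
     			 * during TSO.  th_off counts 32-bit words, so the
     			 * shift converts it to a byte count.
     			 */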
8139 } else {
8140 /*
8141 * TCP/IP headers are in the first mbuf; we can do
8142 * this the easy way.
8143 */
8144 struct tcphdr *th;
8145
8146 if (v4) {
8147 struct ip *ip =
8148 (void *)(mtod(m0, char *) + offset);
8149 th = (void *)(mtod(m0, char *) + hlen);
8150
8151 ip->ip_len = 0;
8152 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8153 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8154 } else {
8155 struct ip6_hdr *ip6 =
8156 (void *)(mtod(m0, char *) + offset);
8157 th = (void *)(mtod(m0, char *) + hlen);
8158
8159 ip6->ip6_plen = 0;
8160 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8161 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8162 }
8163 hlen += th->th_off << 2;
8164 }
8165
8166 if (v4) {
8167 WM_Q_EVCNT_INCR(txq, tso);
8168 cmdlen |= WTX_TCPIP_CMD_IP;
8169 } else {
8170 WM_Q_EVCNT_INCR(txq, tso6);
8171 ipcse = 0;
8172 }
8173 cmd |= WTX_TCPIP_CMD_TSE;
8174 cmdlen |= WTX_TCPIP_CMD_TSE |
8175 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
8176 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
8177 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
8178 }
8179
8180 /*
8181 * NOTE: Even if we're not using the IP or TCP/UDP checksum
8182 * offload feature, if we load the context descriptor, we
8183 * MUST provide valid values for IPCSS and TUCSS fields.
8184 */
8185
8186 ipcs = WTX_TCPIP_IPCSS(offset) |
8187 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
8188 WTX_TCPIP_IPCSE(ipcse);
8189 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
8190 WM_Q_EVCNT_INCR(txq, ipsum);
8191 fields |= WTX_IXSM;
8192 }
8193
8194 offset += iphl;
8195
8196 if (m0->m_pkthdr.csum_flags &
8197 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
8198 WM_Q_EVCNT_INCR(txq, tusum);
8199 fields |= WTX_TXSM;
8200 tucs = WTX_TCPIP_TUCSS(offset) |
8201 WTX_TCPIP_TUCSO(offset +
8202 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
8203 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8204 } else if ((m0->m_pkthdr.csum_flags &
8205 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
8206 WM_Q_EVCNT_INCR(txq, tusum6);
8207 fields |= WTX_TXSM;
8208 tucs = WTX_TCPIP_TUCSS(offset) |
8209 WTX_TCPIP_TUCSO(offset +
8210 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
8211 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8212 } else {
8213 /* Just initialize it to a valid TCP context. */
8214 tucs = WTX_TCPIP_TUCSS(offset) |
8215 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
8216 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8217 }
8218
8219 *cmdp = cmd;
8220 *fieldsp = fields;
8221
8222 /*
8223 	 * We don't have to write a context descriptor for every packet,
8224 	 * except on the 82574.  For the 82574, we must write a context
8225 	 * descriptor for every packet when we use two descriptor queues.
8226 	 *
8227 	 * The 82574L can only remember the *last* context used
8228 	 * regardless of the queue it was used for.  We cannot reuse
8229 * contexts on this hardware platform and must generate a new
8230 * context every time. 82574L hardware spec, section 7.2.6,
8231 * second note.
8232 */
8233 if (sc->sc_nqueues < 2) {
8234 /*
8235 		 * Setting up a new checksum offload context for every
8236 		 * frame takes a lot of processing time for hardware.
8237 		 * This also reduces performance a lot for small sized
8238 		 * frames, so avoid it if the driver can reuse a previously
8239 		 * configured checksum offload context.
8240 		 * For TSO, in theory we could reuse the same TSO context if
8241 		 * the frame is the same type (IP/TCP) and has the same MSS.
8242 		 * However, checking whether a frame has the same IP/TCP
8243 		 * structure is hard, so just ignore that and always
8244 		 * establish a new TSO context.
8245 */
8246 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
8247 == 0) {
8248 if (txq->txq_last_hw_cmd == cmd &&
8249 txq->txq_last_hw_fields == fields &&
8250 txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
8251 txq->txq_last_hw_tucs == (tucs & 0xffff)) {
8252 WM_Q_EVCNT_INCR(txq, skipcontext);
8253 return;
8254 }
8255 }
8256
8257 txq->txq_last_hw_cmd = cmd;
8258 txq->txq_last_hw_fields = fields;
8259 txq->txq_last_hw_ipcs = (ipcs & 0xffff);
8260 txq->txq_last_hw_tucs = (tucs & 0xffff);
8261 }
8262
8263 /* Fill in the context descriptor. */
8264 t = (struct livengood_tcpip_ctxdesc *)
8265 &txq->txq_descs[txq->txq_next];
8266 t->tcpip_ipcs = htole32(ipcs);
8267 t->tcpip_tucs = htole32(tucs);
8268 t->tcpip_cmdlen = htole32(cmdlen);
8269 t->tcpip_seg = htole32(seg);
8270 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8271
8272 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8273 txs->txs_ndesc++;
8274 }
8275
8276 static inline int
8277 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
8278 {
8279 struct wm_softc *sc = ifp->if_softc;
8280 u_int cpuid = cpu_index(curcpu());
8281
8282 /*
8283 	 * Currently, a simple distribution strategy.
8284 	 * TODO:
8285 	 * Distribute by flowid (RSS hash value).
8286 */
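     	/*
     	 * Example: with ncpu = 8, sc_affinity_offset = 2 and
     	 * sc_nqueues = 4, the CPU with index 3 selects
     	 * ((3 + 8 - 2) % 8) % 4 = 1, i.e. Tx queue 1.
     	 */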
8287 return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
8288 }
8289
8290 static inline bool
8291 wm_linkdown_discard(struct wm_txqueue *txq)
8292 {
8293
8294 if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
8295 return true;
8296
8297 return false;
8298 }
8299
8300 /*
8301 * wm_start: [ifnet interface function]
8302 *
8303 * Start packet transmission on the interface.
8304 */
8305 static void
8306 wm_start(struct ifnet *ifp)
8307 {
8308 struct wm_softc *sc = ifp->if_softc;
8309 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8310
8311 #ifdef WM_MPSAFE
8312 KASSERT(if_is_mpsafe(ifp));
8313 #endif
8314 /*
8315 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8316 */
8317
8318 mutex_enter(txq->txq_lock);
8319 if (!txq->txq_stopping)
8320 wm_start_locked(ifp);
8321 mutex_exit(txq->txq_lock);
8322 }
8323
8324 static void
8325 wm_start_locked(struct ifnet *ifp)
8326 {
8327 struct wm_softc *sc = ifp->if_softc;
8328 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8329
8330 wm_send_common_locked(ifp, txq, false);
8331 }
8332
8333 static int
8334 wm_transmit(struct ifnet *ifp, struct mbuf *m)
8335 {
8336 int qid;
8337 struct wm_softc *sc = ifp->if_softc;
8338 struct wm_txqueue *txq;
8339
8340 qid = wm_select_txqueue(ifp, m);
8341 txq = &sc->sc_queue[qid].wmq_txq;
8342
8343 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8344 m_freem(m);
8345 WM_Q_EVCNT_INCR(txq, pcqdrop);
8346 return ENOBUFS;
8347 }
8348
8349 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8350 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8351 if (m->m_flags & M_MCAST)
8352 if_statinc_ref(nsr, if_omcasts);
8353 IF_STAT_PUTREF(ifp);
8354
8355 if (mutex_tryenter(txq->txq_lock)) {
8356 if (!txq->txq_stopping)
8357 wm_transmit_locked(ifp, txq);
8358 mutex_exit(txq->txq_lock);
8359 }
8360
8361 return 0;
8362 }
8363
8364 static void
8365 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8366 {
8367
8368 wm_send_common_locked(ifp, txq, true);
8369 }
8370
8371 static void
8372 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8373 bool is_transmit)
8374 {
8375 struct wm_softc *sc = ifp->if_softc;
8376 struct mbuf *m0;
8377 struct wm_txsoft *txs;
8378 bus_dmamap_t dmamap;
8379 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
8380 bus_addr_t curaddr;
8381 bus_size_t seglen, curlen;
8382 uint32_t cksumcmd;
8383 uint8_t cksumfields;
8384 bool remap = true;
8385
8386 KASSERT(mutex_owned(txq->txq_lock));
8387
8388 if ((ifp->if_flags & IFF_RUNNING) == 0)
8389 return;
8390 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8391 return;
8392
8393 if (__predict_false(wm_linkdown_discard(txq))) {
8394 do {
8395 if (is_transmit)
8396 m0 = pcq_get(txq->txq_interq);
8397 else
8398 IFQ_DEQUEUE(&ifp->if_snd, m0);
8399 /*
8400 			 * Increment the successful packet counter, just as
8401 			 * when the packet is discarded by a link-down PHY.
8402 */
8403 if (m0 != NULL) {
8404 if_statinc(ifp, if_opackets);
8405 m_freem(m0);
8406 }
8407 } while (m0 != NULL);
8408 return;
8409 }
8410
8411 /* Remember the previous number of free descriptors. */
8412 ofree = txq->txq_free;
8413
8414 /*
8415 * Loop through the send queue, setting up transmit descriptors
8416 * until we drain the queue, or use up all available transmit
8417 * descriptors.
8418 */
8419 for (;;) {
8420 m0 = NULL;
8421
8422 /* Get a work queue entry. */
8423 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8424 wm_txeof(txq, UINT_MAX);
8425 if (txq->txq_sfree == 0) {
8426 DPRINTF(sc, WM_DEBUG_TX,
8427 ("%s: TX: no free job descriptors\n",
8428 device_xname(sc->sc_dev)));
8429 WM_Q_EVCNT_INCR(txq, txsstall);
8430 break;
8431 }
8432 }
8433
8434 /* Grab a packet off the queue. */
8435 if (is_transmit)
8436 m0 = pcq_get(txq->txq_interq);
8437 else
8438 IFQ_DEQUEUE(&ifp->if_snd, m0);
8439 if (m0 == NULL)
8440 break;
8441
8442 DPRINTF(sc, WM_DEBUG_TX,
8443 ("%s: TX: have packet to transmit: %p\n",
8444 device_xname(sc->sc_dev), m0));
8445
8446 txs = &txq->txq_soft[txq->txq_snext];
8447 dmamap = txs->txs_dmamap;
8448
8449 use_tso = (m0->m_pkthdr.csum_flags &
8450 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
8451
8452 /*
8453 * So says the Linux driver:
8454 * The controller does a simple calculation to make sure
8455 * there is enough room in the FIFO before initiating the
8456 * DMA for each buffer. The calc is:
8457 * 4 = ceil(buffer len / MSS)
8458 * To make sure we don't overrun the FIFO, adjust the max
8459 * buffer len if the MSS drops.
8460 */
8461 dmamap->dm_maxsegsz =
8462 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
8463 ? m0->m_pkthdr.segsz << 2
8464 : WTX_MAX_LEN;
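     		/*
     		 * E.g. an MSS of 1460 caps each DMA segment at
     		 * 1460 << 2 = 5840 bytes, so ceil(5840 / 1460) stays at
     		 * the hardware's limit of 4.
     		 */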
8465
8466 /*
8467 * Load the DMA map. If this fails, the packet either
8468 * didn't fit in the allotted number of segments, or we
8469 * were short on resources. For the too-many-segments
8470 * case, we simply report an error and drop the packet,
8471 * since we can't sanely copy a jumbo packet to a single
8472 * buffer.
8473 */
8474 retry:
8475 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8476 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8477 if (__predict_false(error)) {
8478 if (error == EFBIG) {
8479 if (remap == true) {
8480 struct mbuf *m;
8481
8482 remap = false;
8483 m = m_defrag(m0, M_NOWAIT);
8484 if (m != NULL) {
8485 WM_Q_EVCNT_INCR(txq, defrag);
8486 m0 = m;
8487 goto retry;
8488 }
8489 }
8490 WM_Q_EVCNT_INCR(txq, toomanyseg);
8491 log(LOG_ERR, "%s: Tx packet consumes too many "
8492 "DMA segments, dropping...\n",
8493 device_xname(sc->sc_dev));
8494 wm_dump_mbuf_chain(sc, m0);
8495 m_freem(m0);
8496 continue;
8497 }
8498 /* Short on resources, just stop for now. */
8499 DPRINTF(sc, WM_DEBUG_TX,
8500 ("%s: TX: dmamap load failed: %d\n",
8501 device_xname(sc->sc_dev), error));
8502 break;
8503 }
8504
8505 segs_needed = dmamap->dm_nsegs;
8506 if (use_tso) {
8507 /* For sentinel descriptor; see below. */
8508 segs_needed++;
8509 }
8510
8511 /*
8512 * Ensure we have enough descriptors free to describe
8513 * the packet. Note, we always reserve one descriptor
8514 * at the end of the ring due to the semantics of the
8515 * TDT register, plus one more in the event we need
8516 * to load offload context.
8517 */
8518 if (segs_needed > txq->txq_free - 2) {
8519 /*
8520 * Not enough free descriptors to transmit this
8521 * packet. We haven't committed anything yet,
8522 			 * so just unload the DMA map, drop the packet,
8523 			 * and punt.  Notify the upper
8524 * layer that there are no more slots left.
8525 */
8526 DPRINTF(sc, WM_DEBUG_TX,
8527 ("%s: TX: need %d (%d) descriptors, have %d\n",
8528 device_xname(sc->sc_dev), dmamap->dm_nsegs,
8529 segs_needed, txq->txq_free - 1));
8530 txq->txq_flags |= WM_TXQ_NO_SPACE;
8531 bus_dmamap_unload(sc->sc_dmat, dmamap);
8532 WM_Q_EVCNT_INCR(txq, txdstall);
8533 break;
8534 }
8535
8536 /*
8537 * Check for 82547 Tx FIFO bug. We need to do this
8538 * once we know we can transmit the packet, since we
8539 * do some internal FIFO space accounting here.
8540 */
8541 if (sc->sc_type == WM_T_82547 &&
8542 wm_82547_txfifo_bugchk(sc, m0)) {
8543 DPRINTF(sc, WM_DEBUG_TX,
8544 ("%s: TX: 82547 Tx FIFO bug detected\n",
8545 device_xname(sc->sc_dev)));
8546 txq->txq_flags |= WM_TXQ_NO_SPACE;
8547 bus_dmamap_unload(sc->sc_dmat, dmamap);
8548 WM_Q_EVCNT_INCR(txq, fifo_stall);
8549 break;
8550 }
8551
8552 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8553
8554 DPRINTF(sc, WM_DEBUG_TX,
8555 ("%s: TX: packet has %d (%d) DMA segments\n",
8556 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8557
8558 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
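     		/*
     		 * txq_ev_txseg[N] counts the packets that mapped to
     		 * N + 1 DMA segments.
     		 */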
8559
8560 /*
8561 * Store a pointer to the packet so that we can free it
8562 * later.
8563 *
8564 * Initially, we consider the number of descriptors the
8565 * packet uses the number of DMA segments. This may be
8566 * incremented by 1 if we do checksum offload (a descriptor
8567 * is used to set the checksum context).
8568 */
8569 txs->txs_mbuf = m0;
8570 txs->txs_firstdesc = txq->txq_next;
8571 txs->txs_ndesc = segs_needed;
8572
8573 /* Set up offload parameters for this packet. */
8574 if (m0->m_pkthdr.csum_flags &
8575 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8576 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8577 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8578 wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
8579 } else {
8580 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8581 txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
8582 cksumcmd = 0;
8583 cksumfields = 0;
8584 }
8585
8586 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
8587
8588 /* Sync the DMA map. */
8589 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8590 BUS_DMASYNC_PREWRITE);
8591
8592 /* Initialize the transmit descriptor. */
8593 for (nexttx = txq->txq_next, seg = 0;
8594 seg < dmamap->dm_nsegs; seg++) {
8595 for (seglen = dmamap->dm_segs[seg].ds_len,
8596 curaddr = dmamap->dm_segs[seg].ds_addr;
8597 seglen != 0;
8598 curaddr += curlen, seglen -= curlen,
8599 nexttx = WM_NEXTTX(txq, nexttx)) {
8600 curlen = seglen;
8601
8602 /*
8603 * So says the Linux driver:
8604 * Work around for premature descriptor
8605 * write-backs in TSO mode. Append a
8606 * 4-byte sentinel descriptor.
8607 */
8608 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
8609 curlen > 8)
8610 curlen -= 4;
8611
8612 wm_set_dma_addr(
8613 &txq->txq_descs[nexttx].wtx_addr, curaddr);
8614 txq->txq_descs[nexttx].wtx_cmdlen
8615 = htole32(cksumcmd | curlen);
8616 txq->txq_descs[nexttx].wtx_fields.wtxu_status
8617 = 0;
8618 txq->txq_descs[nexttx].wtx_fields.wtxu_options
8619 = cksumfields;
8620 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
8621 lasttx = nexttx;
8622
8623 DPRINTF(sc, WM_DEBUG_TX,
8624 ("%s: TX: desc %d: low %#" PRIx64 ", "
8625 "len %#04zx\n",
8626 device_xname(sc->sc_dev), nexttx,
8627 (uint64_t)curaddr, curlen));
8628 }
8629 }
8630
8631 KASSERT(lasttx != -1);
8632
8633 /*
8634 * Set up the command byte on the last descriptor of
8635 * the packet. If we're in the interrupt delay window,
8636 * delay the interrupt.
8637 */
8638 txq->txq_descs[lasttx].wtx_cmdlen |=
8639 htole32(WTX_CMD_EOP | WTX_CMD_RS);
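     		/*
     		 * WTX_CMD_RS requests descriptor status write-back,
     		 * which is what later allows wm_txeof() to see that
     		 * transmission of the packet has completed.
     		 */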
8640
8641 /*
8642 * If VLANs are enabled and the packet has a VLAN tag, set
8643 * up the descriptor to encapsulate the packet for us.
8644 *
8645 * This is only valid on the last descriptor of the packet.
8646 */
8647 if (vlan_has_tag(m0)) {
8648 txq->txq_descs[lasttx].wtx_cmdlen |=
8649 htole32(WTX_CMD_VLE);
8650 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
8651 = htole16(vlan_get_tag(m0));
8652 }
8653
8654 txs->txs_lastdesc = lasttx;
8655
8656 DPRINTF(sc, WM_DEBUG_TX,
8657 ("%s: TX: desc %d: cmdlen 0x%08x\n",
8658 device_xname(sc->sc_dev),
8659 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8660
8661 /* Sync the descriptors we're using. */
8662 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8663 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8664
8665 /* Give the packet to the chip. */
8666 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8667
8668 DPRINTF(sc, WM_DEBUG_TX,
8669 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8670
8671 DPRINTF(sc, WM_DEBUG_TX,
8672 ("%s: TX: finished transmitting packet, job %d\n",
8673 device_xname(sc->sc_dev), txq->txq_snext));
8674
8675 /* Advance the tx pointer. */
8676 txq->txq_free -= txs->txs_ndesc;
8677 txq->txq_next = nexttx;
8678
8679 txq->txq_sfree--;
8680 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8681
8682 /* Pass the packet to any BPF listeners. */
8683 bpf_mtap(ifp, m0, BPF_D_OUT);
8684 }
8685
8686 if (m0 != NULL) {
8687 txq->txq_flags |= WM_TXQ_NO_SPACE;
8688 WM_Q_EVCNT_INCR(txq, descdrop);
8689 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8690 __func__));
8691 m_freem(m0);
8692 }
8693
8694 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8695 /* No more slots; notify upper layer. */
8696 txq->txq_flags |= WM_TXQ_NO_SPACE;
8697 }
8698
8699 if (txq->txq_free != ofree) {
8700 /* Set a watchdog timer in case the chip flakes out. */
8701 txq->txq_lastsent = time_uptime;
8702 txq->txq_sending = true;
8703 }
8704 }
8705
8706 /*
8707 * wm_nq_tx_offload:
8708 *
8709 * Set up TCP/IP checksumming parameters for the
8710 * specified packet, for NEWQUEUE devices
8711 */
8712 static void
8713 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8714 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
8715 {
8716 struct mbuf *m0 = txs->txs_mbuf;
8717 uint32_t vl_len, mssidx, cmdc;
8718 struct ether_header *eh;
8719 int offset, iphl;
8720
8721 /*
8722 * XXX It would be nice if the mbuf pkthdr had offset
8723 * fields for the protocol headers.
8724 */
8725 *cmdlenp = 0;
8726 *fieldsp = 0;
8727
8728 eh = mtod(m0, struct ether_header *);
8729 switch (htons(eh->ether_type)) {
8730 case ETHERTYPE_IP:
8731 case ETHERTYPE_IPV6:
8732 offset = ETHER_HDR_LEN;
8733 break;
8734
8735 case ETHERTYPE_VLAN:
8736 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8737 break;
8738
8739 default:
8740 /* Don't support this protocol or encapsulation. */
8741 *do_csum = false;
8742 return;
8743 }
8744 *do_csum = true;
8745 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
8746 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
8747
8748 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
8749 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
8750
8751 if ((m0->m_pkthdr.csum_flags &
8752 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8753 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8754 } else {
8755 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8756 }
8757 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
8758 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
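     	/*
     	 * vl_len packs the MAC header length, the IP header length and
     	 * (below) the VLAN tag into a single 32-bit field; the KASSERTs
     	 * verify that each component fits within its mask.
     	 */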
8759
8760 if (vlan_has_tag(m0)) {
8761 vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
8762 << NQTXC_VLLEN_VLAN_SHIFT);
8763 *cmdlenp |= NQTX_CMD_VLE;
8764 }
8765
8766 mssidx = 0;
8767
8768 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8769 int hlen = offset + iphl;
8770 int tcp_hlen;
8771 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8772
8773 if (__predict_false(m0->m_len <
8774 (hlen + sizeof(struct tcphdr)))) {
8775 /*
8776 * TCP/IP headers are not in the first mbuf; we need
8777 * to do this the slow and painful way. Let's just
8778 * hope this doesn't happen very often.
8779 */
8780 struct tcphdr th;
8781
8782 WM_Q_EVCNT_INCR(txq, tsopain);
8783
8784 m_copydata(m0, hlen, sizeof(th), &th);
8785 if (v4) {
8786 struct ip ip;
8787
8788 m_copydata(m0, offset, sizeof(ip), &ip);
8789 ip.ip_len = 0;
8790 m_copyback(m0,
8791 offset + offsetof(struct ip, ip_len),
8792 sizeof(ip.ip_len), &ip.ip_len);
8793 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8794 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8795 } else {
8796 struct ip6_hdr ip6;
8797
8798 m_copydata(m0, offset, sizeof(ip6), &ip6);
8799 ip6.ip6_plen = 0;
8800 m_copyback(m0,
8801 offset + offsetof(struct ip6_hdr, ip6_plen),
8802 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8803 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8804 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8805 }
8806 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8807 sizeof(th.th_sum), &th.th_sum);
8808
8809 tcp_hlen = th.th_off << 2;
8810 } else {
8811 /*
8812 * TCP/IP headers are in the first mbuf; we can do
8813 * this the easy way.
8814 */
8815 struct tcphdr *th;
8816
8817 if (v4) {
8818 struct ip *ip =
8819 (void *)(mtod(m0, char *) + offset);
8820 th = (void *)(mtod(m0, char *) + hlen);
8821
8822 ip->ip_len = 0;
8823 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8824 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8825 } else {
8826 struct ip6_hdr *ip6 =
8827 (void *)(mtod(m0, char *) + offset);
8828 th = (void *)(mtod(m0, char *) + hlen);
8829
8830 ip6->ip6_plen = 0;
8831 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8832 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8833 }
8834 tcp_hlen = th->th_off << 2;
8835 }
8836 hlen += tcp_hlen;
8837 *cmdlenp |= NQTX_CMD_TSE;
8838
8839 if (v4) {
8840 WM_Q_EVCNT_INCR(txq, tso);
8841 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
8842 } else {
8843 WM_Q_EVCNT_INCR(txq, tso6);
8844 *fieldsp |= NQTXD_FIELDS_TUXSM;
8845 }
8846 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
8847 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8848 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
8849 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
8850 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
8851 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
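     		/*
     		 * mssidx likewise packs the MSS and the TCP header
     		 * length used for segmentation into one 32-bit field.
     		 */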
8852 } else {
8853 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
8854 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8855 }
8856
8857 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
8858 *fieldsp |= NQTXD_FIELDS_IXSM;
8859 cmdc |= NQTXC_CMD_IP4;
8860 }
8861
8862 if (m0->m_pkthdr.csum_flags &
8863 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
8864 WM_Q_EVCNT_INCR(txq, tusum);
8865 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
8866 cmdc |= NQTXC_CMD_TCP;
8867 else
8868 cmdc |= NQTXC_CMD_UDP;
8869
8870 cmdc |= NQTXC_CMD_IP4;
8871 *fieldsp |= NQTXD_FIELDS_TUXSM;
8872 }
8873 if (m0->m_pkthdr.csum_flags &
8874 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
8875 WM_Q_EVCNT_INCR(txq, tusum6);
8876 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
8877 cmdc |= NQTXC_CMD_TCP;
8878 else
8879 cmdc |= NQTXC_CMD_UDP;
8880
8881 cmdc |= NQTXC_CMD_IP6;
8882 *fieldsp |= NQTXD_FIELDS_TUXSM;
8883 }
8884
8885	/*
8886	 * We don't have to write a context descriptor for every packet on
8887	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
8888	 * I210 and I211; it is enough to write one per Tx queue for these
8889	 * controllers.
8890	 * Writing a context descriptor for every packet adds a little
8891	 * overhead, but it does not cause problems.
8892	 */
8893 /* Fill in the context descriptor. */
8894 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
8895 htole32(vl_len);
8896 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
8897 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
8898 htole32(cmdc);
8899 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
8900 htole32(mssidx);
8901 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8902 DPRINTF(sc, WM_DEBUG_TX,
8903 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
8904 txq->txq_next, 0, vl_len));
8905 DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
8906 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8907 txs->txs_ndesc++;
8908 }
8909
8910 /*
8911 * wm_nq_start: [ifnet interface function]
8912 *
8913 * Start packet transmission on the interface for NEWQUEUE devices
8914 */
8915 static void
8916 wm_nq_start(struct ifnet *ifp)
8917 {
8918 struct wm_softc *sc = ifp->if_softc;
8919 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8920
8921 #ifdef WM_MPSAFE
8922 KASSERT(if_is_mpsafe(ifp));
8923 #endif
8924 /*
8925 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8926 */
8927
8928 mutex_enter(txq->txq_lock);
8929 if (!txq->txq_stopping)
8930 wm_nq_start_locked(ifp);
8931 mutex_exit(txq->txq_lock);
8932 }
8933
8934 static void
8935 wm_nq_start_locked(struct ifnet *ifp)
8936 {
8937 struct wm_softc *sc = ifp->if_softc;
8938 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8939
8940 wm_nq_send_common_locked(ifp, txq, false);
8941 }
8942
8943 static int
8944 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
8945 {
8946 int qid;
8947 struct wm_softc *sc = ifp->if_softc;
8948 struct wm_txqueue *txq;
8949
8950 qid = wm_select_txqueue(ifp, m);
8951 txq = &sc->sc_queue[qid].wmq_txq;
8952
8953 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8954 m_freem(m);
8955 WM_Q_EVCNT_INCR(txq, pcqdrop);
8956 return ENOBUFS;
8957 }
8958
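	/*
	 * This if_transmit path does not pass through the accounting in
	 * if_transmit()@if.c, so update if_obytes and if_omcasts here.
	 */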
8959 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8960 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8961 if (m->m_flags & M_MCAST)
8962 if_statinc_ref(nsr, if_omcasts);
8963 IF_STAT_PUTREF(ifp);
8964
8965	/*
8966	 * There are two situations in which this mutex_tryenter() can
8967	 * fail at run time:
8968	 * (1) contention with the interrupt handler (wm_txrxintr_msix())
8969	 * (2) contention with the deferred if_start softint (wm_handle_queue())
8970	 * In case (1), the last packet enqueued to txq->txq_interq is
8971	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
8972	 * In case (2), the last packet enqueued to txq->txq_interq is also
8973	 * dequeued by wm_deferred_start_locked(), so it does not get stuck
8974	 * either.
8975	 */
8976 if (mutex_tryenter(txq->txq_lock)) {
8977 if (!txq->txq_stopping)
8978 wm_nq_transmit_locked(ifp, txq);
8979 mutex_exit(txq->txq_lock);
8980 }
8981
8982 return 0;
8983 }
8984
8985 static void
8986 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8987 {
8988
8989 wm_nq_send_common_locked(ifp, txq, true);
8990 }
8991
8992 static void
8993 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8994 bool is_transmit)
8995 {
8996 struct wm_softc *sc = ifp->if_softc;
8997 struct mbuf *m0;
8998 struct wm_txsoft *txs;
8999 bus_dmamap_t dmamap;
9000 int error, nexttx, lasttx = -1, seg, segs_needed;
9001 bool do_csum, sent;
9002 bool remap = true;
9003
9004 KASSERT(mutex_owned(txq->txq_lock));
9005
9006 if ((ifp->if_flags & IFF_RUNNING) == 0)
9007 return;
9008 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
9009 return;
9010
9011 if (__predict_false(wm_linkdown_discard(txq))) {
9012 do {
9013 if (is_transmit)
9014 m0 = pcq_get(txq->txq_interq);
9015 else
9016 IFQ_DEQUEUE(&ifp->if_snd, m0);
9017			/*
9018			 * Increment the packet counter as if the packet had
9019			 * been sent and then discarded by the link-down PHY.
9020			 */
9021 if (m0 != NULL) {
9022 if_statinc(ifp, if_opackets);
9023 m_freem(m0);
9024 }
9025 } while (m0 != NULL);
9026 return;
9027 }
9028
9029 sent = false;
9030
9031 /*
9032 * Loop through the send queue, setting up transmit descriptors
9033 * until we drain the queue, or use up all available transmit
9034 * descriptors.
9035 */
9036 for (;;) {
9037 m0 = NULL;
9038
9039 /* Get a work queue entry. */
9040 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
9041 wm_txeof(txq, UINT_MAX);
9042 if (txq->txq_sfree == 0) {
9043 DPRINTF(sc, WM_DEBUG_TX,
9044 ("%s: TX: no free job descriptors\n",
9045 device_xname(sc->sc_dev)));
9046 WM_Q_EVCNT_INCR(txq, txsstall);
9047 break;
9048 }
9049 }
9050
9051 /* Grab a packet off the queue. */
9052 if (is_transmit)
9053 m0 = pcq_get(txq->txq_interq);
9054 else
9055 IFQ_DEQUEUE(&ifp->if_snd, m0);
9056 if (m0 == NULL)
9057 break;
9058
9059 DPRINTF(sc, WM_DEBUG_TX,
9060 ("%s: TX: have packet to transmit: %p\n",
9061 device_xname(sc->sc_dev), m0));
9062
9063 txs = &txq->txq_soft[txq->txq_snext];
9064 dmamap = txs->txs_dmamap;
9065
9066 /*
9067 * Load the DMA map. If this fails, the packet either
9068 * didn't fit in the allotted number of segments, or we
9069 * were short on resources. For the too-many-segments
9070 * case, we simply report an error and drop the packet,
9071 * since we can't sanely copy a jumbo packet to a single
9072 * buffer.
9073 */
9074 retry:
9075 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
9076 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
9077 if (__predict_false(error)) {
9078 if (error == EFBIG) {
9079 if (remap == true) {
9080 struct mbuf *m;
9081
9082 remap = false;
9083 m = m_defrag(m0, M_NOWAIT);
9084 if (m != NULL) {
9085 WM_Q_EVCNT_INCR(txq, defrag);
9086 m0 = m;
9087 goto retry;
9088 }
9089 }
9090 WM_Q_EVCNT_INCR(txq, toomanyseg);
9091 log(LOG_ERR, "%s: Tx packet consumes too many "
9092 "DMA segments, dropping...\n",
9093 device_xname(sc->sc_dev));
9094 wm_dump_mbuf_chain(sc, m0);
9095 m_freem(m0);
9096 continue;
9097 }
9098 /* Short on resources, just stop for now. */
9099 DPRINTF(sc, WM_DEBUG_TX,
9100 ("%s: TX: dmamap load failed: %d\n",
9101 device_xname(sc->sc_dev), error));
9102 break;
9103 }
9104
9105 segs_needed = dmamap->dm_nsegs;
9106
9107 /*
9108 * Ensure we have enough descriptors free to describe
9109 * the packet. Note, we always reserve one descriptor
9110 * at the end of the ring due to the semantics of the
9111 * TDT register, plus one more in the event we need
9112 * to load offload context.
9113 */
9114 if (segs_needed > txq->txq_free - 2) {
9115			/*
9116			 * Not enough free descriptors to transmit this
9117			 * packet. We haven't committed anything yet,
9118			 * so just unload the DMA map and punt; the
9119			 * packet is freed below. Notify the upper
9120			 * layer that there are no more slots left.
9121			 */
9122 DPRINTF(sc, WM_DEBUG_TX,
9123 ("%s: TX: need %d (%d) descriptors, have %d\n",
9124 device_xname(sc->sc_dev), dmamap->dm_nsegs,
9125 segs_needed, txq->txq_free - 1));
9126 txq->txq_flags |= WM_TXQ_NO_SPACE;
9127 bus_dmamap_unload(sc->sc_dmat, dmamap);
9128 WM_Q_EVCNT_INCR(txq, txdstall);
9129 break;
9130 }
9131
9132 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
9133
9134 DPRINTF(sc, WM_DEBUG_TX,
9135 ("%s: TX: packet has %d (%d) DMA segments\n",
9136 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
9137
9138 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
9139
9140 /*
9141 * Store a pointer to the packet so that we can free it
9142 * later.
9143 *
9144		 * Initially, we consider the number of descriptors the
9145		 * packet uses to be the number of DMA segments. This may
9146		 * be incremented by 1 if we do checksum offload (a
9147		 * descriptor is used to set the checksum context).
9148 */
9149 txs->txs_mbuf = m0;
9150 txs->txs_firstdesc = txq->txq_next;
9151 txs->txs_ndesc = segs_needed;
9152
9153 /* Set up offload parameters for this packet. */
9154 uint32_t cmdlen, fields, dcmdlen;
9155 if (m0->m_pkthdr.csum_flags &
9156 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
9157 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9158 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
9159 wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
9160 &do_csum);
9161 } else {
9162 do_csum = false;
9163 cmdlen = 0;
9164 fields = 0;
9165 }
9166
9167 /* Sync the DMA map. */
9168 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
9169 BUS_DMASYNC_PREWRITE);
9170
9171 /* Initialize the first transmit descriptor. */
9172 nexttx = txq->txq_next;
9173 if (!do_csum) {
9174 /* Set up a legacy descriptor */
9175 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
9176 dmamap->dm_segs[0].ds_addr);
9177 txq->txq_descs[nexttx].wtx_cmdlen =
9178 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
9179 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
9180 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
9181 if (vlan_has_tag(m0)) {
9182 txq->txq_descs[nexttx].wtx_cmdlen |=
9183 htole32(WTX_CMD_VLE);
9184 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
9185 htole16(vlan_get_tag(m0));
9186 } else
9187				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
9188
9189 dcmdlen = 0;
9190 } else {
9191 /* Set up an advanced data descriptor */
9192 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9193 htole64(dmamap->dm_segs[0].ds_addr);
9194 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
9195 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9196 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
9197 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
9198 htole32(fields);
9199 DPRINTF(sc, WM_DEBUG_TX,
9200 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
9201 device_xname(sc->sc_dev), nexttx,
9202 (uint64_t)dmamap->dm_segs[0].ds_addr));
9203 DPRINTF(sc, WM_DEBUG_TX,
9204 ("\t 0x%08x%08x\n", fields,
9205 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
9206 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
9207 }
9208
9209 lasttx = nexttx;
9210 nexttx = WM_NEXTTX(txq, nexttx);
9211 /*
9212 * Fill in the next descriptors. Legacy or advanced format
9213 * is the same here.
9214 */
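		/*
		 * dcmdlen distinguishes the two formats: it is zero for
		 * legacy descriptors and DTYP_D | DEXT for advanced data
		 * descriptors.
		 */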
9215 for (seg = 1; seg < dmamap->dm_nsegs;
9216 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
9217 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9218 htole64(dmamap->dm_segs[seg].ds_addr);
9219 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9220 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
9221 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
9222 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
9223 lasttx = nexttx;
9224
9225 DPRINTF(sc, WM_DEBUG_TX,
9226 ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
9227 device_xname(sc->sc_dev), nexttx,
9228 (uint64_t)dmamap->dm_segs[seg].ds_addr,
9229 dmamap->dm_segs[seg].ds_len));
9230 }
9231
9232 KASSERT(lasttx != -1);
9233
9234 /*
9235 * Set up the command byte on the last descriptor of
9236 * the packet. If we're in the interrupt delay window,
9237 * delay the interrupt.
9238 */
9239 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
9240 (NQTX_CMD_EOP | NQTX_CMD_RS));
9241 txq->txq_descs[lasttx].wtx_cmdlen |=
9242 htole32(WTX_CMD_EOP | WTX_CMD_RS);
9243
9244 txs->txs_lastdesc = lasttx;
9245
9246 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
9247 device_xname(sc->sc_dev),
9248 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9249
9250 /* Sync the descriptors we're using. */
9251 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9252 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
9253
9254 /* Give the packet to the chip. */
9255 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9256 sent = true;
9257
9258 DPRINTF(sc, WM_DEBUG_TX,
9259 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
9260
9261 DPRINTF(sc, WM_DEBUG_TX,
9262 ("%s: TX: finished transmitting packet, job %d\n",
9263 device_xname(sc->sc_dev), txq->txq_snext));
9264
9265 /* Advance the tx pointer. */
9266 txq->txq_free -= txs->txs_ndesc;
9267 txq->txq_next = nexttx;
9268
9269 txq->txq_sfree--;
9270 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9271
9272 /* Pass the packet to any BPF listeners. */
9273 bpf_mtap(ifp, m0, BPF_D_OUT);
9274 }
9275
9276 if (m0 != NULL) {
9277 txq->txq_flags |= WM_TXQ_NO_SPACE;
9278 WM_Q_EVCNT_INCR(txq, descdrop);
9279 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
9280 __func__));
9281 m_freem(m0);
9282 }
9283
9284 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9285 /* No more slots; notify upper layer. */
9286 txq->txq_flags |= WM_TXQ_NO_SPACE;
9287 }
9288
9289 if (sent) {
9290 /* Set a watchdog timer in case the chip flakes out. */
9291 txq->txq_lastsent = time_uptime;
9292 txq->txq_sending = true;
9293 }
9294 }
9295
9296 static void
9297 wm_deferred_start_locked(struct wm_txqueue *txq)
9298 {
9299 struct wm_softc *sc = txq->txq_sc;
9300 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9301 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
9302 int qid = wmq->wmq_id;
9303
9304 KASSERT(mutex_owned(txq->txq_lock));
9305 KASSERT(!txq->txq_stopping);
9306
9307 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
9308		/* XXX needed for ALTQ or single-CPU systems */
9309 if (qid == 0)
9310 wm_nq_start_locked(ifp);
9311 wm_nq_transmit_locked(ifp, txq);
9312 } else {
9313		/* XXX needed for ALTQ or single-CPU systems */
9314 if (qid == 0)
9315 wm_start_locked(ifp);
9316 wm_transmit_locked(ifp, txq);
9317 }
9318 }
9319
9320 /* Interrupt */
9321
9322 /*
9323 * wm_txeof:
9324 *
9325 * Helper; handle transmit interrupts.
9326 */
9327 static bool
9328 wm_txeof(struct wm_txqueue *txq, u_int limit)
9329 {
9330 struct wm_softc *sc = txq->txq_sc;
9331 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9332 struct wm_txsoft *txs;
9333 int count = 0;
9334 int i;
9335 uint8_t status;
9336 bool more = false;
9337
9338 KASSERT(mutex_owned(txq->txq_lock));
9339
9340 if (txq->txq_stopping)
9341 return false;
9342
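	/*
	 * Clear the no-space flag optimistically; the send path sets it
	 * again if the ring is still full after this reclaim pass.
	 */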
9343 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
9344
9345 /*
9346 * Go through the Tx list and free mbufs for those
9347 * frames which have been transmitted.
9348 */
9349 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
9350 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
9351 txs = &txq->txq_soft[i];
9352
9353 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
9354 device_xname(sc->sc_dev), i));
9355
9356 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
9357 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9358
9359 status =
9360 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
9361 if ((status & WTX_ST_DD) == 0) {
9362 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
9363 BUS_DMASYNC_PREREAD);
9364 break;
9365 }
9366
9367 if (limit-- == 0) {
9368 more = true;
9369 DPRINTF(sc, WM_DEBUG_TX,
9370 ("%s: TX: loop limited, job %d is not processed\n",
9371 device_xname(sc->sc_dev), i));
9372 break;
9373 }
9374
9375 count++;
9376 DPRINTF(sc, WM_DEBUG_TX,
9377 ("%s: TX: job %d done: descs %d..%d\n",
9378 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
9379 txs->txs_lastdesc));
9380
9381 /*
9382 * XXX We should probably be using the statistics
9383 * XXX registers, but I don't know if they exist
9384 * XXX on chips before the i82544.
9385 */
9386
9387 #ifdef WM_EVENT_COUNTERS
9388 if (status & WTX_ST_TU)
9389 WM_Q_EVCNT_INCR(txq, underrun);
9390 #endif /* WM_EVENT_COUNTERS */
9391
9392		/*
9393		 * The documents for the 82574 and newer say the status field has
9394		 * neither the EC (Excessive Collision) bit nor the LC (Late Collision)
9395		 * bit (both are reserved). Refer to the "PCIe GbE Controller Open
9396		 * Source Software Developer's Manual" and the 82574 and newer datasheets.
9397		 *
9398		 * XXX The LC bit was observed set on an I218 even though the media
9399		 * was full duplex, so the bit might have some other meaning (no
9400		 * documentation is available).
9401		 */
9402
9403 if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
9404 && ((sc->sc_type < WM_T_82574)
9405 || (sc->sc_type == WM_T_80003))) {
9406 if_statinc(ifp, if_oerrors);
9407 if (status & WTX_ST_LC)
9408 log(LOG_WARNING, "%s: late collision\n",
9409 device_xname(sc->sc_dev));
9410 else if (status & WTX_ST_EC) {
9411 if_statadd(ifp, if_collisions,
9412 TX_COLLISION_THRESHOLD + 1);
9413 log(LOG_WARNING, "%s: excessive collisions\n",
9414 device_xname(sc->sc_dev));
9415 }
9416 } else
9417 if_statinc(ifp, if_opackets);
9418
9419 txq->txq_packets++;
9420 txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
9421
9422 txq->txq_free += txs->txs_ndesc;
9423 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
9424 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
9425 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
9426 m_freem(txs->txs_mbuf);
9427 txs->txs_mbuf = NULL;
9428 }
9429
9430 /* Update the dirty transmit buffer pointer. */
9431 txq->txq_sdirty = i;
9432 DPRINTF(sc, WM_DEBUG_TX,
9433 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
9434
9435 if (count != 0)
9436 rnd_add_uint32(&sc->rnd_source, count);
9437
9438 /*
9439 * If there are no more pending transmissions, cancel the watchdog
9440 * timer.
9441 */
9442 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
9443 txq->txq_sending = false;
9444
9445 return more;
9446 }
9447
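/*
 * Rx descriptor accessors. The 82574 uses extended descriptors, NEWQUEUE
 * devices (82575 and newer) use advanced descriptors and all other chips
 * use the legacy layout, so each field must be fetched per-format.
 */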
9448 static inline uint32_t
9449 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
9450 {
9451 struct wm_softc *sc = rxq->rxq_sc;
9452
9453 if (sc->sc_type == WM_T_82574)
9454 return EXTRXC_STATUS(
9455 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9456 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9457 return NQRXC_STATUS(
9458 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9459 else
9460 return rxq->rxq_descs[idx].wrx_status;
9461 }
9462
9463 static inline uint32_t
9464 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
9465 {
9466 struct wm_softc *sc = rxq->rxq_sc;
9467
9468 if (sc->sc_type == WM_T_82574)
9469 return EXTRXC_ERROR(
9470 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9471 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9472 return NQRXC_ERROR(
9473 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9474 else
9475 return rxq->rxq_descs[idx].wrx_errors;
9476 }
9477
9478 static inline uint16_t
9479 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
9480 {
9481 struct wm_softc *sc = rxq->rxq_sc;
9482
9483 if (sc->sc_type == WM_T_82574)
9484 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
9485 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9486 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
9487 else
9488 return rxq->rxq_descs[idx].wrx_special;
9489 }
9490
9491 static inline int
9492 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
9493 {
9494 struct wm_softc *sc = rxq->rxq_sc;
9495
9496 if (sc->sc_type == WM_T_82574)
9497 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
9498 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9499 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
9500 else
9501 return rxq->rxq_descs[idx].wrx_len;
9502 }
9503
9504 #ifdef WM_DEBUG
9505 static inline uint32_t
9506 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
9507 {
9508 struct wm_softc *sc = rxq->rxq_sc;
9509
9510 if (sc->sc_type == WM_T_82574)
9511 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
9512 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9513 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
9514 else
9515 return 0;
9516 }
9517
9518 static inline uint8_t
9519 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
9520 {
9521 struct wm_softc *sc = rxq->rxq_sc;
9522
9523 if (sc->sc_type == WM_T_82574)
9524 return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
9525 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9526 return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
9527 else
9528 return 0;
9529 }
9530 #endif /* WM_DEBUG */
9531
9532 static inline bool
9533 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
9534 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9535 {
9536
9537 if (sc->sc_type == WM_T_82574)
9538 return (status & ext_bit) != 0;
9539 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9540 return (status & nq_bit) != 0;
9541 else
9542 return (status & legacy_bit) != 0;
9543 }
9544
9545 static inline bool
9546 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
9547 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9548 {
9549
9550 if (sc->sc_type == WM_T_82574)
9551 return (error & ext_bit) != 0;
9552 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9553 return (error & nq_bit) != 0;
9554 else
9555 return (error & legacy_bit) != 0;
9556 }
9557
9558 static inline bool
9559 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
9560 {
9561
9562 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9563 WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
9564 return true;
9565 else
9566 return false;
9567 }
9568
9569 static inline bool
9570 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
9571 {
9572 struct wm_softc *sc = rxq->rxq_sc;
9573
9574 /* XXX missing error bit for newqueue? */
9575 if (wm_rxdesc_is_set_error(sc, errors,
9576 WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
9577 EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
9578 | EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
9579 NQRXC_ERROR_RXE)) {
9580 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
9581 EXTRXC_ERROR_SE, 0))
9582 log(LOG_WARNING, "%s: symbol error\n",
9583 device_xname(sc->sc_dev));
9584 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
9585 EXTRXC_ERROR_SEQ, 0))
9586 log(LOG_WARNING, "%s: receive sequence error\n",
9587 device_xname(sc->sc_dev));
9588 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
9589 EXTRXC_ERROR_CE, 0))
9590 log(LOG_WARNING, "%s: CRC error\n",
9591 device_xname(sc->sc_dev));
9592 return true;
9593 }
9594
9595 return false;
9596 }
9597
9598 static inline bool
9599 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
9600 {
9601 struct wm_softc *sc = rxq->rxq_sc;
9602
9603 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
9604 NQRXC_STATUS_DD)) {
9605 /* We have processed all of the receive descriptors. */
9606 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
9607 return false;
9608 }
9609
9610 return true;
9611 }
9612
9613 static inline bool
9614 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
9615 uint16_t vlantag, struct mbuf *m)
9616 {
9617
9618 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9619 WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
9620 vlan_set_tag(m, le16toh(vlantag));
9621 }
9622
9623 return true;
9624 }
9625
9626 static inline void
9627 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
9628 uint32_t errors, struct mbuf *m)
9629 {
9630 struct wm_softc *sc = rxq->rxq_sc;
9631
9632 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
9633 if (wm_rxdesc_is_set_status(sc, status,
9634 WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
9635 WM_Q_EVCNT_INCR(rxq, ipsum);
9636 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
9637 if (wm_rxdesc_is_set_error(sc, errors,
9638 WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
9639 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
9640 }
9641 if (wm_rxdesc_is_set_status(sc, status,
9642 WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
9643 /*
9644 * Note: we don't know if this was TCP or UDP,
9645 * so we just set both bits, and expect the
9646 * upper layers to deal.
9647 */
9648 WM_Q_EVCNT_INCR(rxq, tusum);
9649 m->m_pkthdr.csum_flags |=
9650 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9651 M_CSUM_TCPv6 | M_CSUM_UDPv6;
9652 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
9653 EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
9654 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
9655 }
9656 }
9657 }
9658
9659 /*
9660 * wm_rxeof:
9661 *
9662 * Helper; handle receive interrupts.
9663 */
9664 static bool
9665 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
9666 {
9667 struct wm_softc *sc = rxq->rxq_sc;
9668 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9669 struct wm_rxsoft *rxs;
9670 struct mbuf *m;
9671 int i, len;
9672 int count = 0;
9673 uint32_t status, errors;
9674 uint16_t vlantag;
9675 bool more = false;
9676
9677 KASSERT(mutex_owned(rxq->rxq_lock));
9678
9679 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
9680 rxs = &rxq->rxq_soft[i];
9681
9682 DPRINTF(sc, WM_DEBUG_RX,
9683 ("%s: RX: checking descriptor %d\n",
9684 device_xname(sc->sc_dev), i));
9685 wm_cdrxsync(rxq, i,
9686 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9687
9688 status = wm_rxdesc_get_status(rxq, i);
9689 errors = wm_rxdesc_get_errors(rxq, i);
9690 len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
9691 vlantag = wm_rxdesc_get_vlantag(rxq, i);
9692 #ifdef WM_DEBUG
9693 uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
9694 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
9695 #endif
9696
9697 if (!wm_rxdesc_dd(rxq, i, status))
9698 break;
9699
9700 if (limit-- == 0) {
9701 more = true;
9702 DPRINTF(sc, WM_DEBUG_RX,
9703 ("%s: RX: loop limited, descriptor %d is not processed\n",
9704 device_xname(sc->sc_dev), i));
9705 break;
9706 }
9707
9708 count++;
9709 if (__predict_false(rxq->rxq_discard)) {
9710 DPRINTF(sc, WM_DEBUG_RX,
9711 ("%s: RX: discarding contents of descriptor %d\n",
9712 device_xname(sc->sc_dev), i));
9713 wm_init_rxdesc(rxq, i);
9714 if (wm_rxdesc_is_eop(rxq, status)) {
9715 /* Reset our state. */
9716 DPRINTF(sc, WM_DEBUG_RX,
9717 ("%s: RX: resetting rxdiscard -> 0\n",
9718 device_xname(sc->sc_dev)));
9719 rxq->rxq_discard = 0;
9720 }
9721 continue;
9722 }
9723
9724 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9725 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
9726
9727 m = rxs->rxs_mbuf;
9728
9729 /*
9730 * Add a new receive buffer to the ring, unless of
9731 * course the length is zero. Treat the latter as a
9732 * failed mapping.
9733 */
9734 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
9735 /*
9736 * Failed, throw away what we've done so
9737 * far, and discard the rest of the packet.
9738 */
9739 if_statinc(ifp, if_ierrors);
9740 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9741 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
9742 wm_init_rxdesc(rxq, i);
9743 if (!wm_rxdesc_is_eop(rxq, status))
9744 rxq->rxq_discard = 1;
9745 if (rxq->rxq_head != NULL)
9746 m_freem(rxq->rxq_head);
9747 WM_RXCHAIN_RESET(rxq);
9748 DPRINTF(sc, WM_DEBUG_RX,
9749 ("%s: RX: Rx buffer allocation failed, "
9750 "dropping packet%s\n", device_xname(sc->sc_dev),
9751 rxq->rxq_discard ? " (discard)" : ""));
9752 continue;
9753 }
9754
9755 m->m_len = len;
9756 rxq->rxq_len += len;
9757 DPRINTF(sc, WM_DEBUG_RX,
9758 ("%s: RX: buffer at %p len %d\n",
9759 device_xname(sc->sc_dev), m->m_data, len));
9760
9761 /* If this is not the end of the packet, keep looking. */
9762 if (!wm_rxdesc_is_eop(rxq, status)) {
9763 WM_RXCHAIN_LINK(rxq, m);
9764 DPRINTF(sc, WM_DEBUG_RX,
9765 ("%s: RX: not yet EOP, rxlen -> %d\n",
9766 device_xname(sc->sc_dev), rxq->rxq_len));
9767 continue;
9768 }
9769
9770		/*
9771		 * Okay, we have the entire packet now. The chip is configured to
9772		 * include the FCS (not all chips can be configured to strip it),
9773		 * so normally we need to trim it, except on I35[04] and I21[01].
9774		 * Those chips have an errata: the RCTL_SECRC bit in the RCTL
9775		 * register is always set, so the FCS is already stripped and we
9776		 * don't trim it. PCH2 and newer chips also don't include the FCS
9777		 * when jumbo frames are used, to work around an errata.
9778		 * We may need to adjust the length of the previous mbuf in the
9779		 * chain if the current mbuf is too short.
9780		 */
9781 if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
9782 if (m->m_len < ETHER_CRC_LEN) {
9783 rxq->rxq_tail->m_len
9784 -= (ETHER_CRC_LEN - m->m_len);
9785 m->m_len = 0;
9786 } else
9787 m->m_len -= ETHER_CRC_LEN;
9788 len = rxq->rxq_len - ETHER_CRC_LEN;
9789 } else
9790 len = rxq->rxq_len;
9791
9792 WM_RXCHAIN_LINK(rxq, m);
9793
9794 *rxq->rxq_tailp = NULL;
9795 m = rxq->rxq_head;
9796
9797 WM_RXCHAIN_RESET(rxq);
9798
9799 DPRINTF(sc, WM_DEBUG_RX,
9800 ("%s: RX: have entire packet, len -> %d\n",
9801 device_xname(sc->sc_dev), len));
9802
9803 /* If an error occurred, update stats and drop the packet. */
9804 if (wm_rxdesc_has_errors(rxq, errors)) {
9805 m_freem(m);
9806 continue;
9807 }
9808
9809 /* No errors. Receive the packet. */
9810 m_set_rcvif(m, ifp);
9811 m->m_pkthdr.len = len;
9812		/*
9813		 * TODO
9814		 * rsshash and rsstype should be saved to this mbuf.
9815		 */
9816 DPRINTF(sc, WM_DEBUG_RX,
9817 ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
9818 device_xname(sc->sc_dev), rsstype, rsshash));
9819
9820 /*
9821 * If VLANs are enabled, VLAN packets have been unwrapped
9822 * for us. Associate the tag with the packet.
9823 */
9824 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
9825 continue;
9826
9827 /* Set up checksum info for this packet. */
9828 wm_rxdesc_ensure_checksum(rxq, status, errors, m);
9829
9830 rxq->rxq_packets++;
9831 rxq->rxq_bytes += len;
9832 /* Pass it on. */
9833 if_percpuq_enqueue(sc->sc_ipq, m);
9834
9835 if (rxq->rxq_stopping)
9836 break;
9837 }
9838 rxq->rxq_ptr = i;
9839
9840 if (count != 0)
9841 rnd_add_uint32(&sc->rnd_source, count);
9842
9843 DPRINTF(sc, WM_DEBUG_RX,
9844 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
9845
9846 return more;
9847 }
9848
9849 /*
9850 * wm_linkintr_gmii:
9851 *
9852 * Helper; handle link interrupts for GMII.
9853 */
9854 static void
9855 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
9856 {
9857 device_t dev = sc->sc_dev;
9858 uint32_t status, reg;
9859 bool link;
9860 int rv;
9861
9862 KASSERT(WM_CORE_LOCKED(sc));
9863
9864 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
9865 __func__));
9866
9867 if ((icr & ICR_LSC) == 0) {
9868 if (icr & ICR_RXSEQ)
9869 DPRINTF(sc, WM_DEBUG_LINK,
9870 ("%s: LINK Receive sequence error\n",
9871 device_xname(dev)));
9872 return;
9873 }
9874
9875 /* Link status changed */
9876 status = CSR_READ(sc, WMREG_STATUS);
9877 link = status & STATUS_LU;
9878 if (link) {
9879 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9880 device_xname(dev),
9881 (status & STATUS_FD) ? "FDX" : "HDX"));
9882 if (wm_phy_need_linkdown_discard(sc)) {
9883 DPRINTF(sc, WM_DEBUG_LINK,
9884 ("%s: linkintr: Clear linkdown discard flag\n",
9885 device_xname(dev)));
9886 wm_clear_linkdown_discard(sc);
9887 }
9888 } else {
9889 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9890 device_xname(dev)));
9891 if (wm_phy_need_linkdown_discard(sc)) {
9892 DPRINTF(sc, WM_DEBUG_LINK,
9893 ("%s: linkintr: Set linkdown discard flag\n",
9894 device_xname(dev)));
9895 wm_set_linkdown_discard(sc);
9896 }
9897 }
9898 if ((sc->sc_type == WM_T_ICH8) && (link == false))
9899 wm_gig_downshift_workaround_ich8lan(sc);
9900
9901 if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
9902 wm_kmrn_lock_loss_workaround_ich8lan(sc);
9903
9904 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
9905 device_xname(dev)));
9906 mii_pollstat(&sc->sc_mii);
9907 if (sc->sc_type == WM_T_82543) {
9908 int miistatus, active;
9909
9910 /*
9911 * With 82543, we need to force speed and
9912 * duplex on the MAC equal to what the PHY
9913 * speed and duplex configuration is.
9914 */
9915 miistatus = sc->sc_mii.mii_media_status;
9916
9917 if (miistatus & IFM_ACTIVE) {
9918 active = sc->sc_mii.mii_media_active;
9919 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9920 switch (IFM_SUBTYPE(active)) {
9921 case IFM_10_T:
9922 sc->sc_ctrl |= CTRL_SPEED_10;
9923 break;
9924 case IFM_100_TX:
9925 sc->sc_ctrl |= CTRL_SPEED_100;
9926 break;
9927 case IFM_1000_T:
9928 sc->sc_ctrl |= CTRL_SPEED_1000;
9929 break;
9930 default:
9931				/*
9932				 * Fiber?
9933				 * Should not enter here.
9934				 */
9935 device_printf(dev, "unknown media (%x)\n",
9936 active);
9937 break;
9938 }
9939 if (active & IFM_FDX)
9940 sc->sc_ctrl |= CTRL_FD;
9941 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9942 }
9943 } else if (sc->sc_type == WM_T_PCH) {
9944 wm_k1_gig_workaround_hv(sc,
9945 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9946 }
9947
9948 /*
9949 * When connected at 10Mbps half-duplex, some parts are excessively
9950 * aggressive resulting in many collisions. To avoid this, increase
9951 * the IPG and reduce Rx latency in the PHY.
9952 */
9953 if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
9954 && link) {
9955 uint32_t tipg_reg;
9956 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
9957 bool fdx;
9958 uint16_t emi_addr, emi_val;
9959
9960 tipg_reg = CSR_READ(sc, WMREG_TIPG);
9961 tipg_reg &= ~TIPG_IPGT_MASK;
9962 fdx = status & STATUS_FD;
9963
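		/*
		 * Select the transmit IPG and the PHY Rx latency (EMI)
		 * value appropriate to the negotiated speed and duplex.
		 */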
9964 if (!fdx && (speed == STATUS_SPEED_10)) {
9965 tipg_reg |= 0xff;
9966 /* Reduce Rx latency in analog PHY */
9967 emi_val = 0;
9968 } else if ((sc->sc_type >= WM_T_PCH_SPT) &&
9969 fdx && speed != STATUS_SPEED_1000) {
9970 tipg_reg |= 0xc;
9971 emi_val = 1;
9972 } else {
9973 /* Roll back the default values */
9974 tipg_reg |= 0x08;
9975 emi_val = 1;
9976 }
9977
9978 CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
9979
9980 rv = sc->phy.acquire(sc);
9981 if (rv)
9982 return;
9983
9984 if (sc->sc_type == WM_T_PCH2)
9985 emi_addr = I82579_RX_CONFIG;
9986 else
9987 emi_addr = I217_RX_CONFIG;
9988 rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
9989
9990 if (sc->sc_type >= WM_T_PCH_LPT) {
9991 uint16_t phy_reg;
9992
9993 sc->phy.readreg_locked(dev, 2,
9994 I217_PLL_CLOCK_GATE_REG, &phy_reg);
9995 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
9996 if (speed == STATUS_SPEED_100
9997 || speed == STATUS_SPEED_10)
9998 phy_reg |= 0x3e8;
9999 else
10000 phy_reg |= 0xfa;
10001 sc->phy.writereg_locked(dev, 2,
10002 I217_PLL_CLOCK_GATE_REG, phy_reg);
10003
10004 if (speed == STATUS_SPEED_1000) {
10005 sc->phy.readreg_locked(dev, 2,
10006 HV_PM_CTRL, &phy_reg);
10007
10008 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
10009
10010 sc->phy.writereg_locked(dev, 2,
10011 HV_PM_CTRL, phy_reg);
10012 }
10013 }
10014 sc->phy.release(sc);
10015
10016 if (rv)
10017 return;
10018
10019 if (sc->sc_type >= WM_T_PCH_SPT) {
10020 uint16_t data, ptr_gap;
10021
10022 if (speed == STATUS_SPEED_1000) {
10023 rv = sc->phy.acquire(sc);
10024 if (rv)
10025 return;
10026
10027 rv = sc->phy.readreg_locked(dev, 2,
10028 I82579_UNKNOWN1, &data);
10029 if (rv) {
10030 sc->phy.release(sc);
10031 return;
10032 }
10033
10034 ptr_gap = (data & (0x3ff << 2)) >> 2;
10035 if (ptr_gap < 0x18) {
10036 data &= ~(0x3ff << 2);
10037 data |= (0x18 << 2);
10038 rv = sc->phy.writereg_locked(dev,
10039 2, I82579_UNKNOWN1, data);
10040 }
10041 sc->phy.release(sc);
10042 if (rv)
10043 return;
10044 } else {
10045 rv = sc->phy.acquire(sc);
10046 if (rv)
10047 return;
10048
10049 rv = sc->phy.writereg_locked(dev, 2,
10050 I82579_UNKNOWN1, 0xc023);
10051 sc->phy.release(sc);
10052 if (rv)
10053 return;
10054
10055 }
10056 }
10057 }
10058
10059	/*
10060	 * I217 packet loss issue:
10061	 * ensure that the FEXTNVM4 Beacon Duration is set correctly
10062	 * on power-up.
10063	 * Set the Beacon Duration for I217 to 8 usec.
10064	 */
10065 if (sc->sc_type >= WM_T_PCH_LPT) {
10066 reg = CSR_READ(sc, WMREG_FEXTNVM4);
10067 reg &= ~FEXTNVM4_BEACON_DURATION;
10068 reg |= FEXTNVM4_BEACON_DURATION_8US;
10069 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
10070 }
10071
10072 /* Work-around I218 hang issue */
10073 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
10074 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
10075 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
10076 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
10077 wm_k1_workaround_lpt_lp(sc, link);
10078
10079 if (sc->sc_type >= WM_T_PCH_LPT) {
10080 /*
10081 * Set platform power management values for Latency
10082 * Tolerance Reporting (LTR)
10083 */
10084 wm_platform_pm_pch_lpt(sc,
10085 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
10086 }
10087
10088 /* Clear link partner's EEE ability */
10089 sc->eee_lp_ability = 0;
10090
10091 /* FEXTNVM6 K1-off workaround */
10092 if (sc->sc_type == WM_T_PCH_SPT) {
10093 reg = CSR_READ(sc, WMREG_FEXTNVM6);
10094 if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
10095 reg |= FEXTNVM6_K1_OFF_ENABLE;
10096 else
10097 reg &= ~FEXTNVM6_K1_OFF_ENABLE;
10098 CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
10099 }
10100
10101 if (!link)
10102 return;
10103
10104 switch (sc->sc_type) {
10105 case WM_T_PCH2:
10106 wm_k1_workaround_lv(sc);
10107 /* FALLTHROUGH */
10108 case WM_T_PCH:
10109 if (sc->sc_phytype == WMPHY_82578)
10110 wm_link_stall_workaround_hv(sc);
10111 break;
10112 default:
10113 break;
10114 }
10115
10116 /* Enable/Disable EEE after link up */
10117 if (sc->sc_phytype > WMPHY_82579)
10118 wm_set_eee_pchlan(sc);
10119 }
10120
10121 /*
10122 * wm_linkintr_tbi:
10123 *
10124 * Helper; handle link interrupts for TBI mode.
10125 */
10126 static void
10127 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
10128 {
10129 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10130 uint32_t status;
10131
10132 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10133 __func__));
10134
10135 status = CSR_READ(sc, WMREG_STATUS);
10136 if (icr & ICR_LSC) {
10137 wm_check_for_link(sc);
10138 if (status & STATUS_LU) {
10139 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
10140 device_xname(sc->sc_dev),
10141 (status & STATUS_FD) ? "FDX" : "HDX"));
10142			/*
10143			 * NOTE: the hardware updates TFCE and RFCE in CTRL
10144			 * automatically, so we should re-read sc->sc_ctrl.
10145			 */
10146
10147 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
10148 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10149 sc->sc_fcrtl &= ~FCRTL_XONE;
10150 if (status & STATUS_FD)
10151 sc->sc_tctl |=
10152 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10153 else
10154 sc->sc_tctl |=
10155 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10156 if (sc->sc_ctrl & CTRL_TFCE)
10157 sc->sc_fcrtl |= FCRTL_XONE;
10158 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10159 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
10160 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
10161 sc->sc_tbi_linkup = 1;
10162 if_link_state_change(ifp, LINK_STATE_UP);
10163 } else {
10164 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10165 device_xname(sc->sc_dev)));
10166 sc->sc_tbi_linkup = 0;
10167 if_link_state_change(ifp, LINK_STATE_DOWN);
10168 }
10169 /* Update LED */
10170 wm_tbi_serdes_set_linkled(sc);
10171 } else if (icr & ICR_RXSEQ)
10172 DPRINTF(sc, WM_DEBUG_LINK,
10173 ("%s: LINK: Receive sequence error\n",
10174 device_xname(sc->sc_dev)));
10175 }
10176
10177 /*
10178 * wm_linkintr_serdes:
10179 *
10180	 *	Helper; handle link interrupts for SERDES mode.
10181 */
10182 static void
10183 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
10184 {
10185 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10186 struct mii_data *mii = &sc->sc_mii;
10187 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10188 uint32_t pcs_adv, pcs_lpab, reg;
10189
10190 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10191 __func__));
10192
10193 if (icr & ICR_LSC) {
10194 /* Check PCS */
10195 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10196 if ((reg & PCS_LSTS_LINKOK) != 0) {
10197 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
10198 device_xname(sc->sc_dev)));
10199 mii->mii_media_status |= IFM_ACTIVE;
10200 sc->sc_tbi_linkup = 1;
10201 if_link_state_change(ifp, LINK_STATE_UP);
10202 } else {
10203 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10204 device_xname(sc->sc_dev)));
10205 mii->mii_media_status |= IFM_NONE;
10206 sc->sc_tbi_linkup = 0;
10207 if_link_state_change(ifp, LINK_STATE_DOWN);
10208 wm_tbi_serdes_set_linkled(sc);
10209 return;
10210 }
10211 mii->mii_media_active |= IFM_1000_SX;
10212 if ((reg & PCS_LSTS_FDX) != 0)
10213 mii->mii_media_active |= IFM_FDX;
10214 else
10215 mii->mii_media_active |= IFM_HDX;
10216 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
10217 /* Check flow */
10218 reg = CSR_READ(sc, WMREG_PCS_LSTS);
10219 if ((reg & PCS_LSTS_AN_COMP) == 0) {
10220 DPRINTF(sc, WM_DEBUG_LINK,
10221 ("XXX LINKOK but not ACOMP\n"));
10222 return;
10223 }
10224 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
10225 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
10226 DPRINTF(sc, WM_DEBUG_LINK,
10227 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
10228 if ((pcs_adv & TXCW_SYM_PAUSE)
10229 && (pcs_lpab & TXCW_SYM_PAUSE)) {
10230 mii->mii_media_active |= IFM_FLOW
10231 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
10232 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
10233 && (pcs_adv & TXCW_ASYM_PAUSE)
10234 && (pcs_lpab & TXCW_SYM_PAUSE)
10235 && (pcs_lpab & TXCW_ASYM_PAUSE))
10236 mii->mii_media_active |= IFM_FLOW
10237 | IFM_ETH_TXPAUSE;
10238 else if ((pcs_adv & TXCW_SYM_PAUSE)
10239 && (pcs_adv & TXCW_ASYM_PAUSE)
10240 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
10241 && (pcs_lpab & TXCW_ASYM_PAUSE))
10242 mii->mii_media_active |= IFM_FLOW
10243 | IFM_ETH_RXPAUSE;
10244 }
10245 /* Update LED */
10246 wm_tbi_serdes_set_linkled(sc);
10247 } else
10248 DPRINTF(sc, WM_DEBUG_LINK,
10249 ("%s: LINK: Receive sequence error\n",
10250 device_xname(sc->sc_dev)));
10251 }
10252
10253 /*
10254 * wm_linkintr:
10255 *
10256 * Helper; handle link interrupts.
10257 */
10258 static void
10259 wm_linkintr(struct wm_softc *sc, uint32_t icr)
10260 {
10261
10262 KASSERT(WM_CORE_LOCKED(sc));
10263
10264 if (sc->sc_flags & WM_F_HAS_MII)
10265 wm_linkintr_gmii(sc, icr);
10266 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
10267 && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
10268 wm_linkintr_serdes(sc, icr);
10269 else
10270 wm_linkintr_tbi(sc, icr);
10271 }
10272
10274 static inline void
10275 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
10276 {
10277
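	/*
	 * Defer the remaining Tx/Rx processing either to a workqueue or
	 * to a softint, depending on the per-queue setting copied from
	 * sc_txrx_use_workqueue.
	 */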
10278 if (wmq->wmq_txrx_use_workqueue)
10279 workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
10280 else
10281 softint_schedule(wmq->wmq_si);
10282 }
10283
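/*
 * Legacy (INTx/MSI) interrupt mask helpers: IMC masks all interrupt
 * causes; IMS re-enables exactly the causes programmed into sc_icr.
 */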
10284 static inline void
10285 wm_legacy_intr_disable(struct wm_softc *sc)
10286 {
10287
10288 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
10289 }
10290
10291 static inline void
10292 wm_legacy_intr_enable(struct wm_softc *sc)
10293 {
10294
10295 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
10296 }
10297
10298 /*
10299 * wm_intr_legacy:
10300 *
10301 * Interrupt service routine for INTx and MSI.
10302 */
10303 static int
10304 wm_intr_legacy(void *arg)
10305 {
10306 struct wm_softc *sc = arg;
10307 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10308 struct wm_queue *wmq = &sc->sc_queue[0];
10309 struct wm_txqueue *txq = &wmq->wmq_txq;
10310 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10311 u_int txlimit = sc->sc_tx_intr_process_limit;
10312 u_int rxlimit = sc->sc_rx_intr_process_limit;
10313 uint32_t icr, rndval = 0;
10314 bool more = false;
10315
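	/*
	 * Reading ICR acknowledges (clears) the asserted causes. If none
	 * of the causes we enabled (sc_icr) are set, the interrupt is not
	 * ours.
	 */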
10316 icr = CSR_READ(sc, WMREG_ICR);
10317 if ((icr & sc->sc_icr) == 0)
10318 return 0;
10319
10320 DPRINTF(sc, WM_DEBUG_TX,
10321 ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
10322 if (rndval == 0)
10323 rndval = icr;
10324
10325 mutex_enter(txq->txq_lock);
10326
10327 if (txq->txq_stopping) {
10328 mutex_exit(txq->txq_lock);
10329 return 1;
10330 }
10331
10332 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10333 if (icr & ICR_TXDW) {
10334 DPRINTF(sc, WM_DEBUG_TX,
10335 ("%s: TX: got TXDW interrupt\n",
10336 device_xname(sc->sc_dev)));
10337 WM_Q_EVCNT_INCR(txq, txdw);
10338 }
10339 #endif
10340 if (txlimit > 0) {
10341 more |= wm_txeof(txq, txlimit);
10342 if (!IF_IS_EMPTY(&ifp->if_snd))
10343 more = true;
10344 } else
10345 more = true;
10346 mutex_exit(txq->txq_lock);
10347
10348 mutex_enter(rxq->rxq_lock);
10349
10350 if (rxq->rxq_stopping) {
10351 mutex_exit(rxq->rxq_lock);
10352 return 1;
10353 }
10354
10355 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10356 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
10357 DPRINTF(sc, WM_DEBUG_RX,
10358 ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
10359 device_xname(sc->sc_dev),
10360 icr & (ICR_RXDMT0 | ICR_RXT0)));
10361 WM_Q_EVCNT_INCR(rxq, intr);
10362 }
10363 #endif
10364 if (rxlimit > 0) {
10365		/*
10366		 * wm_rxeof() does *not* call upper layer functions directly,
10367		 * as if_percpuq_enqueue() just calls softint_schedule().
10368		 * So we can call wm_rxeof() in interrupt context.
10369		 */
10370 more = wm_rxeof(rxq, rxlimit);
10371 } else
10372 more = true;
10373
10374 mutex_exit(rxq->rxq_lock);
10375
10376 WM_CORE_LOCK(sc);
10377
10378 if (sc->sc_core_stopping) {
10379 WM_CORE_UNLOCK(sc);
10380 return 1;
10381 }
10382
10383 if (icr & (ICR_LSC | ICR_RXSEQ)) {
10384 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10385 wm_linkintr(sc, icr);
10386 }
10387 if ((icr & ICR_GPI(0)) != 0)
10388 device_printf(sc->sc_dev, "got module interrupt\n");
10389
10390 WM_CORE_UNLOCK(sc);
10391
10392 if (icr & ICR_RXO) {
10393 #if defined(WM_DEBUG)
10394 log(LOG_WARNING, "%s: Receive overrun\n",
10395 device_xname(sc->sc_dev));
10396 #endif /* defined(WM_DEBUG) */
10397 }
10398
10399 rnd_add_uint32(&sc->rnd_source, rndval);
10400
10401 if (more) {
10402 /* Try to get more packets going. */
10403 wm_legacy_intr_disable(sc);
10404 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10405 wm_sched_handle_queue(sc, wmq);
10406 }
10407
10408 return 1;
10409 }
10410
10411 static inline void
10412 wm_txrxintr_disable(struct wm_queue *wmq)
10413 {
10414 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10415
10416 if (__predict_false(!wm_is_using_msix(sc))) {
10417 wm_legacy_intr_disable(sc);
10418 return;
10419 }
10420
10421 if (sc->sc_type == WM_T_82574)
10422 CSR_WRITE(sc, WMREG_IMC,
10423 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
10424 else if (sc->sc_type == WM_T_82575)
10425 CSR_WRITE(sc, WMREG_EIMC,
10426 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10427 else
10428 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
10429 }
10430
10431 static inline void
10432 wm_txrxintr_enable(struct wm_queue *wmq)
10433 {
10434 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10435
10436 wm_itrs_calculate(sc, wmq);
10437
10438 if (__predict_false(!wm_is_using_msix(sc))) {
10439 wm_legacy_intr_enable(sc);
10440 return;
10441 }
10442
10443	/*
10444	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
10445	 * here. There is no need to care whether RXQ(0) or RXQ(1) enables
10446	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
10447	 * while its wm_handle_queue(wmq) is running.
10448	 */
10449 if (sc->sc_type == WM_T_82574)
10450 CSR_WRITE(sc, WMREG_IMS,
10451 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
10452 else if (sc->sc_type == WM_T_82575)
10453 CSR_WRITE(sc, WMREG_EIMS,
10454 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10455 else
10456 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
10457 }
10458
10459 static int
10460 wm_txrxintr_msix(void *arg)
10461 {
10462 struct wm_queue *wmq = arg;
10463 struct wm_txqueue *txq = &wmq->wmq_txq;
10464 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10465 struct wm_softc *sc = txq->txq_sc;
10466 u_int txlimit = sc->sc_tx_intr_process_limit;
10467 u_int rxlimit = sc->sc_rx_intr_process_limit;
10468 bool txmore;
10469 bool rxmore;
10470
10471 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
10472
10473 DPRINTF(sc, WM_DEBUG_TX,
10474 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
10475
10476 wm_txrxintr_disable(wmq);
10477
10478 mutex_enter(txq->txq_lock);
10479
10480 if (txq->txq_stopping) {
10481 mutex_exit(txq->txq_lock);
10482 return 1;
10483 }
10484
10485 WM_Q_EVCNT_INCR(txq, txdw);
10486 if (txlimit > 0) {
10487 txmore = wm_txeof(txq, txlimit);
10488 /* wm_deferred start() is done in wm_handle_queue(). */
10489 } else
10490 txmore = true;
10491 mutex_exit(txq->txq_lock);
10492
10493 DPRINTF(sc, WM_DEBUG_RX,
10494 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
10495 mutex_enter(rxq->rxq_lock);
10496
10497 if (rxq->rxq_stopping) {
10498 mutex_exit(rxq->rxq_lock);
10499 return 1;
10500 }
10501
10502 WM_Q_EVCNT_INCR(rxq, intr);
10503 if (rxlimit > 0) {
10504 rxmore = wm_rxeof(rxq, rxlimit);
10505 } else
10506 rxmore = true;
10507 mutex_exit(rxq->rxq_lock);
10508
10509 wm_itrs_writereg(sc, wmq);
10510
10511 if (txmore || rxmore) {
10512 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10513 wm_sched_handle_queue(sc, wmq);
10514 } else
10515 wm_txrxintr_enable(wmq);
10516
10517 return 1;
10518 }
10519
10520 static void
10521 wm_handle_queue(void *arg)
10522 {
10523 struct wm_queue *wmq = arg;
10524 struct wm_txqueue *txq = &wmq->wmq_txq;
10525 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10526 struct wm_softc *sc = txq->txq_sc;
10527 u_int txlimit = sc->sc_tx_process_limit;
10528 u_int rxlimit = sc->sc_rx_process_limit;
10529 bool txmore;
10530 bool rxmore;
10531
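	/*
	 * This deferred pass uses the softint process limits, which are
	 * tunable separately from the interrupt-context limits used in
	 * wm_intr_legacy() and wm_txrxintr_msix().
	 */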
10532 mutex_enter(txq->txq_lock);
10533 if (txq->txq_stopping) {
10534 mutex_exit(txq->txq_lock);
10535 return;
10536 }
10537 txmore = wm_txeof(txq, txlimit);
10538 wm_deferred_start_locked(txq);
10539 mutex_exit(txq->txq_lock);
10540
10541 mutex_enter(rxq->rxq_lock);
10542 if (rxq->rxq_stopping) {
10543 mutex_exit(rxq->rxq_lock);
10544 return;
10545 }
10546 WM_Q_EVCNT_INCR(rxq, defer);
10547 rxmore = wm_rxeof(rxq, rxlimit);
10548 mutex_exit(rxq->rxq_lock);
10549
10550 if (txmore || rxmore) {
10551 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10552 wm_sched_handle_queue(sc, wmq);
10553 } else
10554 wm_txrxintr_enable(wmq);
10555 }
10556
10557 static void
10558 wm_handle_queue_work(struct work *wk, void *context)
10559 {
10560 struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
10561
10562 /*
10563 * "enqueued flag" is not required here.
10564 */
10565 wm_handle_queue(wmq);
10566 }
10567
10568 /*
10569 * wm_linkintr_msix:
10570 *
10571 * Interrupt service routine for link status change for MSI-X.
10572 */
10573 static int
10574 wm_linkintr_msix(void *arg)
10575 {
10576 struct wm_softc *sc = arg;
10577 uint32_t reg;
10578	bool has_rxo = false;
10579
10580 reg = CSR_READ(sc, WMREG_ICR);
10581 WM_CORE_LOCK(sc);
10582 DPRINTF(sc, WM_DEBUG_LINK,
10583 ("%s: LINK: got link intr. ICR = %08x\n",
10584 device_xname(sc->sc_dev), reg));
10585
10586 if (sc->sc_core_stopping)
10587 goto out;
10588
10589 if ((reg & ICR_LSC) != 0) {
10590 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10591 wm_linkintr(sc, ICR_LSC);
10592 }
10593 if ((reg & ICR_GPI(0)) != 0)
10594 device_printf(sc->sc_dev, "got module interrupt\n");
10595
10596	/*
10597	 * XXX 82574 MSI-X mode workaround
10598	 *
10599	 * The 82574 in MSI-X mode delivers the receive overrun (RXO) interrupt
10600	 * on the ICR_OTHER vector and raises neither ICR_RXQ(0) nor ICR_RXQ(1).
10601	 * So we generate ICR_RXQ(0) and ICR_RXQ(1) interrupts by writing
10602	 * WMREG_ICS in order to process received packets.
10603	 */
10604 if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
10605 #if defined(WM_DEBUG)
10606 log(LOG_WARNING, "%s: Receive overrun\n",
10607 device_xname(sc->sc_dev));
10608 #endif /* defined(WM_DEBUG) */
10609
10610 has_rxo = true;
10611		/*
10612		 * The RXO interrupt rate is very high when the receive traffic
10613		 * rate is high. We use polling mode for ICR_OTHER, as for the
10614		 * Tx/Rx interrupts. ICR_OTHER will be re-enabled at the end of
10615		 * wm_txrxintr_msix(), which is kicked by both the ICR_RXQ(0) and
10616		 * ICR_RXQ(1) interrupts.
10617		 */
10618 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
10619
10620 CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
10621 }
10622
10625 out:
10626 WM_CORE_UNLOCK(sc);
10627
10628 if (sc->sc_type == WM_T_82574) {
10629 if (!has_rxo)
10630 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
10631 else
10632 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
10633 } else if (sc->sc_type == WM_T_82575)
10634 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
10635 else
10636 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
10637
10638 return 1;
10639 }
10640
10641 /*
10642 * Media related.
10643 * GMII, SGMII, TBI (and SERDES)
10644 */
10645
10646 /* Common */
10647
10648 /*
10649 * wm_tbi_serdes_set_linkled:
10650 *
10651 * Update the link LED on TBI and SERDES devices.
10652 */
10653 static void
10654 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
10655 {
10656
10657 if (sc->sc_tbi_linkup)
10658 sc->sc_ctrl |= CTRL_SWDPIN(0);
10659 else
10660 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
10661
10662 /* 82540 or newer devices are active low */
10663 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
10664
10665 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10666 }
10667
10668 /* GMII related */
10669
10670 /*
10671 * wm_gmii_reset:
10672 *
10673 * Reset the PHY.
10674 */
10675 static void
10676 wm_gmii_reset(struct wm_softc *sc)
10677 {
10678 uint32_t reg;
10679 int rv;
10680
10681 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10682 device_xname(sc->sc_dev), __func__));
10683
10684 rv = sc->phy.acquire(sc);
10685 if (rv != 0) {
10686 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10687 __func__);
10688 return;
10689 }
10690
10691 switch (sc->sc_type) {
10692 case WM_T_82542_2_0:
10693 case WM_T_82542_2_1:
10694 /* null */
10695 break;
10696 case WM_T_82543:
10697 /*
10698 * With 82543, we need to force speed and duplex on the MAC
10699 * equal to what the PHY speed and duplex configuration is.
10700 * In addition, we need to perform a hardware reset on the PHY
10701 * to take it out of reset.
10702 */
10703 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10704 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10705
10706 /* The PHY reset pin is active-low. */
10707 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10708 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
10709 CTRL_EXT_SWDPIN(4));
10710 reg |= CTRL_EXT_SWDPIO(4);
10711
10712 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10713 CSR_WRITE_FLUSH(sc);
10714 delay(10*1000);
10715
10716 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
10717 CSR_WRITE_FLUSH(sc);
10718 delay(150);
10719 #if 0
10720 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
10721 #endif
10722 delay(20*1000); /* XXX extra delay to get PHY ID? */
10723 break;
10724 case WM_T_82544: /* Reset 10000us */
10725 case WM_T_82540:
10726 case WM_T_82545:
10727 case WM_T_82545_3:
10728 case WM_T_82546:
10729 case WM_T_82546_3:
10730 case WM_T_82541:
10731 case WM_T_82541_2:
10732 case WM_T_82547:
10733 case WM_T_82547_2:
10734 case WM_T_82571: /* Reset 100us */
10735 case WM_T_82572:
10736 case WM_T_82573:
10737 case WM_T_82574:
10738 case WM_T_82575:
10739 case WM_T_82576:
10740 case WM_T_82580:
10741 case WM_T_I350:
10742 case WM_T_I354:
10743 case WM_T_I210:
10744 case WM_T_I211:
10745 case WM_T_82583:
10746 case WM_T_80003:
10747 /* Generic reset */
10748 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10749 CSR_WRITE_FLUSH(sc);
10750 delay(20000);
10751 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10752 CSR_WRITE_FLUSH(sc);
10753 delay(20000);
10754
10755 if ((sc->sc_type == WM_T_82541)
10756 || (sc->sc_type == WM_T_82541_2)
10757 || (sc->sc_type == WM_T_82547)
10758 || (sc->sc_type == WM_T_82547_2)) {
10759			/* Workarounds for IGP are done in igp_reset() */
10760 /* XXX add code to set LED after phy reset */
10761 }
10762 break;
10763 case WM_T_ICH8:
10764 case WM_T_ICH9:
10765 case WM_T_ICH10:
10766 case WM_T_PCH:
10767 case WM_T_PCH2:
10768 case WM_T_PCH_LPT:
10769 case WM_T_PCH_SPT:
10770 case WM_T_PCH_CNP:
10771 /* Generic reset */
10772 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10773 CSR_WRITE_FLUSH(sc);
10774 delay(100);
10775 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10776 CSR_WRITE_FLUSH(sc);
10777 delay(150);
10778 break;
10779 default:
10780 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
10781 __func__);
10782 break;
10783 }
10784
10785 sc->phy.release(sc);
10786
10787 /* get_cfg_done */
10788 wm_get_cfg_done(sc);
10789
10790 /* Extra setup */
10791 switch (sc->sc_type) {
10792 case WM_T_82542_2_0:
10793 case WM_T_82542_2_1:
10794 case WM_T_82543:
10795 case WM_T_82544:
10796 case WM_T_82540:
10797 case WM_T_82545:
10798 case WM_T_82545_3:
10799 case WM_T_82546:
10800 case WM_T_82546_3:
10801 case WM_T_82541_2:
10802 case WM_T_82547_2:
10803 case WM_T_82571:
10804 case WM_T_82572:
10805 case WM_T_82573:
10806 case WM_T_82574:
10807 case WM_T_82583:
10808 case WM_T_82575:
10809 case WM_T_82576:
10810 case WM_T_82580:
10811 case WM_T_I350:
10812 case WM_T_I354:
10813 case WM_T_I210:
10814 case WM_T_I211:
10815 case WM_T_80003:
10816 /* Null */
10817 break;
10818 case WM_T_82541:
10819 case WM_T_82547:
10820		/* XXX Actively configure the LED after PHY reset */
10821 break;
10822 case WM_T_ICH8:
10823 case WM_T_ICH9:
10824 case WM_T_ICH10:
10825 case WM_T_PCH:
10826 case WM_T_PCH2:
10827 case WM_T_PCH_LPT:
10828 case WM_T_PCH_SPT:
10829 case WM_T_PCH_CNP:
10830 wm_phy_post_reset(sc);
10831 break;
10832 default:
10833 panic("%s: unknown type\n", __func__);
10834 break;
10835 }
10836 }
10837
10838 /*
10839 * Set up sc_phytype and mii_{read|write}reg.
10840 *
10841	 *  To identify the PHY type, the correct read/write functions must be
10842	 * selected, and they must be selected from the PCI ID or MAC type
10843	 * without accessing the PHY registers.
10844	 *
10845	 *  On the first call of this function, the PHY ID is not known yet, so
10846	 * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
10847	 * so the result might be incorrect.
10848	 *
10849	 *  On the second call, the PHY OUI and model are used to identify the
10850	 * PHY type. It might not be perfect because some entries are missing
10851	 * from the comparison, but it should be better than the first call.
10852	 *
10853	 *  If the newly detected result differs from the previous assumption,
10854	 * a diagnostic message is printed.
10855 */
10856 static void
10857 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
10858 uint16_t phy_model)
10859 {
10860 device_t dev = sc->sc_dev;
10861 struct mii_data *mii = &sc->sc_mii;
10862 uint16_t new_phytype = WMPHY_UNKNOWN;
10863 uint16_t doubt_phytype = WMPHY_UNKNOWN;
10864 mii_readreg_t new_readreg;
10865 mii_writereg_t new_writereg;
10866 bool dodiag = true;
10867
10868 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10869 device_xname(sc->sc_dev), __func__));
10870
10871 /*
10872 * 1000BASE-T SFP uses SGMII and the first asumed PHY type is always
10873 * incorrect. So don't print diag output when it's 2nd call.
10874 */
10875 if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
10876 dodiag = false;
10877
10878 if (mii->mii_readreg == NULL) {
10879 /*
10880 * This is the first call of this function. For ICH and PCH
10881 * variants, it's difficult to determine the PHY access method
10882 * by sc_type, so use the PCI product ID for some devices.
10883 */
10884
10885 switch (sc->sc_pcidevid) {
10886 case PCI_PRODUCT_INTEL_PCH_M_LM:
10887 case PCI_PRODUCT_INTEL_PCH_M_LC:
10888 /* 82577 */
10889 new_phytype = WMPHY_82577;
10890 break;
10891 case PCI_PRODUCT_INTEL_PCH_D_DM:
10892 case PCI_PRODUCT_INTEL_PCH_D_DC:
10893 /* 82578 */
10894 new_phytype = WMPHY_82578;
10895 break;
10896 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
10897 case PCI_PRODUCT_INTEL_PCH2_LV_V:
10898 /* 82579 */
10899 new_phytype = WMPHY_82579;
10900 break;
10901 case PCI_PRODUCT_INTEL_82801H_82567V_3:
10902 case PCI_PRODUCT_INTEL_82801I_BM:
10903 case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
10904 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
10905 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
10906 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
10907 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
10908 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
10909 /* ICH8, 9, 10 with 82567 */
10910 new_phytype = WMPHY_BM;
10911 break;
10912 default:
10913 break;
10914 }
10915 } else {
10916 /* It's not the first call. Use PHY OUI and model */
10917 switch (phy_oui) {
10918 case MII_OUI_ATTANSIC: /* atphy(4) */
10919 switch (phy_model) {
10920 case MII_MODEL_ATTANSIC_AR8021:
10921 new_phytype = WMPHY_82578;
10922 break;
10923 default:
10924 break;
10925 }
10926 break;
10927 case MII_OUI_xxMARVELL:
10928 switch (phy_model) {
10929 case MII_MODEL_xxMARVELL_I210:
10930 new_phytype = WMPHY_I210;
10931 break;
10932 case MII_MODEL_xxMARVELL_E1011:
10933 case MII_MODEL_xxMARVELL_E1000_3:
10934 case MII_MODEL_xxMARVELL_E1000_5:
10935 case MII_MODEL_xxMARVELL_E1112:
10936 new_phytype = WMPHY_M88;
10937 break;
10938 case MII_MODEL_xxMARVELL_E1149:
10939 new_phytype = WMPHY_BM;
10940 break;
10941 case MII_MODEL_xxMARVELL_E1111:
10942 case MII_MODEL_xxMARVELL_I347:
10943 case MII_MODEL_xxMARVELL_E1512:
10944 case MII_MODEL_xxMARVELL_E1340M:
10945 case MII_MODEL_xxMARVELL_E1543:
10946 new_phytype = WMPHY_M88;
10947 break;
10948 case MII_MODEL_xxMARVELL_I82563:
10949 new_phytype = WMPHY_GG82563;
10950 break;
10951 default:
10952 break;
10953 }
10954 break;
10955 case MII_OUI_INTEL:
10956 switch (phy_model) {
10957 case MII_MODEL_INTEL_I82577:
10958 new_phytype = WMPHY_82577;
10959 break;
10960 case MII_MODEL_INTEL_I82579:
10961 new_phytype = WMPHY_82579;
10962 break;
10963 case MII_MODEL_INTEL_I217:
10964 new_phytype = WMPHY_I217;
10965 break;
10966 case MII_MODEL_INTEL_I82580:
10967 new_phytype = WMPHY_82580;
10968 break;
10969 case MII_MODEL_INTEL_I350:
10970 new_phytype = WMPHY_I350;
10971 break;
10972 default:
10973 break;
10974 }
10975 break;
10976 case MII_OUI_yyINTEL:
10977 switch (phy_model) {
10978 case MII_MODEL_yyINTEL_I82562G:
10979 case MII_MODEL_yyINTEL_I82562EM:
10980 case MII_MODEL_yyINTEL_I82562ET:
10981 new_phytype = WMPHY_IFE;
10982 break;
10983 case MII_MODEL_yyINTEL_IGP01E1000:
10984 new_phytype = WMPHY_IGP;
10985 break;
10986 case MII_MODEL_yyINTEL_I82566:
10987 new_phytype = WMPHY_IGP_3;
10988 break;
10989 default:
10990 break;
10991 }
10992 break;
10993 default:
10994 break;
10995 }
10996
10997 if (dodiag) {
10998 if (new_phytype == WMPHY_UNKNOWN)
10999 aprint_verbose_dev(dev,
11000 "%s: Unknown PHY model. OUI=%06x, "
11001 "model=%04x\n", __func__, phy_oui,
11002 phy_model);
11003
11004 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11005 && (sc->sc_phytype != new_phytype)) {
				aprint_error_dev(dev, "Previously assumed PHY "
				    "type (%u) was incorrect. PHY type from "
				    "PHY ID = %u\n", sc->sc_phytype,
				    new_phytype);
11009 }
11010 }
11011 }
11012
11013 /* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
11014 if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
11015 /* SGMII */
11016 new_readreg = wm_sgmii_readreg;
11017 new_writereg = wm_sgmii_writereg;
11018 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11019 /* BM2 (phyaddr == 1) */
11020 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11021 && (new_phytype != WMPHY_BM)
11022 && (new_phytype != WMPHY_UNKNOWN))
11023 doubt_phytype = new_phytype;
11024 new_phytype = WMPHY_BM;
11025 new_readreg = wm_gmii_bm_readreg;
11026 new_writereg = wm_gmii_bm_writereg;
11027 } else if (sc->sc_type >= WM_T_PCH) {
11028 /* All PCH* use _hv_ */
11029 new_readreg = wm_gmii_hv_readreg;
11030 new_writereg = wm_gmii_hv_writereg;
11031 } else if (sc->sc_type >= WM_T_ICH8) {
11032 /* non-82567 ICH8, 9 and 10 */
11033 new_readreg = wm_gmii_i82544_readreg;
11034 new_writereg = wm_gmii_i82544_writereg;
11035 } else if (sc->sc_type >= WM_T_80003) {
11036 /* 80003 */
11037 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11038 && (new_phytype != WMPHY_GG82563)
11039 && (new_phytype != WMPHY_UNKNOWN))
11040 doubt_phytype = new_phytype;
11041 new_phytype = WMPHY_GG82563;
11042 new_readreg = wm_gmii_i80003_readreg;
11043 new_writereg = wm_gmii_i80003_writereg;
11044 } else if (sc->sc_type >= WM_T_I210) {
11045 /* I210 and I211 */
11046 if ((sc->sc_phytype != WMPHY_UNKNOWN)
11047 && (new_phytype != WMPHY_I210)
11048 && (new_phytype != WMPHY_UNKNOWN))
11049 doubt_phytype = new_phytype;
11050 new_phytype = WMPHY_I210;
11051 new_readreg = wm_gmii_gs40g_readreg;
11052 new_writereg = wm_gmii_gs40g_writereg;
11053 } else if (sc->sc_type >= WM_T_82580) {
11054 /* 82580, I350 and I354 */
11055 new_readreg = wm_gmii_82580_readreg;
11056 new_writereg = wm_gmii_82580_writereg;
11057 } else if (sc->sc_type >= WM_T_82544) {
11058 /* 82544, 0, [56], [17], 8257[1234] and 82583 */
11059 new_readreg = wm_gmii_i82544_readreg;
11060 new_writereg = wm_gmii_i82544_writereg;
11061 } else {
11062 new_readreg = wm_gmii_i82543_readreg;
11063 new_writereg = wm_gmii_i82543_writereg;
11064 }
11065
11066 if (new_phytype == WMPHY_BM) {
11067 /* All BM use _bm_ */
11068 new_readreg = wm_gmii_bm_readreg;
11069 new_writereg = wm_gmii_bm_writereg;
11070 }
11071 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
11072 /* All PCH* use _hv_ */
11073 new_readreg = wm_gmii_hv_readreg;
11074 new_writereg = wm_gmii_hv_writereg;
11075 }
11076
11077 /* Diag output */
11078 if (dodiag) {
11079 if (doubt_phytype != WMPHY_UNKNOWN)
11080 aprint_error_dev(dev, "Assumed new PHY type was "
11081 "incorrect. old = %u, new = %u\n", sc->sc_phytype,
11082 new_phytype);
11083 else if ((sc->sc_phytype != WMPHY_UNKNOWN)
11084 && (sc->sc_phytype != new_phytype))
			aprint_error_dev(dev, "Previously assumed PHY type "
			    "(%u) was incorrect. New PHY type = %u\n",
			    sc->sc_phytype, new_phytype);
11088
11089 if ((mii->mii_readreg != NULL) &&
11090 (new_phytype == WMPHY_UNKNOWN))
11091 aprint_error_dev(dev, "PHY type is still unknown.\n");
11092
11093 if ((mii->mii_readreg != NULL) &&
11094 (mii->mii_readreg != new_readreg))
11095 aprint_error_dev(dev, "Previously assumed PHY "
11096 "read/write function was incorrect.\n");
11097 }
11098
11099 /* Update now */
11100 sc->sc_phytype = new_phytype;
11101 mii->mii_readreg = new_readreg;
11102 mii->mii_writereg = new_writereg;
11103 if (new_readreg == wm_gmii_hv_readreg) {
11104 sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
11105 sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
11106 } else if (new_readreg == wm_sgmii_readreg) {
11107 sc->phy.readreg_locked = wm_sgmii_readreg_locked;
11108 sc->phy.writereg_locked = wm_sgmii_writereg_locked;
11109 } else if (new_readreg == wm_gmii_i82544_readreg) {
11110 sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
11111 sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
11112 }
11113 }
11114
11115 /*
11116 * wm_get_phy_id_82575:
11117 *
11118 * Return PHY ID. Return -1 if it failed.
11119 */
11120 static int
11121 wm_get_phy_id_82575(struct wm_softc *sc)
11122 {
11123 uint32_t reg;
11124 int phyid = -1;
11125
11126 /* XXX */
11127 if ((sc->sc_flags & WM_F_SGMII) == 0)
11128 return -1;
11129
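	/*
	 * When SGMII uses MDIO, the external PHY's address is read back
	 * from MDIC (82575/82576) or MDICNFG (82580 and newer).
	 */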
11130 if (wm_sgmii_uses_mdio(sc)) {
11131 switch (sc->sc_type) {
11132 case WM_T_82575:
11133 case WM_T_82576:
11134 reg = CSR_READ(sc, WMREG_MDIC);
11135 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
11136 break;
11137 case WM_T_82580:
11138 case WM_T_I350:
11139 case WM_T_I354:
11140 case WM_T_I210:
11141 case WM_T_I211:
11142 reg = CSR_READ(sc, WMREG_MDICNFG);
11143 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
11144 break;
11145 default:
11146 return -1;
11147 }
11148 }
11149
11150 return phyid;
11151 }
11152
11153 /*
11154 * wm_gmii_mediainit:
11155 *
11156 * Initialize media for use on 1000BASE-T devices.
11157 */
11158 static void
11159 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
11160 {
11161 device_t dev = sc->sc_dev;
11162 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11163 struct mii_data *mii = &sc->sc_mii;
11164
11165 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11166 device_xname(sc->sc_dev), __func__));
11167
11168 /* We have GMII. */
11169 sc->sc_flags |= WM_F_HAS_MII;
11170
11171 if (sc->sc_type == WM_T_80003)
11172 sc->sc_tipg = TIPG_1000T_80003_DFLT;
11173 else
11174 sc->sc_tipg = TIPG_1000T_DFLT;
11175
11176 /*
11177 * Let the chip set speed/duplex on its own based on
11178 * signals from the PHY.
11179 * XXXbouyer - I'm not sure this is right for the 80003,
11180 * the em driver only sets CTRL_SLU here - but it seems to work.
11181 */
11182 sc->sc_ctrl |= CTRL_SLU;
11183 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11184
11185 /* Initialize our media structures and probe the GMII. */
11186 mii->mii_ifp = ifp;
11187
11188 mii->mii_statchg = wm_gmii_statchg;
11189
	/* Move PHY control from SMBus to PCIe */
11191 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
11192 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
11193 || (sc->sc_type == WM_T_PCH_CNP))
11194 wm_init_phy_workarounds_pchlan(sc);
11195
11196 wm_gmii_reset(sc);
11197
11198 sc->sc_ethercom.ec_mii = &sc->sc_mii;
11199 ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
11200 wm_gmii_mediastatus, sc->sc_core_lock);
11201
11202 /* Setup internal SGMII PHY for SFP */
11203 wm_sgmii_sfp_preconfig(sc);
11204
11205 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
11206 || (sc->sc_type == WM_T_82580)
11207 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
11208 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
11209 if ((sc->sc_flags & WM_F_SGMII) == 0) {
11210 /* Attach only one port */
11211 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
11212 MII_OFFSET_ANY, MIIF_DOPAUSE);
11213 } else {
11214 int i, id;
11215 uint32_t ctrl_ext;
11216
11217 id = wm_get_phy_id_82575(sc);
11218 if (id != -1) {
11219 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
11220 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
11221 }
11222 if ((id == -1)
11223 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
11225 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11226 CSR_WRITE(sc, WMREG_CTRL_EXT,
11227 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
11228 CSR_WRITE_FLUSH(sc);
11229 delay(300*1000); /* XXX too long */
11230
11231 /*
11232 * From 1 to 8.
11233 *
11234 * I2C access fails with I2C register's ERROR
11235 * bit set, so prevent error message while
11236 * scanning.
11237 */
11238 sc->phy.no_errprint = true;
11239 for (i = 1; i < 8; i++)
11240 mii_attach(sc->sc_dev, &sc->sc_mii,
11241 0xffffffff, i, MII_OFFSET_ANY,
11242 MIIF_DOPAUSE);
11243 sc->phy.no_errprint = false;
11244
				/* Restore the previous SFP cage power state */
11246 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11247 }
11248 }
11249 } else
11250 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11251 MII_OFFSET_ANY, MIIF_DOPAUSE);
11252
11253 /*
11254 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
11255 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
11256 */
11257 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
11258 || (sc->sc_type == WM_T_PCH_SPT)
11259 || (sc->sc_type == WM_T_PCH_CNP))
11260 && (LIST_FIRST(&mii->mii_phys) == NULL)) {
11261 wm_set_mdio_slow_mode_hv(sc);
11262 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11263 MII_OFFSET_ANY, MIIF_DOPAUSE);
11264 }
11265
11266 /*
11267 * (For ICH8 variants)
11268 * If PHY detection failed, use BM's r/w function and retry.
11269 */
11270 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* If it failed, retry with the *_bm_* functions */
11272 aprint_verbose_dev(dev, "Assumed PHY access function "
11273 "(type = %d) might be incorrect. Use BM and retry.\n",
11274 sc->sc_phytype);
11275 sc->sc_phytype = WMPHY_BM;
11276 mii->mii_readreg = wm_gmii_bm_readreg;
11277 mii->mii_writereg = wm_gmii_bm_writereg;
11278
11279 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11280 MII_OFFSET_ANY, MIIF_DOPAUSE);
11281 }
11282
11283 if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
11285 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
11286 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
11287 sc->sc_phytype = WMPHY_NONE;
11288 } else {
11289 struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
11290
11291 /*
11292 * PHY found! Check PHY type again by the second call of
11293 * wm_gmii_setup_phytype.
11294 */
11295 wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
11296 child->mii_mpd_model);
11297
11298 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
11299 }
11300 }
11301
11302 /*
11303 * wm_gmii_mediachange: [ifmedia interface function]
11304 *
11305 * Set hardware to newly-selected media on a 1000BASE-T device.
11306 */
11307 static int
11308 wm_gmii_mediachange(struct ifnet *ifp)
11309 {
11310 struct wm_softc *sc = ifp->if_softc;
11311 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11312 uint32_t reg;
11313 int rc;
11314
11315 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11316 device_xname(sc->sc_dev), __func__));
11317
11318 KASSERT(WM_CORE_LOCKED(sc));
11319
11320 if ((sc->sc_if_flags & IFF_UP) == 0)
11321 return 0;
11322
11323 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
11324 if ((sc->sc_type == WM_T_82580)
11325 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
11326 || (sc->sc_type == WM_T_I211)) {
11327 reg = CSR_READ(sc, WMREG_PHPM);
11328 reg &= ~PHPM_GO_LINK_D;
11329 CSR_WRITE(sc, WMREG_PHPM, reg);
11330 }
11331
11332 /* Disable D0 LPLU. */
11333 wm_lplu_d0_disable(sc);
11334
11335 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
11336 sc->sc_ctrl |= CTRL_SLU;
11337 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11338 || (sc->sc_type > WM_T_82543)) {
11339 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
11340 } else {
11341 sc->sc_ctrl &= ~CTRL_ASDE;
11342 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11343 if (ife->ifm_media & IFM_FDX)
11344 sc->sc_ctrl |= CTRL_FD;
11345 switch (IFM_SUBTYPE(ife->ifm_media)) {
11346 case IFM_10_T:
11347 sc->sc_ctrl |= CTRL_SPEED_10;
11348 break;
11349 case IFM_100_TX:
11350 sc->sc_ctrl |= CTRL_SPEED_100;
11351 break;
11352 case IFM_1000_T:
11353 sc->sc_ctrl |= CTRL_SPEED_1000;
11354 break;
11355 case IFM_NONE:
11356 /* There is no specific setting for IFM_NONE */
11357 break;
11358 default:
11359 panic("wm_gmii_mediachange: bad media 0x%x",
11360 ife->ifm_media);
11361 }
11362 }
11363 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11364 CSR_WRITE_FLUSH(sc);
11365
11366 if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
11367 wm_serdes_mediachange(ifp);
11368
11369 if (sc->sc_type <= WM_T_82543)
11370 wm_gmii_reset(sc);
11371 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
11372 && ((sc->sc_flags & WM_F_SGMII) != 0)) {
		/* Allow time for the SFP cage to power up the PHY */
11374 delay(300 * 1000);
11375 wm_gmii_reset(sc);
11376 }
11377
11378 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
11379 return 0;
11380 return rc;
11381 }
11382
11383 /*
11384 * wm_gmii_mediastatus: [ifmedia interface function]
11385 *
11386 * Get the current interface media status on a 1000BASE-T device.
11387 */
11388 static void
11389 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11390 {
11391 struct wm_softc *sc = ifp->if_softc;
11392
11393 KASSERT(WM_CORE_LOCKED(sc));
11394
11395 ether_mediastatus(ifp, ifmr);
11396 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
11397 | sc->sc_flowflags;
11398 }
11399
11400 #define MDI_IO CTRL_SWDPIN(2)
11401 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
11402 #define MDI_CLK CTRL_SWDPIN(3)
11403
11404 static void
11405 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
11406 {
11407 uint32_t i, v;
11408
11409 v = CSR_READ(sc, WMREG_CTRL);
11410 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11411 v |= MDI_DIR | CTRL_SWDPIO(3);
11412
11413 for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
11414 if (data & i)
11415 v |= MDI_IO;
11416 else
11417 v &= ~MDI_IO;
11418 CSR_WRITE(sc, WMREG_CTRL, v);
11419 CSR_WRITE_FLUSH(sc);
11420 delay(10);
11421 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11422 CSR_WRITE_FLUSH(sc);
11423 delay(10);
11424 CSR_WRITE(sc, WMREG_CTRL, v);
11425 CSR_WRITE_FLUSH(sc);
11426 delay(10);
11427 }
11428 }
11429
11430 static uint16_t
11431 wm_i82543_mii_recvbits(struct wm_softc *sc)
11432 {
11433 uint32_t v, i;
11434 uint16_t data = 0;
11435
11436 v = CSR_READ(sc, WMREG_CTRL);
11437 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11438 v |= CTRL_SWDPIO(3);
11439
11440 CSR_WRITE(sc, WMREG_CTRL, v);
11441 CSR_WRITE_FLUSH(sc);
11442 delay(10);
11443 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11444 CSR_WRITE_FLUSH(sc);
11445 delay(10);
11446 CSR_WRITE(sc, WMREG_CTRL, v);
11447 CSR_WRITE_FLUSH(sc);
11448 delay(10);
11449
11450 for (i = 0; i < 16; i++) {
11451 data <<= 1;
11452 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11453 CSR_WRITE_FLUSH(sc);
11454 delay(10);
11455 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
11456 data |= 1;
11457 CSR_WRITE(sc, WMREG_CTRL, v);
11458 CSR_WRITE_FLUSH(sc);
11459 delay(10);
11460 }
11461
11462 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11463 CSR_WRITE_FLUSH(sc);
11464 delay(10);
11465 CSR_WRITE(sc, WMREG_CTRL, v);
11466 CSR_WRITE_FLUSH(sc);
11467 delay(10);
11468
11469 return data;
11470 }
11471
11472 #undef MDI_IO
11473 #undef MDI_DIR
11474 #undef MDI_CLK
11475
11476 /*
11477 * wm_gmii_i82543_readreg: [mii interface function]
11478 *
11479 * Read a PHY register on the GMII (i82543 version).
11480 */
11481 static int
11482 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
11483 {
11484 struct wm_softc *sc = device_private(dev);
11485
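	/*
	 * IEEE 802.3 clause 22 read frame, bit-banged MSB first: 32
	 * preamble bits (all ones), then ST(01) OP(10) PHYAD(5) REGAD(5)
	 * in 14 bits; the PHY then drives 16 data bits back, which
	 * wm_i82543_mii_recvbits() collects.
	 */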
11486 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11487 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
11488 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
11489 *val = wm_i82543_mii_recvbits(sc) & 0xffff;
11490
11491 DPRINTF(sc, WM_DEBUG_GMII,
11492 ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
11493 device_xname(dev), phy, reg, *val));
11494
11495 return 0;
11496 }
11497
11498 /*
11499 * wm_gmii_i82543_writereg: [mii interface function]
11500 *
11501 * Write a PHY register on the GMII (i82543 version).
11502 */
11503 static int
11504 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
11505 {
11506 struct wm_softc *sc = device_private(dev);
11507
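	/*
	 * IEEE 802.3 clause 22 write frame in a single 32-bit word:
	 * ST(01) OP(01) PHYAD(5) REGAD(5), a 10 turnaround
	 * (MII_COMMAND_ACK) and then the 16 data bits.
	 */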
11508 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11509 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
11510 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
11511 (MII_COMMAND_START << 30), 32);
11512
11513 return 0;
11514 }
11515
11516 /*
11517 * wm_gmii_mdic_readreg: [mii interface function]
11518 *
11519 * Read a PHY register on the GMII.
11520 */
11521 static int
11522 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
11523 {
11524 struct wm_softc *sc = device_private(dev);
11525 uint32_t mdic = 0;
11526 int i;
11527
11528 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11529 && (reg > MII_ADDRMASK)) {
11530 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11531 __func__, sc->sc_phytype, reg);
11532 reg &= MII_ADDRMASK;
11533 }
11534
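	/*
	 * One MDIC transaction: the opcode, PHY address and register
	 * address go into a single WMREG_MDIC write; the data and the
	 * READY/error status are then polled back from the same register.
	 */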
11535 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
11536 MDIC_REGADD(reg));
11537
11538 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11539 delay(50);
11540 mdic = CSR_READ(sc, WMREG_MDIC);
11541 if (mdic & MDIC_READY)
11542 break;
11543 }
11544
11545 if ((mdic & MDIC_READY) == 0) {
11546 DPRINTF(sc, WM_DEBUG_GMII,
11547 ("%s: MDIC read timed out: phy %d reg %d\n",
11548 device_xname(dev), phy, reg));
11549 return ETIMEDOUT;
11550 } else if (mdic & MDIC_E) {
11551 /* This is normal if no PHY is present. */
11552 DPRINTF(sc, WM_DEBUG_GMII,
11553 ("%s: MDIC read error: phy %d reg %d\n",
11554 device_xname(sc->sc_dev), phy, reg));
11555 return -1;
11556 } else
11557 *val = MDIC_DATA(mdic);
11558
11559 /*
11560 * Allow some time after each MDIC transaction to avoid
11561 * reading duplicate data in the next MDIC transaction.
11562 */
11563 if (sc->sc_type == WM_T_PCH2)
11564 delay(100);
11565
11566 return 0;
11567 }
11568
11569 /*
11570 * wm_gmii_mdic_writereg: [mii interface function]
11571 *
11572 * Write a PHY register on the GMII.
11573 */
11574 static int
11575 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
11576 {
11577 struct wm_softc *sc = device_private(dev);
11578 uint32_t mdic = 0;
11579 int i;
11580
11581 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11582 && (reg > MII_ADDRMASK)) {
11583 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11584 __func__, sc->sc_phytype, reg);
11585 reg &= MII_ADDRMASK;
11586 }
11587
11588 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
11589 MDIC_REGADD(reg) | MDIC_DATA(val));
11590
11591 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11592 delay(50);
11593 mdic = CSR_READ(sc, WMREG_MDIC);
11594 if (mdic & MDIC_READY)
11595 break;
11596 }
11597
11598 if ((mdic & MDIC_READY) == 0) {
11599 DPRINTF(sc, WM_DEBUG_GMII,
11600 ("%s: MDIC write timed out: phy %d reg %d\n",
11601 device_xname(dev), phy, reg));
11602 return ETIMEDOUT;
11603 } else if (mdic & MDIC_E) {
11604 DPRINTF(sc, WM_DEBUG_GMII,
11605 ("%s: MDIC write error: phy %d reg %d\n",
11606 device_xname(dev), phy, reg));
11607 return -1;
11608 }
11609
11610 /*
11611 * Allow some time after each MDIC transaction to avoid
11612 * reading duplicate data in the next MDIC transaction.
11613 */
11614 if (sc->sc_type == WM_T_PCH2)
11615 delay(100);
11616
11617 return 0;
11618 }
11619
11620 /*
11621 * wm_gmii_i82544_readreg: [mii interface function]
11622 *
11623 * Read a PHY register on the GMII.
11624 */
11625 static int
11626 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
11627 {
11628 struct wm_softc *sc = device_private(dev);
11629 int rv;
11630
11631 rv = sc->phy.acquire(sc);
11632 if (rv != 0) {
11633 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11634 return rv;
11635 }
11636
11637 rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
11638
11639 sc->phy.release(sc);
11640
11641 return rv;
11642 }
11643
11644 static int
11645 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11646 {
11647 struct wm_softc *sc = device_private(dev);
11648 int rv;
11649
11650 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11651 switch (sc->sc_phytype) {
11652 case WMPHY_IGP:
11653 case WMPHY_IGP_2:
11654 case WMPHY_IGP_3:
11655 rv = wm_gmii_mdic_writereg(dev, phy,
11656 IGPHY_PAGE_SELECT, reg);
11657 if (rv != 0)
11658 return rv;
11659 break;
11660 default:
11661 #ifdef WM_DEBUG
11662 device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
11663 __func__, sc->sc_phytype, reg);
11664 #endif
11665 break;
11666 }
11667 }
11668
11669 return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11670 }
11671
11672 /*
11673 * wm_gmii_i82544_writereg: [mii interface function]
11674 *
11675 * Write a PHY register on the GMII.
11676 */
11677 static int
11678 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
11679 {
11680 struct wm_softc *sc = device_private(dev);
11681 int rv;
11682
11683 rv = sc->phy.acquire(sc);
11684 if (rv != 0) {
11685 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11686 return rv;
11687 }
11688
11689 rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
11690 sc->phy.release(sc);
11691
11692 return rv;
11693 }
11694
11695 static int
11696 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11697 {
11698 struct wm_softc *sc = device_private(dev);
11699 int rv;
11700
11701 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11702 switch (sc->sc_phytype) {
11703 case WMPHY_IGP:
11704 case WMPHY_IGP_2:
11705 case WMPHY_IGP_3:
11706 rv = wm_gmii_mdic_writereg(dev, phy,
11707 IGPHY_PAGE_SELECT, reg);
11708 if (rv != 0)
11709 return rv;
11710 break;
11711 default:
11712 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
			    __func__, sc->sc_phytype, reg);
11715 #endif
11716 break;
11717 }
11718 }
11719
11720 return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11721 }
11722
11723 /*
11724 * wm_gmii_i80003_readreg: [mii interface function]
11725 *
11726 * Read a PHY register on the kumeran
11727 * This could be handled by the PHY layer if we didn't have to lock the
11728 * resource ...
11729 */
11730 static int
11731 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
11732 {
11733 struct wm_softc *sc = device_private(dev);
11734 int page_select;
11735 uint16_t temp, temp2;
11736 int rv;
11737
	if (phy != 1)	/* Only one PHY on the Kumeran bus */
11739 return -1;
11740
11741 rv = sc->phy.acquire(sc);
11742 if (rv != 0) {
11743 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11744 return rv;
11745 }
11746
11747 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11748 page_select = GG82563_PHY_PAGE_SELECT;
11749 else {
11750 /*
11751 * Use Alternative Page Select register to access registers
11752 * 30 and 31.
11753 */
11754 page_select = GG82563_PHY_PAGE_SELECT_ALT;
11755 }
11756 temp = reg >> GG82563_PAGE_SHIFT;
11757 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11758 goto out;
11759
11760 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11761 /*
11762 * Wait more 200us for a bug of the ready bit in the MDIC
11763 * register.
11764 */
11765 delay(200);
11766 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11767 if ((rv != 0) || (temp2 != temp)) {
11768 device_printf(dev, "%s failed\n", __func__);
11769 rv = -1;
11770 goto out;
11771 }
11772 delay(200);
11773 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11774 delay(200);
11775 } else
11776 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11777
11778 out:
11779 sc->phy.release(sc);
11780 return rv;
11781 }
11782
11783 /*
11784 * wm_gmii_i80003_writereg: [mii interface function]
11785 *
11786 * Write a PHY register on the kumeran.
11787 * This could be handled by the PHY layer if we didn't have to lock the
11788 * resource ...
11789 */
11790 static int
11791 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
11792 {
11793 struct wm_softc *sc = device_private(dev);
11794 int page_select, rv;
11795 uint16_t temp, temp2;
11796
	if (phy != 1)	/* Only one PHY on the Kumeran bus */
11798 return -1;
11799
11800 rv = sc->phy.acquire(sc);
11801 if (rv != 0) {
11802 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11803 return rv;
11804 }
11805
11806 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11807 page_select = GG82563_PHY_PAGE_SELECT;
11808 else {
11809 /*
11810 * Use Alternative Page Select register to access registers
11811 * 30 and 31.
11812 */
11813 page_select = GG82563_PHY_PAGE_SELECT_ALT;
11814 }
11815 temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
11816 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11817 goto out;
11818
11819 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11820 /*
11821 * Wait more 200us for a bug of the ready bit in the MDIC
11822 * register.
11823 */
11824 delay(200);
11825 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11826 if ((rv != 0) || (temp2 != temp)) {
11827 device_printf(dev, "%s failed\n", __func__);
11828 rv = -1;
11829 goto out;
11830 }
11831 delay(200);
11832 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11833 delay(200);
11834 } else
11835 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11836
11837 out:
11838 sc->phy.release(sc);
11839 return rv;
11840 }
11841
11842 /*
11843 * wm_gmii_bm_readreg: [mii interface function]
11844 *
11845 * Read a PHY register on the kumeran
11846 * This could be handled by the PHY layer if we didn't have to lock the
11847 * resource ...
11848 */
11849 static int
11850 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
11851 {
11852 struct wm_softc *sc = device_private(dev);
11853 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11854 int rv;
11855
11856 rv = sc->phy.acquire(sc);
11857 if (rv != 0) {
11858 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11859 return rv;
11860 }
11861
11862 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11863 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11864 || (reg == 31)) ? 1 : phy;
11865 /* Page 800 works differently than the rest so it has its own func */
11866 if (page == BM_WUC_PAGE) {
11867 rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11868 goto release;
11869 }
11870
11871 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11872 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11873 && (sc->sc_type != WM_T_82583))
11874 rv = wm_gmii_mdic_writereg(dev, phy,
11875 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11876 else
11877 rv = wm_gmii_mdic_writereg(dev, phy,
11878 BME1000_PHY_PAGE_SELECT, page);
11879 if (rv != 0)
11880 goto release;
11881 }
11882
11883 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11884
11885 release:
11886 sc->phy.release(sc);
11887 return rv;
11888 }
11889
11890 /*
11891 * wm_gmii_bm_writereg: [mii interface function]
11892 *
11893 * Write a PHY register on the kumeran.
11894 * This could be handled by the PHY layer if we didn't have to lock the
11895 * resource ...
11896 */
11897 static int
11898 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
11899 {
11900 struct wm_softc *sc = device_private(dev);
11901 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11902 int rv;
11903
11904 rv = sc->phy.acquire(sc);
11905 if (rv != 0) {
11906 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11907 return rv;
11908 }
11909
11910 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11911 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11912 || (reg == 31)) ? 1 : phy;
11913 /* Page 800 works differently than the rest so it has its own func */
11914 if (page == BM_WUC_PAGE) {
11915 rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
11916 goto release;
11917 }
11918
11919 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11920 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11921 && (sc->sc_type != WM_T_82583))
11922 rv = wm_gmii_mdic_writereg(dev, phy,
11923 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11924 else
11925 rv = wm_gmii_mdic_writereg(dev, phy,
11926 BME1000_PHY_PAGE_SELECT, page);
11927 if (rv != 0)
11928 goto release;
11929 }
11930
11931 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11932
11933 release:
11934 sc->phy.release(sc);
11935 return rv;
11936 }
11937
11938 /*
11939 * wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
11940 * @dev: pointer to the HW structure
 * @phy_regp: pointer to store the original contents of BM_WUC_ENABLE_REG
 *
 * Assumes the semaphore is already acquired and phy_regp points to a valid
 * memory address to store the contents of the BM_WUC_ENABLE_REG register.
11945 */
11946 static int
11947 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11948 {
11949 #ifdef WM_DEBUG
11950 struct wm_softc *sc = device_private(dev);
11951 #endif
11952 uint16_t temp;
11953 int rv;
11954
11955 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11956 device_xname(dev), __func__));
11957
11958 if (!phy_regp)
11959 return -1;
11960
11961 /* All page select, port ctrl and wakeup registers use phy address 1 */
11962
11963 /* Select Port Control Registers page */
11964 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11965 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11966 if (rv != 0)
11967 return rv;
11968
11969 /* Read WUCE and save it */
11970 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
11971 if (rv != 0)
11972 return rv;
11973
11974 /* Enable both PHY wakeup mode and Wakeup register page writes.
11975 * Prevent a power state change by disabling ME and Host PHY wakeup.
11976 */
11977 temp = *phy_regp;
11978 temp |= BM_WUC_ENABLE_BIT;
11979 temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
11980
11981 if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
11982 return rv;
11983
	/*
	 * Select the Host Wakeup Registers page; the caller is now able to
	 * write registers on the Wakeup registers page.
	 */
11987 return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11988 BM_WUC_PAGE << IGP3_PAGE_SHIFT);
11989 }
11990
11991 /*
11992 * wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
11993 * @dev: pointer to the HW structure
 * @phy_regp: pointer to the original contents of BM_WUC_ENABLE_REG
11995 *
11996 * Restore BM_WUC_ENABLE_REG to its original value.
11997 *
 * Assumes the semaphore is already acquired and *phy_regp holds the contents
 * of the BM_WUC_ENABLE_REG register before register(s) on BM_WUC_PAGE were
 * accessed by the caller.
12001 */
12002 static int
12003 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12004 {
12005 #ifdef WM_DEBUG
12006 struct wm_softc *sc = device_private(dev);
12007 #endif
12008
12009 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12010 device_xname(dev), __func__));
12011
12012 if (!phy_regp)
12013 return -1;
12014
12015 /* Select Port Control Registers page */
12016 wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12017 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12018
12019 /* Restore 769.17 to its original value */
12020 wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
12021
12022 return 0;
12023 }
12024
12025 /*
12026 * wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
12027 * @sc: pointer to the HW structure
12028 * @offset: register offset to be read or written
12029 * @val: pointer to the data to read or write
12030 * @rd: determines if operation is read or write
12031 * @page_set: BM_WUC_PAGE already set and access enabled
12032 *
12033 * Read the PHY register at offset and store the retrieved information in
12034 * data, or write data to PHY register at offset. Note the procedure to
 * access the PHY wakeup registers differs from reading the other PHY
 * registers. It works as follows:
 *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set page to 800 for host (801 for manageability)
12039 * 3) Write the address using the address opcode (0x11)
12040 * 4) Read or write the data using the data opcode (0x12)
12041 * 5) Restore 769.17.2 to its original value
12042 *
12043 * Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
12044 * step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
12045 *
 * Assumes the semaphore is already acquired. When page_set==TRUE, assumes
 * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 * is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
12049 */
12050 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
12052 bool page_set)
12053 {
12054 struct wm_softc *sc = device_private(dev);
12055 uint16_t regnum = BM_PHY_REG_NUM(offset);
12056 uint16_t page = BM_PHY_REG_PAGE(offset);
12057 uint16_t wuce;
12058 int rv = 0;
12059
12060 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12061 device_xname(dev), __func__));
12062 /* XXX Gig must be disabled for MDIO accesses to page 800 */
12063 if ((sc->sc_type == WM_T_PCH)
12064 && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
12065 device_printf(dev,
12066 "Attempting to access page %d while gig enabled.\n", page);
12067 }
12068
12069 if (!page_set) {
12070 /* Enable access to PHY wakeup registers */
12071 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
12072 if (rv != 0) {
12073 device_printf(dev,
12074 "%s: Could not enable PHY wakeup reg access\n",
12075 __func__);
12076 return rv;
12077 }
12078 }
12079 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
12080 device_xname(sc->sc_dev), __func__, page, regnum));
12081
12082 /*
12083 * 2) Access PHY wakeup register.
12084 * See wm_access_phy_wakeup_reg_bm.
12085 */
12086
12087 /* Write the Wakeup register page offset value using opcode 0x11 */
12088 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
12089 if (rv != 0)
12090 return rv;
12091
12092 if (rd) {
12093 /* Read the Wakeup register page value using opcode 0x12 */
12094 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
12095 } else {
12096 /* Write the Wakeup register page value using opcode 0x12 */
12097 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
12098 }
12099 if (rv != 0)
12100 return rv;
12101
12102 if (!page_set)
12103 rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
12104
12105 return rv;
12106 }
12107
12108 /*
12109 * wm_gmii_hv_readreg: [mii interface function]
12110 *
12111 * Read a PHY register on the kumeran
12112 * This could be handled by the PHY layer if we didn't have to lock the
12113 * resource ...
12114 */
12115 static int
12116 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
12117 {
12118 struct wm_softc *sc = device_private(dev);
12119 int rv;
12120
12121 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12122 device_xname(dev), __func__));
12123
12124 rv = sc->phy.acquire(sc);
12125 if (rv != 0) {
12126 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12127 return rv;
12128 }
12129
12130 rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
12131 sc->phy.release(sc);
12132 return rv;
12133 }
12134
12135 static int
12136 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12137 {
12138 uint16_t page = BM_PHY_REG_PAGE(reg);
12139 uint16_t regnum = BM_PHY_REG_NUM(reg);
12140 int rv;
12141
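	/* Pages at or above HV_INTC_FC_PAGE_START live at PHY address 1 */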
12142 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12143
12144 /* Page 800 works differently than the rest so it has its own func */
12145 if (page == BM_WUC_PAGE)
12146 return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
12147
12148 /*
12149 * Lower than page 768 works differently than the rest so it has its
12150 * own func
12151 */
12152 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12153 device_printf(dev, "gmii_hv_readreg!!!\n");
12154 return -1;
12155 }
12156
12157 /*
12158 * XXX I21[789] documents say that the SMBus Address register is at
12159 * PHY address 01, Page 0 (not 768), Register 26.
12160 */
12161 if (page == HV_INTC_FC_PAGE_START)
12162 page = 0;
12163
12164 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12165 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12166 page << BME1000_PAGE_SHIFT);
12167 if (rv != 0)
12168 return rv;
12169 }
12170
12171 return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
12172 }
12173
12174 /*
12175 * wm_gmii_hv_writereg: [mii interface function]
12176 *
12177 * Write a PHY register on the kumeran.
12178 * This could be handled by the PHY layer if we didn't have to lock the
12179 * resource ...
12180 */
12181 static int
12182 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
12183 {
12184 struct wm_softc *sc = device_private(dev);
12185 int rv;
12186
12187 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12188 device_xname(dev), __func__));
12189
12190 rv = sc->phy.acquire(sc);
12191 if (rv != 0) {
12192 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12193 return rv;
12194 }
12195
12196 rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
12197 sc->phy.release(sc);
12198
12199 return rv;
12200 }
12201
12202 static int
12203 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12204 {
12205 struct wm_softc *sc = device_private(dev);
12206 uint16_t page = BM_PHY_REG_PAGE(reg);
12207 uint16_t regnum = BM_PHY_REG_NUM(reg);
12208 int rv;
12209
12210 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12211
12212 /* Page 800 works differently than the rest so it has its own func */
12213 if (page == BM_WUC_PAGE)
12214 return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
12215 false);
12216
12217 /*
12218 * Lower than page 768 works differently than the rest so it has its
12219 * own func
12220 */
12221 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12222 device_printf(dev, "gmii_hv_writereg!!!\n");
12223 return -1;
12224 }
12225
12226 {
12227 /*
12228 * XXX I21[789] documents say that the SMBus Address register
12229 * is at PHY address 01, Page 0 (not 768), Register 26.
12230 */
12231 if (page == HV_INTC_FC_PAGE_START)
12232 page = 0;
12233
12234 /*
12235 * XXX Workaround MDIO accesses being disabled after entering
12236 * IEEE Power Down (whenever bit 11 of the PHY control
12237 * register is set)
12238 */
12239 if (sc->sc_phytype == WMPHY_82578) {
12240 struct mii_softc *child;
12241
12242 child = LIST_FIRST(&sc->sc_mii.mii_phys);
12243 if ((child != NULL) && (child->mii_mpd_rev >= 1)
12244 && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
12245 && ((val & (1 << 11)) != 0)) {
12246 device_printf(dev, "XXX need workaround\n");
12247 }
12248 }
12249
12250 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12251 rv = wm_gmii_mdic_writereg(dev, 1,
12252 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12253 if (rv != 0)
12254 return rv;
12255 }
12256 }
12257
12258 return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
12259 }
12260
12261 /*
12262 * wm_gmii_82580_readreg: [mii interface function]
12263 *
12264 * Read a PHY register on the 82580 and I350.
12265 * This could be handled by the PHY layer if we didn't have to lock the
12266 * resource ...
12267 */
12268 static int
12269 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
12270 {
12271 struct wm_softc *sc = device_private(dev);
12272 int rv;
12273
12274 rv = sc->phy.acquire(sc);
12275 if (rv != 0) {
12276 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12277 return rv;
12278 }
12279
12280 #ifdef DIAGNOSTIC
12281 if (reg > MII_ADDRMASK) {
12282 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12283 __func__, sc->sc_phytype, reg);
12284 reg &= MII_ADDRMASK;
12285 }
12286 #endif
12287 rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
12288
12289 sc->phy.release(sc);
12290 return rv;
12291 }
12292
12293 /*
12294 * wm_gmii_82580_writereg: [mii interface function]
12295 *
12296 * Write a PHY register on the 82580 and I350.
12297 * This could be handled by the PHY layer if we didn't have to lock the
12298 * resource ...
12299 */
12300 static int
12301 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
12302 {
12303 struct wm_softc *sc = device_private(dev);
12304 int rv;
12305
12306 rv = sc->phy.acquire(sc);
12307 if (rv != 0) {
12308 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12309 return rv;
12310 }
12311
12312 #ifdef DIAGNOSTIC
12313 if (reg > MII_ADDRMASK) {
12314 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12315 __func__, sc->sc_phytype, reg);
12316 reg &= MII_ADDRMASK;
12317 }
12318 #endif
12319 rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
12320
12321 sc->phy.release(sc);
12322 return rv;
12323 }
12324
12325 /*
12326 * wm_gmii_gs40g_readreg: [mii interface function]
12327 *
12328 * Read a PHY register on the I2100 and I211.
12329 * This could be handled by the PHY layer if we didn't have to lock the
12330 * resource ...
12331 */
12332 static int
12333 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
12334 {
12335 struct wm_softc *sc = device_private(dev);
12336 int page, offset;
12337 int rv;
12338
12339 /* Acquire semaphore */
12340 rv = sc->phy.acquire(sc);
12341 if (rv != 0) {
12342 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12343 return rv;
12344 }
12345
12346 /* Page select */
12347 page = reg >> GS40G_PAGE_SHIFT;
12348 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12349 if (rv != 0)
12350 goto release;
12351
12352 /* Read reg */
12353 offset = reg & GS40G_OFFSET_MASK;
12354 rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
12355
12356 release:
12357 sc->phy.release(sc);
12358 return rv;
12359 }
12360
12361 /*
12362 * wm_gmii_gs40g_writereg: [mii interface function]
12363 *
12364 * Write a PHY register on the I210 and I211.
12365 * This could be handled by the PHY layer if we didn't have to lock the
12366 * resource ...
12367 */
12368 static int
12369 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
12370 {
12371 struct wm_softc *sc = device_private(dev);
12372 uint16_t page;
12373 int offset, rv;
12374
12375 /* Acquire semaphore */
12376 rv = sc->phy.acquire(sc);
12377 if (rv != 0) {
12378 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12379 return rv;
12380 }
12381
12382 /* Page select */
12383 page = reg >> GS40G_PAGE_SHIFT;
12384 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12385 if (rv != 0)
12386 goto release;
12387
12388 /* Write reg */
12389 offset = reg & GS40G_OFFSET_MASK;
12390 rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
12391
12392 release:
12393 /* Release semaphore */
12394 sc->phy.release(sc);
12395 return rv;
12396 }
12397
12398 /*
12399 * wm_gmii_statchg: [mii interface function]
12400 *
12401 * Callback from MII layer when media changes.
12402 */
12403 static void
12404 wm_gmii_statchg(struct ifnet *ifp)
12405 {
12406 struct wm_softc *sc = ifp->if_softc;
12407 struct mii_data *mii = &sc->sc_mii;
12408
12409 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
12410 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12411 sc->sc_fcrtl &= ~FCRTL_XONE;
12412
12413 /* Get flow control negotiation result. */
12414 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
12415 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
12416 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
12417 mii->mii_media_active &= ~IFM_ETH_FMASK;
12418 }
12419
12420 if (sc->sc_flowflags & IFM_FLOW) {
12421 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
12422 sc->sc_ctrl |= CTRL_TFCE;
12423 sc->sc_fcrtl |= FCRTL_XONE;
12424 }
12425 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
12426 sc->sc_ctrl |= CTRL_RFCE;
12427 }
12428
12429 if (mii->mii_media_active & IFM_FDX) {
12430 DPRINTF(sc, WM_DEBUG_LINK,
12431 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
12432 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12433 } else {
12434 DPRINTF(sc, WM_DEBUG_LINK,
12435 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
12436 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12437 }
12438
12439 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12440 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12441 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
12442 : WMREG_FCRTL, sc->sc_fcrtl);
12443 if (sc->sc_type == WM_T_80003) {
12444 switch (IFM_SUBTYPE(mii->mii_media_active)) {
12445 case IFM_1000_T:
12446 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12447 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
12448 sc->sc_tipg = TIPG_1000T_80003_DFLT;
12449 break;
12450 default:
12451 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12452 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
12453 sc->sc_tipg = TIPG_10_100_80003_DFLT;
12454 break;
12455 }
12456 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
12457 }
12458 }
12459
/* Kumeran related (80003, ICH* and PCH*) */
12461
12462 /*
12463 * wm_kmrn_readreg:
12464 *
12465 * Read a kumeran register
12466 */
12467 static int
12468 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
12469 {
12470 int rv;
12471
12472 if (sc->sc_type == WM_T_80003)
12473 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12474 else
12475 rv = sc->phy.acquire(sc);
12476 if (rv != 0) {
12477 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12478 __func__);
12479 return rv;
12480 }
12481
12482 rv = wm_kmrn_readreg_locked(sc, reg, val);
12483
12484 if (sc->sc_type == WM_T_80003)
12485 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12486 else
12487 sc->phy.release(sc);
12488
12489 return rv;
12490 }
12491
12492 static int
12493 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
12494 {
12495
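	/*
	 * A Kumeran read is a single KUMCTRLSTA cycle: write the register
	 * offset with the read-enable (REN) bit set, wait briefly, then
	 * read the data back from the low bits of the same register.
	 */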
12496 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12497 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
12498 KUMCTRLSTA_REN);
12499 CSR_WRITE_FLUSH(sc);
12500 delay(2);
12501
12502 *val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
12503
12504 return 0;
12505 }
12506
12507 /*
12508 * wm_kmrn_writereg:
12509 *
12510 * Write a kumeran register
12511 */
12512 static int
12513 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
12514 {
12515 int rv;
12516
12517 if (sc->sc_type == WM_T_80003)
12518 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12519 else
12520 rv = sc->phy.acquire(sc);
12521 if (rv != 0) {
12522 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12523 __func__);
12524 return rv;
12525 }
12526
12527 rv = wm_kmrn_writereg_locked(sc, reg, val);
12528
12529 if (sc->sc_type == WM_T_80003)
12530 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12531 else
12532 sc->phy.release(sc);
12533
12534 return rv;
12535 }
12536
12537 static int
12538 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
12539 {
12540
12541 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12542 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
12543
12544 return 0;
12545 }
12546
12547 /*
 * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
12549 * This access method is different from IEEE MMD.
12550 */
12551 static int
12552 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
12553 {
12554 struct wm_softc *sc = device_private(dev);
12555 int rv;
12556
12557 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
12558 if (rv != 0)
12559 return rv;
12560
12561 if (rd)
12562 rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
12563 else
12564 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
12565 return rv;
12566 }
12567
12568 static int
12569 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
12570 {
12571
12572 return wm_access_emi_reg_locked(dev, reg, val, true);
12573 }
12574
12575 static int
12576 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
12577 {
12578
12579 return wm_access_emi_reg_locked(dev, reg, &val, false);
12580 }
12581
12582 /* SGMII related */
12583
12584 /*
12585 * wm_sgmii_uses_mdio
12586 *
12587 * Check whether the transaction is to the internal PHY or the external
12588 * MDIO interface. Return true if it's MDIO.
12589 */
12590 static bool
12591 wm_sgmii_uses_mdio(struct wm_softc *sc)
12592 {
12593 uint32_t reg;
12594 bool ismdio = false;
12595
12596 switch (sc->sc_type) {
12597 case WM_T_82575:
12598 case WM_T_82576:
12599 reg = CSR_READ(sc, WMREG_MDIC);
12600 ismdio = ((reg & MDIC_DEST) != 0);
12601 break;
12602 case WM_T_82580:
12603 case WM_T_I350:
12604 case WM_T_I354:
12605 case WM_T_I210:
12606 case WM_T_I211:
12607 reg = CSR_READ(sc, WMREG_MDICNFG);
12608 ismdio = ((reg & MDICNFG_DEST) != 0);
12609 break;
12610 default:
12611 break;
12612 }
12613
12614 return ismdio;
12615 }
12616
12617 /* Setup internal SGMII PHY for SFP */
12618 static void
12619 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
12620 {
12621 uint16_t id1, id2, phyreg;
12622 int i, rv;
12623
12624 if (((sc->sc_flags & WM_F_SGMII) == 0)
12625 || ((sc->sc_flags & WM_F_SFP) == 0))
12626 return;
12627
12628 for (i = 0; i < MII_NPHY; i++) {
12629 sc->phy.no_errprint = true;
12630 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
12631 if (rv != 0)
12632 continue;
12633 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
12634 if (rv != 0)
12635 continue;
12636 if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
12637 continue;
12638 sc->phy.no_errprint = false;
12639
12640 sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
12641 phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
12642 phyreg |= ESSR_SGMII_WOC_COPPER;
12643 sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
12644 break;
12645 }
12646
12647 }
12648
12649 /*
12650 * wm_sgmii_readreg: [mii interface function]
12651 *
12652 * Read a PHY register on the SGMII
12653 * This could be handled by the PHY layer if we didn't have to lock the
12654 * resource ...
12655 */
12656 static int
12657 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
12658 {
12659 struct wm_softc *sc = device_private(dev);
12660 int rv;
12661
12662 rv = sc->phy.acquire(sc);
12663 if (rv != 0) {
12664 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12665 return rv;
12666 }
12667
12668 rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
12669
12670 sc->phy.release(sc);
12671 return rv;
12672 }
12673
12674 static int
12675 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12676 {
12677 struct wm_softc *sc = device_private(dev);
12678 uint32_t i2ccmd;
12679 int i, rv = 0;
12680
12681 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12682 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12683 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12684
12685 /* Poll the ready bit */
12686 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12687 delay(50);
12688 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12689 if (i2ccmd & I2CCMD_READY)
12690 break;
12691 }
12692 if ((i2ccmd & I2CCMD_READY) == 0) {
12693 device_printf(dev, "I2CCMD Read did not complete\n");
12694 rv = ETIMEDOUT;
12695 }
12696 if ((i2ccmd & I2CCMD_ERROR) != 0) {
12697 if (!sc->phy.no_errprint)
12698 device_printf(dev, "I2CCMD Error bit set\n");
12699 rv = EIO;
12700 }
12701
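	/* The I2C interface returns the two data bytes swapped; swap back */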
12702 *val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
12703
12704 return rv;
12705 }
12706
12707 /*
12708 * wm_sgmii_writereg: [mii interface function]
12709 *
12710 * Write a PHY register on the SGMII.
12711 * This could be handled by the PHY layer if we didn't have to lock the
12712 * resource ...
12713 */
12714 static int
12715 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
12716 {
12717 struct wm_softc *sc = device_private(dev);
12718 int rv;
12719
12720 rv = sc->phy.acquire(sc);
12721 if (rv != 0) {
12722 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12723 return rv;
12724 }
12725
12726 rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
12727
12728 sc->phy.release(sc);
12729
12730 return rv;
12731 }
12732
12733 static int
12734 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12735 {
12736 struct wm_softc *sc = device_private(dev);
12737 uint32_t i2ccmd;
12738 uint16_t swapdata;
12739 int rv = 0;
12740 int i;
12741
12742 /* Swap the data bytes for the I2C interface */
12743 swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
12744 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12745 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
12746 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12747
12748 /* Poll the ready bit */
12749 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12750 delay(50);
12751 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12752 if (i2ccmd & I2CCMD_READY)
12753 break;
12754 }
12755 if ((i2ccmd & I2CCMD_READY) == 0) {
12756 device_printf(dev, "I2CCMD Write did not complete\n");
12757 rv = ETIMEDOUT;
12758 }
12759 if ((i2ccmd & I2CCMD_ERROR) != 0) {
12760 device_printf(dev, "I2CCMD Error bit set\n");
12761 rv = EIO;
12762 }
12763
12764 return rv;
12765 }
12766
12767 /* TBI related */
12768
12769 static bool
12770 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
12771 {
12772 bool sig;
12773
12774 sig = ctrl & CTRL_SWDPIN(1);
12775
12776 /*
12777 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
12778 * detect a signal, 1 if they don't.
12779 */
12780 if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
12781 sig = !sig;
12782
12783 return sig;
12784 }
12785
12786 /*
12787 * wm_tbi_mediainit:
12788 *
12789 * Initialize media for use on 1000BASE-X devices.
12790 */
12791 static void
12792 wm_tbi_mediainit(struct wm_softc *sc)
12793 {
12794 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12795 const char *sep = "";
12796
12797 if (sc->sc_type < WM_T_82543)
12798 sc->sc_tipg = TIPG_WM_DFLT;
12799 else
12800 sc->sc_tipg = TIPG_LG_DFLT;
12801
12802 sc->sc_tbi_serdes_anegticks = 5;
12803
12804 /* Initialize our media structures */
12805 sc->sc_mii.mii_ifp = ifp;
12806 sc->sc_ethercom.ec_mii = &sc->sc_mii;
12807
12808 ifp->if_baudrate = IF_Gbps(1);
12809 if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
12810 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12811 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12812 wm_serdes_mediachange, wm_serdes_mediastatus,
12813 sc->sc_core_lock);
12814 } else {
12815 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12816 wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
12817 }
12818
12819 /*
12820 * SWD Pins:
12821 *
12822 * 0 = Link LED (output)
12823 * 1 = Loss Of Signal (input)
12824 */
12825 sc->sc_ctrl |= CTRL_SWDPIO(0);
12826
12827 /* XXX Perhaps this is only for TBI */
12828 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12829 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
12830
12831 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
12832 sc->sc_ctrl &= ~CTRL_LRST;
12833
12834 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12835
12836 #define ADD(ss, mm, dd) \
12837 do { \
12838 aprint_normal("%s%s", sep, ss); \
12839 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
12840 sep = ", "; \
12841 } while (/*CONSTCOND*/0)
12842
12843 aprint_normal_dev(sc->sc_dev, "");
12844
12845 if (sc->sc_type == WM_T_I354) {
12846 uint32_t status;
12847
12848 status = CSR_READ(sc, WMREG_STATUS);
12849 if (((status & STATUS_2P5_SKU) != 0)
12850 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12851 ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
12852 } else
12853 ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
12854 } else if (sc->sc_type == WM_T_82545) {
12855 /* Only 82545 is LX (XXX except SFP) */
12856 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12857 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12858 } else if (sc->sc_sfptype != 0) {
12859 /* XXX wm(4) fiber/serdes don't use ifm_data */
12860 switch (sc->sc_sfptype) {
12861 default:
12862 case SFF_SFP_ETH_FLAGS_1000SX:
12863 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12864 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12865 break;
12866 case SFF_SFP_ETH_FLAGS_1000LX:
12867 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12868 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12869 break;
12870 case SFF_SFP_ETH_FLAGS_1000CX:
12871 ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
12872 ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
12873 break;
12874 case SFF_SFP_ETH_FLAGS_1000T:
12875 ADD("1000baseT", IFM_1000_T, 0);
12876 ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
12877 break;
12878 case SFF_SFP_ETH_FLAGS_100FX:
12879 ADD("100baseFX", IFM_100_FX, ANAR_TX);
12880 ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
12881 break;
12882 }
12883 } else {
12884 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12885 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12886 }
12887 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
12888 aprint_normal("\n");
12889
12890 #undef ADD
12891
12892 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
12893 }
12894
12895 /*
12896 * wm_tbi_mediachange: [ifmedia interface function]
12897 *
12898 * Set hardware to newly-selected media on a 1000BASE-X device.
12899 */
12900 static int
12901 wm_tbi_mediachange(struct ifnet *ifp)
12902 {
12903 struct wm_softc *sc = ifp->if_softc;
12904 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12905 uint32_t status, ctrl;
12906 bool signal;
12907 int i;
12908
12909 KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
12910 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12911 /* XXX need some work for >= 82571 and < 82575 */
12912 if (sc->sc_type < WM_T_82575)
12913 return 0;
12914 }
12915
12916 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12917 || (sc->sc_type >= WM_T_82575))
12918 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12919
12920 sc->sc_ctrl &= ~CTRL_LRST;
12921 sc->sc_txcw = TXCW_ANE;
12922 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12923 sc->sc_txcw |= TXCW_FD | TXCW_HD;
12924 else if (ife->ifm_media & IFM_FDX)
12925 sc->sc_txcw |= TXCW_FD;
12926 else
12927 sc->sc_txcw |= TXCW_HD;
12928
12929 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
12930 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
12931
	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
12933 device_xname(sc->sc_dev), sc->sc_txcw));
12934 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12935 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12936 CSR_WRITE_FLUSH(sc);
12937 delay(1000);
12938
12939 ctrl = CSR_READ(sc, WMREG_CTRL);
12940 signal = wm_tbi_havesignal(sc, ctrl);
12941
12942 DPRINTF(sc, WM_DEBUG_LINK,
12943 ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
12944
12945 if (signal) {
12946 /* Have signal; wait for the link to come up. */
12947 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
12948 delay(10000);
12949 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
12950 break;
12951 }
12952
12953 DPRINTF(sc, WM_DEBUG_LINK,
12954 ("%s: i = %d after waiting for link\n",
12955 device_xname(sc->sc_dev), i));
12956
12957 status = CSR_READ(sc, WMREG_STATUS);
12958 DPRINTF(sc, WM_DEBUG_LINK,
12959 ("%s: status after final read = 0x%x, STATUS_LU = %#"
12960 __PRIxBIT "\n",
12961 device_xname(sc->sc_dev), status, STATUS_LU));
12962 if (status & STATUS_LU) {
12963 /* Link is up. */
12964 DPRINTF(sc, WM_DEBUG_LINK,
12965 ("%s: LINK: set media -> link up %s\n",
12966 device_xname(sc->sc_dev),
12967 (status & STATUS_FD) ? "FDX" : "HDX"));
12968
			/*
			 * NOTE: The hardware updates TFCE and RFCE in CTRL
			 * automatically, so re-read the register into
			 * sc->sc_ctrl.
			 */
12973 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
12974 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12975 sc->sc_fcrtl &= ~FCRTL_XONE;
12976 if (status & STATUS_FD)
12977 sc->sc_tctl |=
12978 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12979 else
12980 sc->sc_tctl |=
12981 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12982 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
12983 sc->sc_fcrtl |= FCRTL_XONE;
12984 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12985 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
12986 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
12987 sc->sc_tbi_linkup = 1;
12988 } else {
12989 if (i == WM_LINKUP_TIMEOUT)
12990 wm_check_for_link(sc);
12991 /* Link is down. */
12992 DPRINTF(sc, WM_DEBUG_LINK,
12993 ("%s: LINK: set media -> link down\n",
12994 device_xname(sc->sc_dev)));
12995 sc->sc_tbi_linkup = 0;
12996 }
12997 } else {
12998 DPRINTF(sc, WM_DEBUG_LINK,
12999 ("%s: LINK: set media -> no signal\n",
13000 device_xname(sc->sc_dev)));
13001 sc->sc_tbi_linkup = 0;
13002 }
13003
13004 wm_tbi_serdes_set_linkled(sc);
13005
13006 return 0;
13007 }
13008
13009 /*
13010 * wm_tbi_mediastatus: [ifmedia interface function]
13011 *
13012 * Get the current interface media status on a 1000BASE-X device.
13013 */
13014 static void
13015 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13016 {
13017 struct wm_softc *sc = ifp->if_softc;
13018 uint32_t ctrl, status;
13019
13020 ifmr->ifm_status = IFM_AVALID;
13021 ifmr->ifm_active = IFM_ETHER;
13022
13023 status = CSR_READ(sc, WMREG_STATUS);
13024 if ((status & STATUS_LU) == 0) {
13025 ifmr->ifm_active |= IFM_NONE;
13026 return;
13027 }
13028
13029 ifmr->ifm_status |= IFM_ACTIVE;
13030 /* Only 82545 is LX */
13031 if (sc->sc_type == WM_T_82545)
13032 ifmr->ifm_active |= IFM_1000_LX;
13033 else
13034 ifmr->ifm_active |= IFM_1000_SX;
13035 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
13036 ifmr->ifm_active |= IFM_FDX;
13037 else
13038 ifmr->ifm_active |= IFM_HDX;
13039 ctrl = CSR_READ(sc, WMREG_CTRL);
13040 if (ctrl & CTRL_RFCE)
13041 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
13042 if (ctrl & CTRL_TFCE)
13043 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
13044 }
13045
13046 /* XXX TBI only */
13047 static int
13048 wm_check_for_link(struct wm_softc *sc)
13049 {
13050 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
13051 uint32_t rxcw;
13052 uint32_t ctrl;
13053 uint32_t status;
13054 bool signal;
13055
13056 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
13057 device_xname(sc->sc_dev), __func__));
13058
13059 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
13060 /* XXX need some work for >= 82571 */
13061 if (sc->sc_type >= WM_T_82571) {
13062 sc->sc_tbi_linkup = 1;
13063 return 0;
13064 }
13065 }
13066
13067 rxcw = CSR_READ(sc, WMREG_RXCW);
13068 ctrl = CSR_READ(sc, WMREG_CTRL);
13069 status = CSR_READ(sc, WMREG_STATUS);
13070 signal = wm_tbi_havesignal(sc, ctrl);
13071
13072 DPRINTF(sc, WM_DEBUG_LINK,
13073 ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
13074 device_xname(sc->sc_dev), __func__, signal,
13075 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
13076
13077 /*
13078 * SWDPIN LU RXCW
13079 * 0 0 0
13080 * 0 0 1 (should not happen)
13081 * 0 1 0 (should not happen)
13082 * 0 1 1 (should not happen)
13083 * 1 0 0 Disable autonego and force linkup
13084 * 1 0 1 got /C/ but not linkup yet
13085 * 1 1 0 (linkup)
13086 * 1 1 1 If IFM_AUTO, back to autonego
13087 *
13088 */
13089 if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
13090 DPRINTF(sc, WM_DEBUG_LINK,
13091 ("%s: %s: force linkup and fullduplex\n",
13092 device_xname(sc->sc_dev), __func__));
13093 sc->sc_tbi_linkup = 0;
13094 /* Disable auto-negotiation in the TXCW register */
13095 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
13096
13097 /*
13098 * Force link-up and also force full-duplex.
13099 *
		 * NOTE: The hardware has updated TFCE and RFCE in CTRL
		 * automatically, so take the freshly read value for sc->sc_ctrl.
13102 */
13103 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
13104 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13105 } else if (((status & STATUS_LU) != 0)
13106 && ((rxcw & RXCW_C) != 0)
13107 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
13108 sc->sc_tbi_linkup = 1;
13109 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
13110 device_xname(sc->sc_dev), __func__));
13111 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13112 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
13113 } else if (signal && ((rxcw & RXCW_C) != 0)) {
13114 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
13115 device_xname(sc->sc_dev), __func__));
13116 } else {
13117 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
13118 device_xname(sc->sc_dev), __func__, rxcw, ctrl,
13119 status));
13120 }
13121
13122 return 0;
13123 }
13124
13125 /*
13126 * wm_tbi_tick:
13127 *
13128 * Check the link on TBI devices.
13129 * This function acts as mii_tick().
13130 */
13131 static void
13132 wm_tbi_tick(struct wm_softc *sc)
13133 {
13134 struct mii_data *mii = &sc->sc_mii;
13135 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13136 uint32_t status;
13137
13138 KASSERT(WM_CORE_LOCKED(sc));
13139
13140 status = CSR_READ(sc, WMREG_STATUS);
13141
13142 /* XXX is this needed? */
13143 (void)CSR_READ(sc, WMREG_RXCW);
13144 (void)CSR_READ(sc, WMREG_CTRL);
13145
	/* Set the link status. */
13147 if ((status & STATUS_LU) == 0) {
13148 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
13149 device_xname(sc->sc_dev)));
13150 sc->sc_tbi_linkup = 0;
13151 } else if (sc->sc_tbi_linkup == 0) {
13152 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
13153 device_xname(sc->sc_dev),
13154 (status & STATUS_FD) ? "FDX" : "HDX"));
13155 sc->sc_tbi_linkup = 1;
13156 sc->sc_tbi_serdes_ticks = 0;
13157 }
13158
13159 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
13160 goto setled;
13161
13162 if ((status & STATUS_LU) == 0) {
13163 sc->sc_tbi_linkup = 0;
13164 /* If the timer expired, retry autonegotiation */
13165 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13166 && (++sc->sc_tbi_serdes_ticks
13167 >= sc->sc_tbi_serdes_anegticks)) {
13168 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13169 device_xname(sc->sc_dev), __func__));
13170 sc->sc_tbi_serdes_ticks = 0;
13171 /*
13172 * Reset the link, and let autonegotiation do
13173 * its thing
13174 */
13175 sc->sc_ctrl |= CTRL_LRST;
13176 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13177 CSR_WRITE_FLUSH(sc);
13178 delay(1000);
13179 sc->sc_ctrl &= ~CTRL_LRST;
13180 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13181 CSR_WRITE_FLUSH(sc);
13182 delay(1000);
13183 CSR_WRITE(sc, WMREG_TXCW,
13184 sc->sc_txcw & ~TXCW_ANE);
13185 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13186 }
13187 }
13188
13189 setled:
13190 wm_tbi_serdes_set_linkled(sc);
13191 }
13192
13193 /* SERDES related */
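
/*
 * wm_serdes_power_up_link_82575:
 *
 *	Enable the PCS and power up the SFP laser on 82575 and newer
 *	SERDES/SGMII devices.
 */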
13194 static void
13195 wm_serdes_power_up_link_82575(struct wm_softc *sc)
13196 {
13197 uint32_t reg;
13198
13199 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13200 && ((sc->sc_flags & WM_F_SGMII) == 0))
13201 return;
13202
13203 /* Enable PCS to turn on link */
13204 reg = CSR_READ(sc, WMREG_PCS_CFG);
13205 reg |= PCS_CFG_PCS_EN;
13206 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
13207
13208 /* Power up the laser */
13209 reg = CSR_READ(sc, WMREG_CTRL_EXT);
13210 reg &= ~CTRL_EXT_SWDPIN(3);
13211 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13212
13213 /* Flush the write to verify completion */
13214 CSR_WRITE_FLUSH(sc);
13215 delay(1000);
13216 }
13217
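/*
 * wm_serdes_mediachange: [ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a SERDES device.
 */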
13218 static int
13219 wm_serdes_mediachange(struct ifnet *ifp)
13220 {
13221 struct wm_softc *sc = ifp->if_softc;
13222 bool pcs_autoneg = true; /* XXX */
13223 uint32_t ctrl_ext, pcs_lctl, reg;
13224
13225 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13226 && ((sc->sc_flags & WM_F_SGMII) == 0))
13227 return 0;
13228
13229 /* XXX Currently, this function is not called on 8257[12] */
13230 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
13231 || (sc->sc_type >= WM_T_82575))
13232 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
13233
13234 /* Power on the sfp cage if present */
13235 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13236 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13237 ctrl_ext |= CTRL_EXT_I2C_ENA;
13238 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13239
13240 sc->sc_ctrl |= CTRL_SLU;
13241
13242 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
13243 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
13244
13245 reg = CSR_READ(sc, WMREG_CONNSW);
13246 reg |= CONNSW_ENRGSRC;
13247 CSR_WRITE(sc, WMREG_CONNSW, reg);
13248 }
13249
13250 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
13251 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
13252 case CTRL_EXT_LINK_MODE_SGMII:
13253 /* SGMII mode lets the phy handle forcing speed/duplex */
13254 pcs_autoneg = true;
13255 /* Autoneg time out should be disabled for SGMII mode */
13256 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
13257 break;
13258 case CTRL_EXT_LINK_MODE_1000KX:
13259 pcs_autoneg = false;
13260 /* FALLTHROUGH */
13261 default:
13262 if ((sc->sc_type == WM_T_82575)
13263 || (sc->sc_type == WM_T_82576)) {
13264 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
13265 pcs_autoneg = false;
13266 }
13267 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
13268 | CTRL_FRCFDX;
13269
13270 /* Set speed of 1000/Full if speed/duplex is forced */
13271 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
13272 }
13273 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13274
13275 pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
13276 PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
13277
13278 if (pcs_autoneg) {
13279 /* Set PCS register for autoneg */
13280 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
13281
13282 /* Disable force flow control for autoneg */
13283 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
13284
13285 /* Configure flow control advertisement for autoneg */
13286 reg = CSR_READ(sc, WMREG_PCS_ANADV);
13287 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
13288 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
13289 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
13290 } else
13291 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
13292
13293 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
13294
13295 return 0;
13296 }
13297
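/*
 * wm_serdes_mediastatus: [ifmedia interface function]
 *
 *	Get the current interface media status on a SERDES device.
 */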
13298 static void
13299 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13300 {
13301 struct wm_softc *sc = ifp->if_softc;
13302 struct mii_data *mii = &sc->sc_mii;
13303 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13304 uint32_t pcs_adv, pcs_lpab, reg;
13305
13306 ifmr->ifm_status = IFM_AVALID;
13307 ifmr->ifm_active = IFM_ETHER;
13308
13309 /* Check PCS */
13310 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13311 if ((reg & PCS_LSTS_LINKOK) == 0) {
13312 ifmr->ifm_active |= IFM_NONE;
13313 sc->sc_tbi_linkup = 0;
13314 goto setled;
13315 }
13316
13317 sc->sc_tbi_linkup = 1;
13318 ifmr->ifm_status |= IFM_ACTIVE;
13319 if (sc->sc_type == WM_T_I354) {
13320 uint32_t status;
13321
13322 status = CSR_READ(sc, WMREG_STATUS);
13323 if (((status & STATUS_2P5_SKU) != 0)
13324 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
13325 ifmr->ifm_active |= IFM_2500_KX;
13326 } else
13327 ifmr->ifm_active |= IFM_1000_KX;
13328 } else {
13329 switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
13330 case PCS_LSTS_SPEED_10:
13331 ifmr->ifm_active |= IFM_10_T; /* XXX */
13332 break;
13333 case PCS_LSTS_SPEED_100:
13334 ifmr->ifm_active |= IFM_100_FX; /* XXX */
13335 break;
13336 case PCS_LSTS_SPEED_1000:
13337 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13338 break;
13339 default:
13340 device_printf(sc->sc_dev, "Unknown speed\n");
13341 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13342 break;
13343 }
13344 }
13345 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
13346 if ((reg & PCS_LSTS_FDX) != 0)
13347 ifmr->ifm_active |= IFM_FDX;
13348 else
13349 ifmr->ifm_active |= IFM_HDX;
13350 mii->mii_media_active &= ~IFM_ETH_FMASK;
13351 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
13352 /* Check flow */
13353 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13354 if ((reg & PCS_LSTS_AN_COMP) == 0) {
13355 DPRINTF(sc, WM_DEBUG_LINK,
13356 ("XXX LINKOK but not ACOMP\n"));
13357 goto setled;
13358 }
13359 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
13360 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
13361 DPRINTF(sc, WM_DEBUG_LINK,
13362 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
13363 if ((pcs_adv & TXCW_SYM_PAUSE)
13364 && (pcs_lpab & TXCW_SYM_PAUSE)) {
13365 mii->mii_media_active |= IFM_FLOW
13366 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
13367 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
13368 && (pcs_adv & TXCW_ASYM_PAUSE)
13369 && (pcs_lpab & TXCW_SYM_PAUSE)
13370 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13371 mii->mii_media_active |= IFM_FLOW
13372 | IFM_ETH_TXPAUSE;
13373 } else if ((pcs_adv & TXCW_SYM_PAUSE)
13374 && (pcs_adv & TXCW_ASYM_PAUSE)
13375 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
13376 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13377 mii->mii_media_active |= IFM_FLOW
13378 | IFM_ETH_RXPAUSE;
13379 }
13380 }
13381 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
13382 | (mii->mii_media_active & IFM_ETH_FMASK);
13383 setled:
13384 wm_tbi_serdes_set_linkled(sc);
13385 }
13386
13387 /*
13388 * wm_serdes_tick:
13389 *
13390 * Check the link on serdes devices.
13391 */
13392 static void
13393 wm_serdes_tick(struct wm_softc *sc)
13394 {
13395 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
13396 struct mii_data *mii = &sc->sc_mii;
13397 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13398 uint32_t reg;
13399
13400 KASSERT(WM_CORE_LOCKED(sc));
13401
13402 mii->mii_media_status = IFM_AVALID;
13403 mii->mii_media_active = IFM_ETHER;
13404
13405 /* Check PCS */
13406 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13407 if ((reg & PCS_LSTS_LINKOK) != 0) {
13408 mii->mii_media_status |= IFM_ACTIVE;
13409 sc->sc_tbi_linkup = 1;
13410 sc->sc_tbi_serdes_ticks = 0;
13411 mii->mii_media_active |= IFM_1000_SX; /* XXX */
13412 if ((reg & PCS_LSTS_FDX) != 0)
13413 mii->mii_media_active |= IFM_FDX;
13414 else
13415 mii->mii_media_active |= IFM_HDX;
13416 } else {
		mii->mii_media_active |= IFM_NONE;
13418 sc->sc_tbi_linkup = 0;
13419 /* If the timer expired, retry autonegotiation */
13420 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13421 && (++sc->sc_tbi_serdes_ticks
13422 >= sc->sc_tbi_serdes_anegticks)) {
13423 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13424 device_xname(sc->sc_dev), __func__));
13425 sc->sc_tbi_serdes_ticks = 0;
13426 /* XXX */
13427 wm_serdes_mediachange(ifp);
13428 }
13429 }
13430
13431 wm_tbi_serdes_set_linkled(sc);
13432 }
13433
13434 /* SFP related */
13435
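/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte of the SFP module's EEPROM through the I2CCMD
 *	register. Returns 0 on success, or -1 on timeout or error.
 */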
13436 static int
13437 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
13438 {
13439 uint32_t i2ccmd;
13440 int i;
13441
13442 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
13443 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13444
13445 /* Poll the ready bit */
13446 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13447 delay(50);
13448 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13449 if (i2ccmd & I2CCMD_READY)
13450 break;
13451 }
13452 if ((i2ccmd & I2CCMD_READY) == 0)
13453 return -1;
13454 if ((i2ccmd & I2CCMD_ERROR) != 0)
13455 return -1;
13456
13457 *data = i2ccmd & 0x00ff;
13458
13459 return 0;
13460 }
13461
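/*
 * wm_sfp_get_media_type:
 *
 *	Identify the attached SFP module and derive the media type from
 *	its Ethernet compliance flags.
 */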
13462 static uint32_t
13463 wm_sfp_get_media_type(struct wm_softc *sc)
13464 {
13465 uint32_t ctrl_ext;
13466 uint8_t val = 0;
13467 int timeout = 3;
13468 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
13469 int rv = -1;
13470
13471 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13472 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13473 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
13474 CSR_WRITE_FLUSH(sc);
13475
13476 /* Read SFP module data */
13477 while (timeout) {
13478 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
13479 if (rv == 0)
13480 break;
13481 delay(100*1000); /* XXX too big */
13482 timeout--;
13483 }
13484 if (rv != 0)
13485 goto out;
13486
13487 switch (val) {
13488 case SFF_SFP_ID_SFF:
13489 aprint_normal_dev(sc->sc_dev,
13490 "Module/Connector soldered to board\n");
13491 break;
13492 case SFF_SFP_ID_SFP:
13493 sc->sc_flags |= WM_F_SFP;
13494 break;
13495 case SFF_SFP_ID_UNKNOWN:
13496 goto out;
13497 default:
13498 break;
13499 }
13500
13501 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
13502 if (rv != 0)
13503 goto out;
13504
13505 sc->sc_sfptype = val;
13506 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
13507 mediatype = WM_MEDIATYPE_SERDES;
13508 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
13509 sc->sc_flags |= WM_F_SGMII;
13510 mediatype = WM_MEDIATYPE_COPPER;
13511 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
13512 sc->sc_flags |= WM_F_SGMII;
13513 mediatype = WM_MEDIATYPE_SERDES;
13514 } else {
13515 device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
13516 __func__, sc->sc_sfptype);
13517 sc->sc_sfptype = 0; /* XXX unknown */
13518 }
13519
13520 out:
13521 /* Restore I2C interface setting */
13522 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13523
13524 return mediatype;
13525 }
13526
13527 /*
13528 * NVM related.
13529 * Microwire, SPI (w/wo EERD) and Flash.
13530 */
13531
13532 /* Both spi and uwire */
13533
13534 /*
13535 * wm_eeprom_sendbits:
13536 *
13537 * Send a series of bits to the EEPROM.
13538 */
13539 static void
13540 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
13541 {
13542 uint32_t reg;
13543 int x;
13544
13545 reg = CSR_READ(sc, WMREG_EECD);
13546
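	/* Shift the bits out on DI, MSB first, pulsing SK high and then low. */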
13547 for (x = nbits; x > 0; x--) {
13548 if (bits & (1U << (x - 1)))
13549 reg |= EECD_DI;
13550 else
13551 reg &= ~EECD_DI;
13552 CSR_WRITE(sc, WMREG_EECD, reg);
13553 CSR_WRITE_FLUSH(sc);
13554 delay(2);
13555 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13556 CSR_WRITE_FLUSH(sc);
13557 delay(2);
13558 CSR_WRITE(sc, WMREG_EECD, reg);
13559 CSR_WRITE_FLUSH(sc);
13560 delay(2);
13561 }
13562 }
13563
13564 /*
13565 * wm_eeprom_recvbits:
13566 *
13567 * Receive a series of bits from the EEPROM.
13568 */
13569 static void
13570 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
13571 {
13572 uint32_t reg, val;
13573 int x;
13574
13575 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
13576
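	/* Shift the bits in on DO, MSB first: raise SK, sample DO, drop SK. */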
13577 val = 0;
13578 for (x = nbits; x > 0; x--) {
13579 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13580 CSR_WRITE_FLUSH(sc);
13581 delay(2);
13582 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
13583 val |= (1U << (x - 1));
13584 CSR_WRITE(sc, WMREG_EECD, reg);
13585 CSR_WRITE_FLUSH(sc);
13586 delay(2);
13587 }
13588 *valp = val;
13589 }
13590
13591 /* Microwire */
13592
13593 /*
13594 * wm_nvm_read_uwire:
13595 *
13596 * Read a word from the EEPROM using the MicroWire protocol.
13597 */
13598 static int
13599 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13600 {
13601 uint32_t reg, val;
13602 int i, rv;
13603
13604 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13605 device_xname(sc->sc_dev), __func__));
13606
13607 rv = sc->nvm.acquire(sc);
13608 if (rv != 0)
13609 return rv;
13610
13611 for (i = 0; i < wordcnt; i++) {
13612 /* Clear SK and DI. */
13613 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
13614 CSR_WRITE(sc, WMREG_EECD, reg);
13615
13616 /*
13617 * XXX: workaround for a bug in qemu-0.12.x and prior
13618 * and Xen.
13619 *
13620 * We use this workaround only for 82540 because qemu's
13621 * e1000 act as 82540.
13622 */
13623 if (sc->sc_type == WM_T_82540) {
13624 reg |= EECD_SK;
13625 CSR_WRITE(sc, WMREG_EECD, reg);
13626 reg &= ~EECD_SK;
13627 CSR_WRITE(sc, WMREG_EECD, reg);
13628 CSR_WRITE_FLUSH(sc);
13629 delay(2);
13630 }
13631 /* XXX: end of workaround */
13632
13633 /* Set CHIP SELECT. */
13634 reg |= EECD_CS;
13635 CSR_WRITE(sc, WMREG_EECD, reg);
13636 CSR_WRITE_FLUSH(sc);
13637 delay(2);
13638
13639 /* Shift in the READ command. */
13640 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
13641
13642 /* Shift in address. */
13643 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
13644
13645 /* Shift out the data. */
13646 wm_eeprom_recvbits(sc, &val, 16);
13647 data[i] = val & 0xffff;
13648
13649 /* Clear CHIP SELECT. */
13650 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
13651 CSR_WRITE(sc, WMREG_EECD, reg);
13652 CSR_WRITE_FLUSH(sc);
13653 delay(2);
13654 }
13655
13656 sc->nvm.release(sc);
13657 return 0;
13658 }
13659
13660 /* SPI */
13661
13662 /*
13663 * Set SPI and FLASH related information from the EECD register.
13664 * For 82541 and 82547, the word size is taken from EEPROM.
13665 */
13666 static int
13667 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
13668 {
13669 int size;
13670 uint32_t reg;
13671 uint16_t data;
13672
13673 reg = CSR_READ(sc, WMREG_EECD);
13674 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
13675
13676 /* Read the size of NVM from EECD by default */
13677 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13678 switch (sc->sc_type) {
13679 case WM_T_82541:
13680 case WM_T_82541_2:
13681 case WM_T_82547:
13682 case WM_T_82547_2:
13683 /* Set dummy value to access EEPROM */
13684 sc->sc_nvm_wordsize = 64;
13685 if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
13686 aprint_error_dev(sc->sc_dev,
13687 "%s: failed to read EEPROM size\n", __func__);
13688 }
13689 reg = data;
13690 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13691 if (size == 0)
13692 size = 6; /* 64 word size */
13693 else
13694 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
13695 break;
13696 case WM_T_80003:
13697 case WM_T_82571:
13698 case WM_T_82572:
13699 case WM_T_82573: /* SPI case */
13700 case WM_T_82574: /* SPI case */
13701 case WM_T_82583: /* SPI case */
13702 size += NVM_WORD_SIZE_BASE_SHIFT;
13703 if (size > 14)
13704 size = 14;
13705 break;
13706 case WM_T_82575:
13707 case WM_T_82576:
13708 case WM_T_82580:
13709 case WM_T_I350:
13710 case WM_T_I354:
13711 case WM_T_I210:
13712 case WM_T_I211:
13713 size += NVM_WORD_SIZE_BASE_SHIFT;
13714 if (size > 15)
13715 size = 15;
13716 break;
13717 default:
13718 aprint_error_dev(sc->sc_dev,
13719 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
13722 }
13723
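	/* "size" is the log2 of the NVM word count. */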
13724 sc->sc_nvm_wordsize = 1 << size;
13725
13726 return 0;
13727 }
13728
13729 /*
13730 * wm_nvm_ready_spi:
13731 *
13732 * Wait for a SPI EEPROM to be ready for commands.
13733 */
13734 static int
13735 wm_nvm_ready_spi(struct wm_softc *sc)
13736 {
13737 uint32_t val;
13738 int usec;
13739
13740 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13741 device_xname(sc->sc_dev), __func__));
13742
13743 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
13744 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
13745 wm_eeprom_recvbits(sc, &val, 8);
13746 if ((val & SPI_SR_RDY) == 0)
13747 break;
13748 }
13749 if (usec >= SPI_MAX_RETRIES) {
13750 aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
13751 return -1;
13752 }
13753 return 0;
13754 }
13755
13756 /*
13757 * wm_nvm_read_spi:
13758 *
 *	Read a word from the EEPROM using the SPI protocol.
13760 */
13761 static int
13762 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13763 {
13764 uint32_t reg, val;
13765 int i;
13766 uint8_t opc;
13767 int rv;
13768
13769 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13770 device_xname(sc->sc_dev), __func__));
13771
13772 rv = sc->nvm.acquire(sc);
13773 if (rv != 0)
13774 return rv;
13775
13776 /* Clear SK and CS. */
13777 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
13778 CSR_WRITE(sc, WMREG_EECD, reg);
13779 CSR_WRITE_FLUSH(sc);
13780 delay(2);
13781
13782 if ((rv = wm_nvm_ready_spi(sc)) != 0)
13783 goto out;
13784
13785 /* Toggle CS to flush commands. */
13786 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
13787 CSR_WRITE_FLUSH(sc);
13788 delay(2);
13789 CSR_WRITE(sc, WMREG_EECD, reg);
13790 CSR_WRITE_FLUSH(sc);
13791 delay(2);
13792
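	/*
	 * With 8-bit addressing, the ninth address bit (A8) is carried in
	 * the opcode for words at 128 and above.
	 */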
13793 opc = SPI_OPC_READ;
13794 if (sc->sc_nvm_addrbits == 8 && word >= 128)
13795 opc |= SPI_OPC_A8;
13796
13797 wm_eeprom_sendbits(sc, opc, 8);
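	/* The SPI EEPROM is byte addressed, so send the word address doubled. */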
13798 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
13799
13800 for (i = 0; i < wordcnt; i++) {
13801 wm_eeprom_recvbits(sc, &val, 16);
13802 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
13803 }
13804
13805 /* Raise CS and clear SK. */
13806 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
13807 CSR_WRITE(sc, WMREG_EECD, reg);
13808 CSR_WRITE_FLUSH(sc);
13809 delay(2);
13810
13811 out:
13812 sc->nvm.release(sc);
13813 return rv;
13814 }
13815
/* NVM reads using the EERD register */
13817
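/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the DONE bit in the EERD (or EEWR) register.
 *	Returns 0 when the access completes, or -1 on timeout.
 */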
13818 static int
13819 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
13820 {
13821 uint32_t attempts = 100000;
13822 uint32_t i, reg = 0;
13823 int32_t done = -1;
13824
13825 for (i = 0; i < attempts; i++) {
13826 reg = CSR_READ(sc, rw);
13827
13828 if (reg & EERD_DONE) {
13829 done = 0;
13830 break;
13831 }
13832 delay(5);
13833 }
13834
13835 return done;
13836 }
13837
13838 static int
13839 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
13840 {
13841 int i, eerd = 0;
13842 int rv;
13843
13844 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13845 device_xname(sc->sc_dev), __func__));
13846
13847 rv = sc->nvm.acquire(sc);
13848 if (rv != 0)
13849 return rv;
13850
13851 for (i = 0; i < wordcnt; i++) {
13852 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
13853 CSR_WRITE(sc, WMREG_EERD, eerd);
13854 rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
13855 if (rv != 0) {
13856 aprint_error_dev(sc->sc_dev, "EERD polling failed: "
13857 "offset=%d. wordcnt=%d\n", offset, wordcnt);
13858 break;
13859 }
13860 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
13861 }
13862
13863 sc->nvm.release(sc);
13864 return rv;
13865 }
13866
13867 /* Flash */
13868
13869 static int
13870 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
13871 {
13872 uint32_t eecd;
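	/* The bank signature is the high byte of word ICH_NVM_SIG_WORD. */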
13873 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
13874 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
13875 uint32_t nvm_dword = 0;
13876 uint8_t sig_byte = 0;
13877 int rv;
13878
13879 switch (sc->sc_type) {
13880 case WM_T_PCH_SPT:
13881 case WM_T_PCH_CNP:
13882 bank1_offset = sc->sc_ich8_flash_bank_size * 2;
13883 act_offset = ICH_NVM_SIG_WORD * 2;
13884
13885 /* Set bank to 0 in case flash read fails. */
13886 *bank = 0;
13887
13888 /* Check bank 0 */
13889 rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
13890 if (rv != 0)
13891 return rv;
13892 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13893 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13894 *bank = 0;
13895 return 0;
13896 }
13897
		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
13901 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13902 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13903 *bank = 1;
13904 return 0;
13905 }
13906 aprint_error_dev(sc->sc_dev,
13907 "%s: no valid NVM bank present (%u)\n", __func__, *bank);
13908 return -1;
13909 case WM_T_ICH8:
13910 case WM_T_ICH9:
13911 eecd = CSR_READ(sc, WMREG_EECD);
13912 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
13913 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
13914 return 0;
13915 }
13916 /* FALLTHROUGH */
13917 default:
13918 /* Default to 0 */
13919 *bank = 0;
13920
13921 /* Check bank 0 */
13922 wm_read_ich8_byte(sc, act_offset, &sig_byte);
13923 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13924 *bank = 0;
13925 return 0;
13926 }
13927
13928 /* Check bank 1 */
13929 wm_read_ich8_byte(sc, act_offset + bank1_offset,
13930 &sig_byte);
13931 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13932 *bank = 1;
13933 return 0;
13934 }
13935 }
13936
13937 DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
13938 device_xname(sc->sc_dev)));
13939 return -1;
13940 }
13941
13942 /******************************************************************************
13943 * This function does initial flash setup so that a new read/write/erase cycle
13944 * can be started.
13945 *
13946 * sc - The pointer to the hw structure
13947 ****************************************************************************/
13948 static int32_t
13949 wm_ich8_cycle_init(struct wm_softc *sc)
13950 {
13951 uint16_t hsfsts;
13952 int32_t error = 1;
13953 int32_t i = 0;
13954
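	/*
	 * On PCH_SPT and newer, HSFSTS and HSFCTL share a single 32-bit
	 * register: HSFSTS is the low half and only 32-bit access works.
	 */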
13955 if (sc->sc_type >= WM_T_PCH_SPT)
13956 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
13957 else
13958 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13959
	/* Check the Flash Descriptor Valid bit in the HW status register. */
13961 if ((hsfsts & HSFSTS_FLDVAL) == 0)
13962 return error;
13963
	/* Clear FCERR and DAEL in the HW status register by writing 1s. */
13966 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
13967
13968 if (sc->sc_type >= WM_T_PCH_SPT)
13969 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
13970 else
13971 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13972
	/*
	 * To start a new cycle we either need a hardware "SPI cycle in
	 * progress" bit to check, or the FDONE bit must read as 1 after
	 * hardware reset so it can indicate whether a cycle is in progress
	 * or has completed. We should also have a software semaphore
	 * guarding FDONE (or the in-progress bit) so that two threads
	 * cannot start a cycle at the same time.
	 */
13983
13984 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
13985 /*
13986 * There is no cycle running at present, so we can start a
13987 * cycle
13988 */
13989
13990 /* Begin by setting Flash Cycle Done. */
13991 hsfsts |= HSFSTS_DONE;
13992 if (sc->sc_type >= WM_T_PCH_SPT)
13993 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13994 hsfsts & 0xffffUL);
13995 else
13996 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13997 error = 0;
13998 } else {
13999 /*
		 * Otherwise poll for some time so the current cycle has a
14001 * chance to end before giving up.
14002 */
14003 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
14004 if (sc->sc_type >= WM_T_PCH_SPT)
14005 hsfsts = ICH8_FLASH_READ32(sc,
14006 ICH_FLASH_HSFSTS) & 0xffffUL;
14007 else
14008 hsfsts = ICH8_FLASH_READ16(sc,
14009 ICH_FLASH_HSFSTS);
14010 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14011 error = 0;
14012 break;
14013 }
14014 delay(1);
14015 }
14016 if (error == 0) {
14017 /*
			 * The previous cycle completed within the timeout;
			 * now set the Flash Cycle Done bit.
14020 */
14021 hsfsts |= HSFSTS_DONE;
14022 if (sc->sc_type >= WM_T_PCH_SPT)
14023 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14024 hsfsts & 0xffffUL);
14025 else
14026 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
14027 hsfsts);
14028 }
14029 }
14030 return error;
14031 }
14032
14033 /******************************************************************************
14034 * This function starts a flash cycle and waits for its completion
14035 *
14036 * sc - The pointer to the hw structure
14037 ****************************************************************************/
14038 static int32_t
14039 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
14040 {
14041 uint16_t hsflctl;
14042 uint16_t hsfsts;
14043 int32_t error = 1;
14044 uint32_t i = 0;
14045
14046 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
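	/*
	 * On PCH_SPT and newer, HSFCTL is the upper 16 bits of the 32-bit
	 * register at ICH_FLASH_HSFSTS.
	 */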
14047 if (sc->sc_type >= WM_T_PCH_SPT)
14048 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
14049 else
14050 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14051 hsflctl |= HSFCTL_GO;
14052 if (sc->sc_type >= WM_T_PCH_SPT)
14053 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14054 (uint32_t)hsflctl << 16);
14055 else
14056 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14057
14058 /* Wait till FDONE bit is set to 1 */
14059 do {
14060 if (sc->sc_type >= WM_T_PCH_SPT)
14061 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14062 & 0xffffUL;
14063 else
14064 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14065 if (hsfsts & HSFSTS_DONE)
14066 break;
14067 delay(1);
14068 i++;
14069 } while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
14071 error = 0;
14072
14073 return error;
14074 }
14075
14076 /******************************************************************************
14077 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
14078 *
14079 * sc - The pointer to the hw structure
14080 * index - The index of the byte or word to read.
14081 * size - Size of data to read, 1=byte 2=word, 4=dword
14082 * data - Pointer to the word to store the value read.
14083 *****************************************************************************/
14084 static int32_t
14085 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
14086 uint32_t size, uint32_t *data)
14087 {
14088 uint16_t hsfsts;
14089 uint16_t hsflctl;
14090 uint32_t flash_linear_address;
14091 uint32_t flash_data = 0;
14092 int32_t error = 1;
14093 int32_t count = 0;
14094
	if (size < 1 || size > 4 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
14097 return error;
14098
14099 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
14100 sc->sc_ich8_flash_base;
14101
14102 do {
14103 delay(1);
14104 /* Steps */
14105 error = wm_ich8_cycle_init(sc);
14106 if (error)
14107 break;
14108
14109 if (sc->sc_type >= WM_T_PCH_SPT)
14110 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14111 >> 16;
14112 else
14113 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* BCOUNT holds size - 1 (0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes). */
14115 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
14116 & HSFCTL_BCOUNT_MASK;
14117 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
14118 if (sc->sc_type >= WM_T_PCH_SPT) {
14119 /*
			 * On SPT, this register is in LAN memory space, not
			 * flash, so only 32-bit access is supported.
14122 */
14123 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14124 (uint32_t)hsflctl << 16);
14125 } else
14126 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14127
14128 /*
14129 * Write the last 24 bits of index into Flash Linear address
14130 * field in Flash Address
14131 */
		/* TODO: check the index against the size of the flash. */
14133
14134 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
14135
14136 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
14137
14138 /*
		 * If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read the result from Flash
		 * Data0, least significant byte first.
14143 */
14144 if (error == 0) {
14145 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
14146 if (size == 1)
14147 *data = (uint8_t)(flash_data & 0x000000FF);
14148 else if (size == 2)
14149 *data = (uint16_t)(flash_data & 0x0000FFFF);
14150 else if (size == 4)
14151 *data = (uint32_t)flash_data;
14152 break;
14153 } else {
14154 /*
14155 * If we've gotten here, then things are probably
14156 * completely hosed, but if the error condition is
14157 * detected, it won't hurt to give it another try...
14158 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
14159 */
14160 if (sc->sc_type >= WM_T_PCH_SPT)
14161 hsfsts = ICH8_FLASH_READ32(sc,
14162 ICH_FLASH_HSFSTS) & 0xffffUL;
14163 else
14164 hsfsts = ICH8_FLASH_READ16(sc,
14165 ICH_FLASH_HSFSTS);
14166
14167 if (hsfsts & HSFSTS_ERR) {
14168 /* Repeat for some time before giving up. */
14169 continue;
14170 } else if ((hsfsts & HSFSTS_DONE) == 0)
14171 break;
14172 }
14173 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
14174
14175 return error;
14176 }
14177
14178 /******************************************************************************
14179 * Reads a single byte from the NVM using the ICH8 flash access registers.
14180 *
14181 * sc - pointer to wm_hw structure
14182 * index - The index of the byte to read.
14183 * data - Pointer to a byte to store the value read.
14184 *****************************************************************************/
14185 static int32_t
14186 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
14187 {
14188 int32_t status;
14189 uint32_t word = 0;
14190
14191 status = wm_read_ich8_data(sc, index, 1, &word);
14192 if (status == 0)
14193 *data = (uint8_t)word;
14194 else
14195 *data = 0;
14196
14197 return status;
14198 }
14199
14200 /******************************************************************************
14201 * Reads a word from the NVM using the ICH8 flash access registers.
14202 *
14203 * sc - pointer to wm_hw structure
14204 * index - The starting byte index of the word to read.
14205 * data - Pointer to a word to store the value read.
14206 *****************************************************************************/
14207 static int32_t
14208 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
14209 {
14210 int32_t status;
14211 uint32_t word = 0;
14212
14213 status = wm_read_ich8_data(sc, index, 2, &word);
14214 if (status == 0)
14215 *data = (uint16_t)word;
14216 else
14217 *data = 0;
14218
14219 return status;
14220 }
14221
14222 /******************************************************************************
14223 * Reads a dword from the NVM using the ICH8 flash access registers.
14224 *
14225 * sc - pointer to wm_hw structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
14228 *****************************************************************************/
14229 static int32_t
14230 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
14231 {
14232 int32_t status;
14233
14234 status = wm_read_ich8_data(sc, index, 4, data);
14235 return status;
14236 }
14237
14238 /******************************************************************************
14239 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
14240 * register.
14241 *
14242 * sc - Struct containing variables accessed by shared code
14243 * offset - offset of word in the EEPROM to read
14244 * data - word read from the EEPROM
14245 * words - number of words to read
14246 *****************************************************************************/
14247 static int
14248 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
14249 {
14250 int rv;
14251 uint32_t flash_bank = 0;
14252 uint32_t act_offset = 0;
14253 uint32_t bank_offset = 0;
14254 uint16_t word = 0;
14255 uint16_t i = 0;
14256
14257 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14258 device_xname(sc->sc_dev), __func__));
14259
14260 rv = sc->nvm.acquire(sc);
14261 if (rv != 0)
14262 return rv;
14263
14264 /*
14265 * We need to know which is the valid flash bank. In the event
14266 * that we didn't allocate eeprom_shadow_ram, we may not be
14267 * managing flash_bank. So it cannot be trusted and needs
14268 * to be updated with each read.
14269 */
14270 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14271 if (rv) {
14272 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14273 device_xname(sc->sc_dev)));
14274 flash_bank = 0;
14275 }
14276
14277 /*
14278 * Adjust offset appropriately if we're on bank 1 - adjust for word
14279 * size
14280 */
14281 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14282
14283 for (i = 0; i < words; i++) {
14284 /* The NVM part needs a byte offset, hence * 2 */
14285 act_offset = bank_offset + ((offset + i) * 2);
14286 rv = wm_read_ich8_word(sc, act_offset, &word);
14287 if (rv) {
14288 aprint_error_dev(sc->sc_dev,
14289 "%s: failed to read NVM\n", __func__);
14290 break;
14291 }
14292 data[i] = word;
14293 }
14294
14295 sc->nvm.release(sc);
14296 return rv;
14297 }
14298
14299 /******************************************************************************
14300 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
14301 * register.
14302 *
14303 * sc - Struct containing variables accessed by shared code
14304 * offset - offset of word in the EEPROM to read
14305 * data - word read from the EEPROM
14306 * words - number of words to read
14307 *****************************************************************************/
14308 static int
14309 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
14310 {
14311 int rv;
14312 uint32_t flash_bank = 0;
14313 uint32_t act_offset = 0;
14314 uint32_t bank_offset = 0;
14315 uint32_t dword = 0;
14316 uint16_t i = 0;
14317
14318 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14319 device_xname(sc->sc_dev), __func__));
14320
14321 rv = sc->nvm.acquire(sc);
14322 if (rv != 0)
14323 return rv;
14324
14325 /*
14326 * We need to know which is the valid flash bank. In the event
14327 * that we didn't allocate eeprom_shadow_ram, we may not be
14328 * managing flash_bank. So it cannot be trusted and needs
14329 * to be updated with each read.
14330 */
14331 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14332 if (rv) {
14333 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14334 device_xname(sc->sc_dev)));
14335 flash_bank = 0;
14336 }
14337
14338 /*
14339 * Adjust offset appropriately if we're on bank 1 - adjust for word
14340 * size
14341 */
14342 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14343
14344 for (i = 0; i < words; i++) {
14345 /* The NVM part needs a byte offset, hence * 2 */
14346 act_offset = bank_offset + ((offset + i) * 2);
14347 /* but we must read dword aligned, so mask ... */
14348 rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
14349 if (rv) {
14350 aprint_error_dev(sc->sc_dev,
14351 "%s: failed to read NVM\n", __func__);
14352 break;
14353 }
14354 /* ... and pick out low or high word */
14355 if ((act_offset & 0x2) == 0)
14356 data[i] = (uint16_t)(dword & 0xFFFF);
14357 else
14358 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
14359 }
14360
14361 sc->nvm.release(sc);
14362 return rv;
14363 }
14364
14365 /* iNVM */
14366
14367 static int
14368 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
14369 {
	int32_t rv = -1;	/* Fail unless the word is found below. */
14371 uint32_t invm_dword;
14372 uint16_t i;
14373 uint8_t record_type, word_address;
14374
14375 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14376 device_xname(sc->sc_dev), __func__));
14377
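	/*
	 * Walk the iNVM records. Autoload structures carry extra payload
	 * dwords, so skip over them to reach the next record header.
	 */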
14378 for (i = 0; i < INVM_SIZE; i++) {
14379 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
14380 /* Get record type */
14381 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
14382 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
14383 break;
14384 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
14385 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
14386 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
14387 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
14388 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
14389 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
14390 if (word_address == address) {
14391 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
14392 rv = 0;
14393 break;
14394 }
14395 }
14396 }
14397
14398 return rv;
14399 }
14400
14401 static int
14402 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
14403 {
14404 int i, rv;
14405
14406 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14407 device_xname(sc->sc_dev), __func__));
14408
14409 rv = sc->nvm.acquire(sc);
14410 if (rv != 0)
14411 return rv;
14412
14413 for (i = 0; i < words; i++) {
14414 switch (offset + i) {
14415 case NVM_OFF_MACADDR:
14416 case NVM_OFF_MACADDR1:
14417 case NVM_OFF_MACADDR2:
14418 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14419 if (rv != 0) {
14420 data[i] = 0xffff;
14421 rv = -1;
14422 }
14423 break;
14424 case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
14425 rv = wm_nvm_read_word_invm(sc, offset, data);
14426 if (rv != 0) {
14427 *data = INVM_DEFAULT_AL;
14428 rv = 0;
14429 }
14430 break;
14431 case NVM_OFF_CFG2:
14432 rv = wm_nvm_read_word_invm(sc, offset, data);
14433 if (rv != 0) {
14434 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
14435 rv = 0;
14436 }
14437 break;
14438 case NVM_OFF_CFG4:
14439 rv = wm_nvm_read_word_invm(sc, offset, data);
14440 if (rv != 0) {
14441 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
14442 rv = 0;
14443 }
14444 break;
14445 case NVM_OFF_LED_1_CFG:
14446 rv = wm_nvm_read_word_invm(sc, offset, data);
14447 if (rv != 0) {
14448 *data = NVM_LED_1_CFG_DEFAULT_I211;
14449 rv = 0;
14450 }
14451 break;
14452 case NVM_OFF_LED_0_2_CFG:
14453 rv = wm_nvm_read_word_invm(sc, offset, data);
14454 if (rv != 0) {
14455 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
14456 rv = 0;
14457 }
14458 break;
14459 case NVM_OFF_ID_LED_SETTINGS:
14460 rv = wm_nvm_read_word_invm(sc, offset, data);
14461 if (rv != 0) {
14462 *data = ID_LED_RESERVED_FFFF;
14463 rv = 0;
14464 }
14465 break;
14466 default:
14467 DPRINTF(sc, WM_DEBUG_NVM,
14468 ("NVM word 0x%02x is not mapped.\n", offset));
14469 *data = NVM_RESERVED_WORD;
14470 break;
14471 }
14472 }
14473
14474 sc->nvm.release(sc);
14475 return rv;
14476 }
14477
/* Locking, NVM type detection, checksum validation, version and read */
14479
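/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM, or 0 if it is Flash
 *	(possible on 82573/82574/82583 only).
 */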
14480 static int
14481 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
14482 {
14483 uint32_t eecd = 0;
14484
14485 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
14486 || sc->sc_type == WM_T_82583) {
14487 eecd = CSR_READ(sc, WMREG_EECD);
14488
14489 /* Isolate bits 15 & 16 */
14490 eecd = ((eecd >> 15) & 0x03);
14491
14492 /* If both bits are set, device is Flash type */
14493 if (eecd == 0x03)
14494 return 0;
14495 }
14496 return 1;
14497 }
14498
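/*
 * wm_nvm_flash_presence_i210:
 *
 *	Return 1 if an external Flash part is detected (I210/I211), else 0.
 */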
14499 static int
14500 wm_nvm_flash_presence_i210(struct wm_softc *sc)
14501 {
14502 uint32_t eec;
14503
14504 eec = CSR_READ(sc, WMREG_EEC);
14505 if ((eec & EEC_FLASH_DETECTED) != 0)
14506 return 1;
14507
14508 return 0;
14509 }
14510
14511 /*
14512 * wm_nvm_validate_checksum
14513 *
14514 * The checksum is defined as the sum of the first 64 (16 bit) words.
14515 */
14516 static int
14517 wm_nvm_validate_checksum(struct wm_softc *sc)
14518 {
14519 uint16_t checksum;
14520 uint16_t eeprom_data;
14521 #ifdef WM_DEBUG
14522 uint16_t csum_wordaddr, valid_checksum;
14523 #endif
14524 int i;
14525
14526 checksum = 0;
14527
14528 /* Don't check for I211 */
14529 if (sc->sc_type == WM_T_I211)
14530 return 0;
14531
14532 #ifdef WM_DEBUG
14533 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
14534 || (sc->sc_type == WM_T_PCH_CNP)) {
14535 csum_wordaddr = NVM_OFF_COMPAT;
14536 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
14537 } else {
14538 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
14539 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
14540 }
14541
14542 /* Dump EEPROM image for debug */
14543 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
14544 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
14545 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
14546 /* XXX PCH_SPT? */
14547 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
14548 if ((eeprom_data & valid_checksum) == 0)
14549 DPRINTF(sc, WM_DEBUG_NVM,
14550 ("%s: NVM need to be updated (%04x != %04x)\n",
14551 device_xname(sc->sc_dev), eeprom_data,
14552 valid_checksum));
14553 }
14554
14555 if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
14556 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
14557 for (i = 0; i < NVM_SIZE; i++) {
14558 if (wm_nvm_read(sc, i, 1, &eeprom_data))
14559 printf("XXXX ");
14560 else
14561 printf("%04hx ", eeprom_data);
14562 if (i % 8 == 7)
14563 printf("\n");
14564 }
14565 }
14566
14567 #endif /* WM_DEBUG */
14568
14569 for (i = 0; i < NVM_SIZE; i++) {
14570 if (wm_nvm_read(sc, i, 1, &eeprom_data))
14571 return -1;
14572 checksum += eeprom_data;
14573 }
14574
14575 if (checksum != (uint16_t) NVM_CHECKSUM) {
14576 #ifdef WM_DEBUG
14577 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
14578 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
14579 #endif
14580 }
14581
14582 return 0;
14583 }
14584
14585 static void
14586 wm_nvm_version_invm(struct wm_softc *sc)
14587 {
14588 uint32_t dword;
14589
14590 /*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the documentation
	 * describes.
14593 * Perhaps it's not perfect though...
14594 *
14595 * Example:
14596 *
14597 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
14598 */
14599 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
14600 dword = __SHIFTOUT(dword, INVM_VER_1);
14601 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
14602 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
14603 }
14604
14605 static void
14606 wm_nvm_version(struct wm_softc *sc)
14607 {
14608 uint16_t major, minor, build, patch;
14609 uint16_t uid0, uid1;
14610 uint16_t nvm_data;
14611 uint16_t off;
14612 bool check_version = false;
14613 bool check_optionrom = false;
14614 bool have_build = false;
14615 bool have_uid = true;
14616
14617 /*
14618 * Version format:
14619 *
14620 * XYYZ
14621 * X0YZ
14622 * X0YY
14623 *
14624 * Example:
14625 *
14626 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
14627 * 82571 0x50a6 5.10.6?
14628 * 82572 0x506a 5.6.10?
14629 * 82572EI 0x5069 5.6.9?
14630 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
14631 * 0x2013 2.1.3?
14632 * 82583 0x10a0 1.10.0? (document says it's default value)
14633 * ICH8+82567 0x0040 0.4.0?
14634 * ICH9+82566 0x1040 1.4.0?
14635 *ICH10+82567 0x0043 0.4.3?
14636 * PCH+82577 0x00c1 0.12.1?
14637 * PCH2+82579 0x00d3 0.13.3?
14638 * 0x00d4 0.13.4?
14639 * LPT+I218 0x0023 0.2.3?
14640 * SPT+I219 0x0084 0.8.4?
14641 * CNP+I219 0x0054 0.5.4?
14642 */
14643
14644 /*
14645 * XXX
14646 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
14647 * I've never seen real 82574 hardware with such small SPI ROM.
14648 */
14649 if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
14650 || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
14651 have_uid = false;
14652
14653 switch (sc->sc_type) {
14654 case WM_T_82571:
14655 case WM_T_82572:
14656 case WM_T_82574:
14657 case WM_T_82583:
14658 check_version = true;
14659 check_optionrom = true;
14660 have_build = true;
14661 break;
14662 case WM_T_ICH8:
14663 case WM_T_ICH9:
14664 case WM_T_ICH10:
14665 case WM_T_PCH:
14666 case WM_T_PCH2:
14667 case WM_T_PCH_LPT:
14668 case WM_T_PCH_SPT:
14669 case WM_T_PCH_CNP:
14670 check_version = true;
14671 have_build = true;
14672 have_uid = false;
14673 break;
14674 case WM_T_82575:
14675 case WM_T_82576:
14676 case WM_T_82580:
14677 if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
14678 check_version = true;
14679 break;
14680 case WM_T_I211:
14681 wm_nvm_version_invm(sc);
14682 have_uid = false;
14683 goto printver;
14684 case WM_T_I210:
14685 if (!wm_nvm_flash_presence_i210(sc)) {
14686 wm_nvm_version_invm(sc);
14687 have_uid = false;
14688 goto printver;
14689 }
14690 /* FALLTHROUGH */
14691 case WM_T_I350:
14692 case WM_T_I354:
14693 check_version = true;
14694 check_optionrom = true;
14695 break;
14696 default:
14697 return;
14698 }
14699 if (check_version
14700 && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
14701 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
14702 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
14703 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
14704 build = nvm_data & NVM_BUILD_MASK;
14705 have_build = true;
14706 } else
14707 minor = nvm_data & 0x00ff;
14708
14709 		/* Convert the hex-coded minor to decimal (e.g. 0x10 -> 10) */
14710 minor = (minor / 16) * 10 + (minor % 16);
14711 sc->sc_nvm_ver_major = major;
14712 sc->sc_nvm_ver_minor = minor;
14713
14714 printver:
14715 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
14716 sc->sc_nvm_ver_minor);
14717 if (have_build) {
14718 sc->sc_nvm_ver_build = build;
14719 aprint_verbose(".%d", build);
14720 }
14721 }
14722
14723 	/* Assume the Option ROM area is above NVM_SIZE */
14724 if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
14725 && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
14726 /* Option ROM Version */
14727 if ((off != 0x0000) && (off != 0xffff)) {
14728 int rv;
14729
14730 off += NVM_COMBO_VER_OFF;
14731 rv = wm_nvm_read(sc, off + 1, 1, &uid1);
14732 rv |= wm_nvm_read(sc, off, 1, &uid0);
14733 if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
14734 && (uid1 != 0) && (uid1 != 0xffff)) {
14735 /* 16bits */
14736 major = uid0 >> 8;
14737 build = (uid0 << 8) | (uid1 >> 8);
14738 patch = uid1 & 0x00ff;
14739 aprint_verbose(", option ROM Version %d.%d.%d",
14740 major, build, patch);
14741 }
14742 }
14743 }
14744
14745 if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
14746 aprint_verbose(", Image Unique ID %08x",
14747 ((uint32_t)uid1 << 16) | uid0);
14748 }
14749
14750 /*
14751 * wm_nvm_read:
14752 *
14753 * Read data from the serial EEPROM.
14754 */
14755 static int
14756 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14757 {
14758 int rv;
14759
14760 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14761 device_xname(sc->sc_dev), __func__));
14762
14763 if (sc->sc_flags & WM_F_EEPROM_INVALID)
14764 return -1;
14765
14766 rv = sc->nvm.read(sc, word, wordcnt, data);
14767
14768 return rv;
14769 }
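
/*
 * Usage sketch for wm_nvm_read() (illustrative only):
 *
 *	uint16_t val;
 *
 *	if (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &val) == 0)
 *		(success: "val" holds the 16-bit word at NVM_OFF_VERSION)
 *
 * A non-zero return means the EEPROM was marked invalid at attach time
 * or the chip-specific read method failed.
 */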
14770
14771 /*
14772 * Hardware semaphores.
14773  * Very complex...
14774 */
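
/*
 * The functions below all follow the same pattern: a "get" routine
 * acquires whatever combination of software, firmware and hardware
 * locks the chip needs, and the matching "put" routine releases them
 * again. An illustrative caller (a sketch, using the acquire/release
 * methods installed at attach time):
 *
 *	if (sc->phy.acquire(sc) == 0) {
 *		(... access the PHY ...)
 *		sc->phy.release(sc);
 *	}
 */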
14775
14776 static int
14777 wm_get_null(struct wm_softc *sc)
14778 {
14779
14780 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14781 device_xname(sc->sc_dev), __func__));
14782 return 0;
14783 }
14784
14785 static void
14786 wm_put_null(struct wm_softc *sc)
14787 {
14788
14789 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14790 device_xname(sc->sc_dev), __func__));
14791 return;
14792 }
14793
14794 static int
14795 wm_get_eecd(struct wm_softc *sc)
14796 {
14797 uint32_t reg;
14798 int x;
14799
14800 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14801 device_xname(sc->sc_dev), __func__));
14802
14803 reg = CSR_READ(sc, WMREG_EECD);
14804
14805 /* Request EEPROM access. */
14806 reg |= EECD_EE_REQ;
14807 CSR_WRITE(sc, WMREG_EECD, reg);
14808
14809 /* ..and wait for it to be granted. */
14810 for (x = 0; x < 1000; x++) {
14811 reg = CSR_READ(sc, WMREG_EECD);
14812 if (reg & EECD_EE_GNT)
14813 break;
14814 delay(5);
14815 }
14816 if ((reg & EECD_EE_GNT) == 0) {
14817 aprint_error_dev(sc->sc_dev,
14818 "could not acquire EEPROM GNT\n");
14819 reg &= ~EECD_EE_REQ;
14820 CSR_WRITE(sc, WMREG_EECD, reg);
14821 return -1;
14822 }
14823
14824 return 0;
14825 }
14826
14827 static void
14828 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
14829 {
14830
14831 *eecd |= EECD_SK;
14832 CSR_WRITE(sc, WMREG_EECD, *eecd);
14833 CSR_WRITE_FLUSH(sc);
14834 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14835 delay(1);
14836 else
14837 delay(50);
14838 }
14839
14840 static void
14841 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
14842 {
14843
14844 *eecd &= ~EECD_SK;
14845 CSR_WRITE(sc, WMREG_EECD, *eecd);
14846 CSR_WRITE_FLUSH(sc);
14847 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14848 delay(1);
14849 else
14850 delay(50);
14851 }
14852
14853 static void
14854 wm_put_eecd(struct wm_softc *sc)
14855 {
14856 uint32_t reg;
14857
14858 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14859 device_xname(sc->sc_dev), __func__));
14860
14861 /* Stop nvm */
14862 reg = CSR_READ(sc, WMREG_EECD);
14863 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
14864 /* Pull CS high */
14865 reg |= EECD_CS;
14866 		wm_nvm_eec_clock_lower(sc, &reg);
14867 } else {
14868 /* CS on Microwire is active-high */
14869 reg &= ~(EECD_CS | EECD_DI);
14870 CSR_WRITE(sc, WMREG_EECD, reg);
14871 		wm_nvm_eec_clock_raise(sc, &reg);
14872 		wm_nvm_eec_clock_lower(sc, &reg);
14873 }
14874
14875 reg = CSR_READ(sc, WMREG_EECD);
14876 reg &= ~EECD_EE_REQ;
14877 CSR_WRITE(sc, WMREG_EECD, reg);
14878
14879 return;
14880 }
14881
14882 /*
14883 * Get hardware semaphore.
14884 * Same as e1000_get_hw_semaphore_generic()
14885 */
14886 static int
14887 wm_get_swsm_semaphore(struct wm_softc *sc)
14888 {
14889 int32_t timeout;
14890 uint32_t swsm;
14891
14892 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14893 device_xname(sc->sc_dev), __func__));
14894 KASSERT(sc->sc_nvm_wordsize > 0);
14895
14896 retry:
14897 /* Get the SW semaphore. */
14898 timeout = sc->sc_nvm_wordsize + 1;
14899 while (timeout) {
14900 swsm = CSR_READ(sc, WMREG_SWSM);
14901
14902 if ((swsm & SWSM_SMBI) == 0)
14903 break;
14904
14905 delay(50);
14906 timeout--;
14907 }
14908
14909 if (timeout == 0) {
14910 if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
14911 /*
14912 * In rare circumstances, the SW semaphore may already
14913 * be held unintentionally. Clear the semaphore once
14914 * before giving up.
14915 */
14916 sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
14917 wm_put_swsm_semaphore(sc);
14918 goto retry;
14919 }
14920 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
14921 return -1;
14922 }
14923
14924 /* Get the FW semaphore. */
14925 timeout = sc->sc_nvm_wordsize + 1;
14926 while (timeout) {
14927 swsm = CSR_READ(sc, WMREG_SWSM);
14928 swsm |= SWSM_SWESMBI;
14929 CSR_WRITE(sc, WMREG_SWSM, swsm);
14930 /* If we managed to set the bit we got the semaphore. */
14931 swsm = CSR_READ(sc, WMREG_SWSM);
14932 if (swsm & SWSM_SWESMBI)
14933 break;
14934
14935 delay(50);
14936 timeout--;
14937 }
14938
14939 if (timeout == 0) {
14940 aprint_error_dev(sc->sc_dev,
14941 "could not acquire SWSM SWESMBI\n");
14942 /* Release semaphores */
14943 wm_put_swsm_semaphore(sc);
14944 return -1;
14945 }
14946 return 0;
14947 }
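
/*
 * In outline, the SWSM handshake above is (a summary of the code, not
 * new logic):
 *
 *	1. Poll SWSM.SMBI until hardware clears it; we then own the
 *	   software semaphore.
 *	2. Set SWSM.SWESMBI and read it back; if the bit stuck, the
 *	   software/firmware semaphore is ours too.
 *
 * wm_put_swsm_semaphore() releases both by clearing the two bits.
 */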
14948
14949 /*
14950 * Put hardware semaphore.
14951 * Same as e1000_put_hw_semaphore_generic()
14952 */
14953 static void
14954 wm_put_swsm_semaphore(struct wm_softc *sc)
14955 {
14956 uint32_t swsm;
14957
14958 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14959 device_xname(sc->sc_dev), __func__));
14960
14961 swsm = CSR_READ(sc, WMREG_SWSM);
14962 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
14963 CSR_WRITE(sc, WMREG_SWSM, swsm);
14964 }
14965
14966 /*
14967 * Get SW/FW semaphore.
14968 * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
14969 */
14970 static int
14971 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
14972 {
14973 uint32_t swfw_sync;
14974 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
14975 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
14976 int timeout;
14977
14978 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14979 device_xname(sc->sc_dev), __func__));
14980
14981 if (sc->sc_type == WM_T_80003)
14982 timeout = 50;
14983 else
14984 timeout = 200;
14985
14986 while (timeout) {
14987 if (wm_get_swsm_semaphore(sc)) {
14988 aprint_error_dev(sc->sc_dev,
14989 "%s: failed to get semaphore\n",
14990 __func__);
14991 return -1;
14992 }
14993 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
14994 if ((swfw_sync & (swmask | fwmask)) == 0) {
14995 swfw_sync |= swmask;
14996 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
14997 wm_put_swsm_semaphore(sc);
14998 return 0;
14999 }
15000 wm_put_swsm_semaphore(sc);
15001 delay(5000);
15002 timeout--;
15003 }
15004 device_printf(sc->sc_dev,
15005 "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
15006 mask, swfw_sync);
15007 return -1;
15008 }
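
/*
 * Layout note (illustrative): SW_FW_SYNC keeps one bit per resource
 * for the driver and a mirrored bit for the firmware, so for a given
 * resource mask:
 *
 *	swmask = mask << SWFW_SOFT_SHIFT;	(our claim bit)
 *	fwmask = mask << SWFW_FIRM_SHIFT;	(firmware's claim bit)
 *
 * and the resource is free only when both bits are clear, which is
 * exactly what the loop above tests before setting swmask.
 */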
15009
15010 static void
15011 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15012 {
15013 uint32_t swfw_sync;
15014
15015 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15016 device_xname(sc->sc_dev), __func__));
15017
15018 while (wm_get_swsm_semaphore(sc) != 0)
15019 continue;
15020
15021 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15022 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
15023 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15024
15025 wm_put_swsm_semaphore(sc);
15026 }
15027
15028 static int
15029 wm_get_nvm_80003(struct wm_softc *sc)
15030 {
15031 int rv;
15032
15033 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
15034 device_xname(sc->sc_dev), __func__));
15035
15036 if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
15037 aprint_error_dev(sc->sc_dev,
15038 "%s: failed to get semaphore(SWFW)\n", __func__);
15039 return rv;
15040 }
15041
15042 if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15043 && (rv = wm_get_eecd(sc)) != 0) {
15044 aprint_error_dev(sc->sc_dev,
15045 "%s: failed to get semaphore(EECD)\n", __func__);
15046 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15047 return rv;
15048 }
15049
15050 return 0;
15051 }
15052
15053 static void
15054 wm_put_nvm_80003(struct wm_softc *sc)
15055 {
15056
15057 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15058 device_xname(sc->sc_dev), __func__));
15059
15060 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15061 wm_put_eecd(sc);
15062 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15063 }
15064
15065 static int
15066 wm_get_nvm_82571(struct wm_softc *sc)
15067 {
15068 int rv;
15069
15070 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15071 device_xname(sc->sc_dev), __func__));
15072
15073 if ((rv = wm_get_swsm_semaphore(sc)) != 0)
15074 return rv;
15075
15076 switch (sc->sc_type) {
15077 case WM_T_82573:
15078 break;
15079 default:
15080 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15081 rv = wm_get_eecd(sc);
15082 break;
15083 }
15084
15085 if (rv != 0) {
15086 aprint_error_dev(sc->sc_dev,
15087 "%s: failed to get semaphore\n",
15088 __func__);
15089 wm_put_swsm_semaphore(sc);
15090 }
15091
15092 return rv;
15093 }
15094
15095 static void
15096 wm_put_nvm_82571(struct wm_softc *sc)
15097 {
15098
15099 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15100 device_xname(sc->sc_dev), __func__));
15101
15102 switch (sc->sc_type) {
15103 case WM_T_82573:
15104 break;
15105 default:
15106 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15107 wm_put_eecd(sc);
15108 break;
15109 }
15110
15111 wm_put_swsm_semaphore(sc);
15112 }
15113
15114 static int
15115 wm_get_phy_82575(struct wm_softc *sc)
15116 {
15117
15118 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15119 device_xname(sc->sc_dev), __func__));
15120 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15121 }
15122
15123 static void
15124 wm_put_phy_82575(struct wm_softc *sc)
15125 {
15126
15127 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15128 device_xname(sc->sc_dev), __func__));
15129 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15130 }
15131
15132 static int
15133 wm_get_swfwhw_semaphore(struct wm_softc *sc)
15134 {
15135 uint32_t ext_ctrl;
15136 	int timeout;
15137
15138 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15139 device_xname(sc->sc_dev), __func__));
15140
15141 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15142 for (timeout = 0; timeout < 200; timeout++) {
15143 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15144 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15145 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15146
15147 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15148 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15149 return 0;
15150 delay(5000);
15151 }
15152 device_printf(sc->sc_dev,
15153 "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
15154 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15155 return -1;
15156 }
15157
15158 static void
15159 wm_put_swfwhw_semaphore(struct wm_softc *sc)
15160 {
15161 uint32_t ext_ctrl;
15162
15163 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15164 device_xname(sc->sc_dev), __func__));
15165
15166 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15167 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15168 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15169
15170 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15171 }
15172
15173 static int
15174 wm_get_swflag_ich8lan(struct wm_softc *sc)
15175 {
15176 uint32_t ext_ctrl;
15177 int timeout;
15178
15179 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15180 device_xname(sc->sc_dev), __func__));
15181 mutex_enter(sc->sc_ich_phymtx);
15182 for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
15183 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15184 if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
15185 break;
15186 delay(1000);
15187 }
15188 if (timeout >= WM_PHY_CFG_TIMEOUT) {
15189 device_printf(sc->sc_dev,
15190 "SW has already locked the resource\n");
15191 goto out;
15192 }
15193
15194 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15195 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15196 for (timeout = 0; timeout < 1000; timeout++) {
15197 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15198 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15199 break;
15200 delay(1000);
15201 }
15202 if (timeout >= 1000) {
15203 device_printf(sc->sc_dev, "failed to acquire semaphore\n");
15204 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15205 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15206 goto out;
15207 }
15208 return 0;
15209
15210 out:
15211 mutex_exit(sc->sc_ich_phymtx);
15212 return -1;
15213 }
15214
15215 static void
15216 wm_put_swflag_ich8lan(struct wm_softc *sc)
15217 {
15218 uint32_t ext_ctrl;
15219
15220 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15221 device_xname(sc->sc_dev), __func__));
15222 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15223 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
15224 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15225 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15226 } else
15227 device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
15228
15229 mutex_exit(sc->sc_ich_phymtx);
15230 }
15231
15232 static int
15233 wm_get_nvm_ich8lan(struct wm_softc *sc)
15234 {
15235
15236 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15237 device_xname(sc->sc_dev), __func__));
15238 mutex_enter(sc->sc_ich_nvmmtx);
15239
15240 return 0;
15241 }
15242
15243 static void
15244 wm_put_nvm_ich8lan(struct wm_softc *sc)
15245 {
15246
15247 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15248 device_xname(sc->sc_dev), __func__));
15249 mutex_exit(sc->sc_ich_nvmmtx);
15250 }
15251
15252 static int
15253 wm_get_hw_semaphore_82573(struct wm_softc *sc)
15254 {
15255 int i = 0;
15256 uint32_t reg;
15257
15258 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15259 device_xname(sc->sc_dev), __func__));
15260
15261 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15262 do {
15263 CSR_WRITE(sc, WMREG_EXTCNFCTR,
15264 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
15265 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15266 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
15267 break;
15268 delay(2*1000);
15269 i++;
15270 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
15271
15272 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
15273 wm_put_hw_semaphore_82573(sc);
15274 log(LOG_ERR, "%s: Driver can't access the PHY\n",
15275 device_xname(sc->sc_dev));
15276 return -1;
15277 }
15278
15279 return 0;
15280 }
15281
15282 static void
15283 wm_put_hw_semaphore_82573(struct wm_softc *sc)
15284 {
15285 uint32_t reg;
15286
15287 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15288 device_xname(sc->sc_dev), __func__));
15289
15290 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15291 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15292 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15293 }
15294
15295 /*
15296 * Management mode and power management related subroutines.
15297 * BMC, AMT, suspend/resume and EEE.
15298 */
15299
15300 #ifdef WM_WOL
15301 static int
15302 wm_check_mng_mode(struct wm_softc *sc)
15303 {
15304 int rv;
15305
15306 switch (sc->sc_type) {
15307 case WM_T_ICH8:
15308 case WM_T_ICH9:
15309 case WM_T_ICH10:
15310 case WM_T_PCH:
15311 case WM_T_PCH2:
15312 case WM_T_PCH_LPT:
15313 case WM_T_PCH_SPT:
15314 case WM_T_PCH_CNP:
15315 rv = wm_check_mng_mode_ich8lan(sc);
15316 break;
15317 case WM_T_82574:
15318 case WM_T_82583:
15319 rv = wm_check_mng_mode_82574(sc);
15320 break;
15321 case WM_T_82571:
15322 case WM_T_82572:
15323 case WM_T_82573:
15324 case WM_T_80003:
15325 rv = wm_check_mng_mode_generic(sc);
15326 break;
15327 default:
15328 		/* Nothing to do */
15329 rv = 0;
15330 break;
15331 }
15332
15333 return rv;
15334 }
15335
15336 static int
15337 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
15338 {
15339 uint32_t fwsm;
15340
15341 fwsm = CSR_READ(sc, WMREG_FWSM);
15342
15343 if (((fwsm & FWSM_FW_VALID) != 0)
15344 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15345 return 1;
15346
15347 return 0;
15348 }
15349
15350 static int
15351 wm_check_mng_mode_82574(struct wm_softc *sc)
15352 {
15353 uint16_t data;
15354
15355 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15356
15357 if ((data & NVM_CFG2_MNGM_MASK) != 0)
15358 return 1;
15359
15360 return 0;
15361 }
15362
15363 static int
15364 wm_check_mng_mode_generic(struct wm_softc *sc)
15365 {
15366 uint32_t fwsm;
15367
15368 fwsm = CSR_READ(sc, WMREG_FWSM);
15369
15370 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
15371 return 1;
15372
15373 return 0;
15374 }
15375 #endif /* WM_WOL */
15376
15377 static int
15378 wm_enable_mng_pass_thru(struct wm_softc *sc)
15379 {
15380 uint32_t manc, fwsm, factps;
15381
15382 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
15383 return 0;
15384
15385 manc = CSR_READ(sc, WMREG_MANC);
15386
15387 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
15388 device_xname(sc->sc_dev), manc));
15389 if ((manc & MANC_RECV_TCO_EN) == 0)
15390 return 0;
15391
15392 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
15393 fwsm = CSR_READ(sc, WMREG_FWSM);
15394 factps = CSR_READ(sc, WMREG_FACTPS);
15395 if (((factps & FACTPS_MNGCG) == 0)
15396 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15397 return 1;
15398 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
15399 uint16_t data;
15400
15401 factps = CSR_READ(sc, WMREG_FACTPS);
15402 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15403 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
15404 device_xname(sc->sc_dev), factps, data));
15405 if (((factps & FACTPS_MNGCG) == 0)
15406 && ((data & NVM_CFG2_MNGM_MASK)
15407 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
15408 return 1;
15409 } else if (((manc & MANC_SMBUS_EN) != 0)
15410 && ((manc & MANC_ASF_EN) == 0))
15411 return 1;
15412
15413 return 0;
15414 }
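
/*
 * Summary of the checks above (derived from the code, for reference):
 * pass-through is reported only when ASF firmware is present, TCO
 * receive is enabled, and one of the following holds:
 *
 *	- ARC subsystem valid: FACTPS.MNGCG is clear and the FWSM mode
 *	  is ICH iAMT;
 *	- 82574/82583: FACTPS.MNGCG is clear and NVM CFG2 selects
 *	  pass-through management mode;
 *	- otherwise: MANC has SMBUS_EN set and ASF_EN clear.
 */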
15415
15416 static bool
15417 wm_phy_resetisblocked(struct wm_softc *sc)
15418 {
15419 bool blocked = false;
15420 uint32_t reg;
15421 int i = 0;
15422
15423 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15424 device_xname(sc->sc_dev), __func__));
15425
15426 switch (sc->sc_type) {
15427 case WM_T_ICH8:
15428 case WM_T_ICH9:
15429 case WM_T_ICH10:
15430 case WM_T_PCH:
15431 case WM_T_PCH2:
15432 case WM_T_PCH_LPT:
15433 case WM_T_PCH_SPT:
15434 case WM_T_PCH_CNP:
15435 do {
15436 reg = CSR_READ(sc, WMREG_FWSM);
15437 if ((reg & FWSM_RSPCIPHY) == 0) {
15438 blocked = true;
15439 delay(10*1000);
15440 continue;
15441 }
15442 blocked = false;
15443 } while (blocked && (i++ < 30));
15444 return blocked;
15446 case WM_T_82571:
15447 case WM_T_82572:
15448 case WM_T_82573:
15449 case WM_T_82574:
15450 case WM_T_82583:
15451 case WM_T_80003:
15452 reg = CSR_READ(sc, WMREG_MANC);
15453 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
15454 return true;
15455 else
15456 return false;
15458 default:
15459 /* No problem */
15460 break;
15461 }
15462
15463 return false;
15464 }
15465
15466 static void
15467 wm_get_hw_control(struct wm_softc *sc)
15468 {
15469 uint32_t reg;
15470
15471 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15472 device_xname(sc->sc_dev), __func__));
15473
15474 if (sc->sc_type == WM_T_82573) {
15475 reg = CSR_READ(sc, WMREG_SWSM);
15476 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
15477 } else if (sc->sc_type >= WM_T_82571) {
15478 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15479 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
15480 }
15481 }
15482
15483 static void
15484 wm_release_hw_control(struct wm_softc *sc)
15485 {
15486 uint32_t reg;
15487
15488 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15489 device_xname(sc->sc_dev), __func__));
15490
15491 if (sc->sc_type == WM_T_82573) {
15492 reg = CSR_READ(sc, WMREG_SWSM);
15493 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
15494 } else if (sc->sc_type >= WM_T_82571) {
15495 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15496 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
15497 }
15498 }
15499
15500 static void
15501 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
15502 {
15503 uint32_t reg;
15504
15505 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15506 device_xname(sc->sc_dev), __func__));
15507
15508 if (sc->sc_type < WM_T_PCH2)
15509 return;
15510
15511 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15512
15513 if (gate)
15514 reg |= EXTCNFCTR_GATE_PHY_CFG;
15515 else
15516 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
15517
15518 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15519 }
15520
15521 static int
15522 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
15523 {
15524 uint32_t fwsm, reg;
15525 int rv;
15526
15527 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15528 device_xname(sc->sc_dev), __func__));
15529
15530 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
15531 wm_gate_hw_phy_config_ich8lan(sc, true);
15532
15533 /* Disable ULP */
15534 wm_ulp_disable(sc);
15535
15536 /* Acquire PHY semaphore */
15537 rv = sc->phy.acquire(sc);
15538 if (rv != 0) {
15539 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
15540 device_xname(sc->sc_dev), __func__));
15541 return rv;
15542 }
15543
15544 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
15545 * inaccessible and resetting the PHY is not blocked, toggle the
15546 * LANPHYPC Value bit to force the interconnect to PCIe mode.
15547 */
15548 fwsm = CSR_READ(sc, WMREG_FWSM);
15549 switch (sc->sc_type) {
15550 case WM_T_PCH_LPT:
15551 case WM_T_PCH_SPT:
15552 case WM_T_PCH_CNP:
15553 if (wm_phy_is_accessible_pchlan(sc))
15554 break;
15555
15556 /* Before toggling LANPHYPC, see if PHY is accessible by
15557 * forcing MAC to SMBus mode first.
15558 */
15559 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15560 reg |= CTRL_EXT_FORCE_SMBUS;
15561 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15562 #if 0
15563 /* XXX Isn't this required??? */
15564 CSR_WRITE_FLUSH(sc);
15565 #endif
15566 /* Wait 50 milliseconds for MAC to finish any retries
15567 * that it might be trying to perform from previous
15568 * attempts to acknowledge any phy read requests.
15569 */
15570 delay(50 * 1000);
15571 /* FALLTHROUGH */
15572 case WM_T_PCH2:
15573 if (wm_phy_is_accessible_pchlan(sc) == true)
15574 break;
15575 /* FALLTHROUGH */
15576 case WM_T_PCH:
15577 if (sc->sc_type == WM_T_PCH)
15578 if ((fwsm & FWSM_FW_VALID) != 0)
15579 break;
15580
15581 if (wm_phy_resetisblocked(sc) == true) {
15582 device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
15583 break;
15584 }
15585
15586 /* Toggle LANPHYPC Value bit */
15587 wm_toggle_lanphypc_pch_lpt(sc);
15588
15589 if (sc->sc_type >= WM_T_PCH_LPT) {
15590 if (wm_phy_is_accessible_pchlan(sc) == true)
15591 break;
15592
15593 /* Toggling LANPHYPC brings the PHY out of SMBus mode
15594 * so ensure that the MAC is also out of SMBus mode
15595 */
15596 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15597 reg &= ~CTRL_EXT_FORCE_SMBUS;
15598 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15599
15600 if (wm_phy_is_accessible_pchlan(sc) == true)
15601 break;
15602 rv = -1;
15603 }
15604 break;
15605 default:
15606 break;
15607 }
15608
15609 /* Release semaphore */
15610 sc->phy.release(sc);
15611
15612 if (rv == 0) {
15613 /* Check to see if able to reset PHY. Print error if not */
15614 if (wm_phy_resetisblocked(sc)) {
15615 device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
15616 goto out;
15617 }
15618
15619 		/* Reset the PHY before any access to it. Doing so ensures
15620 * that the PHY is in a known good state before we read/write
15621 * PHY registers. The generic reset is sufficient here,
15622 * because we haven't determined the PHY type yet.
15623 */
15624 if (wm_reset_phy(sc) != 0)
15625 goto out;
15626
15627 /* On a successful reset, possibly need to wait for the PHY
15628 * to quiesce to an accessible state before returning control
15629 * to the calling function. If the PHY does not quiesce, then
15630 * return E1000E_BLK_PHY_RESET, as this is the condition that
15631 * the PHY is in.
15632 */
15633 if (wm_phy_resetisblocked(sc))
15634 device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
15635 }
15636
15637 out:
15638 /* Ungate automatic PHY configuration on non-managed 82579 */
15639 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
15640 delay(10*1000);
15641 wm_gate_hw_phy_config_ich8lan(sc, false);
15642 }
15643
15644 return 0;
15645 }
15646
15647 static void
15648 wm_init_manageability(struct wm_softc *sc)
15649 {
15650
15651 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15652 device_xname(sc->sc_dev), __func__));
15653 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15654 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
15655 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15656
15657 /* Disable hardware interception of ARP */
15658 manc &= ~MANC_ARP_EN;
15659
15660 /* Enable receiving management packets to the host */
15661 if (sc->sc_type >= WM_T_82571) {
15662 manc |= MANC_EN_MNG2HOST;
15663 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
15664 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
15665 }
15666
15667 CSR_WRITE(sc, WMREG_MANC, manc);
15668 }
15669 }
15670
15671 static void
15672 wm_release_manageability(struct wm_softc *sc)
15673 {
15674
15675 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15676 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15677
15678 manc |= MANC_ARP_EN;
15679 if (sc->sc_type >= WM_T_82571)
15680 manc &= ~MANC_EN_MNG2HOST;
15681
15682 CSR_WRITE(sc, WMREG_MANC, manc);
15683 }
15684 }
15685
15686 static void
15687 wm_get_wakeup(struct wm_softc *sc)
15688 {
15689
15690 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
15691 switch (sc->sc_type) {
15692 case WM_T_82573:
15693 case WM_T_82583:
15694 sc->sc_flags |= WM_F_HAS_AMT;
15695 /* FALLTHROUGH */
15696 case WM_T_80003:
15697 case WM_T_82575:
15698 case WM_T_82576:
15699 case WM_T_82580:
15700 case WM_T_I350:
15701 case WM_T_I354:
15702 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
15703 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
15704 /* FALLTHROUGH */
15705 case WM_T_82541:
15706 case WM_T_82541_2:
15707 case WM_T_82547:
15708 case WM_T_82547_2:
15709 case WM_T_82571:
15710 case WM_T_82572:
15711 case WM_T_82574:
15712 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15713 break;
15714 case WM_T_ICH8:
15715 case WM_T_ICH9:
15716 case WM_T_ICH10:
15717 case WM_T_PCH:
15718 case WM_T_PCH2:
15719 case WM_T_PCH_LPT:
15720 case WM_T_PCH_SPT:
15721 case WM_T_PCH_CNP:
15722 sc->sc_flags |= WM_F_HAS_AMT;
15723 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15724 break;
15725 default:
15726 break;
15727 }
15728
15729 /* 1: HAS_MANAGE */
15730 if (wm_enable_mng_pass_thru(sc) != 0)
15731 sc->sc_flags |= WM_F_HAS_MANAGE;
15732
15733 /*
15734 	 * Note that the WOL flags are set after the EEPROM-related reset
15735 	 * is done.
15736 */
15737 }
15738
15739 /*
15740 * Unconfigure Ultra Low Power mode.
15741 * Only for I217 and newer (see below).
15742 */
15743 static int
15744 wm_ulp_disable(struct wm_softc *sc)
15745 {
15746 uint32_t reg;
15747 uint16_t phyreg;
15748 int i = 0, rv;
15749
15750 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15751 device_xname(sc->sc_dev), __func__));
15752 /* Exclude old devices */
15753 if ((sc->sc_type < WM_T_PCH_LPT)
15754 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
15755 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
15756 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
15757 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
15758 return 0;
15759
15760 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
15761 /* Request ME un-configure ULP mode in the PHY */
15762 reg = CSR_READ(sc, WMREG_H2ME);
15763 reg &= ~H2ME_ULP;
15764 reg |= H2ME_ENFORCE_SETTINGS;
15765 CSR_WRITE(sc, WMREG_H2ME, reg);
15766
15767 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
15768 while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
15769 if (i++ == 30) {
15770 device_printf(sc->sc_dev, "%s timed out\n",
15771 __func__);
15772 return -1;
15773 }
15774 delay(10 * 1000);
15775 }
15776 reg = CSR_READ(sc, WMREG_H2ME);
15777 reg &= ~H2ME_ENFORCE_SETTINGS;
15778 CSR_WRITE(sc, WMREG_H2ME, reg);
15779
15780 return 0;
15781 }
15782
15783 /* Acquire semaphore */
15784 rv = sc->phy.acquire(sc);
15785 if (rv != 0) {
15786 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
15787 device_xname(sc->sc_dev), __func__));
15788 return rv;
15789 }
15790
15791 /* Toggle LANPHYPC */
15792 wm_toggle_lanphypc_pch_lpt(sc);
15793
15794 /* Unforce SMBus mode in PHY */
15795 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
15796 if (rv != 0) {
15797 uint32_t reg2;
15798
15799 aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
15800 __func__);
15801 reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
15802 reg2 |= CTRL_EXT_FORCE_SMBUS;
15803 CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
15804 delay(50 * 1000);
15805
15806 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
15807 &phyreg);
15808 if (rv != 0)
15809 goto release;
15810 }
15811 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
15812 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
15813
15814 /* Unforce SMBus mode in MAC */
15815 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15816 reg &= ~CTRL_EXT_FORCE_SMBUS;
15817 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15818
15819 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
15820 if (rv != 0)
15821 goto release;
15822 phyreg |= HV_PM_CTRL_K1_ENA;
15823 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
15824
15825 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
15826 &phyreg);
15827 if (rv != 0)
15828 goto release;
15829 phyreg &= ~(I218_ULP_CONFIG1_IND
15830 | I218_ULP_CONFIG1_STICKY_ULP
15831 | I218_ULP_CONFIG1_RESET_TO_SMBUS
15832 | I218_ULP_CONFIG1_WOL_HOST
15833 | I218_ULP_CONFIG1_INBAND_EXIT
15834 | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
15835 | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
15836 | I218_ULP_CONFIG1_DIS_SMB_PERST);
15837 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15838 phyreg |= I218_ULP_CONFIG1_START;
15839 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15840
15841 reg = CSR_READ(sc, WMREG_FEXTNVM7);
15842 reg &= ~FEXTNVM7_DIS_SMB_PERST;
15843 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
15844
15845 release:
15846 /* Release semaphore */
15847 sc->phy.release(sc);
15848 wm_gmii_reset(sc);
15849 delay(50 * 1000);
15850
15851 return rv;
15852 }
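
/*
 * The ULP exit sequence above, in outline (summary only):
 *
 *	- If ME firmware is present, ask it to undo ULP via H2ME and
 *	  poll FWSM_ULP_CFG_DONE (up to ~300ms).
 *	- Otherwise do it by hand: toggle LANPHYPC, unforce SMBus mode
 *	  in both the PHY (CV_SMB_CTRL) and the MAC (CTRL_EXT), re-enable
 *	  K1, clear the sticky bits in I218_ULP_CONFIG1, set
 *	  I218_ULP_CONFIG1_START, and finally reset the PHY.
 */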
15853
15854 /* WOL in the newer chipset interfaces (pchlan) */
15855 static int
15856 wm_enable_phy_wakeup(struct wm_softc *sc)
15857 {
15858 device_t dev = sc->sc_dev;
15859 uint32_t mreg, moff;
15860 uint16_t wuce, wuc, wufc, preg;
15861 int i, rv;
15862
15863 KASSERT(sc->sc_type >= WM_T_PCH);
15864
15865 /* Copy MAC RARs to PHY RARs */
15866 wm_copy_rx_addrs_to_phy_ich8lan(sc);
15867
15868 /* Activate PHY wakeup */
15869 rv = sc->phy.acquire(sc);
15870 if (rv != 0) {
15871 device_printf(dev, "%s: failed to acquire semaphore\n",
15872 __func__);
15873 return rv;
15874 }
15875
15876 /*
15877 * Enable access to PHY wakeup registers.
15878 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
15879 */
15880 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
15881 if (rv != 0) {
15882 device_printf(dev,
15883 "%s: Could not enable PHY wakeup reg access\n", __func__);
15884 goto release;
15885 }
15886
15887 /* Copy MAC MTA to PHY MTA */
15888 for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
15889 uint16_t lo, hi;
15890
15891 mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
15892 lo = (uint16_t)(mreg & 0xffff);
15893 hi = (uint16_t)((mreg >> 16) & 0xffff);
15894 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
15895 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
15896 }
15897
15898 /* Configure PHY Rx Control register */
15899 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
15900 mreg = CSR_READ(sc, WMREG_RCTL);
15901 if (mreg & RCTL_UPE)
15902 preg |= BM_RCTL_UPE;
15903 if (mreg & RCTL_MPE)
15904 preg |= BM_RCTL_MPE;
15905 preg &= ~(BM_RCTL_MO_MASK);
15906 moff = __SHIFTOUT(mreg, RCTL_MO);
15907 if (moff != 0)
15908 preg |= moff << BM_RCTL_MO_SHIFT;
15909 if (mreg & RCTL_BAM)
15910 preg |= BM_RCTL_BAM;
15911 if (mreg & RCTL_PMCF)
15912 preg |= BM_RCTL_PMCF;
15913 mreg = CSR_READ(sc, WMREG_CTRL);
15914 if (mreg & CTRL_RFCE)
15915 preg |= BM_RCTL_RFCE;
15916 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
15917
15918 wuc = WUC_APME | WUC_PME_EN;
15919 wufc = WUFC_MAG;
15920 /* Enable PHY wakeup in MAC register */
15921 CSR_WRITE(sc, WMREG_WUC,
15922 WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
15923 CSR_WRITE(sc, WMREG_WUFC, wufc);
15924
15925 /* Configure and enable PHY wakeup in PHY registers */
15926 wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
15927 wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
15928
15929 wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
15930 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15931
15932 release:
15933 sc->phy.release(sc);
15934
15935 	return rv;
15936 }
15937
15938 /* Power down workaround on D3 */
15939 static void
15940 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
15941 {
15942 uint32_t reg;
15943 uint16_t phyreg;
15944 int i;
15945
15946 for (i = 0; i < 2; i++) {
15947 /* Disable link */
15948 reg = CSR_READ(sc, WMREG_PHY_CTRL);
15949 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15950 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15951
15952 /*
15953 * Call gig speed drop workaround on Gig disable before
15954 * accessing any PHY registers
15955 */
15956 if (sc->sc_type == WM_T_ICH8)
15957 wm_gig_downshift_workaround_ich8lan(sc);
15958
15959 /* Write VR power-down enable */
15960 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15961 phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15962 phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
15963 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
15964
15965 /* Read it back and test */
15966 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15967 phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15968 if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
15969 break;
15970
15971 /* Issue PHY reset and repeat at most one more time */
15972 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
15973 }
15974 }
15975
15976 /*
15977 * wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
15978 * @sc: pointer to the HW structure
15979 *
15980 * During S0 to Sx transition, it is possible the link remains at gig
15981 * instead of negotiating to a lower speed. Before going to Sx, set
15982 * 'Gig Disable' to force link speed negotiation to a lower speed based on
15983 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
15984 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
15985 * needs to be written.
15986  * Parts that support (and are linked to a partner which supports) EEE in
15987 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
15988 * than 10Mbps w/o EEE.
15989 */
15990 static void
15991 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
15992 {
15993 device_t dev = sc->sc_dev;
15994 struct ethercom *ec = &sc->sc_ethercom;
15995 uint32_t phy_ctrl;
15996 int rv;
15997
15998 phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
15999 phy_ctrl |= PHY_CTRL_GBE_DIS;
16000
16001 KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
16002
16003 if (sc->sc_phytype == WMPHY_I217) {
16004 uint16_t devid = sc->sc_pcidevid;
16005
16006 if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
16007 (devid == PCI_PRODUCT_INTEL_I218_V) ||
16008 (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
16009 (devid == PCI_PRODUCT_INTEL_I218_V3) ||
16010 (sc->sc_type >= WM_T_PCH_SPT))
16011 CSR_WRITE(sc, WMREG_FEXTNVM6,
16012 CSR_READ(sc, WMREG_FEXTNVM6)
16013 & ~FEXTNVM6_REQ_PLL_CLK);
16014
16015 if (sc->phy.acquire(sc) != 0)
16016 goto out;
16017
16018 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16019 uint16_t eee_advert;
16020
16021 rv = wm_read_emi_reg_locked(dev,
16022 I217_EEE_ADVERTISEMENT, &eee_advert);
16023 if (rv)
16024 goto release;
16025
16026 /*
16027 * Disable LPLU if both link partners support 100BaseT
16028 * EEE and 100Full is advertised on both ends of the
16029 * link, and enable Auto Enable LPI since there will
16030 * be no driver to enable LPI while in Sx.
16031 */
16032 if ((eee_advert & AN_EEEADVERT_100_TX) &&
16033 (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
16034 uint16_t anar, phy_reg;
16035
16036 sc->phy.readreg_locked(dev, 2, MII_ANAR,
16037 &anar);
16038 if (anar & ANAR_TX_FD) {
16039 phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
16040 PHY_CTRL_NOND0A_LPLU);
16041
16042 /* Set Auto Enable LPI after link up */
16043 sc->phy.readreg_locked(dev, 2,
16044 I217_LPI_GPIO_CTRL, &phy_reg);
16045 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16046 sc->phy.writereg_locked(dev, 2,
16047 I217_LPI_GPIO_CTRL, phy_reg);
16048 }
16049 }
16050 }
16051
16052 /*
16053 * For i217 Intel Rapid Start Technology support,
16054 * when the system is going into Sx and no manageability engine
16055 * is present, the driver must configure proxy to reset only on
16056 * power good. LPI (Low Power Idle) state must also reset only
16057 * on power good, as well as the MTA (Multicast table array).
16058 * The SMBus release must also be disabled on LCD reset.
16059 */
16060
16061 /*
16062 * Enable MTA to reset for Intel Rapid Start Technology
16063 * Support
16064 */
16065
16066 release:
16067 sc->phy.release(sc);
16068 }
16069 out:
16070 CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
16071
16072 if (sc->sc_type == WM_T_ICH8)
16073 wm_gig_downshift_workaround_ich8lan(sc);
16074
16075 if (sc->sc_type >= WM_T_PCH) {
16076 wm_oem_bits_config_ich8lan(sc, false);
16077
16078 /* Reset PHY to activate OEM bits on 82577/8 */
16079 if (sc->sc_type == WM_T_PCH)
16080 wm_reset_phy(sc);
16081
16082 if (sc->phy.acquire(sc) != 0)
16083 return;
16084 wm_write_smbus_addr(sc);
16085 sc->phy.release(sc);
16086 }
16087 }
16088
16089 /*
16090 * wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
16091 * @sc: pointer to the HW structure
16092 *
16093 * During Sx to S0 transitions on non-managed devices or managed devices
16094 * on which PHY resets are not blocked, if the PHY registers cannot be
16095  * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
16096 * the PHY.
16097 * On i217, setup Intel Rapid Start Technology.
16098 */
16099 static int
16100 wm_resume_workarounds_pchlan(struct wm_softc *sc)
16101 {
16102 device_t dev = sc->sc_dev;
16103 int rv;
16104
16105 if (sc->sc_type < WM_T_PCH2)
16106 return 0;
16107
16108 rv = wm_init_phy_workarounds_pchlan(sc);
16109 if (rv != 0)
16110 return rv;
16111
16112 /* For i217 Intel Rapid Start Technology support when the system
16113 * is transitioning from Sx and no manageability engine is present
16114 * configure SMBus to restore on reset, disable proxy, and enable
16115 * the reset on MTA (Multicast table array).
16116 */
16117 if (sc->sc_phytype == WMPHY_I217) {
16118 uint16_t phy_reg;
16119
16120 rv = sc->phy.acquire(sc);
16121 if (rv != 0)
16122 return rv;
16123
16124 /* Clear Auto Enable LPI after link up */
16125 sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
16126 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16127 sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
16128
16129 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16130 /* Restore clear on SMB if no manageability engine
16131 * is present
16132 */
16133 rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
16134 &phy_reg);
16135 if (rv != 0)
16136 goto release;
16137 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
16138 sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
16139
16140 /* Disable Proxy */
16141 sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
16142 }
16143 /* Enable reset on MTA */
16144 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
16145 if (rv != 0)
16146 goto release;
16147 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
16148 sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
16149
16150 release:
16151 sc->phy.release(sc);
16152 return rv;
16153 }
16154
16155 return 0;
16156 }
16157
16158 static void
16159 wm_enable_wakeup(struct wm_softc *sc)
16160 {
16161 uint32_t reg, pmreg;
16162 pcireg_t pmode;
16163 int rv = 0;
16164
16165 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16166 device_xname(sc->sc_dev), __func__));
16167
16168 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16169 &pmreg, NULL) == 0)
16170 return;
16171
16172 if ((sc->sc_flags & WM_F_WOL) == 0)
16173 goto pme;
16174
16175 /* Advertise the wakeup capability */
16176 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
16177 | CTRL_SWDPIN(3));
16178
16179 /* Keep the laser running on fiber adapters */
16180 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
16181 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
16182 reg = CSR_READ(sc, WMREG_CTRL_EXT);
16183 reg |= CTRL_EXT_SWDPIN(3);
16184 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16185 }
16186
16187 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
16188 (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
16189 (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
16190 (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
16191 wm_suspend_workarounds_ich8lan(sc);
16192
16193 #if 0 /* For the multicast packet */
16194 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
16195 reg |= WUFC_MC;
16196 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
16197 #endif
16198
16199 if (sc->sc_type >= WM_T_PCH) {
16200 rv = wm_enable_phy_wakeup(sc);
16201 if (rv != 0)
16202 goto pme;
16203 } else {
16204 /* Enable wakeup by the MAC */
16205 CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
16206 CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
16207 }
16208
16209 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
16210 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
16211 || (sc->sc_type == WM_T_PCH2))
16212 && (sc->sc_phytype == WMPHY_IGP_3))
16213 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
16214
16215 pme:
16216 /* Request PME */
16217 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
16218 pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
16219 if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
16220 /* For WOL */
16221 pmode |= PCI_PMCSR_PME_EN;
16222 } else {
16223 /* Disable WOL */
16224 pmode &= ~PCI_PMCSR_PME_EN;
16225 }
16226 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
16227 }
16228
16229 /* Disable ASPM L0s and/or L1 for workaround */
16230 static void
16231 wm_disable_aspm(struct wm_softc *sc)
16232 {
16233 pcireg_t reg, mask = 0;
16234 	const char *str = "";
16235
16236 /*
16237 * Only for PCIe device which has PCIe capability in the PCI config
16238 * space.
16239 */
16240 if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
16241 return;
16242
16243 switch (sc->sc_type) {
16244 case WM_T_82571:
16245 case WM_T_82572:
16246 /*
16247 * 8257[12] Errata 13: Device Does Not Support PCIe Active
16248 * State Power management L1 State (ASPM L1).
16249 */
16250 mask = PCIE_LCSR_ASPM_L1;
16251 str = "L1 is";
16252 break;
16253 case WM_T_82573:
16254 case WM_T_82574:
16255 case WM_T_82583:
16256 /*
16257 * The 82573 disappears when PCIe ASPM L0s is enabled.
16258 *
16259 	 * The 82574 and 82583 do not support PCIe ASPM L0s with
16260 	 * some chipsets. The documents for the 82574 and 82583 say that
16261 	 * disabling L0s with some specific chipsets is sufficient,
16262 	 * but we follow what the Intel em driver does.
16263 *
16264 * References:
16265 * Errata 8 of the Specification Update of i82573.
16266 * Errata 20 of the Specification Update of i82574.
16267 * Errata 9 of the Specification Update of i82583.
16268 */
16269 mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
16270 str = "L0s and L1 are";
16271 break;
16272 default:
16273 return;
16274 }
16275
16276 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16277 sc->sc_pcixe_capoff + PCIE_LCSR);
16278 reg &= ~mask;
16279 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16280 sc->sc_pcixe_capoff + PCIE_LCSR, reg);
16281
16282 /* Print only in wm_attach() */
16283 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
16284 aprint_verbose_dev(sc->sc_dev,
16285 "ASPM %s disabled to workaround the errata.\n", str);
16286 }
16287
16288 /* LPLU */
16289
16290 static void
16291 wm_lplu_d0_disable(struct wm_softc *sc)
16292 {
16293 struct mii_data *mii = &sc->sc_mii;
16294 uint32_t reg;
16295 uint16_t phyval;
16296
16297 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16298 device_xname(sc->sc_dev), __func__));
16299
16300 if (sc->sc_phytype == WMPHY_IFE)
16301 return;
16302
16303 switch (sc->sc_type) {
16304 case WM_T_82571:
16305 case WM_T_82572:
16306 case WM_T_82573:
16307 case WM_T_82575:
16308 case WM_T_82576:
16309 mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
16310 phyval &= ~PMR_D0_LPLU;
16311 mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
16312 break;
16313 case WM_T_82580:
16314 case WM_T_I350:
16315 case WM_T_I210:
16316 case WM_T_I211:
16317 reg = CSR_READ(sc, WMREG_PHPM);
16318 reg &= ~PHPM_D0A_LPLU;
16319 CSR_WRITE(sc, WMREG_PHPM, reg);
16320 break;
16321 case WM_T_82574:
16322 case WM_T_82583:
16323 case WM_T_ICH8:
16324 case WM_T_ICH9:
16325 case WM_T_ICH10:
16326 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16327 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
16328 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16329 CSR_WRITE_FLUSH(sc);
16330 break;
16331 case WM_T_PCH:
16332 case WM_T_PCH2:
16333 case WM_T_PCH_LPT:
16334 case WM_T_PCH_SPT:
16335 case WM_T_PCH_CNP:
16336 wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
16337 phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
16338 if (wm_phy_resetisblocked(sc) == false)
16339 phyval |= HV_OEM_BITS_ANEGNOW;
16340 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
16341 break;
16342 default:
16343 break;
16344 }
16345 }
16346
16347 /* EEE */
16348
16349 static int
16350 wm_set_eee_i350(struct wm_softc *sc)
16351 {
16352 struct ethercom *ec = &sc->sc_ethercom;
16353 uint32_t ipcnfg, eeer;
16354 uint32_t ipcnfg_mask
16355 = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
16356 uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
16357
16358 KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
16359
16360 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
16361 eeer = CSR_READ(sc, WMREG_EEER);
16362
16363 /* Enable or disable per user setting */
16364 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16365 ipcnfg |= ipcnfg_mask;
16366 eeer |= eeer_mask;
16367 } else {
16368 ipcnfg &= ~ipcnfg_mask;
16369 eeer &= ~eeer_mask;
16370 }
16371
16372 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
16373 CSR_WRITE(sc, WMREG_EEER, eeer);
16374 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
16375 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
16376
16377 return 0;
16378 }
16379
16380 static int
16381 wm_set_eee_pchlan(struct wm_softc *sc)
16382 {
16383 device_t dev = sc->sc_dev;
16384 struct ethercom *ec = &sc->sc_ethercom;
16385 uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
16386 int rv;
16387
16388 switch (sc->sc_phytype) {
16389 case WMPHY_82579:
16390 lpa = I82579_EEE_LP_ABILITY;
16391 pcs_status = I82579_EEE_PCS_STATUS;
16392 adv_addr = I82579_EEE_ADVERTISEMENT;
16393 break;
16394 case WMPHY_I217:
16395 lpa = I217_EEE_LP_ABILITY;
16396 pcs_status = I217_EEE_PCS_STATUS;
16397 adv_addr = I217_EEE_ADVERTISEMENT;
16398 break;
16399 default:
16400 return 0;
16401 }
16402
16403 rv = sc->phy.acquire(sc);
16404 if (rv != 0) {
16405 device_printf(dev, "%s: failed to get semaphore\n", __func__);
16406 return rv;
16407 }
16408
16409 rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
16410 if (rv != 0)
16411 goto release;
16412
16413 /* Clear bits that enable EEE in various speeds */
16414 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
16415
16416 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16417 /* Save off link partner's EEE ability */
16418 rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
16419 if (rv != 0)
16420 goto release;
16421
16422 /* Read EEE advertisement */
16423 if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
16424 goto release;
16425
16426 /*
16427 * Enable EEE only for speeds in which the link partner is
16428 * EEE capable and for which we advertise EEE.
16429 */
16430 if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
16431 lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
16432 if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
16433 sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
16434 if ((data & ANLPAR_TX_FD) != 0)
16435 lpi_ctrl |= I82579_LPI_CTRL_EN_100;
16436 else {
16437 /*
16438 * EEE is not supported in 100Half, so ignore
16439 * partner's EEE in 100 ability if full-duplex
16440 * is not advertised.
16441 */
16442 sc->eee_lp_ability
16443 &= ~AN_EEEADVERT_100_TX;
16444 }
16445 }
16446 }
16447
16448 if (sc->sc_phytype == WMPHY_82579) {
16449 rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
16450 if (rv != 0)
16451 goto release;
16452
16453 data &= ~I82579_LPI_PLL_SHUT_100;
16454 rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
16455 }
16456
16457 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
16458 if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
16459 goto release;
16460
16461 rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
16462 release:
16463 sc->phy.release(sc);
16464
16465 return rv;
16466 }
16467
16468 static int
16469 wm_set_eee(struct wm_softc *sc)
16470 {
16471 struct ethercom *ec = &sc->sc_ethercom;
16472
16473 if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
16474 return 0;
16475
16476 if (sc->sc_type == WM_T_I354) {
16477 /* I354 uses an external PHY */
16478 return 0; /* not yet */
16479 } else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
16480 return wm_set_eee_i350(sc);
16481 else if (sc->sc_type >= WM_T_PCH2)
16482 return wm_set_eee_pchlan(sc);
16483
16484 return 0;
16485 }
16486
16487 /*
16488 * Workarounds (mainly PHY related).
16489 * Basically, PHY's workarounds are in the PHY drivers.
16490 */
16491
16492 /* Workaround for 82566 Kumeran PCS lock loss */
16493 static int
16494 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
16495 {
16496 struct mii_data *mii = &sc->sc_mii;
16497 uint32_t status = CSR_READ(sc, WMREG_STATUS);
16498 int i, reg, rv;
16499 uint16_t phyreg;
16500
16501 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16502 device_xname(sc->sc_dev), __func__));
16503
16504 /* If the link is not up, do nothing */
16505 if ((status & STATUS_LU) == 0)
16506 return 0;
16507
16508 /* Nothing to do if the link is other than 1Gbps */
16509 if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
16510 return 0;
16511
16512 for (i = 0; i < 10; i++) {
16513 /* read twice */
16514 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16515 if (rv != 0)
16516 return rv;
16517 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16518 if (rv != 0)
16519 return rv;
16520
16521 if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
16522 goto out; /* GOOD! */
16523
16524 /* Reset the PHY */
16525 wm_reset_phy(sc);
16526 delay(5*1000);
16527 }
16528
16529 /* Disable GigE link negotiation */
16530 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16531 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16532 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16533
16534 /*
16535 * Call gig speed drop workaround on Gig disable before accessing
16536 * any PHY registers.
16537 */
16538 wm_gig_downshift_workaround_ich8lan(sc);
16539
16540 out:
16541 return 0;
16542 }
16543
16544 /*
16545 * wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
16546 * @sc: pointer to the HW structure
16547 *
16548 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
16549 * LPLU, Gig disable, MDIC PHY reset):
16550 * 1) Set Kumeran Near-end loopback
16551 * 2) Clear Kumeran Near-end loopback
16552 * Should only be called for ICH8[m] devices with any 1G Phy.
16553 */
16554 static void
16555 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
16556 {
16557 uint16_t kmreg;
16558
16559 /* Only for igp3 */
16560 if (sc->sc_phytype == WMPHY_IGP_3) {
16561 if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
16562 return;
16563 kmreg |= KUMCTRLSTA_DIAG_NELPBK;
16564 if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
16565 return;
16566 kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
16567 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
16568 }
16569 }
16570
16571 /*
16572 * Workaround for pch's PHYs
16573 * XXX should be moved to new PHY driver?
16574 */
16575 static int
16576 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
16577 {
16578 device_t dev = sc->sc_dev;
16579 struct mii_data *mii = &sc->sc_mii;
16580 struct mii_softc *child;
16581 uint16_t phy_data, phyrev = 0;
16582 int phytype = sc->sc_phytype;
16583 int rv;
16584
16585 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16586 device_xname(dev), __func__));
16587 KASSERT(sc->sc_type == WM_T_PCH);
16588
16589 /* Set MDIO slow mode before any other MDIO access */
16590 if (phytype == WMPHY_82577)
16591 if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
16592 return rv;
16593
16594 child = LIST_FIRST(&mii->mii_phys);
16595 if (child != NULL)
16596 phyrev = child->mii_mpd_rev;
16597
16598 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
16599 if ((child != NULL) &&
16600 (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
16601 ((phytype == WMPHY_82578) && (phyrev == 1)))) {
16602 /* Disable generation of early preamble (0x4431) */
16603 rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16604 &phy_data);
16605 if (rv != 0)
16606 return rv;
16607 phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
16608 BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
16609 rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16610 phy_data);
16611 if (rv != 0)
16612 return rv;
16613
16614 /* Preamble tuning for SSC */
16615 rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
16616 if (rv != 0)
16617 return rv;
16618 }
16619
16620 /* 82578 */
16621 if (phytype == WMPHY_82578) {
16622 /*
16623 * Return registers to default by doing a soft reset then
16624 * writing 0x3140 to the control register
16625 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
16626 */
16627 if ((child != NULL) && (phyrev < 2)) {
16628 PHY_RESET(child);
16629 rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
16630 if (rv != 0)
16631 return rv;
16632 }
16633 }
16634
16635 /* Select page 0 */
16636 if ((rv = sc->phy.acquire(sc)) != 0)
16637 return rv;
16638 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
16639 sc->phy.release(sc);
16640 if (rv != 0)
16641 return rv;
16642
16643 /*
16644 * Configure the K1 Si workaround during phy reset assuming there is
16645 * link so that it disables K1 if link is in 1Gbps.
16646 */
16647 if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
16648 return rv;
16649
16650 /* Workaround for link disconnects on a busy hub in half duplex */
16651 rv = sc->phy.acquire(sc);
16652 if (rv)
16653 return rv;
16654 rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
16655 if (rv)
16656 goto release;
16657 rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
16658 phy_data & 0x00ff);
16659 if (rv)
16660 goto release;
16661
16662 /* Set MSE higher to enable link to stay up when noise is high */
16663 rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
16664 release:
16665 sc->phy.release(sc);
16666
16667 return rv;
16668 }
16669
16670 /*
16671 * wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
16672 * @sc: pointer to the HW structure
16673 */
16674 static void
16675 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
16676 {
16677
16678 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16679 device_xname(sc->sc_dev), __func__));
16680
16681 if (sc->phy.acquire(sc) != 0)
16682 return;
16683
16684 wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16685
16686 sc->phy.release(sc);
16687 }
16688
16689 static void
16690 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
16691 {
16692 device_t dev = sc->sc_dev;
16693 uint32_t mac_reg;
16694 uint16_t i, wuce;
16695 int count;
16696
16697 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16698 device_xname(dev), __func__));
16699
16700 if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
16701 return;
16702
16703 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
16704 count = wm_rar_count(sc);
16705 for (i = 0; i < count; i++) {
16706 uint16_t lo, hi;
16707 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
16708 lo = (uint16_t)(mac_reg & 0xffff);
16709 hi = (uint16_t)((mac_reg >> 16) & 0xffff);
16710 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
16711 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
16712
16713 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
16714 lo = (uint16_t)(mac_reg & 0xffff);
16715 hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
16716 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
16717 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
16718 }
16719
16720 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
16721 }
16722
16723 /*
16724 * wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
16725 * with 82579 PHY
16726 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
16727 */
16728 static int
16729 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
16730 {
16731 device_t dev = sc->sc_dev;
16732 int rar_count;
16733 int rv;
16734 uint32_t mac_reg;
16735 uint16_t dft_ctrl, data;
16736 uint16_t i;
16737
16738 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16739 device_xname(dev), __func__));
16740
16741 if (sc->sc_type < WM_T_PCH2)
16742 return 0;
16743
16744 /* Acquire PHY semaphore */
16745 rv = sc->phy.acquire(sc);
16746 if (rv != 0)
16747 return rv;
16748
16749 /* Disable Rx path while enabling/disabling workaround */
16750 rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
16751 if (rv != 0)
16752 goto out;
16753 rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16754 dft_ctrl | (1 << 14));
16755 if (rv != 0)
16756 goto out;
16757
16758 if (enable) {
16759 /* Write Rx addresses (rar_entry_count for RAL/H, and
16760 * SHRAL/H) and initial CRC values to the MAC
16761 */
16762 rar_count = wm_rar_count(sc);
16763 for (i = 0; i < rar_count; i++) {
16764 uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
16765 uint32_t addr_high, addr_low;
16766
16767 addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
16768 if (!(addr_high & RAL_AV))
16769 continue;
16770 addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
16771 mac_addr[0] = (addr_low & 0xFF);
16772 mac_addr[1] = ((addr_low >> 8) & 0xFF);
16773 mac_addr[2] = ((addr_low >> 16) & 0xFF);
16774 mac_addr[3] = ((addr_low >> 24) & 0xFF);
16775 mac_addr[4] = (addr_high & 0xFF);
16776 mac_addr[5] = ((addr_high >> 8) & 0xFF);
16777
16778 CSR_WRITE(sc, WMREG_PCH_RAICC(i),
16779 ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
16780 }
16781
16782 /* Write Rx addresses to the PHY */
16783 wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16784 }
16785
16786 /*
16787 * If enable ==
16788 * true: Enable jumbo frame workaround in the MAC.
16789 * false: Write MAC register values back to h/w defaults.
16790 */
16791 mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
16792 if (enable) {
16793 mac_reg &= ~(1 << 14);
16794 mac_reg |= (7 << 15);
16795 } else
16796 mac_reg &= ~(0xf << 14);
16797 CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
16798
16799 mac_reg = CSR_READ(sc, WMREG_RCTL);
16800 if (enable) {
16801 mac_reg |= RCTL_SECRC;
16802 sc->sc_rctl |= RCTL_SECRC;
16803 sc->sc_flags |= WM_F_CRC_STRIP;
16804 } else {
16805 mac_reg &= ~RCTL_SECRC;
16806 sc->sc_rctl &= ~RCTL_SECRC;
16807 sc->sc_flags &= ~WM_F_CRC_STRIP;
16808 }
16809 CSR_WRITE(sc, WMREG_RCTL, mac_reg);
16810
16811 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
16812 if (rv != 0)
16813 goto out;
16814 if (enable)
16815 data |= 1 << 0;
16816 else
16817 data &= ~(1 << 0);
16818 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
16819 if (rv != 0)
16820 goto out;
16821
16822 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
16823 if (rv != 0)
16824 goto out;
16825 /*
16826 	 * XXX FreeBSD and Linux do the same thing: they write the same value
16827 	 * in both the enable case and the disable case. Is that correct?
16828 */
16829 data &= ~(0xf << 8);
16830 data |= (0xb << 8);
16831 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
16832 if (rv != 0)
16833 goto out;
16834
16835 /*
16836 * If enable ==
16837 * true: Enable jumbo frame workaround in the PHY.
16838 * false: Write PHY register values back to h/w defaults.
16839 */
16840 rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
16841 if (rv != 0)
16842 goto out;
16843 data &= ~(0x7F << 5);
16844 if (enable)
16845 data |= (0x37 << 5);
16846 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
16847 if (rv != 0)
16848 goto out;
16849
16850 rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
16851 if (rv != 0)
16852 goto out;
16853 if (enable)
16854 data &= ~(1 << 13);
16855 else
16856 data |= (1 << 13);
16857 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
16858 if (rv != 0)
16859 goto out;
16860
16861 rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
16862 if (rv != 0)
16863 goto out;
16864 data &= ~(0x3FF << 2);
16865 if (enable)
16866 data |= (I82579_TX_PTR_GAP << 2);
16867 else
16868 data |= (0x8 << 2);
16869 rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
16870 if (rv != 0)
16871 goto out;
16872
16873 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
16874 enable ? 0xf100 : 0x7e00);
16875 if (rv != 0)
16876 goto out;
16877
16878 rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
16879 if (rv != 0)
16880 goto out;
16881 if (enable)
16882 data |= 1 << 10;
16883 else
16884 data &= ~(1 << 10);
16885 rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
16886 if (rv != 0)
16887 goto out;
16888
16889 /* Re-enable Rx path after enabling/disabling workaround */
16890 rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16891 dft_ctrl & ~(1 << 14));
16892
16893 out:
16894 sc->phy.release(sc);
16895
16896 return rv;
16897 }
16898
16899 /*
16900 * wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
16901 * done after every PHY reset.
16902 */
16903 static int
16904 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
16905 {
16906 device_t dev = sc->sc_dev;
16907 int rv;
16908
16909 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16910 device_xname(dev), __func__));
16911 KASSERT(sc->sc_type == WM_T_PCH2);
16912
16913 /* Set MDIO slow mode before any other MDIO access */
16914 rv = wm_set_mdio_slow_mode_hv(sc);
16915 if (rv != 0)
16916 return rv;
16917
16918 rv = sc->phy.acquire(sc);
16919 if (rv != 0)
16920 return rv;
16921 /* Set MSE higher to enable link to stay up when noise is high */
16922 rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
16923 if (rv != 0)
16924 goto release;
16925 /* Drop link after 5 times MSE threshold was reached */
16926 rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
16927 release:
16928 sc->phy.release(sc);
16929
16930 return rv;
16931 }
16932
16933 /**
16934 * wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
16935 * @link: link up bool flag
16936 *
16937 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
16938 * preventing further DMA write requests. Workaround the issue by disabling
16939  * the de-assertion of the clock request when in 1Gbps mode.
16940 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
16941 * speeds in order to avoid Tx hangs.
16942 **/
16943 static int
16944 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
16945 {
16946 uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
16947 uint32_t status = CSR_READ(sc, WMREG_STATUS);
16948 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
16949 uint16_t phyreg;
16950
16951 if (link && (speed == STATUS_SPEED_1000)) {
16952 		int rv = sc->phy.acquire(sc);
16953 		if (rv != 0)
16954 			return rv;
		rv = wm_kmrn_readreg_locked(sc,
		    KUMCTRLSTA_OFFSET_K1_CONFIG, &phyreg);
16955 if (rv != 0)
16956 goto release;
16957 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
16958 phyreg & ~KUMCTRLSTA_K1_ENABLE);
16959 if (rv != 0)
16960 goto release;
16961 delay(20);
16962 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
16963
16964 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
16965 &phyreg);
16966 release:
16967 sc->phy.release(sc);
16968 return rv;
16969 }
16970
16971 fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
16972
16973 struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
16974 if (((child != NULL) && (child->mii_mpd_rev > 5))
16975 || !link
16976 || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
16977 goto update_fextnvm6;
16978
16979 wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
16980
16981 /* Clear link status transmit timeout */
16982 phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
16983 if (speed == STATUS_SPEED_100) {
16984 /* Set inband Tx timeout to 5x10us for 100Half */
16985 phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
16986
16987 /* Do not extend the K1 entry latency for 100Half */
16988 fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
16989 } else {
16990 /* Set inband Tx timeout to 50x10us for 10Full/Half */
16991 phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
16992
16993 /* Extend the K1 entry latency for 10 Mbps */
16994 fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
16995 }
16996
16997 wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
16998
16999 update_fextnvm6:
17000 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
17001 return 0;
17002 }
17003
17004 /*
17005 * wm_k1_gig_workaround_hv - K1 Si workaround
17006 * @sc: pointer to the HW structure
17007 * @link: link up bool flag
17008 *
17009 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
17010  * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
17011 * If link is down, the function will restore the default K1 setting located
17012 * in the NVM.
17013 */
17014 static int
17015 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
17016 {
17017 int k1_enable = sc->sc_nvm_k1_enabled;
17018 int rv;
17019
17020 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17021 device_xname(sc->sc_dev), __func__));
17022
17023 rv = sc->phy.acquire(sc);
17024 if (rv != 0)
17025 return rv;
17026
17027 if (link) {
17028 k1_enable = 0;
17029
17030 /* Link stall fix for link up */
17031 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17032 0x0100);
17033 } else {
17034 /* Link stall fix for link down */
17035 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17036 0x4100);
17037 }
17038
17039 wm_configure_k1_ich8lan(sc, k1_enable);
17040 sc->phy.release(sc);
17041
17042 return 0;
17043 }
17044
17045 /*
17046 * wm_k1_workaround_lv - K1 Si workaround
17047 * @sc: pointer to the HW structure
17048 *
17049  * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
17050  * Disable K1 for 1000 and 100 speeds.
17051 */
17052 static int
17053 wm_k1_workaround_lv(struct wm_softc *sc)
17054 {
17055 uint32_t reg;
17056 uint16_t phyreg;
17057 int rv;
17058
17059 if (sc->sc_type != WM_T_PCH2)
17060 return 0;
17061
17062 /* Set K1 beacon duration based on 10Mbps speed */
17063 rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
17064 if (rv != 0)
17065 return rv;
17066
17067 if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
17068 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
17069 if (phyreg &
17070 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
17071 			/* LV 1G/100 packet drop issue workaround */
17072 rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
17073 &phyreg);
17074 if (rv != 0)
17075 return rv;
17076 phyreg &= ~HV_PM_CTRL_K1_ENA;
17077 rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
17078 phyreg);
17079 if (rv != 0)
17080 return rv;
17081 } else {
17082 /* For 10Mbps */
17083 reg = CSR_READ(sc, WMREG_FEXTNVM4);
17084 reg &= ~FEXTNVM4_BEACON_DURATION;
17085 reg |= FEXTNVM4_BEACON_DURATION_16US;
17086 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
17087 }
17088 }
17089
17090 return 0;
17091 }
17092
17093 /*
17094 * wm_link_stall_workaround_hv - Si workaround
17095 * @sc: pointer to the HW structure
17096 *
17097 * This function works around a Si bug where the link partner can get
17098 * a link up indication before the PHY does. If small packets are sent
17099 * by the link partner they can be placed in the packet buffer without
17100  * being properly accounted for by the PHY and will stall, preventing
17101 * further packets from being received. The workaround is to clear the
17102 * packet buffer after the PHY detects link up.
17103 */
17104 static int
17105 wm_link_stall_workaround_hv(struct wm_softc *sc)
17106 {
17107 uint16_t phyreg;
17108
17109 if (sc->sc_phytype != WMPHY_82578)
17110 return 0;
17111
17112 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
17113 wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
17114 if ((phyreg & BMCR_LOOP) != 0)
17115 return 0;
17116
17117 /* Check if link is up and at 1Gbps */
17118 wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
17119 phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17120 | BM_CS_STATUS_SPEED_MASK;
17121 if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17122 | BM_CS_STATUS_SPEED_1000))
17123 return 0;
17124
17125 delay(200 * 1000); /* XXX too big */
17126
17127 /* Flush the packets in the fifo buffer */
17128 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17129 HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
17130 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17131 HV_MUX_DATA_CTRL_GEN_TO_MAC);
17132
17133 return 0;
17134 }
17135
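/*
 * wm_set_mdio_slow_mode_hv - Set slow MDIO access mode
 * @sc: pointer to the HW structure
 *
 * Acquire the PHY semaphore and set HV_KMRN_MDIO_SLOW so that subsequent
 * MDIO accesses run at the slower rate these PHYs require before any
 * other MDIO access is made.
 */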
17136 static int
17137 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
17138 {
17139 int rv;
17140
17141 rv = sc->phy.acquire(sc);
17142 if (rv != 0) {
17143 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
17144 __func__);
17145 return rv;
17146 }
17147
17148 rv = wm_set_mdio_slow_mode_hv_locked(sc);
17149
17150 sc->phy.release(sc);
17151
17152 return rv;
17153 }
17154
17155 static int
17156 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
17157 {
17158 int rv;
17159 uint16_t reg;
17160
17161 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
17162 if (rv != 0)
17163 return rv;
17164
17165 return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
17166 reg | HV_KMRN_MDIO_SLOW);
17167 }
17168
17169 /*
17170 * wm_configure_k1_ich8lan - Configure K1 power state
17171 * @sc: pointer to the HW structure
17172 * @enable: K1 state to configure
17173 *
17174 * Configure the K1 power state based on the provided parameter.
17175 * Assumes semaphore already acquired.
17176 */
17177 static void
17178 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
17179 {
17180 uint32_t ctrl, ctrl_ext, tmp;
17181 uint16_t kmreg;
17182 int rv;
17183
17184 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17185
17186 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
17187 if (rv != 0)
17188 return;
17189
17190 if (k1_enable)
17191 kmreg |= KUMCTRLSTA_K1_ENABLE;
17192 else
17193 kmreg &= ~KUMCTRLSTA_K1_ENABLE;
17194
17195 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
17196 if (rv != 0)
17197 return;
17198
17199 delay(20);
17200
17201 ctrl = CSR_READ(sc, WMREG_CTRL);
17202 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
17203
17204 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
17205 tmp |= CTRL_FRCSPD;
17206
17207 CSR_WRITE(sc, WMREG_CTRL, tmp);
17208 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
17209 CSR_WRITE_FLUSH(sc);
17210 delay(20);
17211
17212 CSR_WRITE(sc, WMREG_CTRL, ctrl);
17213 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
17214 CSR_WRITE_FLUSH(sc);
17215 delay(20);
17216
17217 return;
17218 }
17219
17220 /* special case - for 82575 - need to do manual init ... */
17221 static void
17222 wm_reset_init_script_82575(struct wm_softc *sc)
17223 {
17224 /*
17225 	 * Remark: this is untested code - we have no board without EEPROM.
17226 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
17227 */
17228
17229 /* SerDes configuration via SERDESCTRL */
17230 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
17231 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
17232 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
17233 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
17234
17235 /* CCM configuration via CCMCTL register */
17236 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
17237 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
17238
17239 /* PCIe lanes configuration */
17240 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
17241 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
17242 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
17243 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
17244
17245 /* PCIe PLL Configuration */
17246 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
17247 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
17248 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
17249 }
17250
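/*
 * wm_reset_mdicnfg_82580 - Restore MDICNFG from the NVM
 * @sc: pointer to the HW structure
 *
 * On 82580 in SGMII mode, re-read the per-port CFG3 word from the NVM
 * after a reset and write the external/shared MDIO bits back into the
 * MDICNFG register.
 */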
17251 static void
17252 wm_reset_mdicnfg_82580(struct wm_softc *sc)
17253 {
17254 uint32_t reg;
17255 uint16_t nvmword;
17256 int rv;
17257
17258 if (sc->sc_type != WM_T_82580)
17259 return;
17260 if ((sc->sc_flags & WM_F_SGMII) == 0)
17261 return;
17262
17263 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
17264 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
17265 if (rv != 0) {
17266 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
17267 __func__);
17268 return;
17269 }
17270
17271 reg = CSR_READ(sc, WMREG_MDICNFG);
17272 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
17273 reg |= MDICNFG_DEST;
17274 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
17275 reg |= MDICNFG_COM_MDIO;
17276 CSR_WRITE(sc, WMREG_MDICNFG, reg);
17277 }
17278
17279 #define MII_INVALIDID(x) (((x) == 0x0000) || ((x) == 0xffff))
17280
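/*
 * wm_phy_is_accessible_pchlan - Check if the PHY is accessible
 * @sc: pointer to the HW structure
 *
 * Try to read the PHY ID; on pre-LPT parts, retry once in MDIO slow
 * mode if the first attempts fail. On LPT and newer, a successful
 * access also unforces SMBus mode in the PHY and the MAC unless the
 * ME firmware is active. Assumes the semaphore is already acquired.
 */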
17281 static bool
17282 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
17283 {
17284 uint32_t reg;
17285 uint16_t id1, id2;
17286 int i, rv;
17287
17288 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17289 device_xname(sc->sc_dev), __func__));
17290 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17291
17292 id1 = id2 = 0xffff;
17293 for (i = 0; i < 2; i++) {
17294 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17295 &id1);
17296 if ((rv != 0) || MII_INVALIDID(id1))
17297 continue;
17298 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17299 &id2);
17300 if ((rv != 0) || MII_INVALIDID(id2))
17301 continue;
17302 break;
17303 }
17304 if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
17305 goto out;
17306
17307 /*
17308 * In case the PHY needs to be in mdio slow mode,
17309 * set slow mode and try to get the PHY id again.
17310 */
17311 rv = 0;
17312 if (sc->sc_type < WM_T_PCH_LPT) {
17313 wm_set_mdio_slow_mode_hv_locked(sc);
17314 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17315 &id1);
17316 rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17317 &id2);
17318 }
17319 if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
17320 device_printf(sc->sc_dev, "XXX return with false\n");
17321 return false;
17322 }
17323 out:
17324 if (sc->sc_type >= WM_T_PCH_LPT) {
17325 /* Only unforce SMBus if ME is not active */
17326 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
17327 uint16_t phyreg;
17328
17329 /* Unforce SMBus mode in PHY */
17330 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
17331 CV_SMB_CTRL, &phyreg);
17332 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
17333 wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
17334 CV_SMB_CTRL, phyreg);
17335
17336 /* Unforce SMBus mode in MAC */
17337 reg = CSR_READ(sc, WMREG_CTRL_EXT);
17338 reg &= ~CTRL_EXT_FORCE_SMBUS;
17339 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
17340 }
17341 }
17342 return true;
17343 }
17344
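/*
 * wm_toggle_lanphypc_pch_lpt - Toggle the LANPHYPC pin value
 * @sc: pointer to the HW structure
 *
 * Toggle the LANPHYPC value to force the PHY into a known state, then
 * wait for the PHY configuration to complete (on LPT and newer by
 * polling CTRL_EXT_LPCD).
 */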
17345 static void
17346 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
17347 {
17348 uint32_t reg;
17349 int i;
17350
17351 /* Set PHY Config Counter to 50msec */
17352 reg = CSR_READ(sc, WMREG_FEXTNVM3);
17353 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
17354 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
17355 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
17356
17357 /* Toggle LANPHYPC */
17358 reg = CSR_READ(sc, WMREG_CTRL);
17359 reg |= CTRL_LANPHYPC_OVERRIDE;
17360 reg &= ~CTRL_LANPHYPC_VALUE;
17361 CSR_WRITE(sc, WMREG_CTRL, reg);
17362 CSR_WRITE_FLUSH(sc);
17363 delay(1000);
17364 reg &= ~CTRL_LANPHYPC_OVERRIDE;
17365 CSR_WRITE(sc, WMREG_CTRL, reg);
17366 CSR_WRITE_FLUSH(sc);
17367
17368 if (sc->sc_type < WM_T_PCH_LPT)
17369 delay(50 * 1000);
17370 else {
17371 i = 20;
17372
17373 do {
17374 delay(5 * 1000);
17375 } while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
17376 && i--);
17377
17378 delay(30 * 1000);
17379 }
17380 }
17381
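/*
 * wm_platform_pm_pch_lpt - Set platform power management values
 * @sc: pointer to the HW structure
 * @link: link up bool flag
 *
 * Program the LTR (Latency Tolerance Reporting) values and the OBFF
 * high water mark from the link speed and the Rx packet buffer size,
 * then enable OBFF.
 */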
17382 static int
17383 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
17384 {
17385 uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
17386 | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
17387 uint32_t rxa;
17388 uint16_t scale = 0, lat_enc = 0;
17389 int32_t obff_hwm = 0;
17390 int64_t lat_ns, value;
17391
17392 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17393 device_xname(sc->sc_dev), __func__));
17394
17395 if (link) {
17396 uint16_t max_snoop, max_nosnoop, max_ltr_enc;
17397 uint32_t status;
17398 uint16_t speed;
17399 pcireg_t preg;
17400
17401 status = CSR_READ(sc, WMREG_STATUS);
17402 switch (__SHIFTOUT(status, STATUS_SPEED)) {
17403 case STATUS_SPEED_10:
17404 speed = 10;
17405 break;
17406 case STATUS_SPEED_100:
17407 speed = 100;
17408 break;
17409 case STATUS_SPEED_1000:
17410 speed = 1000;
17411 break;
17412 default:
17413 device_printf(sc->sc_dev, "Unknown speed "
17414 "(status = %08x)\n", status);
17415 return -1;
17416 }
17417
17418 /* Rx Packet Buffer Allocation size (KB) */
17419 rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
17420
17421 /*
17422 * Determine the maximum latency tolerated by the device.
17423 *
17424 * Per the PCIe spec, the tolerated latencies are encoded as
17425 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
17426 * a 10-bit value (0-1023) to provide a range from 1 ns to
17427 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
17428 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
17429 */
17430 lat_ns = ((int64_t)rxa * 1024 -
17431 (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
17432 + ETHER_HDR_LEN))) * 8 * 1000;
17433 if (lat_ns < 0)
17434 lat_ns = 0;
17435 else
17436 lat_ns /= speed;
17437 value = lat_ns;
17438
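		/*
		 * Illustrative example (numbers made up): lat_ns = 66000
		 * does not fit in the 10-bit value field (max 1023), so the
		 * loop below divides by 2^5 twice, rounding up each time:
		 * 66000 -> 2063 -> 65. That gives scale = 2, value = 65,
		 * which encodes 65 * 2^10 ns ~= 66.6 us.
		 */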
17439 while (value > LTRV_VALUE) {
17440 			scale++;
17441 value = howmany(value, __BIT(5));
17442 }
17443 if (scale > LTRV_SCALE_MAX) {
17444 device_printf(sc->sc_dev,
17445 "Invalid LTR latency scale %d\n", scale);
17446 return -1;
17447 }
17448 lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
17449
17450 /* Determine the maximum latency tolerated by the platform */
17451 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
17452 WM_PCI_LTR_CAP_LPT);
17453 max_snoop = preg & 0xffff;
17454 max_nosnoop = preg >> 16;
17455
17456 max_ltr_enc = MAX(max_snoop, max_nosnoop);
17457
17458 if (lat_enc > max_ltr_enc) {
17459 lat_enc = max_ltr_enc;
17460 lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
17461 * PCI_LTR_SCALETONS(
17462 __SHIFTOUT(lat_enc,
17463 PCI_LTR_MAXSNOOPLAT_SCALE));
17464 }
17465
17466 if (lat_ns) {
17467 lat_ns *= speed * 1000;
17468 lat_ns /= 8;
17469 lat_ns /= 1000000000;
17470 obff_hwm = (int32_t)(rxa - lat_ns);
17471 }
17472 if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
17473 			device_printf(sc->sc_dev, "Invalid high water mark %d "
17474 			    "(rxa = %d, lat_ns = %d)\n",
17475 obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
17476 return -1;
17477 }
17478 }
17479 	/* Snoop and No-Snoop latencies are the same */
17480 reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
17481 CSR_WRITE(sc, WMREG_LTRV, reg);
17482
17483 /* Set OBFF high water mark */
17484 reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
17485 reg |= obff_hwm;
17486 CSR_WRITE(sc, WMREG_SVT, reg);
17487
17488 /* Enable OBFF */
17489 reg = CSR_READ(sc, WMREG_SVCR);
17490 reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
17491 CSR_WRITE(sc, WMREG_SVCR, reg);
17492
17493 return 0;
17494 }
17495
17496 /*
17497 * I210 Errata 25 and I211 Errata 10
17498 * Slow System Clock.
17499 *
17500  * Note that this function is called in both the FLASH and iNVM cases on NetBSD.
17501 */
17502 static int
17503 wm_pll_workaround_i210(struct wm_softc *sc)
17504 {
17505 uint32_t mdicnfg, wuc;
17506 uint32_t reg;
17507 pcireg_t pcireg;
17508 uint32_t pmreg;
17509 uint16_t nvmword, tmp_nvmword;
17510 uint16_t phyval;
17511 bool wa_done = false;
17512 int i, rv = 0;
17513
17514 /* Get Power Management cap offset */
17515 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
17516 &pmreg, NULL) == 0)
17517 return -1;
17518
17519 /* Save WUC and MDICNFG registers */
17520 wuc = CSR_READ(sc, WMREG_WUC);
17521 mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
17522
17523 reg = mdicnfg & ~MDICNFG_DEST;
17524 CSR_WRITE(sc, WMREG_MDICNFG, reg);
17525
17526 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
17527 /*
17528 * The default value of the Initialization Control Word 1
17529 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
17530 */
17531 nvmword = INVM_DEFAULT_AL;
17532 }
17533 tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
17534
17535 for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
17536 wm_gmii_gs40g_readreg(sc->sc_dev, 1,
17537 GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
17538
17539 if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
17540 rv = 0;
17541 break; /* OK */
17542 } else
17543 rv = -1;
17544
17545 wa_done = true;
17546 /* Directly reset the internal PHY */
17547 reg = CSR_READ(sc, WMREG_CTRL);
17548 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
17549
17550 reg = CSR_READ(sc, WMREG_CTRL_EXT);
17551 reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
17552 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
17553
17554 CSR_WRITE(sc, WMREG_WUC, 0);
17555 reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
17556 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
17557
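		/*
		 * Bounce the device through D3hot and back to D0 via the
		 * PCI PM control register as part of the reset sequence.
		 */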
17558 pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
17559 pmreg + PCI_PMCSR);
17560 pcireg |= PCI_PMCSR_STATE_D3;
17561 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
17562 pmreg + PCI_PMCSR, pcireg);
17563 delay(1000);
17564 pcireg &= ~PCI_PMCSR_STATE_D3;
17565 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
17566 pmreg + PCI_PMCSR, pcireg);
17567
17568 reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
17569 CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
17570
17571 /* Restore WUC register */
17572 CSR_WRITE(sc, WMREG_WUC, wuc);
17573 }
17574
17575 /* Restore MDICNFG setting */
17576 CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
17577 if (wa_done)
17578 aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
17579 return rv;
17580 }
17581
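/*
 * wm_legacy_irq_quirk_spt - Workaround for legacy (INTx) interrupts
 * @sc: pointer to the HW structure
 *
 * On PCH_SPT and PCH_CNP, ungate the side clock and disable IOSF SB
 * clock gating and clock requests; this quirk is applied when the
 * device uses legacy interrupts.
 */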
17582 static void
17583 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
17584 {
17585 uint32_t reg;
17586
17587 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17588 device_xname(sc->sc_dev), __func__));
17589 KASSERT((sc->sc_type == WM_T_PCH_SPT)
17590 || (sc->sc_type == WM_T_PCH_CNP));
17591
17592 reg = CSR_READ(sc, WMREG_FEXTNVM7);
17593 reg |= FEXTNVM7_SIDE_CLK_UNGATE;
17594 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
17595
17596 reg = CSR_READ(sc, WMREG_FEXTNVM9);
17597 reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
17598 CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
17599 }
17600
17601 /* Sysctl functions */
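/*
 * wm_sysctl_tdh_handler / wm_sysctl_tdt_handler:
 * Read-only sysctl handlers that report the current value of the
 * transmit descriptor head (TDH) and tail (TDT) registers of a queue.
 */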
17602 static int
17603 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
17604 {
17605 struct sysctlnode node = *rnode;
17606 struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
17607 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
17608 struct wm_softc *sc = txq->txq_sc;
17609 uint32_t reg;
17610
17611 reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
17612 	node.sysctl_data = &reg;
17613 return sysctl_lookup(SYSCTLFN_CALL(&node));
17614 }
17615
17616 static int
17617 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
17618 {
17619 struct sysctlnode node = *rnode;
17620 struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
17621 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
17622 struct wm_softc *sc = txq->txq_sc;
17623 uint32_t reg;
17624
17625 reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
17626 	node.sysctl_data = &reg;
17627 return sysctl_lookup(SYSCTLFN_CALL(&node));
17628 }
17629
17630 #ifdef WM_DEBUG
17631 static int
17632 wm_sysctl_debug(SYSCTLFN_ARGS)
17633 {
17634 struct sysctlnode node = *rnode;
17635 struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
17636 uint32_t dflags;
17637 int error;
17638
17639 dflags = sc->sc_debug;
17640 node.sysctl_data = &dflags;
17641 error = sysctl_lookup(SYSCTLFN_CALL(&node));
17642
17643 if (error || newp == NULL)
17644 return error;
17645
17646 sc->sc_debug = dflags;
17647 device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
17648 device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
17649
17650 return 0;
17651 }
17652 #endif
17653