/*	$NetBSD: if_wm.c,v 1.741 2022/07/19 06:46:29 skrll Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.741 2022/07/19 06:46:29 skrll Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define WM_DEBUG_LINK		__BIT(0)
#define WM_DEBUG_TX		__BIT(1)
#define WM_DEBUG_RX		__BIT(2)
#define WM_DEBUG_GMII		__BIT(3)
#define WM_DEBUG_MANAGE		__BIT(4)
#define WM_DEBUG_NVM		__BIT(5)
#define WM_DEBUG_INIT		__BIT(6)
#define WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	(WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | \
	WM_DEBUG_LOCK)
#endif

#define DPRINTF(sc, x, y)						\
	do {								\
		if ((sc)->sc_debug & (x))				\
			printf y;					\
	} while (0)
#else
#define DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
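
/*
 * DPRINTF() takes the printf arguments as a single parenthesized list
 * because the macro pastes "y" verbatim.  A typical call site therefore
 * looks like:
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: LINK: set media -> link up\n",
 *		device_xname(sc->sc_dev)));
 */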

#ifdef NET_MPSAFE
#define WM_MPSAFE		1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	(WQ_PERCPU | WQ_MPSAFE)
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI	PRI_SOFTNET

/*
 * The maximum number of interrupts used by this driver.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce the chain.
 */
#define WM_NTXSEGS		64
#define WM_IFQUEUELEN		256
#define WM_TXQUEUELEN_MAX	64
#define WM_TXQUEUELEN_MAX_82547	16
#define WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define WM_NTXDESC_82542	256
#define WM_NTXDESC_82544	4096
#define WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
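
/*
 * Because txq_ndesc and txq_num are powers of two, the ring indices
 * above wrap with a cheap AND instead of a modulo.  For example, with
 * WM_NTXDESC_82544 (4096) descriptors:
 *
 *	WM_NEXTTX(txq, 4094) == 4095
 *	WM_NEXTTX(txq, 4095) == 0
 */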

#define WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

#define WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for
 * normal-sized packets.  Jumbo packets consume 5 Rx buffers for a
 * full-sized packet.  We allocate 256 receive descriptors, each with
 * a 2k buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC		256U
#define WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC];	/* 82575 and newer */
} rxdescs_t;

#define WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS	1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)			\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
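
/*
 * For example, WM_Q_EVCNT_DEFINE(txq, txdw) declares the members
 * txq_txdw_evcnt_name[] and txq_ev_txdw, and
 * WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, xname) formats the counter
 * name "txq00txdw" and registers it with evcnt_attach_dynamic(9).
 */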

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > the number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This queue mediates between them without
	 * blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t txq_last_hw_cmd;
	uint8_t txq_last_hw_fields;
	uint16_t txq_last_hw_ipcs;
	uint16_t txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};
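
/*
 * The locked accessors above are meant to be bracketed by the
 * acquire/release pair; a typical (sketched) caller does:
 *
 *	if (sc->phy.acquire(sc) == 0) {
 *		rv = sc->phy.readreg_locked(dev, phy, reg, &val);
 *		sc->phy.release(sc);
 *	}
 */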

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN	0x00
#define	WM_MEDIATYPE_FIBER	0x01
#define	WM_MEDIATYPE_COPPER	0x02
#define	WM_MEDIATYPE_SERDES	0x03	/* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and MSI use sc_intrs[0] only;
					 * MSI-X uses sc_intrs[0] to
					 *   sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

#define	WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define	WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define	WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
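
/*
 * Note that when sc_core_lock is NULL, WM_CORE_LOCK()/WM_CORE_UNLOCK()
 * compile to nothing and WM_CORE_LOCKED() is always true, so assertions
 * like KASSERT(WM_CORE_LOCKED(sc)) stay valid in both configurations.
 */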

#define	WM_RXCHAIN_RESET(rxq)						\
	do {								\
		(rxq)->rxq_tailp = &(rxq)->rxq_head;			\
		*(rxq)->rxq_tailp = NULL;				\
		(rxq)->rxq_len = 0;					\
	} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
	do {								\
		*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);		\
		(rxq)->rxq_tailp = &(m)->m_next;			\
	} while (/*CONSTCOND*/0)
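
/*
 * rxq_tailp always points at the m_next field of the last mbuf in the
 * chain (or at rxq_head when the chain is empty), so appending is O(1).
 * For example:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head == NULL
 *	WM_RXCHAIN_LINK(rxq, m0);	rxq_head == m0
 *	WM_RXCHAIN_LINK(rxq, m1);	m0->m_next == m1
 */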

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)					\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)				\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
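
/*
 * CSR_WRITE_FLUSH() forces a posted register write out to the device by
 * reading STATUS.  A sketch of the usual pattern (register and bit
 * names assumed from if_wmreg.h):
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(100);
 */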

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	    (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	    (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
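
/*
 * The _LO/_HI pairs split a (possibly 64-bit) descriptor ring bus
 * address across a 32-bit base-address register pair.  A sketch of how
 * the Tx ring base would be programmed (TDBAL/TDBAH register names
 * assumed from if_wmreg.h):
 *
 *	CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
 *	CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
 */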

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int wm_match(device_t, cfdata_t, void *);
static void wm_attach(device_t, device_t, void *);
static int wm_detach(device_t, int);
static bool wm_suspend(device_t, const pmf_qual_t *);
static bool wm_resume(device_t, const pmf_qual_t *);
static void wm_watchdog(struct ifnet *);
static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void wm_tick(void *);
static int wm_ifflags_cb(struct ethercom *);
static int wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t wm_check_alt_mac_addr(struct wm_softc *);
static int wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t wm_mchash(struct wm_softc *, const uint8_t *);
static int wm_rar_count(struct wm_softc *);
static void wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void wm_set_vlan(struct wm_softc *);
static void wm_set_pcie_completion_timeout(struct wm_softc *);
static void wm_get_auto_rd_done(struct wm_softc *);
static void wm_lan_init_done(struct wm_softc *);
static void wm_get_cfg_done(struct wm_softc *);
static int wm_phy_post_reset(struct wm_softc *);
static int wm_write_smbus_addr(struct wm_softc *);
static int wm_init_lcd_from_nvm(struct wm_softc *);
static int wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t wm_rxpbs_adjust_82580(uint32_t);
static int wm_reset_phy(struct wm_softc *);
static void wm_flush_desc_rings(struct wm_softc *);
static void wm_reset(struct wm_softc *);
static int wm_add_rxbuf(struct wm_rxqueue *, int);
static void wm_rxdrain(struct wm_rxqueue *);
static void wm_init_rss(struct wm_softc *);
static void wm_adjust_qnum(struct wm_softc *, int);
static inline bool wm_is_using_msix(struct wm_softc *);
static inline bool wm_is_using_multiqueue(struct wm_softc *);
static int wm_softint_establish_queue(struct wm_softc *, int, int);
static int wm_setup_legacy(struct wm_softc *);
static int wm_setup_msix(struct wm_softc *);
static int wm_init(struct ifnet *);
static int wm_init_locked(struct ifnet *);
static void wm_init_sysctls(struct wm_softc *);
static void wm_unset_stopping_flags(struct wm_softc *);
static void wm_set_stopping_flags(struct wm_softc *);
static void wm_stop(struct ifnet *, int);
static void wm_stop_locked(struct ifnet *, bool, bool);
static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void wm_82547_txfifo_stall(void *);
static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int wm_alloc_txrx_queues(struct wm_softc *);
static void wm_free_txrx_queues(struct wm_softc *);
static int wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int wm_select_txqueue(struct ifnet *, struct mbuf *);
static void wm_start(struct ifnet *);
static void wm_start_locked(struct ifnet *);
static int wm_transmit(struct ifnet *, struct mbuf *);
static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void wm_nq_start(struct ifnet *);
static void wm_nq_start_locked(struct ifnet *);
static int wm_nq_transmit(struct ifnet *, struct mbuf *);
static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void wm_deferred_start_locked(struct wm_txqueue *);
static void wm_handle_queue(void *);
static void wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool wm_txeof(struct wm_txqueue *, u_int);
static bool wm_rxeof(struct wm_rxqueue *, u_int);
static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void wm_linkintr(struct wm_softc *, uint32_t);
static int wm_intr_legacy(void *);
static inline void wm_txrxintr_disable(struct wm_queue *);
static inline void wm_txrxintr_enable(struct wm_queue *);
static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int wm_txrxintr_msix(void *);
static int wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void wm_gmii_reset(struct wm_softc *);
static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int wm_get_phy_id_82575(struct wm_softc *);
static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int wm_gmii_mediachange(struct ifnet *);
static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t wm_i82543_mii_recvbits(struct wm_softc *);
static int wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    bool);
static int wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool wm_sgmii_uses_mdio(struct wm_softc *);
static void wm_sgmii_sfp_preconfig(struct wm_softc *);
static int wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int wm_sgmii_writereg(device_t, int, int, uint16_t);
static int wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void wm_tbi_mediainit(struct wm_softc *);
static int wm_tbi_mediachange(struct ifnet *);
static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int wm_check_for_link(struct wm_softc *);
static void wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void wm_serdes_power_up_link_82575(struct wm_softc *);
static int wm_serdes_mediachange(struct ifnet *);
static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int wm_nvm_ready_spi(struct wm_softc *);
static int wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t wm_ich8_cycle_init(struct wm_softc *);
static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int wm_nvm_flash_presence_i210(struct wm_softc *);
static int wm_nvm_validate_checksum(struct wm_softc *);
static void wm_nvm_version_invm(struct wm_softc *);
static void wm_nvm_version(struct wm_softc *);
static int wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
/*
 * Hardware semaphores.
 * Very complex...
 */
static int wm_get_null(struct wm_softc *);
static void wm_put_null(struct wm_softc *);
static int wm_get_eecd(struct wm_softc *);
static void wm_put_eecd(struct wm_softc *);
static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void wm_put_swsm_semaphore(struct wm_softc *);
static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int wm_get_nvm_80003(struct wm_softc *);
static void wm_put_nvm_80003(struct wm_softc *);
static int wm_get_nvm_82571(struct wm_softc *);
static void wm_put_nvm_82571(struct wm_softc *);
static int wm_get_phy_82575(struct wm_softc *);
static void wm_put_phy_82575(struct wm_softc *);
static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void wm_put_swfwhw_semaphore(struct wm_softc *);
static int wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void wm_put_swflag_ich8lan(struct wm_softc *);
static int wm_get_nvm_ich8lan(struct wm_softc *);
static void wm_put_nvm_ich8lan(struct wm_softc *);
static int wm_get_hw_semaphore_82573(struct wm_softc *);
static void wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int wm_check_mng_mode(struct wm_softc *);
static int wm_check_mng_mode_ich8lan(struct wm_softc *);
static int wm_check_mng_mode_82574(struct wm_softc *);
static int wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int wm_enable_mng_pass_thru(struct wm_softc *);
static bool wm_phy_resetisblocked(struct wm_softc *);
static void wm_get_hw_control(struct wm_softc *);
static void wm_release_hw_control(struct wm_softc *);
static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void wm_init_manageability(struct wm_softc *);
static void wm_release_manageability(struct wm_softc *);
static void wm_get_wakeup(struct wm_softc *);
static int wm_ulp_disable(struct wm_softc *);
static int wm_enable_phy_wakeup(struct wm_softc *);
static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int wm_resume_workarounds_pchlan(struct wm_softc *);
static void wm_enable_wakeup(struct wm_softc *);
static void wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int wm_set_eee_i350(struct wm_softc *);
static int wm_set_eee_pchlan(struct wm_softc *);
static int wm_set_eee(struct wm_softc *);
/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are implemented in the PHY drivers.
 */
static int wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int wm_k1_workaround_lv(struct wm_softc *);
static int wm_link_stall_workaround_hv(struct wm_softc *);
static int wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void wm_configure_k1_ich8lan(struct wm_softc *, int);
static void wm_reset_init_script_82575(struct wm_softc *);
static void wm_reset_mdicnfg_82580(struct wm_softc *);
static bool wm_phy_is_accessible_pchlan(struct wm_softc *);
static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int wm_pll_workaround_i210(struct wm_softc *);
static void wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool wm_phy_need_linkdown_discard(struct wm_softc *);
static void wm_set_linkdown_discard(struct wm_softc *);
static void wm_clear_linkdown_discard(struct wm_softc *);

static int wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },
1512
1513 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES,
1514 "I350 Gigabit Backplane Connection",
1515 WM_T_I350, WMP_F_SERDES },
1516
1517 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4,
1518 "I350 Quad Port Gigabit Ethernet",
1519 WM_T_I350, WMP_F_SERDES },
1520
1521 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII,
1522 "I350 Gigabit Connection",
1523 WM_T_I350, WMP_F_COPPER },
1524
1525 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX,
1526 "I354 Gigabit Ethernet (KX)",
1527 WM_T_I354, WMP_F_SERDES },
1528
1529 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII,
1530 "I354 Gigabit Ethernet (SGMII)",
1531 WM_T_I354, WMP_F_COPPER },
1532
1533 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE,
1534 "I354 Gigabit Ethernet (2.5G)",
1535 WM_T_I354, WMP_F_COPPER },
1536
1537 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1,
1538 "I210-T1 Ethernet Server Adapter",
1539 WM_T_I210, WMP_F_COPPER },
1540
1541 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1542 "I210 Ethernet (Copper OEM)",
1543 WM_T_I210, WMP_F_COPPER },
1544
1545 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT,
1546 "I210 Ethernet (Copper IT)",
1547 WM_T_I210, WMP_F_COPPER },
1548
1549 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1550 "I210 Ethernet (Copper, FLASH less)",
1551 WM_T_I210, WMP_F_COPPER },
1552
1553 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER,
1554 "I210 Gigabit Ethernet (Fiber)",
1555 WM_T_I210, WMP_F_FIBER },
1556
1557 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES,
1558 "I210 Gigabit Ethernet (SERDES)",
1559 WM_T_I210, WMP_F_SERDES },
1560
1561 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1562 "I210 Gigabit Ethernet (SERDES, FLASH less)",
1563 WM_T_I210, WMP_F_SERDES },
1564
1565 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII,
1566 "I210 Gigabit Ethernet (SGMII)",
1567 WM_T_I210, WMP_F_COPPER },
1568
1569 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1570 "I210 Gigabit Ethernet (SGMII, FLASH less)",
1571 WM_T_I210, WMP_F_COPPER },
1572
1573 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER,
1574 "I211 Ethernet (COPPER)",
1575 WM_T_I211, WMP_F_COPPER },
1576 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V,
1577 "I217 V Ethernet Connection",
1578 WM_T_PCH_LPT, WMP_F_COPPER },
1579 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM,
1580 "I217 LM Ethernet Connection",
1581 WM_T_PCH_LPT, WMP_F_COPPER },
1582 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V,
1583 "I218 V Ethernet Connection",
1584 WM_T_PCH_LPT, WMP_F_COPPER },
1585 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2,
1586 "I218 V Ethernet Connection",
1587 WM_T_PCH_LPT, WMP_F_COPPER },
1588 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3,
1589 "I218 V Ethernet Connection",
1590 WM_T_PCH_LPT, WMP_F_COPPER },
1591 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM,
1592 "I218 LM Ethernet Connection",
1593 WM_T_PCH_LPT, WMP_F_COPPER },
1594 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2,
1595 "I218 LM Ethernet Connection",
1596 WM_T_PCH_LPT, WMP_F_COPPER },
1597 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3,
1598 "I218 LM Ethernet Connection",
1599 WM_T_PCH_LPT, WMP_F_COPPER },
1600 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM,
1601 "I219 LM Ethernet Connection",
1602 WM_T_PCH_SPT, WMP_F_COPPER },
1603 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2,
1604 "I219 LM (2) Ethernet Connection",
1605 WM_T_PCH_SPT, WMP_F_COPPER },
1606 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3,
1607 "I219 LM (3) Ethernet Connection",
1608 WM_T_PCH_SPT, WMP_F_COPPER },
1609 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4,
1610 "I219 LM (4) Ethernet Connection",
1611 WM_T_PCH_SPT, WMP_F_COPPER },
1612 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5,
1613 "I219 LM (5) Ethernet Connection",
1614 WM_T_PCH_SPT, WMP_F_COPPER },
1615 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM6,
1616 "I219 LM (6) Ethernet Connection",
1617 WM_T_PCH_CNP, WMP_F_COPPER },
1618 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM7,
1619 "I219 LM (7) Ethernet Connection",
1620 WM_T_PCH_CNP, WMP_F_COPPER },
1621 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM8,
1622 "I219 LM (8) Ethernet Connection",
1623 WM_T_PCH_CNP, WMP_F_COPPER },
1624 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM9,
1625 "I219 LM (9) Ethernet Connection",
1626 WM_T_PCH_CNP, WMP_F_COPPER },
1627 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM10,
1628 "I219 LM (10) Ethernet Connection",
1629 WM_T_PCH_CNP, WMP_F_COPPER },
1630 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM11,
1631 "I219 LM (11) Ethernet Connection",
1632 WM_T_PCH_CNP, WMP_F_COPPER },
1633 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM12,
1634 "I219 LM (12) Ethernet Connection",
1635 WM_T_PCH_SPT, WMP_F_COPPER },
1636 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM13,
1637 "I219 LM (13) Ethernet Connection",
1638 WM_T_PCH_CNP, WMP_F_COPPER },
1639 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM14,
1640 "I219 LM (14) Ethernet Connection",
1641 WM_T_PCH_CNP, WMP_F_COPPER },
1642 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM15,
1643 "I219 LM (15) Ethernet Connection",
1644 WM_T_PCH_CNP, WMP_F_COPPER },
1645 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM16,
1646 "I219 LM (16) Ethernet Connection",
1647 WM_T_PCH_CNP, WMP_F_COPPER },
1648 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM17,
1649 "I219 LM (17) Ethernet Connection",
1650 WM_T_PCH_CNP, WMP_F_COPPER },
1651 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM18,
1652 "I219 LM (18) Ethernet Connection",
1653 WM_T_PCH_CNP, WMP_F_COPPER },
1654 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM19,
1655 "I219 LM (19) Ethernet Connection",
1656 WM_T_PCH_CNP, WMP_F_COPPER },
1657 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V,
1658 "I219 V Ethernet Connection",
1659 WM_T_PCH_SPT, WMP_F_COPPER },
1660 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2,
1661 "I219 V (2) Ethernet Connection",
1662 WM_T_PCH_SPT, WMP_F_COPPER },
1663 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4,
1664 "I219 V (4) Ethernet Connection",
1665 WM_T_PCH_SPT, WMP_F_COPPER },
1666 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5,
1667 "I219 V (5) Ethernet Connection",
1668 WM_T_PCH_SPT, WMP_F_COPPER },
1669 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6,
1670 "I219 V (6) Ethernet Connection",
1671 WM_T_PCH_CNP, WMP_F_COPPER },
1672 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7,
1673 "I219 V (7) Ethernet Connection",
1674 WM_T_PCH_CNP, WMP_F_COPPER },
1675 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V8,
1676 "I219 V (8) Ethernet Connection",
1677 WM_T_PCH_CNP, WMP_F_COPPER },
1678 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V9,
1679 "I219 V (9) Ethernet Connection",
1680 WM_T_PCH_CNP, WMP_F_COPPER },
1681 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V10,
1682 "I219 V (10) Ethernet Connection",
1683 WM_T_PCH_CNP, WMP_F_COPPER },
1684 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V11,
1685 "I219 V (11) Ethernet Connection",
1686 WM_T_PCH_CNP, WMP_F_COPPER },
1687 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V12,
1688 "I219 V (12) Ethernet Connection",
1689 WM_T_PCH_SPT, WMP_F_COPPER },
1690 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V13,
1691 "I219 V (13) Ethernet Connection",
1692 WM_T_PCH_CNP, WMP_F_COPPER },
1693 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V14,
1694 "I219 V (14) Ethernet Connection",
1695 WM_T_PCH_CNP, WMP_F_COPPER },
1696 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V15,
1697 "I219 V (15) Ethernet Connection",
1698 WM_T_PCH_CNP, WMP_F_COPPER },
1699 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V16,
1700 "I219 V (16) Ethernet Connection",
1701 WM_T_PCH_CNP, WMP_F_COPPER },
1702 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V17,
1703 "I219 V (17) Ethernet Connection",
1704 WM_T_PCH_CNP, WMP_F_COPPER },
1705 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V18,
1706 "I219 V (18) Ethernet Connection",
1707 WM_T_PCH_CNP, WMP_F_COPPER },
1708 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V19,
1709 "I219 V (19) Ethernet Connection",
1710 WM_T_PCH_CNP, WMP_F_COPPER },
1711 { 0, 0,
1712 NULL,
1713 0, 0 },
1714 };
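
/*
 * The all-zero entry above is the table's sentinel: wm_lookup() below
 * walks wm_products until it finds an entry whose wmp_name is NULL.
 */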
1715
1716 /*
1717  * Register read/write functions,
1718  * other than CSR_{READ|WRITE}().
1719 */
1720
1721 #if 0 /* Not currently used */
1722 static inline uint32_t
1723 wm_io_read(struct wm_softc *sc, int reg)
1724 {
1725
1726 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1727 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1728 }
1729 #endif
1730
1731 static inline void
1732 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1733 {
1734
1735 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1736 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1737 }
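
/*
 * Together these helpers form the indirect I/O window: offset 0 is the
 * address register and offset 4 the data register. A minimal sketch of
 * a read-modify-write through that window, built only on the helpers
 * above (illustrative, not used by the driver; compiled out like
 * wm_io_read() itself):
 */
#if 0
static inline void
wm_io_setbits(struct wm_softc *sc, int reg, uint32_t bits)
{

	/* Read the register via the window, set bits, write it back. */
	wm_io_write(sc, reg, wm_io_read(sc, reg) | bits);
}
#endif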
1738
1739 static inline void
1740 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1741 uint32_t data)
1742 {
1743 uint32_t regval;
1744 int i;
1745
1746 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1747
1748 CSR_WRITE(sc, reg, regval);
1749
1750 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1751 delay(5);
1752 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1753 break;
1754 }
1755 if (i == SCTL_CTL_POLL_TIMEOUT) {
1756 aprint_error("%s: WARNING:"
1757 " i82575 reg 0x%08x setup did not indicate ready\n",
1758 device_xname(sc->sc_dev), reg);
1759 }
1760 }
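
/*
 * The loop above is the usual poll-with-timeout idiom: it waits at most
 * SCTL_CTL_POLL_TIMEOUT * 5 microseconds for the hardware to assert
 * SCTL_CTL_READY, and only warns (rather than fails) on timeout because
 * the write itself has already been posted.
 */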
1761
1762 static inline void
1763 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1764 {
1765 wa->wa_low = htole32(BUS_ADDR_LO32(v));
1766 wa->wa_high = htole32(BUS_ADDR_HI32(v));
1767 }
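
/*
 * BUS_ADDR_LO32()/BUS_ADDR_HI32() just split the 64-bit bus address.
 * The equivalent open-coded form (a sketch, assuming bus_addr_t is
 * 64 bits wide) would be:
 *
 *	wa->wa_low  = htole32((uint32_t)(v & 0xffffffffULL));
 *	wa->wa_high = htole32((uint32_t)(v >> 32));
 */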
1768
1769 /*
1770 * Descriptor sync/init functions.
1771 */
1772 static inline void
1773 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1774 {
1775 struct wm_softc *sc = txq->txq_sc;
1776
1777 /* If it will wrap around, sync to the end of the ring. */
1778 if ((start + num) > WM_NTXDESC(txq)) {
1779 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1780 WM_CDTXOFF(txq, start), txq->txq_descsize *
1781 (WM_NTXDESC(txq) - start), ops);
1782 num -= (WM_NTXDESC(txq) - start);
1783 start = 0;
1784 }
1785
1786 /* Now sync whatever is left. */
1787 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1788 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1789 }
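
/*
 * Worked example: with WM_NTXDESC(txq) == 256, a call with start == 250
 * and num == 10 first syncs descriptors 250-255 (6 * txq_descsize bytes)
 * and then wraps around to sync descriptors 0-3 (4 * txq_descsize bytes).
 */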
1790
1791 static inline void
1792 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1793 {
1794 struct wm_softc *sc = rxq->rxq_sc;
1795
1796 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1797 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1798 }
1799
1800 static inline void
1801 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1802 {
1803 struct wm_softc *sc = rxq->rxq_sc;
1804 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1805 struct mbuf *m = rxs->rxs_mbuf;
1806
1807 /*
1808 * Note: We scoot the packet forward 2 bytes in the buffer
1809 * so that the payload after the Ethernet header is aligned
1810 * to a 4-byte boundary.
1811 	 *
1812 * XXX BRAINDAMAGE ALERT!
1813 * The stupid chip uses the same size for every buffer, which
1814 * is set in the Receive Control register. We are using the 2K
1815 * size option, but what we REALLY want is (2K - 2)! For this
1816 * reason, we can't "scoot" packets longer than the standard
1817 * Ethernet MTU. On strict-alignment platforms, if the total
1818 * size exceeds (2K - 2) we set align_tweak to 0 and let
1819 * the upper layer copy the headers.
1820 */
1821 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1822
1823 if (sc->sc_type == WM_T_82574) {
1824 ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1825 rxd->erx_data.erxd_addr =
1826 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1827 rxd->erx_data.erxd_dd = 0;
1828 } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1829 nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1830
1831 rxd->nqrx_data.nrxd_paddr =
1832 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1833 /* Currently, split header is not supported. */
1834 rxd->nqrx_data.nrxd_haddr = 0;
1835 } else {
1836 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1837
1838 wm_set_dma_addr(&rxd->wrx_addr,
1839 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1840 rxd->wrx_len = 0;
1841 rxd->wrx_cksum = 0;
1842 rxd->wrx_status = 0;
1843 rxd->wrx_errors = 0;
1844 rxd->wrx_special = 0;
1845 }
1846 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1847
1848 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1849 }
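
/*
 * Why 2 bytes: the Ethernet header is 14 bytes long, so starting the
 * frame at offset 2 places the IP header at offset 16, a 4-byte
 * boundary. With the fixed 2K buffer size programmed in the Receive
 * Control register, a frame longer than (2K - 2) no longer fits once
 * scooted, which is why align_tweak falls back to 0 as described above.
 */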
1850
1851 /*
1852 * Device driver interface functions and commonly used functions.
1853 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1854 */
1855
1856 /* Lookup supported device table */
1857 static const struct wm_product *
1858 wm_lookup(const struct pci_attach_args *pa)
1859 {
1860 const struct wm_product *wmp;
1861
1862 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1863 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1864 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1865 return wmp;
1866 }
1867 return NULL;
1868 }
1869
1870 /* The match function (ca_match) */
1871 static int
1872 wm_match(device_t parent, cfdata_t cf, void *aux)
1873 {
1874 struct pci_attach_args *pa = aux;
1875
1876 if (wm_lookup(pa) != NULL)
1877 return 1;
1878
1879 return 0;
1880 }
1881
1882 /* The attach function (ca_attach) */
1883 static void
1884 wm_attach(device_t parent, device_t self, void *aux)
1885 {
1886 struct wm_softc *sc = device_private(self);
1887 struct pci_attach_args *pa = aux;
1888 prop_dictionary_t dict;
1889 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1890 pci_chipset_tag_t pc = pa->pa_pc;
1891 int counts[PCI_INTR_TYPE_SIZE];
1892 pci_intr_type_t max_type;
1893 const char *eetype, *xname;
1894 bus_space_tag_t memt;
1895 bus_space_handle_t memh;
1896 bus_size_t memsize;
1897 int memh_valid;
1898 int i, error;
1899 const struct wm_product *wmp;
1900 prop_data_t ea;
1901 prop_number_t pn;
1902 uint8_t enaddr[ETHER_ADDR_LEN];
1903 char buf[256];
1904 char wqname[MAXCOMLEN];
1905 uint16_t cfg1, cfg2, swdpin, nvmword;
1906 pcireg_t preg, memtype;
1907 uint16_t eeprom_data, apme_mask;
1908 bool force_clear_smbi;
1909 uint32_t link_mode;
1910 uint32_t reg;
1911
1912 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
1913 sc->sc_debug = WM_DEBUG_DEFAULT;
1914 #endif
1915 sc->sc_dev = self;
1916 callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
1917 callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
1918 sc->sc_core_stopping = false;
1919
1920 wmp = wm_lookup(pa);
1921 #ifdef DIAGNOSTIC
1922 if (wmp == NULL) {
1923 printf("\n");
1924 panic("wm_attach: impossible");
1925 }
1926 #endif
1927 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1928
1929 sc->sc_pc = pa->pa_pc;
1930 sc->sc_pcitag = pa->pa_tag;
1931
1932 if (pci_dma64_available(pa)) {
1933 aprint_verbose(", 64-bit DMA");
1934 sc->sc_dmat = pa->pa_dmat64;
1935 } else {
1936 aprint_verbose(", 32-bit DMA");
1937 sc->sc_dmat = pa->pa_dmat;
1938 }
1939
1940 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1941 sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1942 pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1943
1944 sc->sc_type = wmp->wmp_type;
1945
1946 /* Set default function pointers */
1947 sc->phy.acquire = sc->nvm.acquire = wm_get_null;
1948 sc->phy.release = sc->nvm.release = wm_put_null;
1949 sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1950
1951 if (sc->sc_type < WM_T_82543) {
1952 if (sc->sc_rev < 2) {
1953 aprint_error_dev(sc->sc_dev,
1954 "i82542 must be at least rev. 2\n");
1955 return;
1956 }
1957 if (sc->sc_rev < 3)
1958 sc->sc_type = WM_T_82542_2_0;
1959 }
1960
1961 /*
1962 * Disable MSI for Errata:
1963 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1964 *
1965 * 82544: Errata 25
1966 * 82540: Errata 6 (easy to reproduce device timeout)
1967 * 82545: Errata 4 (easy to reproduce device timeout)
1968 * 82546: Errata 26 (easy to reproduce device timeout)
1969 * 82541: Errata 7 (easy to reproduce device timeout)
1970 *
1971 * "Byte Enables 2 and 3 are not set on MSI writes"
1972 *
1973 * 82571 & 82572: Errata 63
1974 */
1975 if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1976 || (sc->sc_type == WM_T_82572))
1977 pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1978
1979 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1980 || (sc->sc_type == WM_T_82580)
1981 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1982 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1983 sc->sc_flags |= WM_F_NEWQUEUE;
1984
1985 /* Set device properties (mactype) */
1986 dict = device_properties(sc->sc_dev);
1987 prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1988
1989 /*
1990 	 * Map the device. All devices support memory-mapped access,
1991 * and it is really required for normal operation.
1992 */
1993 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1994 switch (memtype) {
1995 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1996 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1997 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1998 memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1999 break;
2000 default:
2001 memh_valid = 0;
2002 break;
2003 }
2004
2005 if (memh_valid) {
2006 sc->sc_st = memt;
2007 sc->sc_sh = memh;
2008 sc->sc_ss = memsize;
2009 } else {
2010 aprint_error_dev(sc->sc_dev,
2011 "unable to map device registers\n");
2012 return;
2013 }
2014
2015 /*
2016 * In addition, i82544 and later support I/O mapped indirect
2017 * register access. It is not desirable (nor supported in
2018 * this driver) to use it for normal operation, though it is
2019 * required to work around bugs in some chip versions.
2020 */
2021 switch (sc->sc_type) {
2022 case WM_T_82544:
2023 case WM_T_82541:
2024 case WM_T_82541_2:
2025 case WM_T_82547:
2026 case WM_T_82547_2:
2027 /* First we have to find the I/O BAR. */
2028 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
2029 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
2030 if (memtype == PCI_MAPREG_TYPE_IO)
2031 break;
2032 if (PCI_MAPREG_MEM_TYPE(memtype) ==
2033 PCI_MAPREG_MEM_TYPE_64BIT)
2034 i += 4; /* skip high bits, too */
2035 }
2036 if (i < PCI_MAPREG_END) {
2037 			/*
2038 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
2039 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
2040 			 * That's no problem, because the newer chips don't
2041 			 * have this bug.
2042 			 *
2043 			 * The i8254x apparently doesn't respond when the
2044 			 * I/O BAR is 0, which looks somewhat like it hasn't
2045 			 * been configured.
2046 			 */
2047 preg = pci_conf_read(pc, pa->pa_tag, i);
2048 if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
2049 aprint_error_dev(sc->sc_dev,
2050 "WARNING: I/O BAR at zero.\n");
2051 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
2052 0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
2053 == 0) {
2054 sc->sc_flags |= WM_F_IOH_VALID;
2055 } else
2056 aprint_error_dev(sc->sc_dev,
2057 "WARNING: unable to map I/O space\n");
2058 }
2059 break;
2060 default:
2061 break;
2062 }
2063
2064 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
2065 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2066 preg |= PCI_COMMAND_MASTER_ENABLE;
2067 if (sc->sc_type < WM_T_82542_2_1)
2068 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
2069 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
2070
2071 /* Power up chip */
2072 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
2073 && error != EOPNOTSUPP) {
2074 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
2075 return;
2076 }
2077
2078 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2079 /*
2080 	 * To save interrupt resources, don't use MSI-X if we can use
2081 	 * only one queue.
2082 */
2083 if (sc->sc_nqueues > 1) {
2084 max_type = PCI_INTR_TYPE_MSIX;
2085 /*
2086 		 * The 82583 has an MSI-X capability in the PCI configuration
2087 		 * space, but it doesn't support it. At least the documentation
2088 		 * doesn't say anything about MSI-X.
2089 */
2090 counts[PCI_INTR_TYPE_MSIX]
2091 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2092 } else {
2093 max_type = PCI_INTR_TYPE_MSI;
2094 counts[PCI_INTR_TYPE_MSIX] = 0;
2095 }
2096
2097 /* Allocation settings */
2098 counts[PCI_INTR_TYPE_MSI] = 1;
2099 counts[PCI_INTR_TYPE_INTX] = 1;
2100 /* overridden by disable flags */
2101 if (wm_disable_msi != 0) {
2102 counts[PCI_INTR_TYPE_MSI] = 0;
2103 if (wm_disable_msix != 0) {
2104 max_type = PCI_INTR_TYPE_INTX;
2105 counts[PCI_INTR_TYPE_MSIX] = 0;
2106 }
2107 } else if (wm_disable_msix != 0) {
2108 max_type = PCI_INTR_TYPE_MSI;
2109 counts[PCI_INTR_TYPE_MSIX] = 0;
2110 }
2111
2112 alloc_retry:
2113 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2114 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2115 return;
2116 }
2117
2118 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2119 error = wm_setup_msix(sc);
2120 if (error) {
2121 pci_intr_release(pc, sc->sc_intrs,
2122 counts[PCI_INTR_TYPE_MSIX]);
2123
2124 /* Setup for MSI: Disable MSI-X */
2125 max_type = PCI_INTR_TYPE_MSI;
2126 counts[PCI_INTR_TYPE_MSI] = 1;
2127 counts[PCI_INTR_TYPE_INTX] = 1;
2128 goto alloc_retry;
2129 }
2130 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2131 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2132 error = wm_setup_legacy(sc);
2133 if (error) {
2134 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2135 counts[PCI_INTR_TYPE_MSI]);
2136
2137 /* The next try is for INTx: Disable MSI */
2138 max_type = PCI_INTR_TYPE_INTX;
2139 counts[PCI_INTR_TYPE_INTX] = 1;
2140 goto alloc_retry;
2141 }
2142 } else {
2143 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */
2144 error = wm_setup_legacy(sc);
2145 if (error) {
2146 pci_intr_release(sc->sc_pc, sc->sc_intrs,
2147 counts[PCI_INTR_TYPE_INTX]);
2148 return;
2149 }
2150 }
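
	/*
	 * To summarize the retry logic above: interrupt allocation falls
	 * back MSI-X -> MSI -> INTx.  Each failed wm_setup_*() call
	 * releases the vectors it just allocated, lowers max_type and
	 * jumps back to alloc_retry; only a failure with plain INTx
	 * gives up entirely.
	 */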
2151
2152 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2153 error = workqueue_create(&sc->sc_queue_wq, wqname,
2154 wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2155 WM_WORKQUEUE_FLAGS);
2156 if (error) {
2157 aprint_error_dev(sc->sc_dev,
2158 "unable to create workqueue\n");
2159 goto out;
2160 }
2161
2162 /*
2163 * Check the function ID (unit number of the chip).
2164 */
2165 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2166 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2167 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2168 || (sc->sc_type == WM_T_82580)
2169 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2170 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2171 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2172 else
2173 sc->sc_funcid = 0;
2174
2175 /*
2176 * Determine a few things about the bus we're connected to.
2177 */
2178 if (sc->sc_type < WM_T_82543) {
2179 /* We don't really know the bus characteristics here. */
2180 sc->sc_bus_speed = 33;
2181 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2182 /*
2183 		 * CSA (Communication Streaming Architecture) is about as fast
2184 		 * as a 32-bit 66MHz PCI bus.
2185 */
2186 sc->sc_flags |= WM_F_CSA;
2187 sc->sc_bus_speed = 66;
2188 aprint_verbose_dev(sc->sc_dev,
2189 "Communication Streaming Architecture\n");
2190 if (sc->sc_type == WM_T_82547) {
2191 callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
2192 callout_setfunc(&sc->sc_txfifo_ch,
2193 wm_82547_txfifo_stall, sc);
2194 aprint_verbose_dev(sc->sc_dev,
2195 "using 82547 Tx FIFO stall work-around\n");
2196 }
2197 } else if (sc->sc_type >= WM_T_82571) {
2198 sc->sc_flags |= WM_F_PCIE;
2199 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2200 && (sc->sc_type != WM_T_ICH10)
2201 && (sc->sc_type != WM_T_PCH)
2202 && (sc->sc_type != WM_T_PCH2)
2203 && (sc->sc_type != WM_T_PCH_LPT)
2204 && (sc->sc_type != WM_T_PCH_SPT)
2205 && (sc->sc_type != WM_T_PCH_CNP)) {
2206 /* ICH* and PCH* have no PCIe capability registers */
2207 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2208 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2209 NULL) == 0)
2210 aprint_error_dev(sc->sc_dev,
2211 "unable to find PCIe capability\n");
2212 }
2213 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2214 } else {
2215 reg = CSR_READ(sc, WMREG_STATUS);
2216 if (reg & STATUS_BUS64)
2217 sc->sc_flags |= WM_F_BUS64;
2218 if ((reg & STATUS_PCIX_MODE) != 0) {
2219 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2220
2221 sc->sc_flags |= WM_F_PCIX;
2222 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2223 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2224 aprint_error_dev(sc->sc_dev,
2225 "unable to find PCIX capability\n");
2226 else if (sc->sc_type != WM_T_82545_3 &&
2227 sc->sc_type != WM_T_82546_3) {
2228 /*
2229 * Work around a problem caused by the BIOS
2230 * setting the max memory read byte count
2231 * incorrectly.
2232 */
2233 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2234 sc->sc_pcixe_capoff + PCIX_CMD);
2235 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2236 sc->sc_pcixe_capoff + PCIX_STATUS);
2237
2238 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2239 PCIX_CMD_BYTECNT_SHIFT;
2240 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2241 PCIX_STATUS_MAXB_SHIFT;
2242 if (bytecnt > maxb) {
2243 aprint_verbose_dev(sc->sc_dev,
2244 "resetting PCI-X MMRBC: %d -> %d\n",
2245 512 << bytecnt, 512 << maxb);
2246 pcix_cmd = (pcix_cmd &
2247 ~PCIX_CMD_BYTECNT_MASK) |
2248 (maxb << PCIX_CMD_BYTECNT_SHIFT);
2249 pci_conf_write(pa->pa_pc, pa->pa_tag,
2250 sc->sc_pcixe_capoff + PCIX_CMD,
2251 pcix_cmd);
2252 }
2253 }
2254 }
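		/*
		 * For reference, MMRBC uses a power-of-two encoding: a
		 * field value of n means (512 << n) bytes, so 0..3 map to
		 * 512, 1024, 2048 and 4096.  The clamp above rewrites
		 * PCIX_CMD whenever the BIOS-programmed byte count exceeds
		 * the maximum the status register reports.
		 */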
2255 /*
2256 * The quad port adapter is special; it has a PCIX-PCIX
2257 * bridge on the board, and can run the secondary bus at
2258 * a higher speed.
2259 */
2260 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2261 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2262 : 66;
2263 } else if (sc->sc_flags & WM_F_PCIX) {
2264 switch (reg & STATUS_PCIXSPD_MASK) {
2265 case STATUS_PCIXSPD_50_66:
2266 sc->sc_bus_speed = 66;
2267 break;
2268 case STATUS_PCIXSPD_66_100:
2269 sc->sc_bus_speed = 100;
2270 break;
2271 case STATUS_PCIXSPD_100_133:
2272 sc->sc_bus_speed = 133;
2273 break;
2274 default:
2275 aprint_error_dev(sc->sc_dev,
2276 "unknown PCIXSPD %d; assuming 66MHz\n",
2277 reg & STATUS_PCIXSPD_MASK);
2278 sc->sc_bus_speed = 66;
2279 break;
2280 }
2281 } else
2282 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2283 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2284 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2285 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2286 }
2287
2288 /* clear interesting stat counters */
2289 CSR_READ(sc, WMREG_COLC);
2290 CSR_READ(sc, WMREG_RXERRC);
2291
2292 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2293 || (sc->sc_type >= WM_T_ICH8))
2294 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2295 if (sc->sc_type >= WM_T_ICH8)
2296 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2297
2298 /* Set PHY, NVM mutex related stuff */
2299 switch (sc->sc_type) {
2300 case WM_T_82542_2_0:
2301 case WM_T_82542_2_1:
2302 case WM_T_82543:
2303 case WM_T_82544:
2304 /* Microwire */
2305 sc->nvm.read = wm_nvm_read_uwire;
2306 sc->sc_nvm_wordsize = 64;
2307 sc->sc_nvm_addrbits = 6;
2308 break;
2309 case WM_T_82540:
2310 case WM_T_82545:
2311 case WM_T_82545_3:
2312 case WM_T_82546:
2313 case WM_T_82546_3:
2314 /* Microwire */
2315 sc->nvm.read = wm_nvm_read_uwire;
2316 reg = CSR_READ(sc, WMREG_EECD);
2317 if (reg & EECD_EE_SIZE) {
2318 sc->sc_nvm_wordsize = 256;
2319 sc->sc_nvm_addrbits = 8;
2320 } else {
2321 sc->sc_nvm_wordsize = 64;
2322 sc->sc_nvm_addrbits = 6;
2323 }
2324 sc->sc_flags |= WM_F_LOCK_EECD;
2325 sc->nvm.acquire = wm_get_eecd;
2326 sc->nvm.release = wm_put_eecd;
2327 break;
2328 case WM_T_82541:
2329 case WM_T_82541_2:
2330 case WM_T_82547:
2331 case WM_T_82547_2:
2332 reg = CSR_READ(sc, WMREG_EECD);
2333 /*
2334 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
2335 		 * the 8254[17], so set the flags and functions before calling it.
2336 */
2337 sc->sc_flags |= WM_F_LOCK_EECD;
2338 sc->nvm.acquire = wm_get_eecd;
2339 sc->nvm.release = wm_put_eecd;
2340 if (reg & EECD_EE_TYPE) {
2341 /* SPI */
2342 sc->nvm.read = wm_nvm_read_spi;
2343 sc->sc_flags |= WM_F_EEPROM_SPI;
2344 wm_nvm_set_addrbits_size_eecd(sc);
2345 } else {
2346 /* Microwire */
2347 sc->nvm.read = wm_nvm_read_uwire;
2348 if ((reg & EECD_EE_ABITS) != 0) {
2349 sc->sc_nvm_wordsize = 256;
2350 sc->sc_nvm_addrbits = 8;
2351 } else {
2352 sc->sc_nvm_wordsize = 64;
2353 sc->sc_nvm_addrbits = 6;
2354 }
2355 }
2356 break;
2357 case WM_T_82571:
2358 case WM_T_82572:
2359 /* SPI */
2360 sc->nvm.read = wm_nvm_read_eerd;
2361 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2362 sc->sc_flags |= WM_F_EEPROM_SPI;
2363 wm_nvm_set_addrbits_size_eecd(sc);
2364 sc->phy.acquire = wm_get_swsm_semaphore;
2365 sc->phy.release = wm_put_swsm_semaphore;
2366 sc->nvm.acquire = wm_get_nvm_82571;
2367 sc->nvm.release = wm_put_nvm_82571;
2368 break;
2369 case WM_T_82573:
2370 case WM_T_82574:
2371 case WM_T_82583:
2372 sc->nvm.read = wm_nvm_read_eerd;
2373 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2374 if (sc->sc_type == WM_T_82573) {
2375 sc->phy.acquire = wm_get_swsm_semaphore;
2376 sc->phy.release = wm_put_swsm_semaphore;
2377 sc->nvm.acquire = wm_get_nvm_82571;
2378 sc->nvm.release = wm_put_nvm_82571;
2379 } else {
2380 /* Both PHY and NVM use the same semaphore. */
2381 sc->phy.acquire = sc->nvm.acquire
2382 = wm_get_swfwhw_semaphore;
2383 sc->phy.release = sc->nvm.release
2384 = wm_put_swfwhw_semaphore;
2385 }
2386 if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2387 sc->sc_flags |= WM_F_EEPROM_FLASH;
2388 sc->sc_nvm_wordsize = 2048;
2389 } else {
2390 /* SPI */
2391 sc->sc_flags |= WM_F_EEPROM_SPI;
2392 wm_nvm_set_addrbits_size_eecd(sc);
2393 }
2394 break;
2395 case WM_T_82575:
2396 case WM_T_82576:
2397 case WM_T_82580:
2398 case WM_T_I350:
2399 case WM_T_I354:
2400 case WM_T_80003:
2401 /* SPI */
2402 sc->sc_flags |= WM_F_EEPROM_SPI;
2403 wm_nvm_set_addrbits_size_eecd(sc);
2404 if ((sc->sc_type == WM_T_80003)
2405 || (sc->sc_nvm_wordsize < (1 << 15))) {
2406 sc->nvm.read = wm_nvm_read_eerd;
2407 /* Don't use WM_F_LOCK_EECD because we use EERD */
2408 } else {
2409 sc->nvm.read = wm_nvm_read_spi;
2410 sc->sc_flags |= WM_F_LOCK_EECD;
2411 }
2412 sc->phy.acquire = wm_get_phy_82575;
2413 sc->phy.release = wm_put_phy_82575;
2414 sc->nvm.acquire = wm_get_nvm_80003;
2415 sc->nvm.release = wm_put_nvm_80003;
2416 break;
2417 case WM_T_ICH8:
2418 case WM_T_ICH9:
2419 case WM_T_ICH10:
2420 case WM_T_PCH:
2421 case WM_T_PCH2:
2422 case WM_T_PCH_LPT:
2423 sc->nvm.read = wm_nvm_read_ich8;
2424 /* FLASH */
2425 sc->sc_flags |= WM_F_EEPROM_FLASH;
2426 sc->sc_nvm_wordsize = 2048;
2427 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2428 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2429 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2430 aprint_error_dev(sc->sc_dev,
2431 "can't map FLASH registers\n");
2432 goto out;
2433 }
2434 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2435 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2436 ICH_FLASH_SECTOR_SIZE;
2437 sc->sc_ich8_flash_bank_size =
2438 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2439 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2440 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2441 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
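		/*
		 * To unpack the arithmetic above: GFPREG carries the flash
		 * region's first and last sectors (in ICH_FLASH_SECTOR_SIZE
		 * units) in its low and high halves.  ((last + 1) - first)
		 * sectors scaled to bytes gives the region size; halving it
		 * accounts for the two flash banks, and dividing by
		 * sizeof(uint16_t) converts bytes to NVM words.
		 */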
2442 sc->sc_flashreg_offset = 0;
2443 sc->phy.acquire = wm_get_swflag_ich8lan;
2444 sc->phy.release = wm_put_swflag_ich8lan;
2445 sc->nvm.acquire = wm_get_nvm_ich8lan;
2446 sc->nvm.release = wm_put_nvm_ich8lan;
2447 break;
2448 case WM_T_PCH_SPT:
2449 case WM_T_PCH_CNP:
2450 sc->nvm.read = wm_nvm_read_spt;
2451 /* SPT has no GFPREG; flash registers mapped through BAR0 */
2452 sc->sc_flags |= WM_F_EEPROM_FLASH;
2453 sc->sc_flasht = sc->sc_st;
2454 sc->sc_flashh = sc->sc_sh;
2455 sc->sc_ich8_flash_base = 0;
2456 sc->sc_nvm_wordsize =
2457 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2458 * NVM_SIZE_MULTIPLIER;
2459 		/* The value is in bytes; we want words */
2460 sc->sc_nvm_wordsize /= 2;
2461 /* Assume 2 banks */
2462 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
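		/*
		 * Worked example: a STRAP field value of 7 would yield
		 * (7 + 1) * NVM_SIZE_MULTIPLIER bytes, i.e. half that many
		 * 16-bit words, with each of the two assumed banks covering
		 * half of the words.
		 */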
2463 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2464 sc->phy.acquire = wm_get_swflag_ich8lan;
2465 sc->phy.release = wm_put_swflag_ich8lan;
2466 sc->nvm.acquire = wm_get_nvm_ich8lan;
2467 sc->nvm.release = wm_put_nvm_ich8lan;
2468 break;
2469 case WM_T_I210:
2470 case WM_T_I211:
2471 		/* Allow a single clear of the SW semaphore on I210 and newer */
2472 sc->sc_flags |= WM_F_WA_I210_CLSEM;
2473 if (wm_nvm_flash_presence_i210(sc)) {
2474 sc->nvm.read = wm_nvm_read_eerd;
2475 /* Don't use WM_F_LOCK_EECD because we use EERD */
2476 sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2477 wm_nvm_set_addrbits_size_eecd(sc);
2478 } else {
2479 sc->nvm.read = wm_nvm_read_invm;
2480 sc->sc_flags |= WM_F_EEPROM_INVM;
2481 sc->sc_nvm_wordsize = INVM_SIZE;
2482 }
2483 sc->phy.acquire = wm_get_phy_82575;
2484 sc->phy.release = wm_put_phy_82575;
2485 sc->nvm.acquire = wm_get_nvm_80003;
2486 sc->nvm.release = wm_put_nvm_80003;
2487 break;
2488 default:
2489 break;
2490 }
2491
2492 /* Ensure the SMBI bit is clear before first NVM or PHY access */
2493 switch (sc->sc_type) {
2494 case WM_T_82571:
2495 case WM_T_82572:
2496 reg = CSR_READ(sc, WMREG_SWSM2);
2497 if ((reg & SWSM2_LOCK) == 0) {
2498 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2499 force_clear_smbi = true;
2500 } else
2501 force_clear_smbi = false;
2502 break;
2503 case WM_T_82573:
2504 case WM_T_82574:
2505 case WM_T_82583:
2506 force_clear_smbi = true;
2507 break;
2508 default:
2509 force_clear_smbi = false;
2510 break;
2511 }
2512 if (force_clear_smbi) {
2513 reg = CSR_READ(sc, WMREG_SWSM);
2514 if ((reg & SWSM_SMBI) != 0)
2515 aprint_error_dev(sc->sc_dev,
2516 "Please update the Bootagent\n");
2517 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2518 }
2519
2520 /*
2521 	 * Defer printing the EEPROM type until after verifying the checksum.
2522 * This allows the EEPROM type to be printed correctly in the case
2523 * that no EEPROM is attached.
2524 */
2525 /*
2526 * Validate the EEPROM checksum. If the checksum fails, flag
2527 * this for later, so we can fail future reads from the EEPROM.
2528 */
2529 if (wm_nvm_validate_checksum(sc)) {
2530 /*
2531 		 * Read it again, because some PCI-e parts fail the
2532 * first check due to the link being in sleep state.
2533 */
2534 if (wm_nvm_validate_checksum(sc))
2535 sc->sc_flags |= WM_F_EEPROM_INVALID;
2536 }
2537
2538 if (sc->sc_flags & WM_F_EEPROM_INVALID)
2539 aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2540 else {
2541 aprint_verbose_dev(sc->sc_dev, "%u words ",
2542 sc->sc_nvm_wordsize);
2543 if (sc->sc_flags & WM_F_EEPROM_INVM)
2544 aprint_verbose("iNVM");
2545 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2546 aprint_verbose("FLASH(HW)");
2547 else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2548 aprint_verbose("FLASH");
2549 else {
2550 if (sc->sc_flags & WM_F_EEPROM_SPI)
2551 eetype = "SPI";
2552 else
2553 eetype = "MicroWire";
2554 aprint_verbose("(%d address bits) %s EEPROM",
2555 sc->sc_nvm_addrbits, eetype);
2556 }
2557 }
2558 wm_nvm_version(sc);
2559 aprint_verbose("\n");
2560
2561 /*
2562 	 * XXX This is the first call of wm_gmii_setup_phytype(); the
2563 	 * result might be incorrect.
2564 */
2565 wm_gmii_setup_phytype(sc, 0, 0);
2566
2567 /* Check for WM_F_WOL on some chips before wm_reset() */
2568 switch (sc->sc_type) {
2569 case WM_T_ICH8:
2570 case WM_T_ICH9:
2571 case WM_T_ICH10:
2572 case WM_T_PCH:
2573 case WM_T_PCH2:
2574 case WM_T_PCH_LPT:
2575 case WM_T_PCH_SPT:
2576 case WM_T_PCH_CNP:
2577 apme_mask = WUC_APME;
2578 eeprom_data = CSR_READ(sc, WMREG_WUC);
2579 if ((eeprom_data & apme_mask) != 0)
2580 sc->sc_flags |= WM_F_WOL;
2581 break;
2582 default:
2583 break;
2584 }
2585
2586 /* Reset the chip to a known state. */
2587 wm_reset(sc);
2588
2589 /*
2590 * Check for I21[01] PLL workaround.
2591 *
2592 * Three cases:
2593 * a) Chip is I211.
2594 * b) Chip is I210 and it uses INVM (not FLASH).
2595 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2596 */
2597 if (sc->sc_type == WM_T_I211)
2598 sc->sc_flags |= WM_F_PLL_WA_I210;
2599 if (sc->sc_type == WM_T_I210) {
2600 if (!wm_nvm_flash_presence_i210(sc))
2601 sc->sc_flags |= WM_F_PLL_WA_I210;
2602 else if ((sc->sc_nvm_ver_major < 3)
2603 || ((sc->sc_nvm_ver_major == 3)
2604 && (sc->sc_nvm_ver_minor < 25))) {
2605 aprint_verbose_dev(sc->sc_dev,
2606 "ROM image version %d.%d is older than 3.25\n",
2607 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2608 sc->sc_flags |= WM_F_PLL_WA_I210;
2609 }
2610 }
2611 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2612 wm_pll_workaround_i210(sc);
2613
2614 wm_get_wakeup(sc);
2615
2616 /* Non-AMT based hardware can now take control from firmware */
2617 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2618 wm_get_hw_control(sc);
2619
2620 /*
2621 * Read the Ethernet address from the EEPROM, if not first found
2622 * in device properties.
2623 */
2624 ea = prop_dictionary_get(dict, "mac-address");
2625 if (ea != NULL) {
2626 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2627 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2628 memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
2629 } else {
2630 if (wm_read_mac_addr(sc, enaddr) != 0) {
2631 aprint_error_dev(sc->sc_dev,
2632 "unable to read Ethernet address\n");
2633 goto out;
2634 }
2635 }
2636
2637 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2638 ether_sprintf(enaddr));
2639
2640 /*
2641 * Read the config info from the EEPROM, and set up various
2642 * bits in the control registers based on their contents.
2643 */
2644 pn = prop_dictionary_get(dict, "i82543-cfg1");
2645 if (pn != NULL) {
2646 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2647 cfg1 = (uint16_t) prop_number_signed_value(pn);
2648 } else {
2649 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2650 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2651 goto out;
2652 }
2653 }
2654
2655 pn = prop_dictionary_get(dict, "i82543-cfg2");
2656 if (pn != NULL) {
2657 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2658 cfg2 = (uint16_t) prop_number_signed_value(pn);
2659 } else {
2660 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2661 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2662 goto out;
2663 }
2664 }
2665
2666 /* check for WM_F_WOL */
2667 switch (sc->sc_type) {
2668 case WM_T_82542_2_0:
2669 case WM_T_82542_2_1:
2670 case WM_T_82543:
2671 /* dummy? */
2672 eeprom_data = 0;
2673 apme_mask = NVM_CFG3_APME;
2674 break;
2675 case WM_T_82544:
2676 apme_mask = NVM_CFG2_82544_APM_EN;
2677 eeprom_data = cfg2;
2678 break;
2679 case WM_T_82546:
2680 case WM_T_82546_3:
2681 case WM_T_82571:
2682 case WM_T_82572:
2683 case WM_T_82573:
2684 case WM_T_82574:
2685 case WM_T_82583:
2686 case WM_T_80003:
2687 case WM_T_82575:
2688 case WM_T_82576:
2689 apme_mask = NVM_CFG3_APME;
2690 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2691 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2692 break;
2693 case WM_T_82580:
2694 case WM_T_I350:
2695 case WM_T_I354:
2696 case WM_T_I210:
2697 case WM_T_I211:
2698 apme_mask = NVM_CFG3_APME;
2699 wm_nvm_read(sc,
2700 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2701 1, &eeprom_data);
2702 break;
2703 case WM_T_ICH8:
2704 case WM_T_ICH9:
2705 case WM_T_ICH10:
2706 case WM_T_PCH:
2707 case WM_T_PCH2:
2708 case WM_T_PCH_LPT:
2709 case WM_T_PCH_SPT:
2710 case WM_T_PCH_CNP:
2711 		/* Already checked before wm_reset() */
2712 apme_mask = eeprom_data = 0;
2713 break;
2714 default: /* XXX 82540 */
2715 apme_mask = NVM_CFG3_APME;
2716 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2717 break;
2718 }
2719 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2720 if ((eeprom_data & apme_mask) != 0)
2721 sc->sc_flags |= WM_F_WOL;
2722
2723 /*
2724 	 * We have the EEPROM settings; now apply the special cases
2725 	 * where the EEPROM may be wrong or the board won't support
2726 	 * wake-on-LAN on a particular port.
2727 */
2728 switch (sc->sc_pcidevid) {
2729 case PCI_PRODUCT_INTEL_82546GB_PCIE:
2730 sc->sc_flags &= ~WM_F_WOL;
2731 break;
2732 case PCI_PRODUCT_INTEL_82546EB_FIBER:
2733 case PCI_PRODUCT_INTEL_82546GB_FIBER:
2734 /* Wake events only supported on port A for dual fiber
2735 * regardless of eeprom setting */
2736 if (sc->sc_funcid == 1)
2737 sc->sc_flags &= ~WM_F_WOL;
2738 break;
2739 case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2740 /* If quad port adapter, disable WoL on all but port A */
2741 if (sc->sc_funcid != 0)
2742 sc->sc_flags &= ~WM_F_WOL;
2743 break;
2744 case PCI_PRODUCT_INTEL_82571EB_FIBER:
2745 /* Wake events only supported on port A for dual fiber
2746 * regardless of eeprom setting */
2747 if (sc->sc_funcid == 1)
2748 sc->sc_flags &= ~WM_F_WOL;
2749 break;
2750 case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2751 case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2752 case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2753 /* If quad port adapter, disable WoL on all but port A */
2754 if (sc->sc_funcid != 0)
2755 sc->sc_flags &= ~WM_F_WOL;
2756 break;
2757 }
2758
2759 if (sc->sc_type >= WM_T_82575) {
2760 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2761 aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2762 nvmword);
2763 if ((sc->sc_type == WM_T_82575) ||
2764 (sc->sc_type == WM_T_82576)) {
2765 /* Check NVM for autonegotiation */
2766 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2767 != 0)
2768 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2769 }
2770 if ((sc->sc_type == WM_T_82575) ||
2771 (sc->sc_type == WM_T_I350)) {
2772 if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2773 sc->sc_flags |= WM_F_MAS;
2774 }
2775 }
2776 }
2777
2778 /*
2779 	 * XXX Some multiple-port cards need special handling to
2780 	 * disable a particular port.
2781 */
2782
2783 if (sc->sc_type >= WM_T_82544) {
2784 pn = prop_dictionary_get(dict, "i82543-swdpin");
2785 if (pn != NULL) {
2786 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2787 swdpin = (uint16_t) prop_number_signed_value(pn);
2788 } else {
2789 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2790 aprint_error_dev(sc->sc_dev,
2791 "unable to read SWDPIN\n");
2792 goto out;
2793 }
2794 }
2795 }
2796
2797 if (cfg1 & NVM_CFG1_ILOS)
2798 sc->sc_ctrl |= CTRL_ILOS;
2799
2800 /*
2801 * XXX
2802 	 * This code isn't correct because pins 2 and 3 are located at
2803 	 * different positions on newer chips. Check all datasheets.
2804 	 *
2805 	 * Until this problem is resolved, apply it only to chips up to the 82580.
2806 */
2807 if (sc->sc_type <= WM_T_82580) {
2808 if (sc->sc_type >= WM_T_82544) {
2809 sc->sc_ctrl |=
2810 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2811 CTRL_SWDPIO_SHIFT;
2812 sc->sc_ctrl |=
2813 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2814 CTRL_SWDPINS_SHIFT;
2815 } else {
2816 sc->sc_ctrl |=
2817 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2818 CTRL_SWDPIO_SHIFT;
2819 }
2820 }
2821
2822 if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2823 wm_nvm_read(sc,
2824 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2825 1, &nvmword);
2826 if (nvmword & NVM_CFG3_ILOS)
2827 sc->sc_ctrl |= CTRL_ILOS;
2828 }
2829
2830 #if 0
2831 if (sc->sc_type >= WM_T_82544) {
2832 if (cfg1 & NVM_CFG1_IPS0)
2833 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2834 if (cfg1 & NVM_CFG1_IPS1)
2835 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2836 sc->sc_ctrl_ext |=
2837 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2838 CTRL_EXT_SWDPIO_SHIFT;
2839 sc->sc_ctrl_ext |=
2840 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2841 CTRL_EXT_SWDPINS_SHIFT;
2842 } else {
2843 sc->sc_ctrl_ext |=
2844 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2845 CTRL_EXT_SWDPIO_SHIFT;
2846 }
2847 #endif
2848
2849 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2850 #if 0
2851 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2852 #endif
2853
2854 if (sc->sc_type == WM_T_PCH) {
2855 uint16_t val;
2856
2857 /* Save the NVM K1 bit setting */
2858 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2859
2860 if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2861 sc->sc_nvm_k1_enabled = 1;
2862 else
2863 sc->sc_nvm_k1_enabled = 0;
2864 }
2865
2866 /* Determine if we're GMII, TBI, SERDES or SGMII mode */
2867 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2868 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2869 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2870 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2871 || sc->sc_type == WM_T_82573
2872 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2873 /* Copper only */
2874 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2875 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2876 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2877 	    || (sc->sc_type == WM_T_I211)) {
2878 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2879 link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2880 switch (link_mode) {
2881 case CTRL_EXT_LINK_MODE_1000KX:
2882 aprint_normal_dev(sc->sc_dev, "1000KX\n");
2883 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2884 break;
2885 case CTRL_EXT_LINK_MODE_SGMII:
2886 if (wm_sgmii_uses_mdio(sc)) {
2887 aprint_normal_dev(sc->sc_dev,
2888 "SGMII(MDIO)\n");
2889 sc->sc_flags |= WM_F_SGMII;
2890 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2891 break;
2892 }
2893 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2894 /*FALLTHROUGH*/
2895 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2896 sc->sc_mediatype = wm_sfp_get_media_type(sc);
2897 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2898 if (link_mode
2899 == CTRL_EXT_LINK_MODE_SGMII) {
2900 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2901 sc->sc_flags |= WM_F_SGMII;
2902 aprint_verbose_dev(sc->sc_dev,
2903 "SGMII\n");
2904 } else {
2905 sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2906 aprint_verbose_dev(sc->sc_dev,
2907 "SERDES\n");
2908 }
2909 break;
2910 }
2911 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2912 aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
2913 else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2914 aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
2915 sc->sc_flags |= WM_F_SGMII;
2916 }
2917 /* Do not change link mode for 100BaseFX */
2918 if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
2919 break;
2920
2921 /* Change current link mode setting */
2922 reg &= ~CTRL_EXT_LINK_MODE_MASK;
2923 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2924 reg |= CTRL_EXT_LINK_MODE_SGMII;
2925 else
2926 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2927 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2928 break;
2929 case CTRL_EXT_LINK_MODE_GMII:
2930 default:
2931 aprint_normal_dev(sc->sc_dev, "Copper\n");
2932 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2933 break;
2934 }
2935
2937 		if ((sc->sc_flags & WM_F_SGMII) != 0)
2938 			reg |= CTRL_EXT_I2C_ENA;
2939 		else
2940 			reg &= ~CTRL_EXT_I2C_ENA;
2941 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2942 if ((sc->sc_flags & WM_F_SGMII) != 0) {
2943 if (!wm_sgmii_uses_mdio(sc))
2944 wm_gmii_setup_phytype(sc, 0, 0);
2945 wm_reset_mdicnfg_82580(sc);
2946 }
2947 } else if (sc->sc_type < WM_T_82543 ||
2948 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2949 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2950 aprint_error_dev(sc->sc_dev,
2951 "WARNING: TBIMODE set on 1000BASE-T product!\n");
2952 sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2953 }
2954 } else {
2955 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2956 aprint_error_dev(sc->sc_dev,
2957 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2958 sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2959 }
2960 }
2961
2962 if (sc->sc_type >= WM_T_PCH2)
2963 sc->sc_flags |= WM_F_EEE;
2964 else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
2965 && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
2966 /* XXX: Need special handling for I354. (not yet) */
2967 if (sc->sc_type != WM_T_I354)
2968 sc->sc_flags |= WM_F_EEE;
2969 }
2970
2971 /*
2972 * The I350 has a bug where it always strips the CRC whether
2973 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
2974 */
2975 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2976 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2977 sc->sc_flags |= WM_F_CRC_STRIP;
2978
2979 /* Set device properties (macflags) */
2980 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2981
2982 if (sc->sc_flags != 0) {
2983 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
2984 aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
2985 }
2986
2987 #ifdef WM_MPSAFE
2988 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2989 #else
2990 sc->sc_core_lock = NULL;
2991 #endif
2992
2993 /* Initialize the media structures accordingly. */
2994 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2995 wm_gmii_mediainit(sc, wmp->wmp_product);
2996 else
2997 wm_tbi_mediainit(sc); /* All others */
2998
2999 ifp = &sc->sc_ethercom.ec_if;
3000 xname = device_xname(sc->sc_dev);
3001 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
3002 ifp->if_softc = sc;
3003 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3004 #ifdef WM_MPSAFE
3005 ifp->if_extflags = IFEF_MPSAFE;
3006 #endif
3007 ifp->if_ioctl = wm_ioctl;
3008 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3009 ifp->if_start = wm_nq_start;
3010 /*
3011 * When the number of CPUs is one and the controller can use
3012 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
3013 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
3014 		 * the other for link status changes.
3015 * In this situation, wm_nq_transmit() is disadvantageous
3016 * because of wm_select_txqueue() and pcq(9) overhead.
3017 */
3018 if (wm_is_using_multiqueue(sc))
3019 ifp->if_transmit = wm_nq_transmit;
3020 } else {
3021 ifp->if_start = wm_start;
3022 /*
3023 * wm_transmit() has the same disadvantages as wm_nq_transmit()
3024 * described above.
3025 */
3026 if (wm_is_using_multiqueue(sc))
3027 ifp->if_transmit = wm_transmit;
3028 }
3029 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
3030 ifp->if_init = wm_init;
3031 ifp->if_stop = wm_stop;
3032 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
3033 IFQ_SET_READY(&ifp->if_snd);
3034
3035 /* Check for jumbo frame */
3036 switch (sc->sc_type) {
3037 case WM_T_82573:
3038 /* XXX limited to 9234 if ASPM is disabled */
3039 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
3040 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
3041 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3042 break;
3043 case WM_T_82571:
3044 case WM_T_82572:
3045 case WM_T_82574:
3046 case WM_T_82583:
3047 case WM_T_82575:
3048 case WM_T_82576:
3049 case WM_T_82580:
3050 case WM_T_I350:
3051 case WM_T_I354:
3052 case WM_T_I210:
3053 case WM_T_I211:
3054 case WM_T_80003:
3055 case WM_T_ICH9:
3056 case WM_T_ICH10:
3057 case WM_T_PCH2: /* PCH2 supports 9K frame size */
3058 case WM_T_PCH_LPT:
3059 case WM_T_PCH_SPT:
3060 case WM_T_PCH_CNP:
3061 /* XXX limited to 9234 */
3062 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3063 break;
3064 case WM_T_PCH:
3065 /* XXX limited to 4096 */
3066 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3067 break;
3068 case WM_T_82542_2_0:
3069 case WM_T_82542_2_1:
3070 case WM_T_ICH8:
3071 /* No support for jumbo frame */
3072 break;
3073 default:
3074 /* ETHER_MAX_LEN_JUMBO */
3075 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3076 break;
3077 }
3078
3079 	/* If we're an i82543 or greater, we can support VLANs. */
3080 if (sc->sc_type >= WM_T_82543) {
3081 sc->sc_ethercom.ec_capabilities |=
3082 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3083 sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3084 }
3085
3086 if ((sc->sc_flags & WM_F_EEE) != 0)
3087 sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
3088
3089 /*
3090 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
3091 	 * on the i82543 and later.
3092 */
3093 if (sc->sc_type >= WM_T_82543) {
3094 ifp->if_capabilities |=
3095 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3096 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3097 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3098 IFCAP_CSUM_TCPv6_Tx |
3099 IFCAP_CSUM_UDPv6_Tx;
3100 }
3101
3102 /*
3103 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
3104 *
3105 * 82541GI (8086:1076) ... no
3106 * 82572EI (8086:10b9) ... yes
3107 */
3108 if (sc->sc_type >= WM_T_82571) {
3109 ifp->if_capabilities |=
3110 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3111 }
3112
3113 /*
3114 	 * If we're an i82544 or greater (except the i82547), we can do
3115 * TCP segmentation offload.
3116 */
3117 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
3118 ifp->if_capabilities |= IFCAP_TSOv4;
3119
3120 if (sc->sc_type >= WM_T_82571)
3121 ifp->if_capabilities |= IFCAP_TSOv6;
3122
3123 sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3124 sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3125 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3126 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3127
3128 /* Attach the interface. */
3129 if_initialize(ifp);
3130 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3131 ether_ifattach(ifp, enaddr);
3132 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3133 if_register(ifp);
3134 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3135 RND_FLAG_DEFAULT);
3136
3137 #ifdef WM_EVENT_COUNTERS
3138 /* Attach event counters. */
3139 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3140 NULL, xname, "linkintr");
3141
3142 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3143 NULL, xname, "tx_xoff");
3144 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3145 NULL, xname, "tx_xon");
3146 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3147 NULL, xname, "rx_xoff");
3148 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3149 NULL, xname, "rx_xon");
3150 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3151 NULL, xname, "rx_macctl");
3152 #endif /* WM_EVENT_COUNTERS */
3153
3154 sc->sc_txrx_use_workqueue = false;
3155
3156 if (wm_phy_need_linkdown_discard(sc)) {
3157 DPRINTF(sc, WM_DEBUG_LINK,
3158 ("%s: %s: Set linkdown discard flag\n",
3159 device_xname(sc->sc_dev), __func__));
3160 wm_set_linkdown_discard(sc);
3161 }
3162
3163 wm_init_sysctls(sc);
3164
3165 if (pmf_device_register(self, wm_suspend, wm_resume))
3166 pmf_class_network_register(self, ifp);
3167 else
3168 aprint_error_dev(self, "couldn't establish power handler\n");
3169
3170 sc->sc_flags |= WM_F_ATTACHED;
3171 out:
3172 return;
3173 }
3174
3175 /* The detach function (ca_detach) */
3176 static int
3177 wm_detach(device_t self, int flags __unused)
3178 {
3179 struct wm_softc *sc = device_private(self);
3180 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3181 int i;
3182
3183 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3184 return 0;
3185
3186 	/* Stop the interface. Callouts are stopped inside wm_stop(). */
3187 wm_stop(ifp, 1);
3188
3189 pmf_device_deregister(self);
3190
3191 sysctl_teardown(&sc->sc_sysctllog);
3192
3193 #ifdef WM_EVENT_COUNTERS
3194 evcnt_detach(&sc->sc_ev_linkintr);
3195
3196 evcnt_detach(&sc->sc_ev_tx_xoff);
3197 evcnt_detach(&sc->sc_ev_tx_xon);
3198 evcnt_detach(&sc->sc_ev_rx_xoff);
3199 evcnt_detach(&sc->sc_ev_rx_xon);
3200 evcnt_detach(&sc->sc_ev_rx_macctl);
3201 #endif /* WM_EVENT_COUNTERS */
3202
3203 rnd_detach_source(&sc->rnd_source);
3204
3205 /* Tell the firmware about the release */
3206 WM_CORE_LOCK(sc);
3207 wm_release_manageability(sc);
3208 wm_release_hw_control(sc);
3209 wm_enable_wakeup(sc);
3210 WM_CORE_UNLOCK(sc);
3211
3212 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3213
3214 ether_ifdetach(ifp);
3215 if_detach(ifp);
3216 if_percpuq_destroy(sc->sc_ipq);
3217
3218 /* Delete all remaining media. */
3219 ifmedia_fini(&sc->sc_mii.mii_media);
3220
3221 /* Unload RX dmamaps and free mbufs */
3222 for (i = 0; i < sc->sc_nqueues; i++) {
3223 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3224 mutex_enter(rxq->rxq_lock);
3225 wm_rxdrain(rxq);
3226 mutex_exit(rxq->rxq_lock);
3227 }
3228 /* Must unlock here */
3229
3230 /* Disestablish the interrupt handler */
3231 for (i = 0; i < sc->sc_nintrs; i++) {
3232 if (sc->sc_ihs[i] != NULL) {
3233 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3234 sc->sc_ihs[i] = NULL;
3235 }
3236 }
3237 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3238
3239 	/* wm_stop() ensures the workqueue is stopped. */
3240 workqueue_destroy(sc->sc_queue_wq);
3241
3242 for (i = 0; i < sc->sc_nqueues; i++)
3243 softint_disestablish(sc->sc_queue[i].wmq_si);
3244
3245 wm_free_txrx_queues(sc);
3246
3247 /* Unmap the registers */
3248 if (sc->sc_ss) {
3249 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3250 sc->sc_ss = 0;
3251 }
3252 if (sc->sc_ios) {
3253 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3254 sc->sc_ios = 0;
3255 }
3256 if (sc->sc_flashs) {
3257 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3258 sc->sc_flashs = 0;
3259 }
3260
3261 if (sc->sc_core_lock)
3262 mutex_obj_free(sc->sc_core_lock);
3263 if (sc->sc_ich_phymtx)
3264 mutex_obj_free(sc->sc_ich_phymtx);
3265 if (sc->sc_ich_nvmmtx)
3266 mutex_obj_free(sc->sc_ich_nvmmtx);
3267
3268 return 0;
3269 }
3270
3271 static bool
3272 wm_suspend(device_t self, const pmf_qual_t *qual)
3273 {
3274 struct wm_softc *sc = device_private(self);
3275
3276 wm_release_manageability(sc);
3277 wm_release_hw_control(sc);
3278 wm_enable_wakeup(sc);
3279
3280 return true;
3281 }
3282
3283 static bool
3284 wm_resume(device_t self, const pmf_qual_t *qual)
3285 {
3286 struct wm_softc *sc = device_private(self);
3287 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3288 pcireg_t reg;
3289 char buf[256];
3290
3291 reg = CSR_READ(sc, WMREG_WUS);
3292 if (reg != 0) {
3293 snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3294 device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3295 CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3296 }
3297
3298 if (sc->sc_type >= WM_T_PCH2)
3299 wm_resume_workarounds_pchlan(sc);
3300 if ((ifp->if_flags & IFF_UP) == 0) {
3301 /* >= PCH_SPT hardware workaround before reset. */
3302 if (sc->sc_type >= WM_T_PCH_SPT)
3303 wm_flush_desc_rings(sc);
3304
3305 wm_reset(sc);
3306 /* Non-AMT based hardware can now take control from firmware */
3307 if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3308 wm_get_hw_control(sc);
3309 wm_init_manageability(sc);
3310 } else {
3311 /*
3312 * We called pmf_class_network_register(), so if_init() is
3313 		 * automatically called when IFF_UP is set. wm_reset(),
3314 * wm_get_hw_control() and wm_init_manageability() are called
3315 * via wm_init().
3316 */
3317 }
3318
3319 return true;
3320 }
3321
3322 /*
3323 * wm_watchdog: [ifnet interface function]
3324 *
3325 * Watchdog timer handler.
3326 */
3327 static void
3328 wm_watchdog(struct ifnet *ifp)
3329 {
3330 int qid;
3331 struct wm_softc *sc = ifp->if_softc;
3332 	uint16_t hang_queue = 0; /* 16 bits suffice; wm(4) has at most 16 queues (82576). */
3333
3334 for (qid = 0; qid < sc->sc_nqueues; qid++) {
3335 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3336
3337 wm_watchdog_txq(ifp, txq, &hang_queue);
3338 }
3339
3340 	/* If any of the queues hung up, reset the interface. */
3341 if (hang_queue != 0) {
3342 (void)wm_init(ifp);
3343
3344 		/*
3345 		 * Some upper-layer processing, e.g. ALTQ or a single-CPU
3346 		 * system, may still call ifp->if_start() directly.
3347 		 */
3348 /* Try to get more packets going. */
3349 ifp->if_start(ifp);
3350 }
3351 }
3352
3353
3354 static void
3355 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3356 {
3357
3358 mutex_enter(txq->txq_lock);
3359 if (txq->txq_sending &&
3360 time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3361 wm_watchdog_txq_locked(ifp, txq, hang);
3362
3363 mutex_exit(txq->txq_lock);
3364 }
3365
3366 static void
3367 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3368 uint16_t *hang)
3369 {
3370 struct wm_softc *sc = ifp->if_softc;
3371 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3372
3373 KASSERT(mutex_owned(txq->txq_lock));
3374
3375 /*
3376 * Since we're using delayed interrupts, sweep up
3377 * before we report an error.
3378 */
3379 wm_txeof(txq, UINT_MAX);
3380
3381 if (txq->txq_sending)
3382 *hang |= __BIT(wmq->wmq_id);
3383
3384 if (txq->txq_free == WM_NTXDESC(txq)) {
3385 log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3386 device_xname(sc->sc_dev));
3387 } else {
3388 #ifdef WM_DEBUG
3389 int i, j;
3390 struct wm_txsoft *txs;
3391 #endif
3392 log(LOG_ERR,
3393 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3394 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3395 txq->txq_next);
3396 if_statinc(ifp, if_oerrors);
3397 #ifdef WM_DEBUG
3398 for (i = txq->txq_sdirty; i != txq->txq_snext;
3399 i = WM_NEXTTXS(txq, i)) {
3400 txs = &txq->txq_soft[i];
3401 printf("txs %d tx %d -> %d\n",
3402 i, txs->txs_firstdesc, txs->txs_lastdesc);
3403 for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3404 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3405 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3406 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3407 printf("\t %#08x%08x\n",
3408 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3409 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3410 } else {
3411 printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3412 (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3413 txq->txq_descs[j].wtx_addr.wa_low);
3414 printf("\t %#04x%02x%02x%08x\n",
3415 txq->txq_descs[j].wtx_fields.wtxu_vlan,
3416 txq->txq_descs[j].wtx_fields.wtxu_options,
3417 txq->txq_descs[j].wtx_fields.wtxu_status,
3418 txq->txq_descs[j].wtx_cmdlen);
3419 }
3420 if (j == txs->txs_lastdesc)
3421 break;
3422 }
3423 }
3424 #endif
3425 }
3426 }
3427
3428 /*
3429 * wm_tick:
3430 *
3431 * One second timer, used to check link status, sweep up
3432 * completed transmit jobs, etc.
3433 */
3434 static void
3435 wm_tick(void *arg)
3436 {
3437 struct wm_softc *sc = arg;
3438 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3439 #ifndef WM_MPSAFE
3440 int s = splnet();
3441 #endif
3442
3443 WM_CORE_LOCK(sc);
3444
3445 if (sc->sc_core_stopping) {
3446 WM_CORE_UNLOCK(sc);
3447 #ifndef WM_MPSAFE
3448 splx(s);
3449 #endif
3450 return;
3451 }
3452
3453 if (sc->sc_type >= WM_T_82542_2_1) {
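		/*
		 * Note: these flow-control statistics registers are
		 * clear-on-read, so each read below both accumulates the
		 * delta into the event counter and resets the HW counter.
		 */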
3454 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3455 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3456 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3457 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3458 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3459 }
3460
3461 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3462 if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
3463 if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
3464 + CSR_READ(sc, WMREG_CRCERRS)
3465 + CSR_READ(sc, WMREG_ALGNERRC)
3466 + CSR_READ(sc, WMREG_SYMERRC)
3467 + CSR_READ(sc, WMREG_RXERRC)
3468 + CSR_READ(sc, WMREG_SEC)
3469 + CSR_READ(sc, WMREG_CEXTERR)
3470 + CSR_READ(sc, WMREG_RLEC));
3471 	/*
3472 	 * WMREG_RNBC is incremented when there are no available buffers in host
3473 	 * memory. It does not count dropped packets, because an Ethernet
3474 	 * controller can still receive packets in such a case if there is
3475 	 * space in the PHY's FIFO.
3476 	 *
3477 	 * If you want to know the number of WMREG_RNBC events, you should use
3478 	 * your own EVCNT instead of if_iqdrops.
3479 	 */
3480 if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
3481 IF_STAT_PUTREF(ifp);
3482
3483 if (sc->sc_flags & WM_F_HAS_MII)
3484 mii_tick(&sc->sc_mii);
3485 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3486 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3487 wm_serdes_tick(sc);
3488 else
3489 wm_tbi_tick(sc);
3490
3491 WM_CORE_UNLOCK(sc);
3492 #ifndef WM_MPSAFE
3493 splx(s);
3494 #endif
3495
3496 wm_watchdog(ifp);
3497
3498 callout_schedule(&sc->sc_tick_ch, hz);
3499 }
3500
3501 static int
3502 wm_ifflags_cb(struct ethercom *ec)
3503 {
3504 struct ifnet *ifp = &ec->ec_if;
3505 struct wm_softc *sc = ifp->if_softc;
3506 u_short iffchange;
3507 int ecchange;
3508 bool needreset = false;
3509 int rc = 0;
3510
3511 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3512 device_xname(sc->sc_dev), __func__));
3513
3514 WM_CORE_LOCK(sc);
3515
3516 /*
3517 * Check for if_flags.
3518 * Main usage is to prevent linkdown when opening bpf.
3519 */
3520 iffchange = ifp->if_flags ^ sc->sc_if_flags;
3521 sc->sc_if_flags = ifp->if_flags;
3522 if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3523 needreset = true;
3524 goto ec;
3525 }
3526
3527 	/* if_flags related updates */
3528 if ((iffchange & IFF_PROMISC) != 0)
3529 wm_set_filter(sc);
3530
3531 wm_set_vlan(sc);
3532
3533 ec:
3534 /* Check for ec_capenable. */
3535 ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
3536 sc->sc_ec_capenable = ec->ec_capenable;
3537 if ((ecchange & ~ETHERCAP_EEE) != 0) {
3538 needreset = true;
3539 goto out;
3540 }
3541
3542 /* ec related updates */
3543 wm_set_eee(sc);
3544
3545 out:
3546 if (needreset)
3547 rc = ENETRESET;
3548 WM_CORE_UNLOCK(sc);
3549
3550 return rc;
3551 }
3552
3553 static bool
3554 wm_phy_need_linkdown_discard(struct wm_softc *sc)
3555 {
3556
3557 switch (sc->sc_phytype) {
3558 case WMPHY_82577: /* ihphy */
3559 case WMPHY_82578: /* atphy */
3560 case WMPHY_82579: /* ihphy */
3561 case WMPHY_I217: /* ihphy */
3562 case WMPHY_82580: /* ihphy */
3563 case WMPHY_I350: /* ihphy */
3564 return true;
3565 default:
3566 return false;
3567 }
3568 }
3569
3570 static void
3571 wm_set_linkdown_discard(struct wm_softc *sc)
3572 {
3573
3574 for (int i = 0; i < sc->sc_nqueues; i++) {
3575 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3576
3577 mutex_enter(txq->txq_lock);
3578 txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
3579 mutex_exit(txq->txq_lock);
3580 }
3581 }
3582
3583 static void
3584 wm_clear_linkdown_discard(struct wm_softc *sc)
3585 {
3586
3587 for (int i = 0; i < sc->sc_nqueues; i++) {
3588 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3589
3590 mutex_enter(txq->txq_lock);
3591 txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
3592 mutex_exit(txq->txq_lock);
3593 }
3594 }
3595
3596 /*
3597 * wm_ioctl: [ifnet interface function]
3598 *
3599 * Handle control requests from the operator.
3600 */
3601 static int
3602 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3603 {
3604 struct wm_softc *sc = ifp->if_softc;
3605 struct ifreq *ifr = (struct ifreq *)data;
3606 struct ifaddr *ifa = (struct ifaddr *)data;
3607 struct sockaddr_dl *sdl;
3608 int s, error;
3609
3610 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3611 device_xname(sc->sc_dev), __func__));
3612
3613 #ifndef WM_MPSAFE
3614 s = splnet();
3615 #endif
3616 switch (cmd) {
3617 case SIOCSIFMEDIA:
3618 WM_CORE_LOCK(sc);
3619 /* Flow control requires full-duplex mode. */
3620 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3621 (ifr->ifr_media & IFM_FDX) == 0)
3622 ifr->ifr_media &= ~IFM_ETH_FMASK;
3623 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3624 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3625 /* We can do both TXPAUSE and RXPAUSE. */
3626 ifr->ifr_media |=
3627 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3628 }
3629 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3630 }
3631 WM_CORE_UNLOCK(sc);
3632 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3633 if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
3634 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
3635 DPRINTF(sc, WM_DEBUG_LINK,
3636 ("%s: %s: Set linkdown discard flag\n",
3637 device_xname(sc->sc_dev), __func__));
3638 wm_set_linkdown_discard(sc);
3639 }
3640 }
3641 break;
3642 case SIOCINITIFADDR:
3643 WM_CORE_LOCK(sc);
3644 if (ifa->ifa_addr->sa_family == AF_LINK) {
3645 sdl = satosdl(ifp->if_dl->ifa_addr);
3646 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3647 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3648 /* Unicast address is the first multicast entry */
3649 wm_set_filter(sc);
3650 error = 0;
3651 WM_CORE_UNLOCK(sc);
3652 break;
3653 }
3654 WM_CORE_UNLOCK(sc);
3655 /*FALLTHROUGH*/
3656 default:
3657 if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
3658 if (((ifp->if_flags & IFF_UP) != 0) &&
3659 ((ifr->ifr_flags & IFF_UP) == 0)) {
3660 DPRINTF(sc, WM_DEBUG_LINK,
3661 ("%s: %s: Set linkdown discard flag\n",
3662 device_xname(sc->sc_dev), __func__));
3663 wm_set_linkdown_discard(sc);
3664 }
3665 }
3666 #ifdef WM_MPSAFE
3667 s = splnet();
3668 #endif
3669 /* It may call wm_start, so unlock here */
3670 error = ether_ioctl(ifp, cmd, data);
3671 #ifdef WM_MPSAFE
3672 splx(s);
3673 #endif
3674 if (error != ENETRESET)
3675 break;
3676
3677 error = 0;
3678
3679 if (cmd == SIOCSIFCAP)
3680 error = if_init(ifp);
3681 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3682 ;
3683 else if (ifp->if_flags & IFF_RUNNING) {
3684 /*
3685 * Multicast list has changed; set the hardware filter
3686 * accordingly.
3687 */
3688 WM_CORE_LOCK(sc);
3689 wm_set_filter(sc);
3690 WM_CORE_UNLOCK(sc);
3691 }
3692 break;
3693 }
3694
3695 #ifndef WM_MPSAFE
3696 splx(s);
3697 #endif
3698 return error;
3699 }
3700
3701 /* MAC address related */
3702
3703 /*
3704  * Get the offset of the MAC address and return it.
3705  * If an error occurs, use offset 0.
3706 */
3707 static uint16_t
3708 wm_check_alt_mac_addr(struct wm_softc *sc)
3709 {
3710 uint16_t myea[ETHER_ADDR_LEN / 2];
3711 uint16_t offset = NVM_OFF_MACADDR;
3712
3713 /* Try to read alternative MAC address pointer */
3714 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3715 return 0;
3716
3717 	/* Check whether the pointer is valid. */
3718 if ((offset == 0x0000) || (offset == 0xffff))
3719 return 0;
3720
3721 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3722 	/*
3723 	 * Check whether the alternative MAC address is valid or not.
3724 	 * Some cards have a non-0xffff pointer but don't actually use
3725 	 * an alternative MAC address.
3726 	 *
3727 	 * The multicast (I/G) bit must be clear for a valid unicast address.
3728 	 */
3729 if (wm_nvm_read(sc, offset, 1, myea) == 0)
3730 if (((myea[0] & 0xff) & 0x01) == 0)
3731 return offset; /* Found */
3732
3733 /* Not found */
3734 return 0;
3735 }
3736
3737 static int
3738 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3739 {
3740 uint16_t myea[ETHER_ADDR_LEN / 2];
3741 uint16_t offset = NVM_OFF_MACADDR;
3742 int do_invert = 0;
3743
3744 switch (sc->sc_type) {
3745 case WM_T_82580:
3746 case WM_T_I350:
3747 case WM_T_I354:
3748 /* EEPROM Top Level Partitioning */
3749 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3750 break;
3751 case WM_T_82571:
3752 case WM_T_82575:
3753 case WM_T_82576:
3754 case WM_T_80003:
3755 case WM_T_I210:
3756 case WM_T_I211:
3757 offset = wm_check_alt_mac_addr(sc);
3758 if (offset == 0)
3759 if ((sc->sc_funcid & 0x01) == 1)
3760 do_invert = 1;
3761 break;
3762 default:
3763 if ((sc->sc_funcid & 0x01) == 1)
3764 do_invert = 1;
3765 break;
3766 }
3767
3768 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3769 goto bad;
3770
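	/*
	 * Each 16-bit NVM word holds two address bytes, low byte first,
	 * so word 0 supplies enaddr[0] (low) and enaddr[1] (high).
	 */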
3771 enaddr[0] = myea[0] & 0xff;
3772 enaddr[1] = myea[0] >> 8;
3773 enaddr[2] = myea[1] & 0xff;
3774 enaddr[3] = myea[1] >> 8;
3775 enaddr[4] = myea[2] & 0xff;
3776 enaddr[5] = myea[2] >> 8;
3777
3778 /*
3779 * Toggle the LSB of the MAC address on the second port
3780 * of some dual port cards.
3781 */
3782 if (do_invert != 0)
3783 enaddr[5] ^= 1;
3784
3785 return 0;
3786
3787 bad:
3788 return -1;
3789 }
3790
3791 /*
3792 * wm_set_ral:
3793 *
3794  * Set an entry in the receive address list.
3795 */
3796 static void
3797 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3798 {
3799 uint32_t ral_lo, ral_hi, addrl, addrh;
3800 uint32_t wlock_mac;
3801 int rv;
3802
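	/*
	 * RAL holds the low four bytes of the address; RAH holds the
	 * remaining two bytes plus the Address Valid (AV) bit.
	 */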
3803 if (enaddr != NULL) {
3804 ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
3805 ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
3806 ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
3807 ral_hi |= RAL_AV;
3808 } else {
3809 ral_lo = 0;
3810 ral_hi = 0;
3811 }
3812
3813 switch (sc->sc_type) {
3814 case WM_T_82542_2_0:
3815 case WM_T_82542_2_1:
3816 case WM_T_82543:
3817 CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
3818 CSR_WRITE_FLUSH(sc);
3819 CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
3820 CSR_WRITE_FLUSH(sc);
3821 break;
3822 case WM_T_PCH2:
3823 case WM_T_PCH_LPT:
3824 case WM_T_PCH_SPT:
3825 case WM_T_PCH_CNP:
3826 if (idx == 0) {
3827 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3828 CSR_WRITE_FLUSH(sc);
3829 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3830 CSR_WRITE_FLUSH(sc);
3831 return;
3832 }
3833 if (sc->sc_type != WM_T_PCH2) {
3834 wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
3835 FWSM_WLOCK_MAC);
3836 addrl = WMREG_SHRAL(idx - 1);
3837 addrh = WMREG_SHRAH(idx - 1);
3838 } else {
3839 wlock_mac = 0;
3840 addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
3841 addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
3842 }
3843
3844 if ((wlock_mac == 0) || (idx <= wlock_mac)) {
3845 rv = wm_get_swflag_ich8lan(sc);
3846 if (rv != 0)
3847 return;
3848 CSR_WRITE(sc, addrl, ral_lo);
3849 CSR_WRITE_FLUSH(sc);
3850 CSR_WRITE(sc, addrh, ral_hi);
3851 CSR_WRITE_FLUSH(sc);
3852 wm_put_swflag_ich8lan(sc);
3853 }
3854
3855 break;
3856 default:
3857 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3858 CSR_WRITE_FLUSH(sc);
3859 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3860 CSR_WRITE_FLUSH(sc);
3861 break;
3862 }
3863 }
3864
3865 /*
3866 * wm_mchash:
3867 *
3868 * Compute the hash of the multicast address for the 4096-bit
3869 * multicast filter.
3870 */
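/*
 * A worked example of the computation below: on a non-ICH/PCH chip with
 * mchash_type 0, the address 01:00:5e:00:00:fb hashes to
 * (0x00 >> 4) | (0xfb << 4) = 0xfb0. The caller then uses hash[11:5]
 * as the MTA register index and hash[4:0] as the bit within it.
 */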
3871 static uint32_t
3872 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3873 {
3874 static const int lo_shift[4] = { 4, 3, 2, 0 };
3875 static const int hi_shift[4] = { 4, 5, 6, 8 };
3876 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3877 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3878 uint32_t hash;
3879
3880 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3881 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3882 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3883 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
3884 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3885 (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3886 return (hash & 0x3ff);
3887 }
3888 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3889 (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3890
3891 return (hash & 0xfff);
3892 }
3893
3894 /*
3895  * wm_rar_count:
3896  *	Return the number of entries in the receive address list.
3897  */
3898 static int
3899 wm_rar_count(struct wm_softc *sc)
3900 {
3901 int size;
3902
3903 switch (sc->sc_type) {
3904 case WM_T_ICH8:
3905 		size = WM_RAL_TABSIZE_ICH8 - 1;
3906 break;
3907 case WM_T_ICH9:
3908 case WM_T_ICH10:
3909 case WM_T_PCH:
3910 size = WM_RAL_TABSIZE_ICH8;
3911 break;
3912 case WM_T_PCH2:
3913 size = WM_RAL_TABSIZE_PCH2;
3914 break;
3915 case WM_T_PCH_LPT:
3916 case WM_T_PCH_SPT:
3917 case WM_T_PCH_CNP:
3918 size = WM_RAL_TABSIZE_PCH_LPT;
3919 break;
3920 case WM_T_82575:
3921 case WM_T_I210:
3922 case WM_T_I211:
3923 size = WM_RAL_TABSIZE_82575;
3924 break;
3925 case WM_T_82576:
3926 case WM_T_82580:
3927 size = WM_RAL_TABSIZE_82576;
3928 break;
3929 case WM_T_I350:
3930 case WM_T_I354:
3931 size = WM_RAL_TABSIZE_I350;
3932 break;
3933 default:
3934 size = WM_RAL_TABSIZE;
3935 }
3936
3937 return size;
3938 }
3939
3940 /*
3941 * wm_set_filter:
3942 *
3943 * Set up the receive filter.
3944 */
3945 static void
3946 wm_set_filter(struct wm_softc *sc)
3947 {
3948 struct ethercom *ec = &sc->sc_ethercom;
3949 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3950 struct ether_multi *enm;
3951 struct ether_multistep step;
3952 bus_addr_t mta_reg;
3953 uint32_t hash, reg, bit;
3954 int i, size, ralmax, rv;
3955
3956 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3957 device_xname(sc->sc_dev), __func__));
3958
3959 if (sc->sc_type >= WM_T_82544)
3960 mta_reg = WMREG_CORDOVA_MTA;
3961 else
3962 mta_reg = WMREG_MTA;
3963
3964 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3965
3966 if (ifp->if_flags & IFF_BROADCAST)
3967 sc->sc_rctl |= RCTL_BAM;
3968 if (ifp->if_flags & IFF_PROMISC) {
3969 sc->sc_rctl |= RCTL_UPE;
3970 ETHER_LOCK(ec);
3971 ec->ec_flags |= ETHER_F_ALLMULTI;
3972 ETHER_UNLOCK(ec);
3973 goto allmulti;
3974 }
3975
3976 /*
3977 * Set the station address in the first RAL slot, and
3978 * clear the remaining slots.
3979 */
3980 size = wm_rar_count(sc);
3981 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3982
3983 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
3984 || (sc->sc_type == WM_T_PCH_CNP)) {
3985 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3986 switch (i) {
3987 case 0:
3988 /* We can use all entries */
3989 ralmax = size;
3990 break;
3991 case 1:
3992 /* Only RAR[0] */
3993 ralmax = 1;
3994 break;
3995 default:
3996 /* Available SHRA + RAR[0] */
3997 ralmax = i + 1;
3998 }
3999 } else
4000 ralmax = size;
4001 for (i = 1; i < size; i++) {
4002 if (i < ralmax)
4003 wm_set_ral(sc, NULL, i);
4004 }
4005
4006 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4007 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4008 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4009 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
4010 size = WM_ICH8_MC_TABSIZE;
4011 else
4012 size = WM_MC_TABSIZE;
4013 /* Clear out the multicast table. */
4014 for (i = 0; i < size; i++) {
4015 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4016 CSR_WRITE_FLUSH(sc);
4017 }
4018
4019 ETHER_LOCK(ec);
4020 ETHER_FIRST_MULTI(step, ec, enm);
4021 while (enm != NULL) {
4022 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4023 ec->ec_flags |= ETHER_F_ALLMULTI;
4024 ETHER_UNLOCK(ec);
4025 /*
4026 * We must listen to a range of multicast addresses.
4027 * For now, just accept all multicasts, rather than
4028 * trying to set only those filter bits needed to match
4029 * the range. (At this time, the only use of address
4030 * ranges is for IP multicast routing, for which the
4031 * range is big enough to require all bits set.)
4032 */
4033 goto allmulti;
4034 }
4035
4036 hash = wm_mchash(sc, enm->enm_addrlo);
4037
4038 reg = (hash >> 5);
4039 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4040 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4041 || (sc->sc_type == WM_T_PCH2)
4042 || (sc->sc_type == WM_T_PCH_LPT)
4043 || (sc->sc_type == WM_T_PCH_SPT)
4044 || (sc->sc_type == WM_T_PCH_CNP))
4045 reg &= 0x1f;
4046 else
4047 reg &= 0x7f;
4048 bit = hash & 0x1f;
4049
4050 hash = CSR_READ(sc, mta_reg + (reg << 2));
4051 hash |= 1U << bit;
4052
4053 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
4054 /*
4055 * 82544 Errata 9: Certain register cannot be written
4056 * with particular alignments in PCI-X bus operation
4057 * (FCAH, MTA and VFTA).
4058 */
4059 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4060 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4061 CSR_WRITE_FLUSH(sc);
4062 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4063 CSR_WRITE_FLUSH(sc);
4064 } else {
4065 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4066 CSR_WRITE_FLUSH(sc);
4067 }
4068
4069 ETHER_NEXT_MULTI(step, enm);
4070 }
4071 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4072 ETHER_UNLOCK(ec);
4073
4074 goto setit;
4075
4076 allmulti:
4077 sc->sc_rctl |= RCTL_MPE;
4078
4079 setit:
4080 if (sc->sc_type >= WM_T_PCH2) {
4081 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4082 && (ifp->if_mtu > ETHERMTU))
4083 rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
4084 else
4085 rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
4086 if (rv != 0)
4087 device_printf(sc->sc_dev,
4088 "Failed to do workaround for jumbo frame.\n");
4089 }
4090
4091 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4092 }
4093
4094 /* Reset and init related */
4095
4096 static void
4097 wm_set_vlan(struct wm_softc *sc)
4098 {
4099
4100 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4101 device_xname(sc->sc_dev), __func__));
4102
4103 /* Deal with VLAN enables. */
4104 if (VLAN_ATTACHED(&sc->sc_ethercom))
4105 sc->sc_ctrl |= CTRL_VME;
4106 else
4107 sc->sc_ctrl &= ~CTRL_VME;
4108
4109 /* Write the control registers. */
4110 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4111 }
4112
4113 static void
4114 wm_set_pcie_completion_timeout(struct wm_softc *sc)
4115 {
4116 uint32_t gcr;
4117 pcireg_t ctrl2;
4118
4119 gcr = CSR_READ(sc, WMREG_GCR);
4120
4121 /* Only take action if timeout value is defaulted to 0 */
4122 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
4123 goto out;
4124
4125 if ((gcr & GCR_CAP_VER2) == 0) {
4126 gcr |= GCR_CMPL_TMOUT_10MS;
4127 goto out;
4128 }
4129
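	/*
	 * For devices reporting PCIe capability version 2, set the
	 * completion timeout range in the Device Control 2 register
	 * instead of in GCR.
	 */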
4130 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4131 sc->sc_pcixe_capoff + PCIE_DCSR2);
4132 ctrl2 |= WM_PCIE_DCSR2_16MS;
4133 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4134 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
4135
4136 out:
4137 /* Disable completion timeout resend */
4138 gcr &= ~GCR_CMPL_TMOUT_RESEND;
4139
4140 CSR_WRITE(sc, WMREG_GCR, gcr);
4141 }
4142
4143 void
4144 wm_get_auto_rd_done(struct wm_softc *sc)
4145 {
4146 int i;
4147
4148 	/* Wait for eeprom to reload */
4149 switch (sc->sc_type) {
4150 case WM_T_82571:
4151 case WM_T_82572:
4152 case WM_T_82573:
4153 case WM_T_82574:
4154 case WM_T_82583:
4155 case WM_T_82575:
4156 case WM_T_82576:
4157 case WM_T_82580:
4158 case WM_T_I350:
4159 case WM_T_I354:
4160 case WM_T_I210:
4161 case WM_T_I211:
4162 case WM_T_80003:
4163 case WM_T_ICH8:
4164 case WM_T_ICH9:
4165 for (i = 0; i < 10; i++) {
4166 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4167 break;
4168 delay(1000);
4169 }
4170 if (i == 10) {
4171 log(LOG_ERR, "%s: auto read from eeprom failed to "
4172 "complete\n", device_xname(sc->sc_dev));
4173 }
4174 break;
4175 default:
4176 break;
4177 }
4178 }
4179
4180 void
4181 wm_lan_init_done(struct wm_softc *sc)
4182 {
4183 uint32_t reg = 0;
4184 int i;
4185
4186 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4187 device_xname(sc->sc_dev), __func__));
4188
4189 /* Wait for eeprom to reload */
4190 switch (sc->sc_type) {
4191 case WM_T_ICH10:
4192 case WM_T_PCH:
4193 case WM_T_PCH2:
4194 case WM_T_PCH_LPT:
4195 case WM_T_PCH_SPT:
4196 case WM_T_PCH_CNP:
4197 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4198 reg = CSR_READ(sc, WMREG_STATUS);
4199 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4200 break;
4201 delay(100);
4202 }
4203 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4204 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4205 "complete\n", device_xname(sc->sc_dev), __func__);
4206 }
4207 break;
4208 default:
4209 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4210 __func__);
4211 break;
4212 }
4213
4214 reg &= ~STATUS_LAN_INIT_DONE;
4215 CSR_WRITE(sc, WMREG_STATUS, reg);
4216 }
4217
4218 void
4219 wm_get_cfg_done(struct wm_softc *sc)
4220 {
4221 int mask;
4222 uint32_t reg;
4223 int i;
4224
4225 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4226 device_xname(sc->sc_dev), __func__));
4227
4228 /* Wait for eeprom to reload */
4229 switch (sc->sc_type) {
4230 case WM_T_82542_2_0:
4231 case WM_T_82542_2_1:
4232 /* null */
4233 break;
4234 case WM_T_82543:
4235 case WM_T_82544:
4236 case WM_T_82540:
4237 case WM_T_82545:
4238 case WM_T_82545_3:
4239 case WM_T_82546:
4240 case WM_T_82546_3:
4241 case WM_T_82541:
4242 case WM_T_82541_2:
4243 case WM_T_82547:
4244 case WM_T_82547_2:
4245 case WM_T_82573:
4246 case WM_T_82574:
4247 case WM_T_82583:
4248 /* generic */
4249 delay(10*1000);
4250 break;
4251 case WM_T_80003:
4252 case WM_T_82571:
4253 case WM_T_82572:
4254 case WM_T_82575:
4255 case WM_T_82576:
4256 case WM_T_82580:
4257 case WM_T_I350:
4258 case WM_T_I354:
4259 case WM_T_I210:
4260 case WM_T_I211:
4261 if (sc->sc_type == WM_T_82571) {
4262 			/* Only on 82571 do all functions share port 0's bit */
4263 mask = EEMNGCTL_CFGDONE_0;
4264 } else
4265 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4266 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4267 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4268 break;
4269 delay(1000);
4270 }
4271 if (i >= WM_PHY_CFG_TIMEOUT)
4272 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
4273 device_xname(sc->sc_dev), __func__));
4274 break;
4275 case WM_T_ICH8:
4276 case WM_T_ICH9:
4277 case WM_T_ICH10:
4278 case WM_T_PCH:
4279 case WM_T_PCH2:
4280 case WM_T_PCH_LPT:
4281 case WM_T_PCH_SPT:
4282 case WM_T_PCH_CNP:
4283 delay(10*1000);
4284 if (sc->sc_type >= WM_T_ICH10)
4285 wm_lan_init_done(sc);
4286 else
4287 wm_get_auto_rd_done(sc);
4288
4289 /* Clear PHY Reset Asserted bit */
4290 reg = CSR_READ(sc, WMREG_STATUS);
4291 if ((reg & STATUS_PHYRA) != 0)
4292 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4293 break;
4294 default:
4295 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4296 __func__);
4297 break;
4298 }
4299 }
4300
4301 int
4302 wm_phy_post_reset(struct wm_softc *sc)
4303 {
4304 device_t dev = sc->sc_dev;
4305 uint16_t reg;
4306 int rv = 0;
4307
4308 /* This function is only for ICH8 and newer. */
4309 if (sc->sc_type < WM_T_ICH8)
4310 return 0;
4311
4312 if (wm_phy_resetisblocked(sc)) {
4313 /* XXX */
4314 device_printf(dev, "PHY is blocked\n");
4315 return -1;
4316 }
4317
4318 /* Allow time for h/w to get to quiescent state after reset */
4319 delay(10*1000);
4320
4321 /* Perform any necessary post-reset workarounds */
4322 if (sc->sc_type == WM_T_PCH)
4323 rv = wm_hv_phy_workarounds_ich8lan(sc);
4324 else if (sc->sc_type == WM_T_PCH2)
4325 rv = wm_lv_phy_workarounds_ich8lan(sc);
4326 if (rv != 0)
4327 return rv;
4328
4329 /* Clear the host wakeup bit after lcd reset */
4330 if (sc->sc_type >= WM_T_PCH) {
4331 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4332 reg &= ~BM_WUC_HOST_WU_BIT;
4333 wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4334 }
4335
4336 /* Configure the LCD with the extended configuration region in NVM */
4337 if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4338 return rv;
4339
4340 /* Configure the LCD with the OEM bits in NVM */
4341 rv = wm_oem_bits_config_ich8lan(sc, true);
4342
4343 if (sc->sc_type == WM_T_PCH2) {
4344 /* Ungate automatic PHY configuration on non-managed 82579 */
4345 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4346 delay(10 * 1000);
4347 wm_gate_hw_phy_config_ich8lan(sc, false);
4348 }
4349 /* Set EEE LPI Update Timer to 200usec */
4350 rv = sc->phy.acquire(sc);
4351 if (rv)
4352 return rv;
4353 rv = wm_write_emi_reg_locked(dev,
4354 I82579_LPI_UPDATE_TIMER, 0x1387);
4355 sc->phy.release(sc);
4356 }
4357
4358 return rv;
4359 }
4360
4361 /* Only for PCH and newer */
4362 static int
4363 wm_write_smbus_addr(struct wm_softc *sc)
4364 {
4365 uint32_t strap, freq;
4366 uint16_t phy_data;
4367 int rv;
4368
4369 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4370 device_xname(sc->sc_dev), __func__));
4371 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4372
4373 strap = CSR_READ(sc, WMREG_STRAP);
4374 freq = __SHIFTOUT(strap, STRAP_FREQ);
4375
4376 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4377 if (rv != 0)
4378 return -1;
4379
4380 phy_data &= ~HV_SMB_ADDR_ADDR;
4381 phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4382 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4383
4384 if (sc->sc_phytype == WMPHY_I217) {
4385 /* Restore SMBus frequency */
4386 		if (freq--) {
4387 phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4388 | HV_SMB_ADDR_FREQ_HIGH);
4389 phy_data |= __SHIFTIN((freq & 0x01) != 0,
4390 HV_SMB_ADDR_FREQ_LOW);
4391 phy_data |= __SHIFTIN((freq & 0x02) != 0,
4392 HV_SMB_ADDR_FREQ_HIGH);
4393 } else
4394 DPRINTF(sc, WM_DEBUG_INIT,
4395 ("%s: %s Unsupported SMB frequency in PHY\n",
4396 device_xname(sc->sc_dev), __func__));
4397 }
4398
4399 return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4400 phy_data);
4401 }
4402
4403 static int
4404 wm_init_lcd_from_nvm(struct wm_softc *sc)
4405 {
4406 uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4407 uint16_t phy_page = 0;
4408 int rv = 0;
4409
4410 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4411 device_xname(sc->sc_dev), __func__));
4412
4413 switch (sc->sc_type) {
4414 case WM_T_ICH8:
4415 if ((sc->sc_phytype == WMPHY_UNKNOWN)
4416 || (sc->sc_phytype != WMPHY_IGP_3))
4417 return 0;
4418
4419 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4420 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4421 sw_cfg_mask = FEXTNVM_SW_CONFIG;
4422 break;
4423 }
4424 /* FALLTHROUGH */
4425 case WM_T_PCH:
4426 case WM_T_PCH2:
4427 case WM_T_PCH_LPT:
4428 case WM_T_PCH_SPT:
4429 case WM_T_PCH_CNP:
4430 sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4431 break;
4432 default:
4433 return 0;
4434 }
4435
4436 if ((rv = sc->phy.acquire(sc)) != 0)
4437 return rv;
4438
4439 reg = CSR_READ(sc, WMREG_FEXTNVM);
4440 if ((reg & sw_cfg_mask) == 0)
4441 goto release;
4442
4443 /*
4444 * Make sure HW does not configure LCD from PHY extended configuration
4445 * before SW configuration
4446 */
4447 extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4448 if ((sc->sc_type < WM_T_PCH2)
4449 && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4450 goto release;
4451
4452 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4453 device_xname(sc->sc_dev), __func__));
4454 /* word_addr is in DWORD */
4455 word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4456
4457 reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4458 cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4459 if (cnf_size == 0)
4460 goto release;
4461
4462 if (((sc->sc_type == WM_T_PCH)
4463 && ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4464 || (sc->sc_type > WM_T_PCH)) {
4465 /*
4466 * HW configures the SMBus address and LEDs when the OEM and
4467 * LCD Write Enable bits are set in the NVM. When both NVM bits
4468 * are cleared, SW will configure them instead.
4469 */
4470 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
4471 device_xname(sc->sc_dev), __func__));
4472 if ((rv = wm_write_smbus_addr(sc)) != 0)
4473 goto release;
4474
4475 reg = CSR_READ(sc, WMREG_LEDCTL);
4476 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4477 (uint16_t)reg);
4478 if (rv != 0)
4479 goto release;
4480 }
4481
4482 /* Configure LCD from extended configuration region. */
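	/* Each table entry in NVM is a (data word, register address) pair. */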
4483 for (i = 0; i < cnf_size; i++) {
4484 uint16_t reg_data, reg_addr;
4485
4486 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
4487 goto release;
4488
4489 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
4490 goto release;
4491
4492 if (reg_addr == IGPHY_PAGE_SELECT)
4493 phy_page = reg_data;
4494
4495 reg_addr &= IGPHY_MAXREGADDR;
4496 reg_addr |= phy_page;
4497
4498 KASSERT(sc->phy.writereg_locked != NULL);
4499 rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4500 reg_data);
4501 }
4502
4503 release:
4504 sc->phy.release(sc);
4505 return rv;
4506 }
4507
4508 /*
4509 * wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4510 * @sc: pointer to the HW structure
4511 * @d0_state: boolean if entering d0 or d3 device state
4512 *
4513 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
4514 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
4515 * in NVM determines whether HW should configure LPLU and Gbe Disable.
4516 */
4517 int
4518 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
4519 {
4520 uint32_t mac_reg;
4521 uint16_t oem_reg;
4522 int rv;
4523
4524 if (sc->sc_type < WM_T_PCH)
4525 return 0;
4526
4527 rv = sc->phy.acquire(sc);
4528 if (rv != 0)
4529 return rv;
4530
4531 if (sc->sc_type == WM_T_PCH) {
4532 mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4533 if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
4534 goto release;
4535 }
4536
4537 mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
4538 if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
4539 goto release;
4540
4541 mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
4542
4543 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
4544 if (rv != 0)
4545 goto release;
4546 oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
4547
4548 if (d0_state) {
4549 if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
4550 oem_reg |= HV_OEM_BITS_A1KDIS;
4551 if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
4552 oem_reg |= HV_OEM_BITS_LPLU;
4553 } else {
4554 if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
4555 != 0)
4556 oem_reg |= HV_OEM_BITS_A1KDIS;
4557 if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
4558 != 0)
4559 oem_reg |= HV_OEM_BITS_LPLU;
4560 }
4561
4562 /* Set Restart auto-neg to activate the bits */
4563 if ((d0_state || (sc->sc_type != WM_T_PCH))
4564 && (wm_phy_resetisblocked(sc) == false))
4565 oem_reg |= HV_OEM_BITS_ANEGNOW;
4566
4567 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
4568
4569 release:
4570 sc->phy.release(sc);
4571
4572 return rv;
4573 }
4574
4575 /* Init hardware bits */
4576 void
4577 wm_initialize_hardware_bits(struct wm_softc *sc)
4578 {
4579 uint32_t tarc0, tarc1, reg;
4580
4581 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4582 device_xname(sc->sc_dev), __func__));
4583
4584 /* For 82571 variant, 80003 and ICHs */
4585 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4586 || (sc->sc_type >= WM_T_80003)) {
4587
4588 /* Transmit Descriptor Control 0 */
4589 reg = CSR_READ(sc, WMREG_TXDCTL(0));
4590 reg |= TXDCTL_COUNT_DESC;
4591 CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
4592
4593 /* Transmit Descriptor Control 1 */
4594 reg = CSR_READ(sc, WMREG_TXDCTL(1));
4595 reg |= TXDCTL_COUNT_DESC;
4596 CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
4597
4598 /* TARC0 */
4599 tarc0 = CSR_READ(sc, WMREG_TARC0);
4600 switch (sc->sc_type) {
4601 case WM_T_82571:
4602 case WM_T_82572:
4603 case WM_T_82573:
4604 case WM_T_82574:
4605 case WM_T_82583:
4606 case WM_T_80003:
4607 /* Clear bits 30..27 */
4608 tarc0 &= ~__BITS(30, 27);
4609 break;
4610 default:
4611 break;
4612 }
4613
4614 switch (sc->sc_type) {
4615 case WM_T_82571:
4616 case WM_T_82572:
4617 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
4618
4619 tarc1 = CSR_READ(sc, WMREG_TARC1);
4620 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
4621 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
4622 /* 8257[12] Errata No.7 */
4623 tarc1 |= __BIT(22); /* TARC1 bits 22 */
4624
4625 /* TARC1 bit 28 */
4626 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4627 tarc1 &= ~__BIT(28);
4628 else
4629 tarc1 |= __BIT(28);
4630 CSR_WRITE(sc, WMREG_TARC1, tarc1);
4631
4632 /*
4633 * 8257[12] Errata No.13
4634 			 * Disable Dynamic Clock Gating.
4635 */
4636 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4637 reg &= ~CTRL_EXT_DMA_DYN_CLK;
4638 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4639 break;
4640 case WM_T_82573:
4641 case WM_T_82574:
4642 case WM_T_82583:
4643 if ((sc->sc_type == WM_T_82574)
4644 || (sc->sc_type == WM_T_82583))
4645 tarc0 |= __BIT(26); /* TARC0 bit 26 */
4646
4647 /* Extended Device Control */
4648 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4649 reg &= ~__BIT(23); /* Clear bit 23 */
4650 reg |= __BIT(22); /* Set bit 22 */
4651 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4652
4653 /* Device Control */
4654 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
4655 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4656
4657 /* PCIe Control Register */
4658 /*
4659 * 82573 Errata (unknown).
4660 *
4661 * 82574 Errata 25 and 82583 Errata 12
4662 * "Dropped Rx Packets":
4663 			 * NVM image version 2.1.4 and newer do not have this bug.
4664 */
4665 reg = CSR_READ(sc, WMREG_GCR);
4666 reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
4667 CSR_WRITE(sc, WMREG_GCR, reg);
4668
4669 if ((sc->sc_type == WM_T_82574)
4670 || (sc->sc_type == WM_T_82583)) {
4671 /*
4672 * Document says this bit must be set for
4673 * proper operation.
4674 */
4675 reg = CSR_READ(sc, WMREG_GCR);
4676 reg |= __BIT(22);
4677 CSR_WRITE(sc, WMREG_GCR, reg);
4678
4679 				/*
4680 				 * Apply a workaround for the hardware erratum
4681 				 * documented in the errata docs. It fixes an
4682 				 * issue where error-prone or unreliable PCIe
4683 				 * completions occur, particularly with ASPM
4684 				 * enabled. Without the fix, the issue can
4685 				 * cause Tx timeouts.
4686 				 */
4687 reg = CSR_READ(sc, WMREG_GCR2);
4688 reg |= __BIT(0);
4689 CSR_WRITE(sc, WMREG_GCR2, reg);
4690 }
4691 break;
4692 case WM_T_80003:
4693 /* TARC0 */
4694 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
4695 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
4696 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
4697
4698 /* TARC1 bit 28 */
4699 tarc1 = CSR_READ(sc, WMREG_TARC1);
4700 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4701 tarc1 &= ~__BIT(28);
4702 else
4703 tarc1 |= __BIT(28);
4704 CSR_WRITE(sc, WMREG_TARC1, tarc1);
4705 break;
4706 case WM_T_ICH8:
4707 case WM_T_ICH9:
4708 case WM_T_ICH10:
4709 case WM_T_PCH:
4710 case WM_T_PCH2:
4711 case WM_T_PCH_LPT:
4712 case WM_T_PCH_SPT:
4713 case WM_T_PCH_CNP:
4714 /* TARC0 */
4715 if (sc->sc_type == WM_T_ICH8) {
4716 /* Set TARC0 bits 29 and 28 */
4717 tarc0 |= __BITS(29, 28);
4718 } else if (sc->sc_type == WM_T_PCH_SPT) {
4719 tarc0 |= __BIT(29);
4720 /*
4721 * Drop bit 28. From Linux.
4722 * See I218/I219 spec update
4723 * "5. Buffer Overrun While the I219 is
4724 * Processing DMA Transactions"
4725 */
4726 tarc0 &= ~__BIT(28);
4727 }
4728 /* Set TARC0 bits 23,24,26,27 */
4729 tarc0 |= __BITS(27, 26) | __BITS(24, 23);
4730
4731 /* CTRL_EXT */
4732 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4733 reg |= __BIT(22); /* Set bit 22 */
4734 /*
4735 * Enable PHY low-power state when MAC is at D3
4736 * w/o WoL
4737 */
4738 if (sc->sc_type >= WM_T_PCH)
4739 reg |= CTRL_EXT_PHYPDEN;
4740 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4741
4742 /* TARC1 */
4743 tarc1 = CSR_READ(sc, WMREG_TARC1);
4744 /* bit 28 */
4745 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4746 tarc1 &= ~__BIT(28);
4747 else
4748 tarc1 |= __BIT(28);
4749 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
4750 CSR_WRITE(sc, WMREG_TARC1, tarc1);
4751
4752 /* Device Status */
4753 if (sc->sc_type == WM_T_ICH8) {
4754 reg = CSR_READ(sc, WMREG_STATUS);
4755 reg &= ~__BIT(31);
4756 CSR_WRITE(sc, WMREG_STATUS, reg);
4757
4758 }
4759
4760 /* IOSFPC */
4761 if (sc->sc_type == WM_T_PCH_SPT) {
4762 reg = CSR_READ(sc, WMREG_IOSFPC);
4763 reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
4764 CSR_WRITE(sc, WMREG_IOSFPC, reg);
4765 }
4766 /*
4767 			 * To work around a descriptor data corruption issue
4768 			 * seen with NFS v2 UDP traffic, just disable the NFS
4769 			 * filtering capability.
4770 */
4771 reg = CSR_READ(sc, WMREG_RFCTL);
4772 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
4773 CSR_WRITE(sc, WMREG_RFCTL, reg);
4774 break;
4775 default:
4776 break;
4777 }
4778 CSR_WRITE(sc, WMREG_TARC0, tarc0);
4779
4780 switch (sc->sc_type) {
4781 /*
4782 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
4783 * Avoid RSS Hash Value bug.
4784 */
4785 case WM_T_82571:
4786 case WM_T_82572:
4787 case WM_T_82573:
4788 case WM_T_80003:
4789 case WM_T_ICH8:
4790 reg = CSR_READ(sc, WMREG_RFCTL);
4791 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
4792 CSR_WRITE(sc, WMREG_RFCTL, reg);
4793 break;
4794 case WM_T_82574:
4795 			/* Use extended Rx descriptors. */
4796 reg = CSR_READ(sc, WMREG_RFCTL);
4797 reg |= WMREG_RFCTL_EXSTEN;
4798 CSR_WRITE(sc, WMREG_RFCTL, reg);
4799 break;
4800 default:
4801 break;
4802 }
4803 } else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
4804 /*
4805 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
4806 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
4807 * "Certain Malformed IPv6 Extension Headers are Not Processed
4808 * Correctly by the Device"
4809 *
4810 * I354(C2000) Errata AVR53:
4811 * "Malformed IPv6 Extension Headers May Result in LAN Device
4812 * Hang"
4813 */
4814 reg = CSR_READ(sc, WMREG_RFCTL);
4815 reg |= WMREG_RFCTL_IPV6EXDIS;
4816 CSR_WRITE(sc, WMREG_RFCTL, reg);
4817 }
4818 }
4819
4820 static uint32_t
4821 wm_rxpbs_adjust_82580(uint32_t val)
4822 {
4823 uint32_t rv = 0;
4824
4825 if (val < __arraycount(wm_82580_rxpbs_table))
4826 rv = wm_82580_rxpbs_table[val];
4827
4828 return rv;
4829 }
4830
4831 /*
4832 * wm_reset_phy:
4833 *
4834 * generic PHY reset function.
4835 * Same as e1000_phy_hw_reset_generic()
4836 */
4837 static int
4838 wm_reset_phy(struct wm_softc *sc)
4839 {
4840 uint32_t reg;
4841
4842 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4843 device_xname(sc->sc_dev), __func__));
4844 if (wm_phy_resetisblocked(sc))
4845 return -1;
4846
4847 sc->phy.acquire(sc);
4848
4849 reg = CSR_READ(sc, WMREG_CTRL);
4850 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
4851 CSR_WRITE_FLUSH(sc);
4852
4853 delay(sc->phy.reset_delay_us);
4854
4855 CSR_WRITE(sc, WMREG_CTRL, reg);
4856 CSR_WRITE_FLUSH(sc);
4857
4858 delay(150);
4859
4860 sc->phy.release(sc);
4861
4862 wm_get_cfg_done(sc);
4863 wm_phy_post_reset(sc);
4864
4865 return 0;
4866 }
4867
4868 /*
4869 * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
4870 *
4871 * In i219, the descriptor rings must be emptied before resetting the HW
4872 * or before changing the device state to D3 during runtime (runtime PM).
4873 *
4874 * Failure to do this will cause the HW to enter a unit hang state which can
4875 * only be released by PCI reset on the device.
4876 *
4877 * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
4878 */
4879 static void
4880 wm_flush_desc_rings(struct wm_softc *sc)
4881 {
4882 pcireg_t preg;
4883 uint32_t reg;
4884 struct wm_txqueue *txq;
4885 wiseman_txdesc_t *txd;
4886 int nexttx;
4887 uint32_t rctl;
4888
4889 /* First, disable MULR fix in FEXTNVM11 */
4890 reg = CSR_READ(sc, WMREG_FEXTNVM11);
4891 reg |= FEXTNVM11_DIS_MULRFIX;
4892 CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
4893
4894 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4895 reg = CSR_READ(sc, WMREG_TDLEN(0));
4896 if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
4897 return;
4898
4899 /*
4900 * Remove all descriptors from the tx_ring.
4901 *
4902 	 * We want to clear all pending descriptors from the TX ring. Zeroing
4903 	 * happens when the HW reads the descriptors. We assign the ring itself
4904 	 * as the data buffer of the next descriptor. We don't care about the
4905 	 * data; we are about to reset the HW anyway.
4906 */
4907 #ifdef WM_DEBUG
4908 device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
4909 #endif
4910 reg = CSR_READ(sc, WMREG_TCTL);
4911 CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
4912
4913 txq = &sc->sc_queue[0].wmq_txq;
4914 nexttx = txq->txq_next;
4915 txd = &txq->txq_descs[nexttx];
4916 wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
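	/* One 512-byte dummy frame; WTX_CMD_IFCS makes the HW append the FCS. */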
4917 txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
4918 txd->wtx_fields.wtxu_status = 0;
4919 txd->wtx_fields.wtxu_options = 0;
4920 txd->wtx_fields.wtxu_vlan = 0;
4921
4922 bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4923 BUS_SPACE_BARRIER_WRITE);
4924
4925 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
4926 CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
4927 bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4928 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
4929 delay(250);
4930
4931 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4932 if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
4933 return;
4934
4935 /*
4936 * Mark all descriptors in the RX ring as consumed and disable the
4937 * rx ring.
4938 */
4939 #ifdef WM_DEBUG
4940 device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
4941 #endif
4942 rctl = CSR_READ(sc, WMREG_RCTL);
4943 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4944 CSR_WRITE_FLUSH(sc);
4945 delay(150);
4946
4947 reg = CSR_READ(sc, WMREG_RXDCTL(0));
4948 /* Zero the lower 14 bits (prefetch and host thresholds) */
4949 reg &= 0xffffc000;
4950 /*
4951 * Update thresholds: prefetch threshold to 31, host threshold
4952 * to 1 and make sure the granularity is "descriptors" and not
4953 * "cache lines"
4954 */
4955 reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
4956 CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
4957
4958 /* Momentarily enable the RX ring for the changes to take effect */
4959 CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
4960 CSR_WRITE_FLUSH(sc);
4961 delay(150);
4962 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4963 }
4964
4965 /*
4966 * wm_reset:
4967 *
4968 * Reset the i82542 chip.
4969 */
4970 static void
4971 wm_reset(struct wm_softc *sc)
4972 {
4973 int phy_reset = 0;
4974 int i, error = 0;
4975 uint32_t reg;
4976 uint16_t kmreg;
4977 int rv;
4978
4979 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4980 device_xname(sc->sc_dev), __func__));
4981 KASSERT(sc->sc_type != 0);
4982
4983 /*
4984 * Allocate on-chip memory according to the MTU size.
4985 * The Packet Buffer Allocation register must be written
4986 * before the chip is reset.
4987 */
4988 switch (sc->sc_type) {
4989 case WM_T_82547:
4990 case WM_T_82547_2:
4991 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4992 PBA_22K : PBA_30K;
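		/*
		 * The remainder of the 40K packet buffer (PBA_40K - sc_pba)
		 * serves as the Tx FIFO, configured below.
		 */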
4993 for (i = 0; i < sc->sc_nqueues; i++) {
4994 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4995 txq->txq_fifo_head = 0;
4996 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4997 txq->txq_fifo_size =
4998 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4999 txq->txq_fifo_stall = 0;
5000 }
5001 break;
5002 case WM_T_82571:
5003 case WM_T_82572:
5004 	case WM_T_82575: /* XXX need special handling for jumbo frames */
5005 case WM_T_80003:
5006 sc->sc_pba = PBA_32K;
5007 break;
5008 case WM_T_82573:
5009 sc->sc_pba = PBA_12K;
5010 break;
5011 case WM_T_82574:
5012 case WM_T_82583:
5013 sc->sc_pba = PBA_20K;
5014 break;
5015 case WM_T_82576:
5016 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
5017 sc->sc_pba &= RXPBS_SIZE_MASK_82576;
5018 break;
5019 case WM_T_82580:
5020 case WM_T_I350:
5021 case WM_T_I354:
5022 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
5023 break;
5024 case WM_T_I210:
5025 case WM_T_I211:
5026 sc->sc_pba = PBA_34K;
5027 break;
5028 case WM_T_ICH8:
5029 /* Workaround for a bit corruption issue in FIFO memory */
5030 sc->sc_pba = PBA_8K;
5031 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
5032 break;
5033 case WM_T_ICH9:
5034 case WM_T_ICH10:
5035 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
5036 PBA_14K : PBA_10K;
5037 break;
5038 case WM_T_PCH:
5039 case WM_T_PCH2: /* XXX 14K? */
5040 case WM_T_PCH_LPT:
5041 case WM_T_PCH_SPT:
5042 case WM_T_PCH_CNP:
5043 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
5044 PBA_12K : PBA_26K;
5045 break;
5046 default:
5047 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5048 PBA_40K : PBA_48K;
5049 break;
5050 }
5051 /*
5052 * Only old or non-multiqueue devices have the PBA register
5053 * XXX Need special handling for 82575.
5054 */
5055 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5056 || (sc->sc_type == WM_T_82575))
5057 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
5058
5059 /* Prevent the PCI-E bus from sticking */
5060 if (sc->sc_flags & WM_F_PCIE) {
5061 int timeout = 800;
5062
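		/*
		 * GIO master disable: request that the device stop issuing
		 * new PCIe master transactions, then poll STATUS until the
		 * in-flight ones have drained.
		 */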
5063 sc->sc_ctrl |= CTRL_GIO_M_DIS;
5064 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5065
5066 while (timeout--) {
5067 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
5068 == 0)
5069 break;
5070 delay(100);
5071 }
5072 if (timeout == 0)
5073 device_printf(sc->sc_dev,
5074 "failed to disable bus mastering\n");
5075 }
5076
5077 /* Set the completion timeout for interface */
5078 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5079 || (sc->sc_type == WM_T_82580)
5080 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5081 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
5082 wm_set_pcie_completion_timeout(sc);
5083
5084 /* Clear interrupt */
5085 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5086 if (wm_is_using_msix(sc)) {
5087 if (sc->sc_type != WM_T_82574) {
5088 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5089 CSR_WRITE(sc, WMREG_EIAC, 0);
5090 } else
5091 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5092 }
5093
5094 /* Stop the transmit and receive processes. */
5095 CSR_WRITE(sc, WMREG_RCTL, 0);
5096 sc->sc_rctl &= ~RCTL_EN;
5097 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
5098 CSR_WRITE_FLUSH(sc);
5099
5100 /* XXX set_tbi_sbp_82543() */
5101
5102 delay(10*1000);
5103
5104 /* Must acquire the MDIO ownership before MAC reset */
5105 switch (sc->sc_type) {
5106 case WM_T_82573:
5107 case WM_T_82574:
5108 case WM_T_82583:
5109 error = wm_get_hw_semaphore_82573(sc);
5110 break;
5111 default:
5112 break;
5113 }
5114
5115 /*
5116 * 82541 Errata 29? & 82547 Errata 28?
5117 * See also the description about PHY_RST bit in CTRL register
5118 * in 8254x_GBe_SDM.pdf.
5119 */
5120 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
5121 CSR_WRITE(sc, WMREG_CTRL,
5122 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
5123 CSR_WRITE_FLUSH(sc);
5124 delay(5000);
5125 }
5126
5127 switch (sc->sc_type) {
5128 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
5129 case WM_T_82541:
5130 case WM_T_82541_2:
5131 case WM_T_82547:
5132 case WM_T_82547_2:
5133 /*
5134 * On some chipsets, a reset through a memory-mapped write
5135 * cycle can cause the chip to reset before completing the
5136 	 * write cycle. This causes major headaches that can be avoided
5137 * by issuing the reset via indirect register writes through
5138 * I/O space.
5139 *
5140 * So, if we successfully mapped the I/O BAR at attach time,
5141 * use that. Otherwise, try our luck with a memory-mapped
5142 * reset.
5143 */
5144 if (sc->sc_flags & WM_F_IOH_VALID)
5145 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
5146 else
5147 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
5148 break;
5149 case WM_T_82545_3:
5150 case WM_T_82546_3:
5151 /* Use the shadow control register on these chips. */
5152 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
5153 break;
5154 case WM_T_80003:
5155 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5156 sc->phy.acquire(sc);
5157 CSR_WRITE(sc, WMREG_CTRL, reg);
5158 sc->phy.release(sc);
5159 break;
5160 case WM_T_ICH8:
5161 case WM_T_ICH9:
5162 case WM_T_ICH10:
5163 case WM_T_PCH:
5164 case WM_T_PCH2:
5165 case WM_T_PCH_LPT:
5166 case WM_T_PCH_SPT:
5167 case WM_T_PCH_CNP:
5168 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5169 if (wm_phy_resetisblocked(sc) == false) {
5170 /*
5171 * Gate automatic PHY configuration by hardware on
5172 * non-managed 82579
5173 */
5174 if ((sc->sc_type == WM_T_PCH2)
5175 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5176 == 0))
5177 wm_gate_hw_phy_config_ich8lan(sc, true);
5178
5179 reg |= CTRL_PHY_RESET;
5180 phy_reset = 1;
5181 } else
5182 device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5183 sc->phy.acquire(sc);
5184 CSR_WRITE(sc, WMREG_CTRL, reg);
5185 		/* Don't insert a completion barrier during reset */
5186 delay(20*1000);
5187 mutex_exit(sc->sc_ich_phymtx);
5188 break;
5189 case WM_T_82580:
5190 case WM_T_I350:
5191 case WM_T_I354:
5192 case WM_T_I210:
5193 case WM_T_I211:
5194 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5195 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5196 CSR_WRITE_FLUSH(sc);
5197 delay(5000);
5198 break;
5199 case WM_T_82542_2_0:
5200 case WM_T_82542_2_1:
5201 case WM_T_82543:
5202 case WM_T_82540:
5203 case WM_T_82545:
5204 case WM_T_82546:
5205 case WM_T_82571:
5206 case WM_T_82572:
5207 case WM_T_82573:
5208 case WM_T_82574:
5209 case WM_T_82575:
5210 case WM_T_82576:
5211 case WM_T_82583:
5212 default:
5213 /* Everything else can safely use the documented method. */
5214 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5215 break;
5216 }
5217
5218 /* Must release the MDIO ownership after MAC reset */
5219 switch (sc->sc_type) {
5220 case WM_T_82573:
5221 case WM_T_82574:
5222 case WM_T_82583:
5223 if (error == 0)
5224 wm_put_hw_semaphore_82573(sc);
5225 break;
5226 default:
5227 break;
5228 }
5229
5230 /* Set Phy Config Counter to 50msec */
5231 if (sc->sc_type == WM_T_PCH2) {
5232 reg = CSR_READ(sc, WMREG_FEXTNVM3);
5233 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5234 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5235 CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5236 }
5237
5238 if (phy_reset != 0)
5239 wm_get_cfg_done(sc);
5240
5241 /* Reload EEPROM */
5242 switch (sc->sc_type) {
5243 case WM_T_82542_2_0:
5244 case WM_T_82542_2_1:
5245 case WM_T_82543:
5246 case WM_T_82544:
5247 delay(10);
5248 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5249 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5250 CSR_WRITE_FLUSH(sc);
5251 delay(2000);
5252 break;
5253 case WM_T_82540:
5254 case WM_T_82545:
5255 case WM_T_82545_3:
5256 case WM_T_82546:
5257 case WM_T_82546_3:
5258 delay(5*1000);
5259 /* XXX Disable HW ARPs on ASF enabled adapters */
5260 break;
5261 case WM_T_82541:
5262 case WM_T_82541_2:
5263 case WM_T_82547:
5264 case WM_T_82547_2:
5265 delay(20000);
5266 /* XXX Disable HW ARPs on ASF enabled adapters */
5267 break;
5268 case WM_T_82571:
5269 case WM_T_82572:
5270 case WM_T_82573:
5271 case WM_T_82574:
5272 case WM_T_82583:
5273 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5274 delay(10);
5275 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5276 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5277 CSR_WRITE_FLUSH(sc);
5278 }
5279 /* check EECD_EE_AUTORD */
5280 wm_get_auto_rd_done(sc);
5281 /*
* PHY configuration from the NVM starts only after EECD_AUTO_RD
* is set.
5284 */
5285 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5286 || (sc->sc_type == WM_T_82583))
5287 delay(25*1000);
5288 break;
5289 case WM_T_82575:
5290 case WM_T_82576:
5291 case WM_T_82580:
5292 case WM_T_I350:
5293 case WM_T_I354:
5294 case WM_T_I210:
5295 case WM_T_I211:
5296 case WM_T_80003:
5297 /* check EECD_EE_AUTORD */
5298 wm_get_auto_rd_done(sc);
5299 break;
5300 case WM_T_ICH8:
5301 case WM_T_ICH9:
5302 case WM_T_ICH10:
5303 case WM_T_PCH:
5304 case WM_T_PCH2:
5305 case WM_T_PCH_LPT:
5306 case WM_T_PCH_SPT:
5307 case WM_T_PCH_CNP:
5308 break;
5309 default:
5310 panic("%s: unknown type\n", __func__);
5311 }
5312
5313 /* Check whether EEPROM is present or not */
5314 switch (sc->sc_type) {
5315 case WM_T_82575:
5316 case WM_T_82576:
5317 case WM_T_82580:
5318 case WM_T_I350:
5319 case WM_T_I354:
5320 case WM_T_ICH8:
5321 case WM_T_ICH9:
5322 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5323 /* Not found */
5324 sc->sc_flags |= WM_F_EEPROM_INVALID;
5325 if (sc->sc_type == WM_T_82575)
5326 wm_reset_init_script_82575(sc);
5327 }
5328 break;
5329 default:
5330 break;
5331 }
5332
5333 if (phy_reset != 0)
5334 wm_phy_post_reset(sc);
5335
5336 if ((sc->sc_type == WM_T_82580)
5337 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5338 /* Clear global device reset status bit */
5339 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5340 }
5341
5342 /* Clear any pending interrupt events. */
5343 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5344 reg = CSR_READ(sc, WMREG_ICR);
5345 if (wm_is_using_msix(sc)) {
5346 if (sc->sc_type != WM_T_82574) {
5347 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5348 CSR_WRITE(sc, WMREG_EIAC, 0);
5349 } else
5350 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5351 }
5352
5353 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5354 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5355 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5356 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
5357 reg = CSR_READ(sc, WMREG_KABGTXD);
5358 reg |= KABGTXD_BGSQLBIAS;
5359 CSR_WRITE(sc, WMREG_KABGTXD, reg);
5360 }
5361
5362 /* Reload sc_ctrl */
5363 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5364
5365 wm_set_eee(sc);
5366
5367 /*
5368 * For PCH, this write will make sure that any noise will be detected
5369 * as a CRC error and be dropped rather than show up as a bad packet
* to the DMA engine.
5371 */
5372 if (sc->sc_type == WM_T_PCH)
5373 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5374
5375 if (sc->sc_type >= WM_T_82544)
5376 CSR_WRITE(sc, WMREG_WUC, 0);
5377
5378 if (sc->sc_type < WM_T_82575)
5379 wm_disable_aspm(sc); /* Workaround for some chips */
5380
5381 wm_reset_mdicnfg_82580(sc);
5382
5383 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5384 wm_pll_workaround_i210(sc);
5385
5386 if (sc->sc_type == WM_T_80003) {
5387 /* Default to TRUE to enable the MDIC W/A */
5388 sc->sc_flags |= WM_F_80003_MDIC_WA;
5389
5390 rv = wm_kmrn_readreg(sc,
5391 KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5392 if (rv == 0) {
5393 if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5394 == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5395 sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5396 else
5397 sc->sc_flags |= WM_F_80003_MDIC_WA;
5398 }
5399 }
5400 }
5401
5402 /*
5403 * wm_add_rxbuf:
5404 *
* Add a receive buffer to the indicated descriptor.
5406 */
5407 static int
5408 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5409 {
5410 struct wm_softc *sc = rxq->rxq_sc;
5411 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5412 struct mbuf *m;
5413 int error;
5414
5415 KASSERT(mutex_owned(rxq->rxq_lock));
5416
5417 MGETHDR(m, M_DONTWAIT, MT_DATA);
5418 if (m == NULL)
5419 return ENOBUFS;
5420
5421 MCLGET(m, M_DONTWAIT);
5422 if ((m->m_flags & M_EXT) == 0) {
5423 m_freem(m);
5424 return ENOBUFS;
5425 }
5426
5427 if (rxs->rxs_mbuf != NULL)
5428 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5429
5430 rxs->rxs_mbuf = m;
5431
5432 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5433 /*
5434 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5435 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5436 */
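/*
* For example, on a strict-alignment platform wm_init_rxdesc() may
* advance m_data by sc_align_tweak (0 or 2) bytes so that the IP
* header after the 14-byte Ethernet header lands on a 4-byte
* boundary; mapping ext_buf from its start keeps the DMA map valid
* regardless of how m_data is later tweaked.
*/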
5437 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5438 m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5439 if (error) {
5440 /* XXX XXX XXX */
5441 aprint_error_dev(sc->sc_dev,
5442 "unable to load rx DMA map %d, error = %d\n", idx, error);
5443 panic("wm_add_rxbuf");
5444 }
5445
5446 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5447 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5448
5449 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5450 if ((sc->sc_rctl & RCTL_EN) != 0)
5451 wm_init_rxdesc(rxq, idx);
5452 } else
5453 wm_init_rxdesc(rxq, idx);
5454
5455 return 0;
5456 }
5457
5458 /*
5459 * wm_rxdrain:
5460 *
5461 * Drain the receive queue.
5462 */
5463 static void
5464 wm_rxdrain(struct wm_rxqueue *rxq)
5465 {
5466 struct wm_softc *sc = rxq->rxq_sc;
5467 struct wm_rxsoft *rxs;
5468 int i;
5469
5470 KASSERT(mutex_owned(rxq->rxq_lock));
5471
5472 for (i = 0; i < WM_NRXDESC; i++) {
5473 rxs = &rxq->rxq_soft[i];
5474 if (rxs->rxs_mbuf != NULL) {
5475 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5476 m_freem(rxs->rxs_mbuf);
5477 rxs->rxs_mbuf = NULL;
5478 }
5479 }
5480 }
5481
5482 /*
5483 * Setup registers for RSS.
5484 *
* XXX VMDq is not yet supported.
5486 */
5487 static void
5488 wm_init_rss(struct wm_softc *sc)
5489 {
5490 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5491 int i;
5492
5493 CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5494
5495 for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5496 unsigned int qid, reta_ent;
5497
5498 qid = i % sc->sc_nqueues;
5499 switch (sc->sc_type) {
5500 case WM_T_82574:
5501 reta_ent = __SHIFTIN(qid,
5502 RETA_ENT_QINDEX_MASK_82574);
5503 break;
5504 case WM_T_82575:
5505 reta_ent = __SHIFTIN(qid,
5506 RETA_ENT_QINDEX1_MASK_82575);
5507 break;
5508 default:
5509 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5510 break;
5511 }
5512
5513 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5514 reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5515 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5516 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5517 }
5518
5519 rss_getkey((uint8_t *)rss_key);
5520 for (i = 0; i < RSSRK_NUM_REGS; i++)
5521 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5522
5523 if (sc->sc_type == WM_T_82574)
5524 mrqc = MRQC_ENABLE_RSS_MQ_82574;
5525 else
5526 mrqc = MRQC_ENABLE_RSS_MQ;
5527
5528 /*
* MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
5530 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5531 */
5532 mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5533 mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5534 #if 0
5535 mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5536 mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5537 #endif
5538 mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5539
5540 CSR_WRITE(sc, WMREG_MRQC, mrqc);
5541 }
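/*
* A rough illustration of the redirection table filled in above:
* with sc_nqueues == 4, the RETA entries are written round-robin as
* queue 0, 1, 2, 3, 0, 1, ... so the low bits of each flow's RSS
* (Toeplitz) hash pick a queue with an even spread; only the entry
* layout (RETA_ENT_QINDEX*) differs between chip generations.
*/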
5542
5543 /*
* Adjust the TX and RX queue numbers which the system actually uses.
*
* The numbers are affected by the following parameters:
* - The number of hardware queues
5548 * - The number of MSI-X vectors (= "nvectors" argument)
5549 * - ncpu
5550 */
5551 static void
5552 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5553 {
5554 int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5555
5556 if (nvectors < 2) {
5557 sc->sc_nqueues = 1;
5558 return;
5559 }
5560
5561 switch (sc->sc_type) {
5562 case WM_T_82572:
5563 hw_ntxqueues = 2;
5564 hw_nrxqueues = 2;
5565 break;
5566 case WM_T_82574:
5567 hw_ntxqueues = 2;
5568 hw_nrxqueues = 2;
5569 break;
5570 case WM_T_82575:
5571 hw_ntxqueues = 4;
5572 hw_nrxqueues = 4;
5573 break;
5574 case WM_T_82576:
5575 hw_ntxqueues = 16;
5576 hw_nrxqueues = 16;
5577 break;
5578 case WM_T_82580:
5579 case WM_T_I350:
5580 case WM_T_I354:
5581 hw_ntxqueues = 8;
5582 hw_nrxqueues = 8;
5583 break;
5584 case WM_T_I210:
5585 hw_ntxqueues = 4;
5586 hw_nrxqueues = 4;
5587 break;
5588 case WM_T_I211:
5589 hw_ntxqueues = 2;
5590 hw_nrxqueues = 2;
5591 break;
5592 /*
* The Ethernet controllers below do not support MSI-X,
* so this driver doesn't let them use multiple queues.
5595 * - WM_T_80003
5596 * - WM_T_ICH8
5597 * - WM_T_ICH9
5598 * - WM_T_ICH10
5599 * - WM_T_PCH
5600 * - WM_T_PCH2
5601 * - WM_T_PCH_LPT
5602 */
5603 default:
5604 hw_ntxqueues = 1;
5605 hw_nrxqueues = 1;
5606 break;
5607 }
5608
5609 hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
5610
5611 /*
* Since more queues than MSI-X vectors cannot improve scaling,
* limit the number of queues actually used.
5614 */
5615 if (nvectors < hw_nqueues + 1)
5616 sc->sc_nqueues = nvectors - 1;
5617 else
5618 sc->sc_nqueues = hw_nqueues;
5619
5620 /*
* Since more queues than CPUs cannot improve scaling,
* limit the number of queues actually used.
5623 */
5624 if (ncpu < sc->sc_nqueues)
5625 sc->sc_nqueues = ncpu;
5626 }
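/*
* A worked example of the clamping above: an 82576 (16 hardware
* queues) probed with nvectors == 5 and ncpu == 8 ends up with
* sc_nqueues == min(16, 5 - 1) == 4, since one MSI-X vector is
* reserved for the link interrupt; with ncpu == 2 it would be
* clamped further to 2.
*/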
5627
5628 static inline bool
5629 wm_is_using_msix(struct wm_softc *sc)
5630 {
5631
5632 return (sc->sc_nintrs > 1);
5633 }
5634
5635 static inline bool
5636 wm_is_using_multiqueue(struct wm_softc *sc)
5637 {
5638
5639 return (sc->sc_nqueues > 1);
5640 }
5641
5642 static int
5643 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
5644 {
5645 struct wm_queue *wmq = &sc->sc_queue[qidx];
5646
5647 wmq->wmq_id = qidx;
5648 wmq->wmq_intr_idx = intr_idx;
5649 wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
5650 wm_handle_queue, wmq);
5651 if (wmq->wmq_si != NULL)
5652 return 0;
5653
5654 aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
5655 wmq->wmq_id);
5656 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
5657 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5658 return ENOMEM;
5659 }
5660
5661 /*
* Both single-vector MSI and INTx can use this function.
5663 */
5664 static int
5665 wm_setup_legacy(struct wm_softc *sc)
5666 {
5667 pci_chipset_tag_t pc = sc->sc_pc;
5668 const char *intrstr = NULL;
5669 char intrbuf[PCI_INTRSTR_LEN];
5670 int error;
5671
5672 error = wm_alloc_txrx_queues(sc);
5673 if (error) {
5674 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5675 error);
5676 return ENOMEM;
5677 }
5678 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
5679 sizeof(intrbuf));
5680 #ifdef WM_MPSAFE
5681 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
5682 #endif
5683 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
5684 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
5685 if (sc->sc_ihs[0] == NULL) {
5686 aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
5687 (pci_intr_type(pc, sc->sc_intrs[0])
5688 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
5689 return ENOMEM;
5690 }
5691
5692 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5693 sc->sc_nintrs = 1;
5694
5695 return wm_softint_establish_queue(sc, 0, 0);
5696 }
5697
5698 static int
5699 wm_setup_msix(struct wm_softc *sc)
5700 {
5701 void *vih;
5702 kcpuset_t *affinity;
5703 int qidx, error, intr_idx, txrx_established;
5704 pci_chipset_tag_t pc = sc->sc_pc;
5705 const char *intrstr = NULL;
5706 char intrbuf[PCI_INTRSTR_LEN];
5707 char intr_xname[INTRDEVNAMEBUF];
5708
5709 if (sc->sc_nqueues < ncpu) {
5710 /*
5711 * To avoid other devices' interrupts, the affinity of Tx/Rx
* interrupts starts at CPU#1.
5713 */
5714 sc->sc_affinity_offset = 1;
5715 } else {
5716 /*
* In this case, this device uses all CPUs, so we make the
* affinity cpu_index match the MSI-X vector number for readability.
5719 */
5720 sc->sc_affinity_offset = 0;
5721 }
5722
5723 error = wm_alloc_txrx_queues(sc);
5724 if (error) {
5725 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5726 error);
5727 return ENOMEM;
5728 }
5729
5730 kcpuset_create(&affinity, false);
5731 intr_idx = 0;
5732
5733 /*
5734 * TX and RX
5735 */
5736 txrx_established = 0;
5737 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5738 struct wm_queue *wmq = &sc->sc_queue[qidx];
5739 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
5740
5741 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5742 sizeof(intrbuf));
5743 #ifdef WM_MPSAFE
5744 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
5745 PCI_INTR_MPSAFE, true);
5746 #endif
5747 memset(intr_xname, 0, sizeof(intr_xname));
5748 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
5749 device_xname(sc->sc_dev), qidx);
5750 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5751 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
5752 if (vih == NULL) {
5753 aprint_error_dev(sc->sc_dev,
5754 "unable to establish MSI-X(for TX and RX)%s%s\n",
5755 intrstr ? " at " : "",
5756 intrstr ? intrstr : "");
5757
5758 goto fail;
5759 }
5760 kcpuset_zero(affinity);
5761 /* Round-robin affinity */
5762 kcpuset_set(affinity, affinity_to);
5763 error = interrupt_distribute(vih, affinity, NULL);
5764 if (error == 0) {
5765 aprint_normal_dev(sc->sc_dev,
5766 "for TX and RX interrupting at %s affinity to %u\n",
5767 intrstr, affinity_to);
5768 } else {
5769 aprint_normal_dev(sc->sc_dev,
5770 "for TX and RX interrupting at %s\n", intrstr);
5771 }
5772 sc->sc_ihs[intr_idx] = vih;
5773 if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
5774 goto fail;
5775 txrx_established++;
5776 intr_idx++;
5777 }
5778
5779 /* LINK */
5780 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5781 sizeof(intrbuf));
5782 #ifdef WM_MPSAFE
5783 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
5784 #endif
5785 memset(intr_xname, 0, sizeof(intr_xname));
5786 snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
5787 device_xname(sc->sc_dev));
5788 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5789 IPL_NET, wm_linkintr_msix, sc, intr_xname);
5790 if (vih == NULL) {
5791 aprint_error_dev(sc->sc_dev,
5792 "unable to establish MSI-X(for LINK)%s%s\n",
5793 intrstr ? " at " : "",
5794 intrstr ? intrstr : "");
5795
5796 goto fail;
5797 }
5798 /* Keep default affinity to LINK interrupt */
5799 aprint_normal_dev(sc->sc_dev,
5800 "for LINK interrupting at %s\n", intrstr);
5801 sc->sc_ihs[intr_idx] = vih;
5802 sc->sc_link_intr_idx = intr_idx;
5803
5804 sc->sc_nintrs = sc->sc_nqueues + 1;
5805 kcpuset_destroy(affinity);
5806 return 0;
5807
5808 fail:
5809 for (qidx = 0; qidx < txrx_established; qidx++) {
5810 struct wm_queue *wmq = &sc->sc_queue[qidx];
5811 pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
5812 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5813 }
5814
5815 kcpuset_destroy(affinity);
5816 return ENOMEM;
5817 }
5818
5819 static void
5820 wm_unset_stopping_flags(struct wm_softc *sc)
5821 {
5822 int i;
5823
5824 KASSERT(WM_CORE_LOCKED(sc));
5825
5826 /* Must unset stopping flags in ascending order. */
5827 for (i = 0; i < sc->sc_nqueues; i++) {
5828 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5829 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5830
5831 mutex_enter(txq->txq_lock);
5832 txq->txq_stopping = false;
5833 mutex_exit(txq->txq_lock);
5834
5835 mutex_enter(rxq->rxq_lock);
5836 rxq->rxq_stopping = false;
5837 mutex_exit(rxq->rxq_lock);
5838 }
5839
5840 sc->sc_core_stopping = false;
5841 }
5842
5843 static void
5844 wm_set_stopping_flags(struct wm_softc *sc)
5845 {
5846 int i;
5847
5848 KASSERT(WM_CORE_LOCKED(sc));
5849
5850 sc->sc_core_stopping = true;
5851
5852 /* Must set stopping flags in ascending order. */
5853 for (i = 0; i < sc->sc_nqueues; i++) {
5854 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5855 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5856
5857 mutex_enter(rxq->rxq_lock);
5858 rxq->rxq_stopping = true;
5859 mutex_exit(rxq->rxq_lock);
5860
5861 mutex_enter(txq->txq_lock);
5862 txq->txq_stopping = true;
5863 mutex_exit(txq->txq_lock);
5864 }
5865 }
5866
5867 /*
* Write the interrupt interval value to the ITR or EITR register.
5869 */
5870 static void
5871 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
5872 {
5873
5874 if (!wmq->wmq_set_itr)
5875 return;
5876
5877 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5878 uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
5879
5880 /*
* The 82575 doesn't have the CNT_INGR field,
* so overwrite the counter field in software.
5883 */
5884 if (sc->sc_type == WM_T_82575)
5885 eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
5886 else
5887 eitr |= EITR_CNT_INGR;
5888
5889 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
5890 } else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
5891 /*
* The 82574 has both ITR and EITR. Set EITR when we use
* the multiqueue function with MSI-X.
5894 */
5895 CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
5896 wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
5897 } else {
5898 KASSERT(wmq->wmq_id == 0);
5899 CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
5900 }
5901
5902 wmq->wmq_set_itr = false;
5903 }
5904
5905 /*
5906 * TODO
* The dynamic ITR calculation below is almost the same as Linux's
* igb; however, it does not fit wm(4) well, so AIM stays disabled
* until we find an appropriate ITR calculation.
5910 */
5911 /*
* Calculate the interrupt interval value that wm_itrs_writereg()
* will write to the register. This function does not write the
* ITR/EITR register itself.
5914 */
5915 static void
5916 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
5917 {
5918 #ifdef NOTYET
5919 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5920 struct wm_txqueue *txq = &wmq->wmq_txq;
5921 uint32_t avg_size = 0;
5922 uint32_t new_itr;
5923
5924 if (rxq->rxq_packets)
5925 avg_size = rxq->rxq_bytes / rxq->rxq_packets;
5926 if (txq->txq_packets)
5927 avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
5928
5929 if (avg_size == 0) {
5930 new_itr = 450; /* restore default value */
5931 goto out;
5932 }
5933
5934 /* Add 24 bytes to size to account for CRC, preamble, and gap */
5935 avg_size += 24;
5936
5937 /* Don't starve jumbo frames */
5938 avg_size = uimin(avg_size, 3000);
5939
5940 /* Give a little boost to mid-size frames */
5941 if ((avg_size > 300) && (avg_size < 1200))
5942 new_itr = avg_size / 3;
5943 else
5944 new_itr = avg_size / 2;
5945
5946 out:
5947 /*
* The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
5949 * controllers. See sc->sc_itr_init setting in wm_init_locked().
5950 */
5951 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
5952 new_itr *= 4;
5953
5954 if (new_itr != wmq->wmq_itr) {
5955 wmq->wmq_itr = new_itr;
5956 wmq->wmq_set_itr = true;
5957 } else
5958 wmq->wmq_set_itr = false;
5959
5960 rxq->rxq_packets = 0;
5961 rxq->rxq_bytes = 0;
5962 txq->txq_packets = 0;
5963 txq->txq_bytes = 0;
5964 #endif
5965 }
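/*
* A worked example of the disabled AIM calculation above: an average
* frame of 700 bytes becomes avg_size == 724 after the 24-byte
* overhead, which falls in the mid-size range, so new_itr == 724 / 3
* == 241, quadrupled to 964 on every controller except the 82575.
*/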
5966
5967 static void
5968 wm_init_sysctls(struct wm_softc *sc)
5969 {
5970 struct sysctllog **log;
5971 const struct sysctlnode *rnode, *qnode, *cnode;
5972 int i, rv;
5973 const char *dvname;
5974
5975 log = &sc->sc_sysctllog;
5976 dvname = device_xname(sc->sc_dev);
5977
5978 rv = sysctl_createv(log, 0, NULL, &rnode,
5979 0, CTLTYPE_NODE, dvname,
5980 SYSCTL_DESCR("wm information and settings"),
5981 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
5982 if (rv != 0)
5983 goto err;
5984
5985 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
5986 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
5987 NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
5988 if (rv != 0)
5989 goto teardown;
5990
5991 for (i = 0; i < sc->sc_nqueues; i++) {
5992 struct wm_queue *wmq = &sc->sc_queue[i];
5993 struct wm_txqueue *txq = &wmq->wmq_txq;
5994 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5995
5996 snprintf(sc->sc_queue[i].sysctlname,
5997 sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
5998
5999 if (sysctl_createv(log, 0, &rnode, &qnode,
6000 0, CTLTYPE_NODE,
6001 sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
6002 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
6003 break;
6004
6005 if (sysctl_createv(log, 0, &qnode, &cnode,
6006 CTLFLAG_READONLY, CTLTYPE_INT,
6007 "txq_free", SYSCTL_DESCR("TX queue free"),
6008 NULL, 0, &txq->txq_free,
6009 0, CTL_CREATE, CTL_EOL) != 0)
6010 break;
6011 if (sysctl_createv(log, 0, &qnode, &cnode,
6012 CTLFLAG_READONLY, CTLTYPE_INT,
6013 "txd_head", SYSCTL_DESCR("TX descriptor head"),
6014 wm_sysctl_tdh_handler, 0, (void *)txq,
6015 0, CTL_CREATE, CTL_EOL) != 0)
6016 break;
6017 if (sysctl_createv(log, 0, &qnode, &cnode,
6018 CTLFLAG_READONLY, CTLTYPE_INT,
6019 "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
6020 wm_sysctl_tdt_handler, 0, (void *)txq,
6021 0, CTL_CREATE, CTL_EOL) != 0)
6022 break;
6023 if (sysctl_createv(log, 0, &qnode, &cnode,
6024 CTLFLAG_READONLY, CTLTYPE_INT,
6025 "txq_next", SYSCTL_DESCR("TX queue next"),
6026 NULL, 0, &txq->txq_next,
6027 0, CTL_CREATE, CTL_EOL) != 0)
6028 break;
6029 if (sysctl_createv(log, 0, &qnode, &cnode,
6030 CTLFLAG_READONLY, CTLTYPE_INT,
6031 "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
6032 NULL, 0, &txq->txq_sfree,
6033 0, CTL_CREATE, CTL_EOL) != 0)
6034 break;
6035 if (sysctl_createv(log, 0, &qnode, &cnode,
6036 CTLFLAG_READONLY, CTLTYPE_INT,
6037 "txq_snext", SYSCTL_DESCR("TX queue snext"),
6038 NULL, 0, &txq->txq_snext,
6039 0, CTL_CREATE, CTL_EOL) != 0)
6040 break;
6041 if (sysctl_createv(log, 0, &qnode, &cnode,
6042 CTLFLAG_READONLY, CTLTYPE_INT,
6043 "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
6044 NULL, 0, &txq->txq_sdirty,
6045 0, CTL_CREATE, CTL_EOL) != 0)
6046 break;
6047 if (sysctl_createv(log, 0, &qnode, &cnode,
6048 CTLFLAG_READONLY, CTLTYPE_INT,
6049 "txq_flags", SYSCTL_DESCR("TX queue flags"),
6050 NULL, 0, &txq->txq_flags,
6051 0, CTL_CREATE, CTL_EOL) != 0)
6052 break;
6053 if (sysctl_createv(log, 0, &qnode, &cnode,
6054 CTLFLAG_READONLY, CTLTYPE_BOOL,
6055 "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
6056 NULL, 0, &txq->txq_stopping,
6057 0, CTL_CREATE, CTL_EOL) != 0)
6058 break;
6059 if (sysctl_createv(log, 0, &qnode, &cnode,
6060 CTLFLAG_READONLY, CTLTYPE_BOOL,
6061 "txq_sending", SYSCTL_DESCR("TX queue sending"),
6062 NULL, 0, &txq->txq_sending,
6063 0, CTL_CREATE, CTL_EOL) != 0)
6064 break;
6065
6066 if (sysctl_createv(log, 0, &qnode, &cnode,
6067 CTLFLAG_READONLY, CTLTYPE_INT,
6068 "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
6069 NULL, 0, &rxq->rxq_ptr,
6070 0, CTL_CREATE, CTL_EOL) != 0)
6071 break;
6072 }
6073
6074 #ifdef WM_DEBUG
6075 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6076 CTLTYPE_INT, "debug_flags",
6077 SYSCTL_DESCR(
6078 "Debug flags:\n" \
6079 "\t0x01 LINK\n" \
6080 "\t0x02 TX\n" \
6081 "\t0x04 RX\n" \
6082 "\t0x08 GMII\n" \
6083 "\t0x10 MANAGE\n" \
6084 "\t0x20 NVM\n" \
6085 "\t0x40 INIT\n" \
6086 "\t0x80 LOCK"),
6087 wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
6088 if (rv != 0)
6089 goto teardown;
6090 #endif
6091
6092 return;
6093
6094 teardown:
6095 sysctl_teardown(log);
6096 err:
6097 sc->sc_sysctllog = NULL;
6098 device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
6099 __func__, rv);
6100 }
6101
6102 /*
6103 * wm_init: [ifnet interface function]
6104 *
6105 * Initialize the interface.
6106 */
6107 static int
6108 wm_init(struct ifnet *ifp)
6109 {
6110 struct wm_softc *sc = ifp->if_softc;
6111 int ret;
6112
6113 WM_CORE_LOCK(sc);
6114 ret = wm_init_locked(ifp);
6115 WM_CORE_UNLOCK(sc);
6116
6117 return ret;
6118 }
6119
6120 static int
6121 wm_init_locked(struct ifnet *ifp)
6122 {
6123 struct wm_softc *sc = ifp->if_softc;
6124 struct ethercom *ec = &sc->sc_ethercom;
6125 int i, j, trynum, error = 0;
6126 uint32_t reg, sfp_mask = 0;
6127
6128 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6129 device_xname(sc->sc_dev), __func__));
6130 KASSERT(WM_CORE_LOCKED(sc));
6131
6132 /*
* *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
* There is a small but measurable benefit to avoiding the adjustment
* of the descriptor so that the headers are aligned, for a normal MTU,
6136 * on such platforms. One possibility is that the DMA itself is
6137 * slightly more efficient if the front of the entire packet (instead
6138 * of the front of the headers) is aligned.
6139 *
6140 * Note we must always set align_tweak to 0 if we are using
6141 * jumbo frames.
6142 */
6143 #ifdef __NO_STRICT_ALIGNMENT
6144 sc->sc_align_tweak = 0;
6145 #else
6146 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
6147 sc->sc_align_tweak = 0;
6148 else
6149 sc->sc_align_tweak = 2;
6150 #endif /* __NO_STRICT_ALIGNMENT */
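/*
* Concretely: with sc_align_tweak == 2, received frames are offset
* two bytes so that the 14-byte Ethernet header ends on a 4-byte
* boundary and the following IP header is naturally aligned, which
* strict-alignment platforms require.
*/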
6151
6152 /* Cancel any pending I/O. */
6153 wm_stop_locked(ifp, false, false);
6154
6155 /* Update statistics before reset */
6156 if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
6157 if_ierrors, CSR_READ(sc, WMREG_RXERRC));
6158
6159 /* >= PCH_SPT hardware workaround before reset. */
6160 if (sc->sc_type >= WM_T_PCH_SPT)
6161 wm_flush_desc_rings(sc);
6162
6163 /* Reset the chip to a known state. */
6164 wm_reset(sc);
6165
6166 /*
* AMT-based hardware can now take control from firmware.
6168 * Do this after reset.
6169 */
6170 if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
6171 wm_get_hw_control(sc);
6172
6173 if ((sc->sc_type >= WM_T_PCH_SPT) &&
6174 pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
6175 wm_legacy_irq_quirk_spt(sc);
6176
6177 /* Init hardware bits */
6178 wm_initialize_hardware_bits(sc);
6179
6180 /* Reset the PHY. */
6181 if (sc->sc_flags & WM_F_HAS_MII)
6182 wm_gmii_reset(sc);
6183
6184 if (sc->sc_type >= WM_T_ICH8) {
6185 reg = CSR_READ(sc, WMREG_GCR);
6186 /*
* The ICH8 no-snoop bits have the opposite polarity; set to
* snoop by default after reset.
6189 */
6190 if (sc->sc_type == WM_T_ICH8)
6191 reg |= GCR_NO_SNOOP_ALL;
6192 else
6193 reg &= ~GCR_NO_SNOOP_ALL;
6194 CSR_WRITE(sc, WMREG_GCR, reg);
6195 }
6196
6197 if ((sc->sc_type >= WM_T_ICH8)
6198 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
6199 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
6200
6201 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6202 reg |= CTRL_EXT_RO_DIS;
6203 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6204 }
6205
6206 /* Calculate (E)ITR value */
6207 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
6208 /*
* For NEWQUEUE's EITR (except for the 82575).
* The 82575's EITR should be set to the same throttling value
* as the older controllers' ITR because the interrupts/sec
* calculation is the same, that is, 1,000,000,000 / (N * 256).
*
* The 82574's EITR should be set to the same throttling value
* as the ITR.
*
* For N interrupts/sec, set this value to:
* 1,000,000 / N, in contrast to the ITR throttling value.
6218 */
6219 sc->sc_itr_init = 450;
6220 } else if (sc->sc_type >= WM_T_82543) {
6221 /*
6222 * Set up the interrupt throttling register (units of 256ns)
6223 * Note that a footnote in Intel's documentation says this
6224 * ticker runs at 1/4 the rate when the chip is in 100Mbit
* or 10Mbit mode. Empirically, this also appears to be true
* for the 1024ns units of the other
6227 * interrupt-related timer registers -- so, really, we ought
6228 * to divide this value by 4 when the link speed is low.
6229 *
6230 * XXX implement this division at link speed change!
6231 */
6232
6233 /*
6234 * For N interrupts/sec, set this value to:
6235 * 1,000,000,000 / (N * 256). Note that we set the
6236 * absolute and packet timer values to this value
6237 * divided by 4 to get "simple timer" behavior.
6238 */
6239 sc->sc_itr_init = 1500; /* 2604 ints/sec */
6240 }
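/*
* Sanity-checking the two formulas above: an EITR value of 450 gives
* roughly 1,000,000 / 450 = ~2222 interrupts/sec, while an ITR value
* of 1500 gives 1,000,000,000 / (1500 * 256) = ~2604 interrupts/sec,
* matching the "2604 ints/sec" note above.
*/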
6241
6242 error = wm_init_txrx_queues(sc);
6243 if (error)
6244 goto out;
6245
6246 if (((sc->sc_flags & WM_F_SGMII) == 0) &&
6247 (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
6248 (sc->sc_type >= WM_T_82575))
6249 wm_serdes_power_up_link_82575(sc);
6250
6251 /* Clear out the VLAN table -- we don't use it (yet). */
6252 CSR_WRITE(sc, WMREG_VET, 0);
6253 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
6254 trynum = 10; /* Due to hw errata */
6255 else
6256 trynum = 1;
6257 for (i = 0; i < WM_VLAN_TABSIZE; i++)
6258 for (j = 0; j < trynum; j++)
6259 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
6260
6261 /*
6262 * Set up flow-control parameters.
6263 *
6264 * XXX Values could probably stand some tuning.
6265 */
6266 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
6267 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
6268 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
6269 && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
6270 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
6271 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
6272 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
6273 }
6274
6275 sc->sc_fcrtl = FCRTL_DFLT;
6276 if (sc->sc_type < WM_T_82543) {
6277 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
6278 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
6279 } else {
6280 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
6281 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
6282 }
6283
6284 if (sc->sc_type == WM_T_80003)
6285 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
6286 else
6287 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
6288
6289 /* Writes the control register. */
6290 wm_set_vlan(sc);
6291
6292 if (sc->sc_flags & WM_F_HAS_MII) {
6293 uint16_t kmreg;
6294
6295 switch (sc->sc_type) {
6296 case WM_T_80003:
6297 case WM_T_ICH8:
6298 case WM_T_ICH9:
6299 case WM_T_ICH10:
6300 case WM_T_PCH:
6301 case WM_T_PCH2:
6302 case WM_T_PCH_LPT:
6303 case WM_T_PCH_SPT:
6304 case WM_T_PCH_CNP:
6305 /*
* Set the MAC to wait the maximum time between each
* iteration and increase the max iterations when
* polling the PHY; this fixes erroneous timeouts at
6309 * 10Mbps.
6310 */
6311 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
6312 0xFFFF);
6313 wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6314 &kmreg);
6315 kmreg |= 0x3F;
6316 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6317 kmreg);
6318 break;
6319 default:
6320 break;
6321 }
6322
6323 if (sc->sc_type == WM_T_80003) {
6324 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6325 reg &= ~CTRL_EXT_LINK_MODE_MASK;
6326 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6327
6328 /* Bypass RX and TX FIFOs */
6329 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
6330 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
6331 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
6332 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
6333 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
6334 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
6335 }
6336 }
6337 #if 0
6338 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
6339 #endif
6340
6341 /* Set up checksum offload parameters. */
6342 reg = CSR_READ(sc, WMREG_RXCSUM);
6343 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
6344 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6345 reg |= RXCSUM_IPOFL;
6346 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
6347 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
6348 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
6349 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
6350 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6351
6352 /* Set registers about MSI-X */
6353 if (wm_is_using_msix(sc)) {
6354 uint32_t ivar, qintr_idx;
6355 struct wm_queue *wmq;
6356 unsigned int qid;
6357
6358 if (sc->sc_type == WM_T_82575) {
6359 /* Interrupt control */
6360 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6361 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
6362 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6363
6364 /* TX and RX */
6365 for (i = 0; i < sc->sc_nqueues; i++) {
6366 wmq = &sc->sc_queue[i];
6367 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
6368 EITR_TX_QUEUE(wmq->wmq_id)
6369 | EITR_RX_QUEUE(wmq->wmq_id));
6370 }
6371 /* Link status */
6372 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
6373 EITR_OTHER);
6374 } else if (sc->sc_type == WM_T_82574) {
6375 /* Interrupt control */
6376 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6377 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
6378 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6379
6380 /*
* Work around an issue with spurious interrupts
* in MSI-X mode.
* When wm_initialize_hardware_bits() runs, sc_nintrs has
* not been initialized yet, so re-initialize WMREG_RFCTL here.
6385 */
6386 reg = CSR_READ(sc, WMREG_RFCTL);
6387 reg |= WMREG_RFCTL_ACKDIS;
6388 CSR_WRITE(sc, WMREG_RFCTL, reg);
6389
6390 ivar = 0;
6391 /* TX and RX */
6392 for (i = 0; i < sc->sc_nqueues; i++) {
6393 wmq = &sc->sc_queue[i];
6394 qid = wmq->wmq_id;
6395 qintr_idx = wmq->wmq_intr_idx;
6396
6397 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6398 IVAR_TX_MASK_Q_82574(qid));
6399 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6400 IVAR_RX_MASK_Q_82574(qid));
6401 }
6402 /* Link status */
6403 ivar |= __SHIFTIN((IVAR_VALID_82574
6404 | sc->sc_link_intr_idx), IVAR_OTHER_MASK);
6405 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
6406 } else {
6407 /* Interrupt control */
6408 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
6409 | GPIE_EIAME | GPIE_PBA);
6410
6411 switch (sc->sc_type) {
6412 case WM_T_82580:
6413 case WM_T_I350:
6414 case WM_T_I354:
6415 case WM_T_I210:
6416 case WM_T_I211:
6417 /* TX and RX */
6418 for (i = 0; i < sc->sc_nqueues; i++) {
6419 wmq = &sc->sc_queue[i];
6420 qid = wmq->wmq_id;
6421 qintr_idx = wmq->wmq_intr_idx;
6422
6423 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
6424 ivar &= ~IVAR_TX_MASK_Q(qid);
6425 ivar |= __SHIFTIN((qintr_idx
6426 | IVAR_VALID),
6427 IVAR_TX_MASK_Q(qid));
6428 ivar &= ~IVAR_RX_MASK_Q(qid);
6429 ivar |= __SHIFTIN((qintr_idx
6430 | IVAR_VALID),
6431 IVAR_RX_MASK_Q(qid));
6432 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
6433 }
6434 break;
6435 case WM_T_82576:
6436 /* TX and RX */
6437 for (i = 0; i < sc->sc_nqueues; i++) {
6438 wmq = &sc->sc_queue[i];
6439 qid = wmq->wmq_id;
6440 qintr_idx = wmq->wmq_intr_idx;
6441
6442 ivar = CSR_READ(sc,
6443 WMREG_IVAR_Q_82576(qid));
6444 ivar &= ~IVAR_TX_MASK_Q_82576(qid);
6445 ivar |= __SHIFTIN((qintr_idx
6446 | IVAR_VALID),
6447 IVAR_TX_MASK_Q_82576(qid));
6448 ivar &= ~IVAR_RX_MASK_Q_82576(qid);
6449 ivar |= __SHIFTIN((qintr_idx
6450 | IVAR_VALID),
6451 IVAR_RX_MASK_Q_82576(qid));
6452 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
6453 ivar);
6454 }
6455 break;
6456 default:
6457 break;
6458 }
6459
6460 /* Link status */
6461 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
6462 IVAR_MISC_OTHER);
6463 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
6464 }
6465
6466 if (wm_is_using_multiqueue(sc)) {
6467 wm_init_rss(sc);
6468
6469 /*
* NOTE: Receive Full-Packet Checksum Offload
* is mutually exclusive with Multiqueue. However,
* this is not the same as the TCP/IP checksums,
* which still work.
6474 */
6475 reg = CSR_READ(sc, WMREG_RXCSUM);
6476 reg |= RXCSUM_PCSD;
6477 CSR_WRITE(sc, WMREG_RXCSUM, reg);
6478 }
6479 }
6480
6481 /* Set up the interrupt registers. */
6482 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6483
6484 /* Enable SFP module insertion interrupt if it's required */
6485 if ((sc->sc_flags & WM_F_SFP) != 0) {
6486 sc->sc_ctrl |= CTRL_EXTLINK_EN;
6487 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6488 sfp_mask = ICR_GPI(0);
6489 }
6490
6491 if (wm_is_using_msix(sc)) {
6492 uint32_t mask;
6493 struct wm_queue *wmq;
6494
6495 switch (sc->sc_type) {
6496 case WM_T_82574:
6497 mask = 0;
6498 for (i = 0; i < sc->sc_nqueues; i++) {
6499 wmq = &sc->sc_queue[i];
6500 mask |= ICR_TXQ(wmq->wmq_id);
6501 mask |= ICR_RXQ(wmq->wmq_id);
6502 }
6503 mask |= ICR_OTHER;
6504 CSR_WRITE(sc, WMREG_EIAC_82574, mask);
6505 CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
6506 break;
6507 default:
6508 if (sc->sc_type == WM_T_82575) {
6509 mask = 0;
6510 for (i = 0; i < sc->sc_nqueues; i++) {
6511 wmq = &sc->sc_queue[i];
6512 mask |= EITR_TX_QUEUE(wmq->wmq_id);
6513 mask |= EITR_RX_QUEUE(wmq->wmq_id);
6514 }
6515 mask |= EITR_OTHER;
6516 } else {
6517 mask = 0;
6518 for (i = 0; i < sc->sc_nqueues; i++) {
6519 wmq = &sc->sc_queue[i];
6520 mask |= 1 << wmq->wmq_intr_idx;
6521 }
6522 mask |= 1 << sc->sc_link_intr_idx;
6523 }
6524 CSR_WRITE(sc, WMREG_EIAC, mask);
6525 CSR_WRITE(sc, WMREG_EIAM, mask);
6526 CSR_WRITE(sc, WMREG_EIMS, mask);
6527
6528 /* For other interrupts */
6529 CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
6530 break;
6531 }
6532 } else {
6533 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
6534 ICR_RXO | ICR_RXT0 | sfp_mask;
6535 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
6536 }
6537
6538 /* Set up the inter-packet gap. */
6539 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6540
6541 if (sc->sc_type >= WM_T_82543) {
6542 for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6543 struct wm_queue *wmq = &sc->sc_queue[qidx];
6544 wm_itrs_writereg(sc, wmq);
6545 }
6546 /*
* Link interrupts occur much less frequently than TX
* and RX interrupts, so we don't tune the
* EITR(WM_MSIX_LINKINTR_IDX) value the way
* FreeBSD's if_igb does.
6551 */
6552 }
6553
6554 /* Set the VLAN EtherType. */
6555 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
6556
6557 /*
6558 * Set up the transmit control register; we start out with
6559 * a collision distance suitable for FDX, but update it when
6560 * we resolve the media type.
6561 */
6562 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
6563 | TCTL_CT(TX_COLLISION_THRESHOLD)
6564 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6565 if (sc->sc_type >= WM_T_82571)
6566 sc->sc_tctl |= TCTL_MULR;
6567 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6568
6569 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
/* Write TDT after TCTL.EN is set. See the documentation. */
6571 CSR_WRITE(sc, WMREG_TDT(0), 0);
6572 }
6573
6574 if (sc->sc_type == WM_T_80003) {
6575 reg = CSR_READ(sc, WMREG_TCTL_EXT);
6576 reg &= ~TCTL_EXT_GCEX_MASK;
6577 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
6578 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
6579 }
6580
6581 /* Set the media. */
6582 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
6583 goto out;
6584
6585 /* Configure for OS presence */
6586 wm_init_manageability(sc);
6587
6588 /*
6589 * Set up the receive control register; we actually program the
6590 * register when we set the receive filter. Use multicast address
6591 * offset type 0.
6592 *
6593 * Only the i82544 has the ability to strip the incoming CRC, so we
6594 * don't enable that feature.
6595 */
6596 sc->sc_mchash_type = 0;
6597 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
6598 | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
6599
/* The 82574 uses the one-buffer extended Rx descriptor. */
6601 if (sc->sc_type == WM_T_82574)
6602 sc->sc_rctl |= RCTL_DTYP_ONEBUF;
6603
6604 if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
6605 sc->sc_rctl |= RCTL_SECRC;
6606
6607 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
6608 && (ifp->if_mtu > ETHERMTU)) {
6609 sc->sc_rctl |= RCTL_LPE;
6610 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6611 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
6612 }
6613
6614 if (MCLBYTES == 2048)
6615 sc->sc_rctl |= RCTL_2k;
6616 else {
6617 if (sc->sc_type >= WM_T_82543) {
6618 switch (MCLBYTES) {
6619 case 4096:
6620 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
6621 break;
6622 case 8192:
6623 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
6624 break;
6625 case 16384:
6626 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
6627 break;
6628 default:
6629 panic("wm_init: MCLBYTES %d unsupported",
6630 MCLBYTES);
6631 break;
6632 }
6633 } else
6634 panic("wm_init: i82542 requires MCLBYTES = 2048");
6635 }
6636
6637 /* Enable ECC */
6638 switch (sc->sc_type) {
6639 case WM_T_82571:
6640 reg = CSR_READ(sc, WMREG_PBA_ECC);
6641 reg |= PBA_ECC_CORR_EN;
6642 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
6643 break;
6644 case WM_T_PCH_LPT:
6645 case WM_T_PCH_SPT:
6646 case WM_T_PCH_CNP:
6647 reg = CSR_READ(sc, WMREG_PBECCSTS);
6648 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
6649 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
6650
6651 sc->sc_ctrl |= CTRL_MEHE;
6652 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6653 break;
6654 default:
6655 break;
6656 }
6657
6658 /*
6659 * Set the receive filter.
6660 *
* For the 82575 and 82576, the RX descriptors must be initialized
* after RCTL.EN is set in wm_set_filter().
6663 */
6664 wm_set_filter(sc);
6665
/* On the 82575 and later, set RDT only if RX is enabled */
6667 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6668 int qidx;
6669 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6670 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
6671 for (i = 0; i < WM_NRXDESC; i++) {
6672 mutex_enter(rxq->rxq_lock);
6673 wm_init_rxdesc(rxq, i);
mutex_exit(rxq->rxq_lock);
}
6677 }
6678 }
6679
6680 wm_unset_stopping_flags(sc);
6681
6682 /* Start the one second link check clock. */
6683 callout_schedule(&sc->sc_tick_ch, hz);
6684
6685 /* ...all done! */
6686 ifp->if_flags |= IFF_RUNNING;
6687
6688 out:
6689 /* Save last flags for the callback */
6690 sc->sc_if_flags = ifp->if_flags;
6691 sc->sc_ec_capenable = ec->ec_capenable;
6692 if (error)
6693 log(LOG_ERR, "%s: interface not running\n",
6694 device_xname(sc->sc_dev));
6695 return error;
6696 }
6697
6698 /*
6699 * wm_stop: [ifnet interface function]
6700 *
6701 * Stop transmission on the interface.
6702 */
6703 static void
6704 wm_stop(struct ifnet *ifp, int disable)
6705 {
6706 struct wm_softc *sc = ifp->if_softc;
6707
6708 ASSERT_SLEEPABLE();
6709
6710 WM_CORE_LOCK(sc);
6711 wm_stop_locked(ifp, disable ? true : false, true);
6712 WM_CORE_UNLOCK(sc);
6713
6714 /*
* After wm_set_stopping_flags(), it is guaranteed that
* wm_handle_queue_work() does not call workqueue_enqueue().
* However, workqueue_wait() cannot be called in wm_stop_locked()
* because it can sleep, so call workqueue_wait() here.
6720 */
6721 for (int i = 0; i < sc->sc_nqueues; i++)
6722 workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
6723 }
6724
6725 static void
6726 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
6727 {
6728 struct wm_softc *sc = ifp->if_softc;
6729 struct wm_txsoft *txs;
6730 int i, qidx;
6731
6732 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6733 device_xname(sc->sc_dev), __func__));
6734 KASSERT(WM_CORE_LOCKED(sc));
6735
6736 wm_set_stopping_flags(sc);
6737
6738 if (sc->sc_flags & WM_F_HAS_MII) {
6739 /* Down the MII. */
6740 mii_down(&sc->sc_mii);
6741 } else {
6742 #if 0
6743 /* Should we clear PHY's status properly? */
6744 wm_reset(sc);
6745 #endif
6746 }
6747
6748 /* Stop the transmit and receive processes. */
6749 CSR_WRITE(sc, WMREG_TCTL, 0);
6750 CSR_WRITE(sc, WMREG_RCTL, 0);
6751 sc->sc_rctl &= ~RCTL_EN;
6752
6753 /*
6754 * Clear the interrupt mask to ensure the device cannot assert its
6755 * interrupt line.
6756 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
6757 * service any currently pending or shared interrupt.
6758 */
6759 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6760 sc->sc_icr = 0;
6761 if (wm_is_using_msix(sc)) {
6762 if (sc->sc_type != WM_T_82574) {
6763 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
6764 CSR_WRITE(sc, WMREG_EIAC, 0);
6765 } else
6766 CSR_WRITE(sc, WMREG_EIAC_82574, 0);
6767 }
6768
6769 /*
6770 * Stop callouts after interrupts are disabled; if we have
6771 * to wait for them, we will be releasing the CORE_LOCK
6772 * briefly, which will unblock interrupts on the current CPU.
6773 */
6774
6775 /* Stop the one second clock. */
6776 if (wait)
6777 callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
6778 else
6779 callout_stop(&sc->sc_tick_ch);
6780
6781 /* Stop the 82547 Tx FIFO stall check timer. */
6782 if (sc->sc_type == WM_T_82547) {
6783 if (wait)
6784 callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
6785 else
6786 callout_stop(&sc->sc_txfifo_ch);
6787 }
6788
6789 /* Release any queued transmit buffers. */
6790 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6791 struct wm_queue *wmq = &sc->sc_queue[qidx];
6792 struct wm_txqueue *txq = &wmq->wmq_txq;
6793 struct mbuf *m;
6794
6795 mutex_enter(txq->txq_lock);
6796 txq->txq_sending = false; /* Ensure watchdog disabled */
6797 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6798 txs = &txq->txq_soft[i];
6799 if (txs->txs_mbuf != NULL) {
6800 bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
6801 m_freem(txs->txs_mbuf);
6802 txs->txs_mbuf = NULL;
6803 }
6804 }
6805 /* Drain txq_interq */
6806 while ((m = pcq_get(txq->txq_interq)) != NULL)
6807 m_freem(m);
6808 mutex_exit(txq->txq_lock);
6809 }
6810
6811 /* Mark the interface as down and cancel the watchdog timer. */
6812 ifp->if_flags &= ~IFF_RUNNING;
6813
6814 if (disable) {
6815 for (i = 0; i < sc->sc_nqueues; i++) {
6816 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6817 mutex_enter(rxq->rxq_lock);
6818 wm_rxdrain(rxq);
6819 mutex_exit(rxq->rxq_lock);
6820 }
6821 }
6822
6823 #if 0 /* notyet */
6824 if (sc->sc_type >= WM_T_82544)
6825 CSR_WRITE(sc, WMREG_WUC, 0);
6826 #endif
6827 }
6828
6829 static void
6830 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
6831 {
6832 struct mbuf *m;
6833 int i;
6834
6835 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
6836 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
6837 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
6838 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
6839 m->m_data, m->m_len, m->m_flags);
6840 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
6841 i, i == 1 ? "" : "s");
6842 }
6843
6844 /*
6845 * wm_82547_txfifo_stall:
6846 *
6847 * Callout used to wait for the 82547 Tx FIFO to drain,
6848 * reset the FIFO pointers, and restart packet transmission.
6849 */
6850 static void
6851 wm_82547_txfifo_stall(void *arg)
6852 {
6853 struct wm_softc *sc = arg;
6854 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6855
6856 mutex_enter(txq->txq_lock);
6857
6858 if (txq->txq_stopping)
6859 goto out;
6860
6861 if (txq->txq_fifo_stall) {
6862 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
6863 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
6864 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
6865 /*
6866 * Packets have drained. Stop transmitter, reset
6867 * FIFO pointers, restart transmitter, and kick
6868 * the packet queue.
6869 */
6870 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
6871 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
6872 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
6873 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
6874 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
6875 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
6876 CSR_WRITE(sc, WMREG_TCTL, tctl);
6877 CSR_WRITE_FLUSH(sc);
6878
6879 txq->txq_fifo_head = 0;
6880 txq->txq_fifo_stall = 0;
6881 wm_start_locked(&sc->sc_ethercom.ec_if);
6882 } else {
6883 /*
6884 * Still waiting for packets to drain; try again in
6885 * another tick.
6886 */
6887 callout_schedule(&sc->sc_txfifo_ch, 1);
6888 }
6889 }
6890
6891 out:
6892 mutex_exit(txq->txq_lock);
6893 }
6894
6895 /*
6896 * wm_82547_txfifo_bugchk:
6897 *
* Check for a bug condition in the 82547 Tx FIFO. We need to
* prevent enqueueing a packet that would wrap around the end
* of the Tx FIFO ring buffer, otherwise the chip will croak.
6901 *
6902 * We do this by checking the amount of space before the end
6903 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
6904 * the Tx FIFO, wait for all remaining packets to drain, reset
6905 * the internal FIFO pointers to the beginning, and restart
6906 * transmission on the interface.
6907 */
6908 #define WM_FIFO_HDR 0x10
6909 #define WM_82547_PAD_LEN 0x3e0
6910 static int
6911 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
6912 {
6913 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6914 int space = txq->txq_fifo_size - txq->txq_fifo_head;
6915 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
6916
6917 /* Just return if already stalled. */
6918 if (txq->txq_fifo_stall)
6919 return 1;
6920
6921 if (sc->sc_mii.mii_media_active & IFM_FDX) {
6922 /* Stall only occurs in half-duplex mode. */
6923 goto send_packet;
6924 }
6925
6926 if (len >= WM_82547_PAD_LEN + space) {
6927 txq->txq_fifo_stall = 1;
6928 callout_schedule(&sc->sc_txfifo_ch, 1);
6929 return 1;
6930 }
6931
6932 send_packet:
6933 txq->txq_fifo_head += len;
6934 if (txq->txq_fifo_head >= txq->txq_fifo_size)
6935 txq->txq_fifo_head -= txq->txq_fifo_size;
6936
6937 return 0;
6938 }
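/*
* A worked example with hypothetical numbers: if only space = 0x100
* bytes remain before the end of the FIFO, a 1400-byte frame rounds
* up to len = roundup(1400 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x590, and
* since 0x590 >= WM_82547_PAD_LEN + 0x100 (= 0x4e0) the queue stalls
* until the FIFO drains; a frame under that bound is sent at once
* and txq_fifo_head advances, wrapping modulo txq_fifo_size.
*/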
6939
6940 static int
6941 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6942 {
6943 int error;
6944
6945 /*
6946 * Allocate the control data structures, and create and load the
6947 * DMA map for it.
6948 *
6949 * NOTE: All Tx descriptors must be in the same 4G segment of
6950 * memory. So must Rx descriptors. We simplify by allocating
6951 * both sets within the same 4G segment.
6952 */
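/*
* The 4G constraint is enforced by passing 0x100000000ULL as the
* boundary argument to bus_dmamem_alloc() below, which guarantees
* that the allocation never crosses a 4GB boundary.
*/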
6953 if (sc->sc_type < WM_T_82544)
6954 WM_NTXDESC(txq) = WM_NTXDESC_82542;
6955 else
6956 WM_NTXDESC(txq) = WM_NTXDESC_82544;
6957 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6958 txq->txq_descsize = sizeof(nq_txdesc_t);
6959 else
6960 txq->txq_descsize = sizeof(wiseman_txdesc_t);
6961
6962 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
6963 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
6964 1, &txq->txq_desc_rseg, 0)) != 0) {
6965 aprint_error_dev(sc->sc_dev,
6966 "unable to allocate TX control data, error = %d\n",
6967 error);
6968 goto fail_0;
6969 }
6970
6971 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
6972 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
6973 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
6974 aprint_error_dev(sc->sc_dev,
6975 "unable to map TX control data, error = %d\n", error);
6976 goto fail_1;
6977 }
6978
6979 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
6980 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
6981 aprint_error_dev(sc->sc_dev,
6982 "unable to create TX control data DMA map, error = %d\n",
6983 error);
6984 goto fail_2;
6985 }
6986
6987 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
6988 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
6989 aprint_error_dev(sc->sc_dev,
6990 "unable to load TX control data DMA map, error = %d\n",
6991 error);
6992 goto fail_3;
6993 }
6994
6995 return 0;
6996
6997 fail_3:
6998 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6999 fail_2:
7000 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7001 WM_TXDESCS_SIZE(txq));
7002 fail_1:
7003 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7004 fail_0:
7005 return error;
7006 }
7007
7008 static void
7009 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7010 {
7011
7012 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
7013 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7014 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7015 WM_TXDESCS_SIZE(txq));
7016 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7017 }
7018
7019 static int
7020 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7021 {
7022 int error;
7023 size_t rxq_descs_size;
7024
7025 /*
7026 * Allocate the control data structures, and create and load the
7027 * DMA map for it.
7028 *
7029 * NOTE: All Tx descriptors must be in the same 4G segment of
7030 * memory. So must Rx descriptors. We simplify by allocating
7031 * both sets within the same 4G segment.
7032 */
7033 rxq->rxq_ndesc = WM_NRXDESC;
7034 if (sc->sc_type == WM_T_82574)
7035 rxq->rxq_descsize = sizeof(ext_rxdesc_t);
7036 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7037 rxq->rxq_descsize = sizeof(nq_rxdesc_t);
7038 else
7039 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
7040 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
7041
7042 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
7043 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
7044 1, &rxq->rxq_desc_rseg, 0)) != 0) {
7045 aprint_error_dev(sc->sc_dev,
7046 "unable to allocate RX control data, error = %d\n",
7047 error);
7048 goto fail_0;
7049 }
7050
7051 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
7052 rxq->rxq_desc_rseg, rxq_descs_size,
7053 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
7054 aprint_error_dev(sc->sc_dev,
7055 "unable to map RX control data, error = %d\n", error);
7056 goto fail_1;
7057 }
7058
7059 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
7060 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
7061 aprint_error_dev(sc->sc_dev,
7062 "unable to create RX control data DMA map, error = %d\n",
7063 error);
7064 goto fail_2;
7065 }
7066
7067 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
7068 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
7069 aprint_error_dev(sc->sc_dev,
7070 "unable to load RX control data DMA map, error = %d\n",
7071 error);
7072 goto fail_3;
7073 }
7074
7075 return 0;
7076
7077 fail_3:
7078 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7079 fail_2:
7080 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7081 rxq_descs_size);
7082 fail_1:
7083 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7084 fail_0:
7085 return error;
7086 }
7087
7088 static void
7089 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7090 {
7091
7092 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
7093 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7094 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7095 rxq->rxq_descsize * rxq->rxq_ndesc);
7096 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7097 }
7098
7099
7100 static int
7101 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7102 {
7103 int i, error;
7104
7105 /* Create the transmit buffer DMA maps. */
7106 WM_TXQUEUELEN(txq) =
7107 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
7108 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
7109 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7110 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
7111 WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
7112 &txq->txq_soft[i].txs_dmamap)) != 0) {
7113 aprint_error_dev(sc->sc_dev,
7114 "unable to create Tx DMA map %d, error = %d\n",
7115 i, error);
7116 goto fail;
7117 }
7118 }
7119
7120 return 0;
7121
7122 fail:
7123 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7124 if (txq->txq_soft[i].txs_dmamap != NULL)
7125 bus_dmamap_destroy(sc->sc_dmat,
7126 txq->txq_soft[i].txs_dmamap);
7127 }
7128 return error;
7129 }
7130
7131 static void
7132 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7133 {
7134 int i;
7135
7136 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7137 if (txq->txq_soft[i].txs_dmamap != NULL)
7138 bus_dmamap_destroy(sc->sc_dmat,
7139 txq->txq_soft[i].txs_dmamap);
7140 }
7141 }
7142
7143 static int
7144 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7145 {
7146 int i, error;
7147
7148 /* Create the receive buffer DMA maps. */
7149 for (i = 0; i < rxq->rxq_ndesc; i++) {
7150 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
7151 MCLBYTES, 0, 0,
7152 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
7153 aprint_error_dev(sc->sc_dev,
7154 "unable to create Rx DMA map %d error = %d\n",
7155 i, error);
7156 goto fail;
7157 }
7158 rxq->rxq_soft[i].rxs_mbuf = NULL;
7159 }
7160
7161 return 0;
7162
7163 fail:
7164 for (i = 0; i < rxq->rxq_ndesc; i++) {
7165 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7166 bus_dmamap_destroy(sc->sc_dmat,
7167 rxq->rxq_soft[i].rxs_dmamap);
7168 }
7169 return error;
7170 }
7171
7172 static void
7173 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7174 {
7175 int i;
7176
7177 for (i = 0; i < rxq->rxq_ndesc; i++) {
7178 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7179 bus_dmamap_destroy(sc->sc_dmat,
7180 rxq->rxq_soft[i].rxs_dmamap);
7181 }
7182 }
7183
7184 /*
7185  * wm_alloc_txrx_queues:
7186  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
7187  */
7188 static int
7189 wm_alloc_txrx_queues(struct wm_softc *sc)
7190 {
7191 int i, error, tx_done, rx_done;
7192
7193 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
7194 KM_SLEEP);
7195 if (sc->sc_queue == NULL) {
7196 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
7197 error = ENOMEM;
7198 goto fail_0;
7199 }
7200
7201 /* For transmission */
7202 error = 0;
7203 tx_done = 0;
7204 for (i = 0; i < sc->sc_nqueues; i++) {
7205 #ifdef WM_EVENT_COUNTERS
7206 int j;
7207 const char *xname;
7208 #endif
7209 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7210 txq->txq_sc = sc;
7211 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7212
7213 error = wm_alloc_tx_descs(sc, txq);
7214 if (error)
7215 break;
7216 error = wm_alloc_tx_buffer(sc, txq);
7217 if (error) {
7218 wm_free_tx_descs(sc, txq);
7219 break;
7220 }
7221 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
7222 if (txq->txq_interq == NULL) {
7223 wm_free_tx_descs(sc, txq);
7224 wm_free_tx_buffer(sc, txq);
7225 error = ENOMEM;
7226 break;
7227 }
7228
7229 #ifdef WM_EVENT_COUNTERS
7230 xname = device_xname(sc->sc_dev);
7231
7232 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
7233 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
7234 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
7235 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
7236 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
7237 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
7238 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
7239 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
7240 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
7241 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
7242 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
7243
7244 for (j = 0; j < WM_NTXSEGS; j++) {
7245 snprintf(txq->txq_txseg_evcnt_names[j],
7246 sizeof(txq->txq_txseg_evcnt_names[j]),
7247 "txq%02dtxseg%d", i, j);
7248 evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
7249 EVCNT_TYPE_MISC,
7250 NULL, xname, txq->txq_txseg_evcnt_names[j]);
7251 }
7252
7253 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
7254 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
7255 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
7256 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
7257 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
7258 WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
7259 #endif /* WM_EVENT_COUNTERS */
7260
7261 tx_done++;
7262 }
7263 if (error)
7264 goto fail_1;
7265
7266 /* For receive */
7267 error = 0;
7268 rx_done = 0;
7269 for (i = 0; i < sc->sc_nqueues; i++) {
7270 #ifdef WM_EVENT_COUNTERS
7271 const char *xname;
7272 #endif
7273 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7274 rxq->rxq_sc = sc;
7275 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7276
7277 error = wm_alloc_rx_descs(sc, rxq);
7278 if (error)
7279 break;
7280
7281 error = wm_alloc_rx_buffer(sc, rxq);
7282 if (error) {
7283 wm_free_rx_descs(sc, rxq);
7284 break;
7285 }
7286
7287 #ifdef WM_EVENT_COUNTERS
7288 xname = device_xname(sc->sc_dev);
7289
7290 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
7291 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
7292
7293 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
7294 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
7295 #endif /* WM_EVENT_COUNTERS */
7296
7297 rx_done++;
7298 }
7299 if (error)
7300 goto fail_2;
7301
7302 return 0;
7303
7304 fail_2:
7305 for (i = 0; i < rx_done; i++) {
7306 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7307 wm_free_rx_buffer(sc, rxq);
7308 wm_free_rx_descs(sc, rxq);
7309 if (rxq->rxq_lock)
7310 mutex_obj_free(rxq->rxq_lock);
7311 }
7312 fail_1:
7313 for (i = 0; i < tx_done; i++) {
7314 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7315 pcq_destroy(txq->txq_interq);
7316 wm_free_tx_buffer(sc, txq);
7317 wm_free_tx_descs(sc, txq);
7318 if (txq->txq_lock)
7319 mutex_obj_free(txq->txq_lock);
7320 }
7321
7322 kmem_free(sc->sc_queue,
7323 sizeof(struct wm_queue) * sc->sc_nqueues);
7324 fail_0:
7325 return error;
7326 }
7327
7328 /*
7329  * wm_free_txrx_queues:
7330  *	Free {tx,rx} descriptors and {tx,rx} buffers.
7331  */
7332 static void
7333 wm_free_txrx_queues(struct wm_softc *sc)
7334 {
7335 int i;
7336
7337 for (i = 0; i < sc->sc_nqueues; i++) {
7338 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7339
7340 #ifdef WM_EVENT_COUNTERS
7341 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
7342 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
7343 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
7344 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
7345 #endif /* WM_EVENT_COUNTERS */
7346
7347 wm_free_rx_buffer(sc, rxq);
7348 wm_free_rx_descs(sc, rxq);
7349 if (rxq->rxq_lock)
7350 mutex_obj_free(rxq->rxq_lock);
7351 }
7352
7353 for (i = 0; i < sc->sc_nqueues; i++) {
7354 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7355 struct mbuf *m;
7356 #ifdef WM_EVENT_COUNTERS
7357 int j;
7358
7359 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
7360 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
7361 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
7362 WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
7363 WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
7364 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
7365 WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
7366 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
7367 WM_Q_EVCNT_DETACH(txq, tso, txq, i);
7368 WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
7369 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
7370
7371 for (j = 0; j < WM_NTXSEGS; j++)
7372 evcnt_detach(&txq->txq_ev_txseg[j]);
7373
7374 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
7375 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
7376 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
7377 WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
7378 WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
7379 WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
7380 #endif /* WM_EVENT_COUNTERS */
7381
7382 /* Drain txq_interq */
7383 while ((m = pcq_get(txq->txq_interq)) != NULL)
7384 m_freem(m);
7385 pcq_destroy(txq->txq_interq);
7386
7387 wm_free_tx_buffer(sc, txq);
7388 wm_free_tx_descs(sc, txq);
7389 if (txq->txq_lock)
7390 mutex_obj_free(txq->txq_lock);
7391 }
7392
7393 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
7394 }
7395
7396 static void
7397 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7398 {
7399
7400 KASSERT(mutex_owned(txq->txq_lock));
7401
7402 /* Initialize the transmit descriptor ring. */
7403 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
7404 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
7405 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7406 txq->txq_free = WM_NTXDESC(txq);
7407 txq->txq_next = 0;
7408 }
7409
7410 static void
7411 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7412 struct wm_txqueue *txq)
7413 {
7414
7415 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7416 device_xname(sc->sc_dev), __func__));
7417 KASSERT(mutex_owned(txq->txq_lock));
7418
7419 if (sc->sc_type < WM_T_82543) {
7420 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
7421 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
7422 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
7423 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
7424 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
7425 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
7426 } else {
7427 int qid = wmq->wmq_id;
7428
7429 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
7430 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
7431 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
7432 CSR_WRITE(sc, WMREG_TDH(qid), 0);
7433
7434 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7435 			/*
7436 			 * Don't write TDT before TCTL.EN is set.
7437 			 * See the documentation.
7438 			 */
7439 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
7440 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
7441 | TXDCTL_WTHRESH(0));
7442 else {
7443 /* XXX should update with AIM? */
7444 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
7445 if (sc->sc_type >= WM_T_82540) {
7446 /* Should be the same */
7447 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
7448 }
7449
7450 CSR_WRITE(sc, WMREG_TDT(qid), 0);
7451 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
7452 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
7453 }
7454 }
7455 }
7456
7457 static void
7458 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7459 {
7460 int i;
7461
7462 KASSERT(mutex_owned(txq->txq_lock));
7463
7464 /* Initialize the transmit job descriptors. */
7465 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
7466 txq->txq_soft[i].txs_mbuf = NULL;
7467 txq->txq_sfree = WM_TXQUEUELEN(txq);
7468 txq->txq_snext = 0;
7469 txq->txq_sdirty = 0;
7470 }
7471
7472 static void
7473 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7474 struct wm_txqueue *txq)
7475 {
7476
7477 KASSERT(mutex_owned(txq->txq_lock));
7478
7479 /*
7480 * Set up some register offsets that are different between
7481 * the i82542 and the i82543 and later chips.
7482 */
7483 if (sc->sc_type < WM_T_82543)
7484 txq->txq_tdt_reg = WMREG_OLD_TDT;
7485 else
7486 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
7487
7488 wm_init_tx_descs(sc, txq);
7489 wm_init_tx_regs(sc, wmq, txq);
7490 wm_init_tx_buffer(sc, txq);
7491
7492 	/* Clear all bits other than WM_TXQ_LINKDOWN_DISCARD. */
7493 txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
7494
7495 txq->txq_sending = false;
7496 }
7497
7498 static void
7499 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7500 struct wm_rxqueue *rxq)
7501 {
7502
7503 KASSERT(mutex_owned(rxq->rxq_lock));
7504
7505 /*
7506 * Initialize the receive descriptor and receive job
7507 * descriptor rings.
7508 */
7509 if (sc->sc_type < WM_T_82543) {
7510 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
7511 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
7512 CSR_WRITE(sc, WMREG_OLD_RDLEN0,
7513 rxq->rxq_descsize * rxq->rxq_ndesc);
7514 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
7515 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
7516 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
7517
7518 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
7519 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
7520 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
7521 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
7522 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
7523 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
7524 } else {
7525 int qid = wmq->wmq_id;
7526
7527 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
7528 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
7529 CSR_WRITE(sc, WMREG_RDLEN(qid),
7530 rxq->rxq_descsize * rxq->rxq_ndesc);
7531
7532 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7533 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
7534 panic("%s: MCLBYTES %d unsupported for 82575 "
7535 "or higher\n", __func__, MCLBYTES);
7536
7537 			/*
7538 			 * Currently, we support only
7539 			 * SRRCTL_DESCTYPE_ADV_ONEBUF.
7540 			 */
7541 CSR_WRITE(sc, WMREG_SRRCTL(qid),
7542 SRRCTL_DESCTYPE_ADV_ONEBUF
7543 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
7544 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
7545 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
7546 | RXDCTL_WTHRESH(1));
7547 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7548 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7549 } else {
7550 CSR_WRITE(sc, WMREG_RDH(qid), 0);
7551 CSR_WRITE(sc, WMREG_RDT(qid), 0);
7552 /* XXX should update with AIM? */
7553 CSR_WRITE(sc, WMREG_RDTR,
7554 (wmq->wmq_itr / 4) | RDTR_FPD);
7555 			/* MUST be the same */
7556 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
7557 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
7558 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
7559 }
7560 }
7561 }
7562
7563 static int
7564 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7565 {
7566 struct wm_rxsoft *rxs;
7567 int error, i;
7568
7569 KASSERT(mutex_owned(rxq->rxq_lock));
7570
7571 for (i = 0; i < rxq->rxq_ndesc; i++) {
7572 rxs = &rxq->rxq_soft[i];
7573 if (rxs->rxs_mbuf == NULL) {
7574 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
7575 log(LOG_ERR, "%s: unable to allocate or map "
7576 "rx buffer %d, error = %d\n",
7577 device_xname(sc->sc_dev), i, error);
7578 /*
7579 * XXX Should attempt to run with fewer receive
7580 * XXX buffers instead of just failing.
7581 */
7582 wm_rxdrain(rxq);
7583 return ENOMEM;
7584 }
7585 } else {
7586 			/*
7587 			 * For 82575 and 82576, the RX descriptors must be
7588 			 * initialized after RCTL.EN is set in
7589 			 * wm_set_filter().
7590 			 */
7591 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
7592 wm_init_rxdesc(rxq, i);
7593 }
7594 }
7595 rxq->rxq_ptr = 0;
7596 rxq->rxq_discard = 0;
7597 WM_RXCHAIN_RESET(rxq);
7598
7599 return 0;
7600 }
7601
7602 static int
7603 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7604 struct wm_rxqueue *rxq)
7605 {
7606
7607 KASSERT(mutex_owned(rxq->rxq_lock));
7608
7609 /*
7610 * Set up some register offsets that are different between
7611 * the i82542 and the i82543 and later chips.
7612 */
7613 if (sc->sc_type < WM_T_82543)
7614 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
7615 else
7616 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
7617
7618 wm_init_rx_regs(sc, wmq, rxq);
7619 return wm_init_rx_buffer(sc, rxq);
7620 }
7621
7622 /*
7623  * wm_init_txrx_queues:
7624  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
7625  */
7626 static int
7627 wm_init_txrx_queues(struct wm_softc *sc)
7628 {
7629 int i, error = 0;
7630
7631 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7632 device_xname(sc->sc_dev), __func__));
7633
7634 for (i = 0; i < sc->sc_nqueues; i++) {
7635 struct wm_queue *wmq = &sc->sc_queue[i];
7636 struct wm_txqueue *txq = &wmq->wmq_txq;
7637 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7638
7639 		/*
7640 		 * TODO
7641 		 * Currently, we use a constant value instead of AIM
7642 		 * (Adaptive Interrupt Moderation). Furthermore, the interrupt
7643 		 * interval of multiqueue in polling mode is less than the
7644 		 * default value. More tuning and AIM are required.
7645 		 */
7646 if (wm_is_using_multiqueue(sc))
7647 wmq->wmq_itr = 50;
7648 else
7649 wmq->wmq_itr = sc->sc_itr_init;
7650 wmq->wmq_set_itr = true;
7651
7652 mutex_enter(txq->txq_lock);
7653 wm_init_tx_queue(sc, wmq, txq);
7654 mutex_exit(txq->txq_lock);
7655
7656 mutex_enter(rxq->rxq_lock);
7657 error = wm_init_rx_queue(sc, wmq, rxq);
7658 mutex_exit(rxq->rxq_lock);
7659 if (error)
7660 break;
7661 }
7662
7663 return error;
7664 }
7665
7666 /*
7667 * wm_tx_offload:
7668 *
7669 * Set up TCP/IP checksumming parameters for the
7670 * specified packet.
7671 */
7672 static void
7673 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7674 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
7675 {
7676 struct mbuf *m0 = txs->txs_mbuf;
7677 struct livengood_tcpip_ctxdesc *t;
7678 uint32_t ipcs, tucs, cmd, cmdlen, seg;
7679 uint32_t ipcse;
7680 struct ether_header *eh;
7681 int offset, iphl;
7682 uint8_t fields;
7683
7684 /*
7685 * XXX It would be nice if the mbuf pkthdr had offset
7686 * fields for the protocol headers.
7687 */
7688
7689 eh = mtod(m0, struct ether_header *);
7690 switch (htons(eh->ether_type)) {
7691 case ETHERTYPE_IP:
7692 case ETHERTYPE_IPV6:
7693 offset = ETHER_HDR_LEN;
7694 break;
7695
7696 case ETHERTYPE_VLAN:
7697 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7698 break;
7699
7700 default:
7701 /* Don't support this protocol or encapsulation. */
7702 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
7703 txq->txq_last_hw_ipcs = 0;
7704 txq->txq_last_hw_tucs = 0;
7705 *fieldsp = 0;
7706 *cmdp = 0;
7707 return;
7708 }
7709
7710 if ((m0->m_pkthdr.csum_flags &
7711 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
7712 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
7713 } else
7714 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
7715
7716 ipcse = offset + iphl - 1;
7717
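	/*
	 * Worked example of the computation above (illustrative values,
	 * not taken from a specific packet): for an untagged IPv4/TCP
	 * frame with a standard 20-byte IP header, offset = ETHER_HDR_LEN
	 * = 14 and iphl = 20, so
	 *
	 *	ipcse = 14 + 20 - 1 = 33
	 *
	 * i.e. IPCSE points at the last byte of the IP header, the ending
	 * offset the hardware uses for the IP header checksum.
	 */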
7718 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
7719 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
7720 seg = 0;
7721 fields = 0;
7722
7723 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7724 int hlen = offset + iphl;
7725 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7726
7727 if (__predict_false(m0->m_len <
7728 (hlen + sizeof(struct tcphdr)))) {
7729 /*
7730 * TCP/IP headers are not in the first mbuf; we need
7731 * to do this the slow and painful way. Let's just
7732 * hope this doesn't happen very often.
7733 */
7734 struct tcphdr th;
7735
7736 WM_Q_EVCNT_INCR(txq, tsopain);
7737
7738 m_copydata(m0, hlen, sizeof(th), &th);
7739 if (v4) {
7740 struct ip ip;
7741
7742 m_copydata(m0, offset, sizeof(ip), &ip);
7743 ip.ip_len = 0;
7744 m_copyback(m0,
7745 offset + offsetof(struct ip, ip_len),
7746 sizeof(ip.ip_len), &ip.ip_len);
7747 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7748 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7749 } else {
7750 struct ip6_hdr ip6;
7751
7752 m_copydata(m0, offset, sizeof(ip6), &ip6);
7753 ip6.ip6_plen = 0;
7754 m_copyback(m0,
7755 offset + offsetof(struct ip6_hdr, ip6_plen),
7756 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7757 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7758 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7759 }
7760 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7761 sizeof(th.th_sum), &th.th_sum);
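			/*
			 * Note (the usual TSO seeding convention; the exact
			 * requirement is in the Intel manuals): the
			 * pseudo-header checksum stored above is computed
			 * with the length field zeroed, because the
			 * controller inserts the per-segment length itself
			 * when it splits the payload into MSS-sized
			 * segments.
			 */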
7762
7763 hlen += th.th_off << 2;
7764 } else {
7765 /*
7766 * TCP/IP headers are in the first mbuf; we can do
7767 * this the easy way.
7768 */
7769 struct tcphdr *th;
7770
7771 if (v4) {
7772 struct ip *ip =
7773 (void *)(mtod(m0, char *) + offset);
7774 th = (void *)(mtod(m0, char *) + hlen);
7775
7776 ip->ip_len = 0;
7777 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7778 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7779 } else {
7780 struct ip6_hdr *ip6 =
7781 (void *)(mtod(m0, char *) + offset);
7782 th = (void *)(mtod(m0, char *) + hlen);
7783
7784 ip6->ip6_plen = 0;
7785 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7786 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7787 }
7788 hlen += th->th_off << 2;
7789 }
7790
7791 if (v4) {
7792 WM_Q_EVCNT_INCR(txq, tso);
7793 cmdlen |= WTX_TCPIP_CMD_IP;
7794 } else {
7795 WM_Q_EVCNT_INCR(txq, tso6);
7796 ipcse = 0;
7797 }
7798 cmd |= WTX_TCPIP_CMD_TSE;
7799 cmdlen |= WTX_TCPIP_CMD_TSE |
7800 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
7801 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
7802 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
7803 }
7804
7805 /*
7806 * NOTE: Even if we're not using the IP or TCP/UDP checksum
7807 * offload feature, if we load the context descriptor, we
7808 * MUST provide valid values for IPCSS and TUCSS fields.
7809 */
7810
7811 ipcs = WTX_TCPIP_IPCSS(offset) |
7812 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
7813 WTX_TCPIP_IPCSE(ipcse);
7814 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
7815 WM_Q_EVCNT_INCR(txq, ipsum);
7816 fields |= WTX_IXSM;
7817 }
7818
7819 offset += iphl;
7820
7821 if (m0->m_pkthdr.csum_flags &
7822 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
7823 WM_Q_EVCNT_INCR(txq, tusum);
7824 fields |= WTX_TXSM;
7825 tucs = WTX_TCPIP_TUCSS(offset) |
7826 WTX_TCPIP_TUCSO(offset +
7827 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
7828 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7829 } else if ((m0->m_pkthdr.csum_flags &
7830 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
7831 WM_Q_EVCNT_INCR(txq, tusum6);
7832 fields |= WTX_TXSM;
7833 tucs = WTX_TCPIP_TUCSS(offset) |
7834 WTX_TCPIP_TUCSO(offset +
7835 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
7836 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7837 } else {
7838 /* Just initialize it to a valid TCP context. */
7839 tucs = WTX_TCPIP_TUCSS(offset) |
7840 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
7841 WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7842 }
7843
7844 *cmdp = cmd;
7845 *fieldsp = fields;
7846
7847 	/*
7848 	 * We don't have to write a context descriptor for every packet,
7849 	 * except on the 82574. For the 82574, we must write a context
7850 	 * descriptor for every packet when we use two descriptor queues.
7851 	 *
7852 	 * The 82574L can only remember the *last* context used,
7853 	 * regardless of the queue it was used for. We cannot reuse
7854 	 * contexts on this hardware platform and must generate a new
7855 	 * context every time. See the 82574L hardware spec, section
7856 	 * 7.2.6, second note.
7857 	 */
7858 if (sc->sc_nqueues < 2) {
7859 		/*
7860 		 * Setting up a new checksum offload context for every
7861 		 * frame takes a lot of processing time for the hardware.
7862 		 * This also reduces performance a lot for small-sized
7863 		 * frames, so avoid it if the driver can use a previously
7864 		 * configured checksum offload context.
7865 		 * For TSO, in theory we could reuse the same TSO context
7866 		 * only if the frame is the same type (IP/TCP) and has the
7867 		 * same MSS. However, checking whether a frame has the same
7868 		 * IP/TCP structure is hard, so just ignore that and always
7869 		 * establish a new TSO context.
7870 		 */
7871 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
7872 == 0) {
7873 if (txq->txq_last_hw_cmd == cmd &&
7874 txq->txq_last_hw_fields == fields &&
7875 txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
7876 txq->txq_last_hw_tucs == (tucs & 0xffff)) {
7877 WM_Q_EVCNT_INCR(txq, skipcontext);
7878 return;
7879 }
7880 }
7881
7882 txq->txq_last_hw_cmd = cmd;
7883 txq->txq_last_hw_fields = fields;
7884 txq->txq_last_hw_ipcs = (ipcs & 0xffff);
7885 txq->txq_last_hw_tucs = (tucs & 0xffff);
7886 }
7887
7888 /* Fill in the context descriptor. */
7889 t = (struct livengood_tcpip_ctxdesc *)
7890 &txq->txq_descs[txq->txq_next];
7891 t->tcpip_ipcs = htole32(ipcs);
7892 t->tcpip_tucs = htole32(tucs);
7893 t->tcpip_cmdlen = htole32(cmdlen);
7894 t->tcpip_seg = htole32(seg);
7895 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7896
7897 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7898 txs->txs_ndesc++;
7899 }
7900
7901 static inline int
7902 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
7903 {
7904 struct wm_softc *sc = ifp->if_softc;
7905 u_int cpuid = cpu_index(curcpu());
7906
7907 	/*
7908 	 * Currently, a simple distribution strategy.
7909 	 * TODO:
7910 	 *	Distribute by flowid (RSS hash value).
7911 	 */
7912 return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
7913 }
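
/*
 * A minimal worked example of the mapping above (CPU counts assumed):
 * with ncpu = 8, sc_nqueues = 4 and sc_affinity_offset = 2, a packet
 * transmitted from CPU 3 selects queue
 *
 *	((3 + 8 - 2) % 8) % 4 = (9 % 8) % 4 = 1
 *
 * so consecutive CPUs (relative to the affinity offset) are spread
 * round-robin across the Tx queues.
 */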
7914
7915 static inline bool
7916 wm_linkdown_discard(struct wm_txqueue *txq)
7917 {
7918
7919 if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
7920 return true;
7921
7922 return false;
7923 }
7924
7925 /*
7926 * wm_start: [ifnet interface function]
7927 *
7928 * Start packet transmission on the interface.
7929 */
7930 static void
7931 wm_start(struct ifnet *ifp)
7932 {
7933 struct wm_softc *sc = ifp->if_softc;
7934 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7935
7936 #ifdef WM_MPSAFE
7937 KASSERT(if_is_mpsafe(ifp));
7938 #endif
7939 /*
7940 * if_obytes and if_omcasts are added in if_transmit()@if.c.
7941 */
7942
7943 mutex_enter(txq->txq_lock);
7944 if (!txq->txq_stopping)
7945 wm_start_locked(ifp);
7946 mutex_exit(txq->txq_lock);
7947 }
7948
7949 static void
7950 wm_start_locked(struct ifnet *ifp)
7951 {
7952 struct wm_softc *sc = ifp->if_softc;
7953 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7954
7955 wm_send_common_locked(ifp, txq, false);
7956 }
7957
7958 static int
7959 wm_transmit(struct ifnet *ifp, struct mbuf *m)
7960 {
7961 int qid;
7962 struct wm_softc *sc = ifp->if_softc;
7963 struct wm_txqueue *txq;
7964
7965 qid = wm_select_txqueue(ifp, m);
7966 txq = &sc->sc_queue[qid].wmq_txq;
7967
7968 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7969 m_freem(m);
7970 WM_Q_EVCNT_INCR(txq, pcqdrop);
7971 return ENOBUFS;
7972 }
7973
7974 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
7975 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
7976 if (m->m_flags & M_MCAST)
7977 if_statinc_ref(nsr, if_omcasts);
7978 IF_STAT_PUTREF(ifp);
7979
7980 if (mutex_tryenter(txq->txq_lock)) {
7981 if (!txq->txq_stopping)
7982 wm_transmit_locked(ifp, txq);
7983 mutex_exit(txq->txq_lock);
7984 }
7985
7986 return 0;
7987 }
7988
7989 static void
7990 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7991 {
7992
7993 wm_send_common_locked(ifp, txq, true);
7994 }
7995
7996 static void
7997 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7998 bool is_transmit)
7999 {
8000 struct wm_softc *sc = ifp->if_softc;
8001 struct mbuf *m0;
8002 struct wm_txsoft *txs;
8003 bus_dmamap_t dmamap;
8004 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
8005 bus_addr_t curaddr;
8006 bus_size_t seglen, curlen;
8007 uint32_t cksumcmd;
8008 uint8_t cksumfields;
8009 bool remap = true;
8010
8011 KASSERT(mutex_owned(txq->txq_lock));
8012
8013 if ((ifp->if_flags & IFF_RUNNING) == 0)
8014 return;
8015 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8016 return;
8017
8018 if (__predict_false(wm_linkdown_discard(txq))) {
8019 do {
8020 if (is_transmit)
8021 m0 = pcq_get(txq->txq_interq);
8022 else
8023 IFQ_DEQUEUE(&ifp->if_snd, m0);
8024 			/*
8025 			 * Increment the successful packet counter even here,
8026 			 * where the packet is discarded by the link-down PHY.
8027 			 */
8028 if (m0 != NULL) {
8029 if_statinc(ifp, if_opackets);
8030 m_freem(m0);
8031 }
8032 } while (m0 != NULL);
8033 return;
8034 }
8035
8036 /* Remember the previous number of free descriptors. */
8037 ofree = txq->txq_free;
8038
8039 /*
8040 * Loop through the send queue, setting up transmit descriptors
8041 * until we drain the queue, or use up all available transmit
8042 * descriptors.
8043 */
8044 for (;;) {
8045 m0 = NULL;
8046
8047 /* Get a work queue entry. */
8048 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8049 wm_txeof(txq, UINT_MAX);
8050 if (txq->txq_sfree == 0) {
8051 DPRINTF(sc, WM_DEBUG_TX,
8052 ("%s: TX: no free job descriptors\n",
8053 device_xname(sc->sc_dev)));
8054 WM_Q_EVCNT_INCR(txq, txsstall);
8055 break;
8056 }
8057 }
8058
8059 /* Grab a packet off the queue. */
8060 if (is_transmit)
8061 m0 = pcq_get(txq->txq_interq);
8062 else
8063 IFQ_DEQUEUE(&ifp->if_snd, m0);
8064 if (m0 == NULL)
8065 break;
8066
8067 DPRINTF(sc, WM_DEBUG_TX,
8068 ("%s: TX: have packet to transmit: %p\n",
8069 device_xname(sc->sc_dev), m0));
8070
8071 txs = &txq->txq_soft[txq->txq_snext];
8072 dmamap = txs->txs_dmamap;
8073
8074 use_tso = (m0->m_pkthdr.csum_flags &
8075 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
8076
8077 /*
8078 * So says the Linux driver:
8079 * The controller does a simple calculation to make sure
8080 * there is enough room in the FIFO before initiating the
8081 * DMA for each buffer. The calc is:
8082 * 4 = ceil(buffer len / MSS)
8083 * To make sure we don't overrun the FIFO, adjust the max
8084 * buffer len if the MSS drops.
8085 */
8086 dmamap->dm_maxsegsz =
8087 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
8088 ? m0->m_pkthdr.segsz << 2
8089 : WTX_MAX_LEN;
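		/*
		 * Illustration (MSS value assumed): with segsz = 1448 the
		 * clamp above yields dm_maxsegsz = 1448 << 2 = 5792. For
		 * non-TSO packets, or when four times the MSS would reach
		 * WTX_MAX_LEN, the limit is WTX_MAX_LEN itself.
		 */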
8090
8091 /*
8092 * Load the DMA map. If this fails, the packet either
8093 * didn't fit in the allotted number of segments, or we
8094 * were short on resources. For the too-many-segments
8095 * case, we simply report an error and drop the packet,
8096 * since we can't sanely copy a jumbo packet to a single
8097 * buffer.
8098 */
8099 retry:
8100 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8101 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8102 if (__predict_false(error)) {
8103 if (error == EFBIG) {
8104 if (remap == true) {
8105 struct mbuf *m;
8106
8107 remap = false;
8108 m = m_defrag(m0, M_NOWAIT);
8109 if (m != NULL) {
8110 WM_Q_EVCNT_INCR(txq, defrag);
8111 m0 = m;
8112 goto retry;
8113 }
8114 }
8115 WM_Q_EVCNT_INCR(txq, toomanyseg);
8116 log(LOG_ERR, "%s: Tx packet consumes too many "
8117 "DMA segments, dropping...\n",
8118 device_xname(sc->sc_dev));
8119 wm_dump_mbuf_chain(sc, m0);
8120 m_freem(m0);
8121 continue;
8122 }
8123 /* Short on resources, just stop for now. */
8124 DPRINTF(sc, WM_DEBUG_TX,
8125 ("%s: TX: dmamap load failed: %d\n",
8126 device_xname(sc->sc_dev), error));
8127 break;
8128 }
8129
8130 segs_needed = dmamap->dm_nsegs;
8131 if (use_tso) {
8132 /* For sentinel descriptor; see below. */
8133 segs_needed++;
8134 }
8135
8136 /*
8137 * Ensure we have enough descriptors free to describe
8138 * the packet. Note, we always reserve one descriptor
8139 * at the end of the ring due to the semantics of the
8140 * TDT register, plus one more in the event we need
8141 * to load offload context.
8142 */
8143 if (segs_needed > txq->txq_free - 2) {
8144 			/*
8145 			 * Not enough free descriptors to transmit this
8146 			 * packet. We haven't committed anything yet,
8147 			 * so just unload the DMA map, put the packet
8148 			 * back on the queue, and punt. Notify the upper
8149 			 * layer that there are no more slots left.
8150 			 */
8151 DPRINTF(sc, WM_DEBUG_TX,
8152 ("%s: TX: need %d (%d) descriptors, have %d\n",
8153 device_xname(sc->sc_dev), dmamap->dm_nsegs,
8154 segs_needed, txq->txq_free - 1));
8155 txq->txq_flags |= WM_TXQ_NO_SPACE;
8156 bus_dmamap_unload(sc->sc_dmat, dmamap);
8157 WM_Q_EVCNT_INCR(txq, txdstall);
8158 break;
8159 }
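		/*
		 * Example of the reservation above (illustrative numbers):
		 * with txq_free = 10, a packet needing segs_needed = 9
		 * descriptors is deferred because 9 > 10 - 2; one slot is
		 * kept for the TDT ring semantics and one for a possible
		 * checksum context descriptor.
		 */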
8160
8161 /*
8162 * Check for 82547 Tx FIFO bug. We need to do this
8163 * once we know we can transmit the packet, since we
8164 * do some internal FIFO space accounting here.
8165 */
8166 if (sc->sc_type == WM_T_82547 &&
8167 wm_82547_txfifo_bugchk(sc, m0)) {
8168 DPRINTF(sc, WM_DEBUG_TX,
8169 ("%s: TX: 82547 Tx FIFO bug detected\n",
8170 device_xname(sc->sc_dev)));
8171 txq->txq_flags |= WM_TXQ_NO_SPACE;
8172 bus_dmamap_unload(sc->sc_dmat, dmamap);
8173 WM_Q_EVCNT_INCR(txq, fifo_stall);
8174 break;
8175 }
8176
8177 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8178
8179 DPRINTF(sc, WM_DEBUG_TX,
8180 ("%s: TX: packet has %d (%d) DMA segments\n",
8181 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8182
8183 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8184
8185 		/*
8186 		 * Store a pointer to the packet so that we can free it
8187 		 * later.
8188 		 *
8189 		 * Initially, we consider the number of descriptors the
8190 		 * packet uses to be the number of DMA segments. This may
8191 		 * be incremented by 1 if we do checksum offload (a
8192 		 * descriptor is used to set the checksum context).
8193 		 */
8194 txs->txs_mbuf = m0;
8195 txs->txs_firstdesc = txq->txq_next;
8196 txs->txs_ndesc = segs_needed;
8197
8198 /* Set up offload parameters for this packet. */
8199 if (m0->m_pkthdr.csum_flags &
8200 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8201 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8202 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8203 wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
8204 } else {
8205 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8206 txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
8207 cksumcmd = 0;
8208 cksumfields = 0;
8209 }
8210
8211 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
8212
8213 /* Sync the DMA map. */
8214 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8215 BUS_DMASYNC_PREWRITE);
8216
8217 /* Initialize the transmit descriptor. */
8218 for (nexttx = txq->txq_next, seg = 0;
8219 seg < dmamap->dm_nsegs; seg++) {
8220 for (seglen = dmamap->dm_segs[seg].ds_len,
8221 curaddr = dmamap->dm_segs[seg].ds_addr;
8222 seglen != 0;
8223 curaddr += curlen, seglen -= curlen,
8224 nexttx = WM_NEXTTX(txq, nexttx)) {
8225 curlen = seglen;
8226
8227 /*
8228 * So says the Linux driver:
8229 * Work around for premature descriptor
8230 * write-backs in TSO mode. Append a
8231 * 4-byte sentinel descriptor.
8232 */
8233 if (use_tso && seg == dmamap->dm_nsegs - 1 &&
8234 curlen > 8)
8235 curlen -= 4;
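				/*
				 * E.g. (sizes assumed): a 1500-byte final
				 * segment is written as 1496 bytes here; the
				 * remaining 4 bytes then become the sentinel
				 * descriptor on the next loop iteration,
				 * matching the extra descriptor counted in
				 * segs_needed for TSO above.
				 */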
8236
8237 wm_set_dma_addr(
8238 &txq->txq_descs[nexttx].wtx_addr, curaddr);
8239 txq->txq_descs[nexttx].wtx_cmdlen
8240 = htole32(cksumcmd | curlen);
8241 txq->txq_descs[nexttx].wtx_fields.wtxu_status
8242 = 0;
8243 txq->txq_descs[nexttx].wtx_fields.wtxu_options
8244 = cksumfields;
8245 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
8246 lasttx = nexttx;
8247
8248 DPRINTF(sc, WM_DEBUG_TX,
8249 ("%s: TX: desc %d: low %#" PRIx64 ", "
8250 "len %#04zx\n",
8251 device_xname(sc->sc_dev), nexttx,
8252 (uint64_t)curaddr, curlen));
8253 }
8254 }
8255
8256 KASSERT(lasttx != -1);
8257
8258 /*
8259 * Set up the command byte on the last descriptor of
8260 * the packet. If we're in the interrupt delay window,
8261 * delay the interrupt.
8262 */
8263 txq->txq_descs[lasttx].wtx_cmdlen |=
8264 htole32(WTX_CMD_EOP | WTX_CMD_RS);
8265
8266 /*
8267 * If VLANs are enabled and the packet has a VLAN tag, set
8268 * up the descriptor to encapsulate the packet for us.
8269 *
8270 * This is only valid on the last descriptor of the packet.
8271 */
8272 if (vlan_has_tag(m0)) {
8273 txq->txq_descs[lasttx].wtx_cmdlen |=
8274 htole32(WTX_CMD_VLE);
8275 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
8276 = htole16(vlan_get_tag(m0));
8277 }
8278
8279 txs->txs_lastdesc = lasttx;
8280
8281 DPRINTF(sc, WM_DEBUG_TX,
8282 ("%s: TX: desc %d: cmdlen 0x%08x\n",
8283 device_xname(sc->sc_dev),
8284 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8285
8286 /* Sync the descriptors we're using. */
8287 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8288 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8289
8290 /* Give the packet to the chip. */
8291 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8292
8293 DPRINTF(sc, WM_DEBUG_TX,
8294 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8295
8296 DPRINTF(sc, WM_DEBUG_TX,
8297 ("%s: TX: finished transmitting packet, job %d\n",
8298 device_xname(sc->sc_dev), txq->txq_snext));
8299
8300 /* Advance the tx pointer. */
8301 txq->txq_free -= txs->txs_ndesc;
8302 txq->txq_next = nexttx;
8303
8304 txq->txq_sfree--;
8305 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8306
8307 /* Pass the packet to any BPF listeners. */
8308 bpf_mtap(ifp, m0, BPF_D_OUT);
8309 }
8310
8311 if (m0 != NULL) {
8312 txq->txq_flags |= WM_TXQ_NO_SPACE;
8313 WM_Q_EVCNT_INCR(txq, descdrop);
8314 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8315 __func__));
8316 m_freem(m0);
8317 }
8318
8319 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8320 /* No more slots; notify upper layer. */
8321 txq->txq_flags |= WM_TXQ_NO_SPACE;
8322 }
8323
8324 if (txq->txq_free != ofree) {
8325 /* Set a watchdog timer in case the chip flakes out. */
8326 txq->txq_lastsent = time_uptime;
8327 txq->txq_sending = true;
8328 }
8329 }
8330
8331 /*
8332 * wm_nq_tx_offload:
8333 *
8334 * Set up TCP/IP checksumming parameters for the
8335 * specified packet, for NEWQUEUE devices
8336 */
8337 static void
8338 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8339 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
8340 {
8341 struct mbuf *m0 = txs->txs_mbuf;
8342 uint32_t vl_len, mssidx, cmdc;
8343 struct ether_header *eh;
8344 int offset, iphl;
8345
8346 /*
8347 * XXX It would be nice if the mbuf pkthdr had offset
8348 * fields for the protocol headers.
8349 */
8350 *cmdlenp = 0;
8351 *fieldsp = 0;
8352
8353 eh = mtod(m0, struct ether_header *);
8354 switch (htons(eh->ether_type)) {
8355 case ETHERTYPE_IP:
8356 case ETHERTYPE_IPV6:
8357 offset = ETHER_HDR_LEN;
8358 break;
8359
8360 case ETHERTYPE_VLAN:
8361 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8362 break;
8363
8364 default:
8365 /* Don't support this protocol or encapsulation. */
8366 *do_csum = false;
8367 return;
8368 }
8369 *do_csum = true;
8370 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
8371 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
8372
8373 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
8374 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
8375
8376 if ((m0->m_pkthdr.csum_flags &
8377 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8378 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8379 } else {
8380 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8381 }
8382 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
8383 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
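	/*
	 * Sketch of the resulting vl_len layout (field values assumed):
	 * for an untagged IPv4 frame, MACLEN = 14 and IPLEN = 20, so
	 *
	 *	vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT)
	 *	    | (20 << NQTXC_VLLEN_IPLEN_SHIFT);
	 *
	 * The VLAN tag, if any, is merged in below.
	 */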
8384
8385 if (vlan_has_tag(m0)) {
8386 vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
8387 << NQTXC_VLLEN_VLAN_SHIFT);
8388 *cmdlenp |= NQTX_CMD_VLE;
8389 }
8390
8391 mssidx = 0;
8392
8393 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8394 int hlen = offset + iphl;
8395 int tcp_hlen;
8396 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8397
8398 if (__predict_false(m0->m_len <
8399 (hlen + sizeof(struct tcphdr)))) {
8400 /*
8401 * TCP/IP headers are not in the first mbuf; we need
8402 * to do this the slow and painful way. Let's just
8403 * hope this doesn't happen very often.
8404 */
8405 struct tcphdr th;
8406
8407 WM_Q_EVCNT_INCR(txq, tsopain);
8408
8409 m_copydata(m0, hlen, sizeof(th), &th);
8410 if (v4) {
8411 struct ip ip;
8412
8413 m_copydata(m0, offset, sizeof(ip), &ip);
8414 ip.ip_len = 0;
8415 m_copyback(m0,
8416 offset + offsetof(struct ip, ip_len),
8417 sizeof(ip.ip_len), &ip.ip_len);
8418 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8419 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8420 } else {
8421 struct ip6_hdr ip6;
8422
8423 m_copydata(m0, offset, sizeof(ip6), &ip6);
8424 ip6.ip6_plen = 0;
8425 m_copyback(m0,
8426 offset + offsetof(struct ip6_hdr, ip6_plen),
8427 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8428 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8429 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8430 }
8431 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8432 sizeof(th.th_sum), &th.th_sum);
8433
8434 tcp_hlen = th.th_off << 2;
8435 } else {
8436 /*
8437 * TCP/IP headers are in the first mbuf; we can do
8438 * this the easy way.
8439 */
8440 struct tcphdr *th;
8441
8442 if (v4) {
8443 struct ip *ip =
8444 (void *)(mtod(m0, char *) + offset);
8445 th = (void *)(mtod(m0, char *) + hlen);
8446
8447 ip->ip_len = 0;
8448 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8449 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8450 } else {
8451 struct ip6_hdr *ip6 =
8452 (void *)(mtod(m0, char *) + offset);
8453 th = (void *)(mtod(m0, char *) + hlen);
8454
8455 ip6->ip6_plen = 0;
8456 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8457 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8458 }
8459 tcp_hlen = th->th_off << 2;
8460 }
8461 hlen += tcp_hlen;
8462 *cmdlenp |= NQTX_CMD_TSE;
8463
8464 if (v4) {
8465 WM_Q_EVCNT_INCR(txq, tso);
8466 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
8467 } else {
8468 WM_Q_EVCNT_INCR(txq, tso6);
8469 *fieldsp |= NQTXD_FIELDS_TUXSM;
8470 }
8471 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
8472 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8473 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
8474 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
8475 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
8476 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
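		/*
		 * For instance (assumed values): segsz = 1448 and a plain
		 * 20-byte TCP header give
		 *
		 *	mssidx = (1448 << NQTXC_MSSIDX_MSS_SHIFT)
		 *	    | (20 << NQTXC_MSSIDX_L4LEN_SHIFT);
		 *
		 * which is what the context descriptor written below
		 * carries for the TSO engine.
		 */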
8477 } else {
8478 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
8479 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8480 }
8481
8482 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
8483 *fieldsp |= NQTXD_FIELDS_IXSM;
8484 cmdc |= NQTXC_CMD_IP4;
8485 }
8486
8487 if (m0->m_pkthdr.csum_flags &
8488 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
8489 WM_Q_EVCNT_INCR(txq, tusum);
8490 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
8491 cmdc |= NQTXC_CMD_TCP;
8492 else
8493 cmdc |= NQTXC_CMD_UDP;
8494
8495 cmdc |= NQTXC_CMD_IP4;
8496 *fieldsp |= NQTXD_FIELDS_TUXSM;
8497 }
8498 if (m0->m_pkthdr.csum_flags &
8499 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
8500 WM_Q_EVCNT_INCR(txq, tusum6);
8501 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
8502 cmdc |= NQTXC_CMD_TCP;
8503 else
8504 cmdc |= NQTXC_CMD_UDP;
8505
8506 cmdc |= NQTXC_CMD_IP6;
8507 *fieldsp |= NQTXD_FIELDS_TUXSM;
8508 }
8509
8510 	/*
8511 	 * We don't have to write a context descriptor for every packet on
8512 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
8513 	 * I210 and I211; writing one per Tx queue is enough for these
8514 	 * controllers.
8515 	 * Writing a context descriptor for every packet adds overhead,
8516 	 * but it does not cause problems.
8517 	 */
8518 /* Fill in the context descriptor. */
8519 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
8520 htole32(vl_len);
8521 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
8522 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
8523 htole32(cmdc);
8524 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
8525 htole32(mssidx);
8526 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8527 DPRINTF(sc, WM_DEBUG_TX,
8528 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
8529 txq->txq_next, 0, vl_len));
8530 DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
8531 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8532 txs->txs_ndesc++;
8533 }
8534
8535 /*
8536 * wm_nq_start: [ifnet interface function]
8537 *
8538 * Start packet transmission on the interface for NEWQUEUE devices
8539 */
8540 static void
8541 wm_nq_start(struct ifnet *ifp)
8542 {
8543 struct wm_softc *sc = ifp->if_softc;
8544 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8545
8546 #ifdef WM_MPSAFE
8547 KASSERT(if_is_mpsafe(ifp));
8548 #endif
8549 /*
8550 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8551 */
8552
8553 mutex_enter(txq->txq_lock);
8554 if (!txq->txq_stopping)
8555 wm_nq_start_locked(ifp);
8556 mutex_exit(txq->txq_lock);
8557 }
8558
8559 static void
8560 wm_nq_start_locked(struct ifnet *ifp)
8561 {
8562 struct wm_softc *sc = ifp->if_softc;
8563 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8564
8565 wm_nq_send_common_locked(ifp, txq, false);
8566 }
8567
8568 static int
8569 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
8570 {
8571 int qid;
8572 struct wm_softc *sc = ifp->if_softc;
8573 struct wm_txqueue *txq;
8574
8575 qid = wm_select_txqueue(ifp, m);
8576 txq = &sc->sc_queue[qid].wmq_txq;
8577
8578 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8579 m_freem(m);
8580 WM_Q_EVCNT_INCR(txq, pcqdrop);
8581 return ENOBUFS;
8582 }
8583
8584 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8585 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8586 if (m->m_flags & M_MCAST)
8587 if_statinc_ref(nsr, if_omcasts);
8588 IF_STAT_PUTREF(ifp);
8589
8590 	/*
8591 	 * There are two situations in which this mutex_tryenter() can
8592 	 * fail at run time:
8593 	 * (1) contention with the interrupt handler (wm_txrxintr_msix())
8594 	 * (2) contention with the deferred if_start softint (wm_handle_queue())
8595 	 * In case (1), the last packet enqueued to txq->txq_interq is
8596 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
8597 	 * In case (2), the last packet enqueued to txq->txq_interq is
8598 	 * also dequeued by wm_deferred_start_locked(), so it does not get
8599 	 * stuck, either.
8600 	 */
8601 if (mutex_tryenter(txq->txq_lock)) {
8602 if (!txq->txq_stopping)
8603 wm_nq_transmit_locked(ifp, txq);
8604 mutex_exit(txq->txq_lock);
8605 }
8606
8607 return 0;
8608 }
8609
8610 static void
8611 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8612 {
8613
8614 wm_nq_send_common_locked(ifp, txq, true);
8615 }
8616
8617 static void
8618 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8619 bool is_transmit)
8620 {
8621 struct wm_softc *sc = ifp->if_softc;
8622 struct mbuf *m0;
8623 struct wm_txsoft *txs;
8624 bus_dmamap_t dmamap;
8625 int error, nexttx, lasttx = -1, seg, segs_needed;
8626 bool do_csum, sent;
8627 bool remap = true;
8628
8629 KASSERT(mutex_owned(txq->txq_lock));
8630
8631 if ((ifp->if_flags & IFF_RUNNING) == 0)
8632 return;
8633 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8634 return;
8635
8636 if (__predict_false(wm_linkdown_discard(txq))) {
8637 do {
8638 if (is_transmit)
8639 m0 = pcq_get(txq->txq_interq);
8640 else
8641 IFQ_DEQUEUE(&ifp->if_snd, m0);
8642 			/*
8643 			 * Increment the successful packet counter even here,
8644 			 * where the packet is discarded by the link-down PHY.
8645 			 */
8646 if (m0 != NULL) {
8647 if_statinc(ifp, if_opackets);
8648 m_freem(m0);
8649 }
8650 } while (m0 != NULL);
8651 return;
8652 }
8653
8654 sent = false;
8655
8656 /*
8657 * Loop through the send queue, setting up transmit descriptors
8658 * until we drain the queue, or use up all available transmit
8659 * descriptors.
8660 */
8661 for (;;) {
8662 m0 = NULL;
8663
8664 /* Get a work queue entry. */
8665 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8666 wm_txeof(txq, UINT_MAX);
8667 if (txq->txq_sfree == 0) {
8668 DPRINTF(sc, WM_DEBUG_TX,
8669 ("%s: TX: no free job descriptors\n",
8670 device_xname(sc->sc_dev)));
8671 WM_Q_EVCNT_INCR(txq, txsstall);
8672 break;
8673 }
8674 }
8675
8676 /* Grab a packet off the queue. */
8677 if (is_transmit)
8678 m0 = pcq_get(txq->txq_interq);
8679 else
8680 IFQ_DEQUEUE(&ifp->if_snd, m0);
8681 if (m0 == NULL)
8682 break;
8683
8684 DPRINTF(sc, WM_DEBUG_TX,
8685 ("%s: TX: have packet to transmit: %p\n",
8686 device_xname(sc->sc_dev), m0));
8687
8688 txs = &txq->txq_soft[txq->txq_snext];
8689 dmamap = txs->txs_dmamap;
8690
8691 /*
8692 * Load the DMA map. If this fails, the packet either
8693 * didn't fit in the allotted number of segments, or we
8694 * were short on resources. For the too-many-segments
8695 * case, we simply report an error and drop the packet,
8696 * since we can't sanely copy a jumbo packet to a single
8697 * buffer.
8698 */
8699 retry:
8700 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8701 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8702 if (__predict_false(error)) {
8703 if (error == EFBIG) {
8704 if (remap == true) {
8705 struct mbuf *m;
8706
8707 remap = false;
8708 m = m_defrag(m0, M_NOWAIT);
8709 if (m != NULL) {
8710 WM_Q_EVCNT_INCR(txq, defrag);
8711 m0 = m;
8712 goto retry;
8713 }
8714 }
8715 WM_Q_EVCNT_INCR(txq, toomanyseg);
8716 log(LOG_ERR, "%s: Tx packet consumes too many "
8717 "DMA segments, dropping...\n",
8718 device_xname(sc->sc_dev));
8719 wm_dump_mbuf_chain(sc, m0);
8720 m_freem(m0);
8721 continue;
8722 }
8723 /* Short on resources, just stop for now. */
8724 DPRINTF(sc, WM_DEBUG_TX,
8725 ("%s: TX: dmamap load failed: %d\n",
8726 device_xname(sc->sc_dev), error));
8727 break;
8728 }
8729
8730 segs_needed = dmamap->dm_nsegs;
8731
8732 /*
8733 * Ensure we have enough descriptors free to describe
8734 * the packet. Note, we always reserve one descriptor
8735 * at the end of the ring due to the semantics of the
8736 * TDT register, plus one more in the event we need
8737 * to load offload context.
8738 */
8739 if (segs_needed > txq->txq_free - 2) {
8740 			/*
8741 			 * Not enough free descriptors to transmit this
8742 			 * packet. We haven't committed anything yet,
8743 			 * so just unload the DMA map, put the packet
8744 			 * back on the queue, and punt. Notify the upper
8745 			 * layer that there are no more slots left.
8746 			 */
8747 DPRINTF(sc, WM_DEBUG_TX,
8748 ("%s: TX: need %d (%d) descriptors, have %d\n",
8749 device_xname(sc->sc_dev), dmamap->dm_nsegs,
8750 segs_needed, txq->txq_free - 1));
8751 txq->txq_flags |= WM_TXQ_NO_SPACE;
8752 bus_dmamap_unload(sc->sc_dmat, dmamap);
8753 WM_Q_EVCNT_INCR(txq, txdstall);
8754 break;
8755 }
8756
8757 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8758
8759 DPRINTF(sc, WM_DEBUG_TX,
8760 ("%s: TX: packet has %d (%d) DMA segments\n",
8761 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8762
8763 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8764
8765 		/*
8766 		 * Store a pointer to the packet so that we can free it
8767 		 * later.
8768 		 *
8769 		 * Initially, we consider the number of descriptors the
8770 		 * packet uses to be the number of DMA segments. This may
8771 		 * be incremented by 1 if we do checksum offload (a
8772 		 * descriptor is used to set the checksum context).
8773 		 */
8774 txs->txs_mbuf = m0;
8775 txs->txs_firstdesc = txq->txq_next;
8776 txs->txs_ndesc = segs_needed;
8777
8778 /* Set up offload parameters for this packet. */
8779 uint32_t cmdlen, fields, dcmdlen;
8780 if (m0->m_pkthdr.csum_flags &
8781 (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8782 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8783 M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8784 wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
8785 &do_csum);
8786 } else {
8787 do_csum = false;
8788 cmdlen = 0;
8789 fields = 0;
8790 }
8791
8792 /* Sync the DMA map. */
8793 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8794 BUS_DMASYNC_PREWRITE);
8795
8796 /* Initialize the first transmit descriptor. */
8797 nexttx = txq->txq_next;
8798 if (!do_csum) {
8799 /* Set up a legacy descriptor */
8800 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
8801 dmamap->dm_segs[0].ds_addr);
8802 txq->txq_descs[nexttx].wtx_cmdlen =
8803 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
8804 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
8805 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
8806 if (vlan_has_tag(m0)) {
8807 txq->txq_descs[nexttx].wtx_cmdlen |=
8808 htole32(WTX_CMD_VLE);
8809 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
8810 htole16(vlan_get_tag(m0));
8811 } else
8812 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
8813
8814 dcmdlen = 0;
8815 } else {
8816 /* Set up an advanced data descriptor */
8817 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8818 htole64(dmamap->dm_segs[0].ds_addr);
8819 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
8820 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8821 htole32(dmamap->dm_segs[0].ds_len | cmdlen);
8822 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
8823 htole32(fields);
8824 DPRINTF(sc, WM_DEBUG_TX,
8825 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
8826 device_xname(sc->sc_dev), nexttx,
8827 (uint64_t)dmamap->dm_segs[0].ds_addr));
8828 DPRINTF(sc, WM_DEBUG_TX,
8829 ("\t 0x%08x%08x\n", fields,
8830 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
8831 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
8832 }
8833
8834 lasttx = nexttx;
8835 nexttx = WM_NEXTTX(txq, nexttx);
8836 /*
8837 * Fill in the next descriptors. Legacy or advanced format
8838 * is the same here.
8839 */
8840 for (seg = 1; seg < dmamap->dm_nsegs;
8841 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
8842 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8843 htole64(dmamap->dm_segs[seg].ds_addr);
8844 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8845 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
8846 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
8847 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
8848 lasttx = nexttx;
8849
8850 DPRINTF(sc, WM_DEBUG_TX,
8851 ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
8852 device_xname(sc->sc_dev), nexttx,
8853 (uint64_t)dmamap->dm_segs[seg].ds_addr,
8854 dmamap->dm_segs[seg].ds_len));
8855 }
8856
8857 KASSERT(lasttx != -1);
8858
8859 /*
8860 * Set up the command byte on the last descriptor of
8861 * the packet. If we're in the interrupt delay window,
8862 * delay the interrupt.
8863 */
8864 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
8865 (NQTX_CMD_EOP | NQTX_CMD_RS));
8866 txq->txq_descs[lasttx].wtx_cmdlen |=
8867 htole32(WTX_CMD_EOP | WTX_CMD_RS);
8868
8869 txs->txs_lastdesc = lasttx;
8870
8871 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
8872 device_xname(sc->sc_dev),
8873 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8874
8875 /* Sync the descriptors we're using. */
8876 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8877 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8878
8879 /* Give the packet to the chip. */
8880 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8881 sent = true;
8882
8883 DPRINTF(sc, WM_DEBUG_TX,
8884 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8885
8886 DPRINTF(sc, WM_DEBUG_TX,
8887 ("%s: TX: finished transmitting packet, job %d\n",
8888 device_xname(sc->sc_dev), txq->txq_snext));
8889
8890 /* Advance the tx pointer. */
8891 txq->txq_free -= txs->txs_ndesc;
8892 txq->txq_next = nexttx;
8893
8894 txq->txq_sfree--;
8895 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8896
8897 /* Pass the packet to any BPF listeners. */
8898 bpf_mtap(ifp, m0, BPF_D_OUT);
8899 }
8900
8901 if (m0 != NULL) {
8902 txq->txq_flags |= WM_TXQ_NO_SPACE;
8903 WM_Q_EVCNT_INCR(txq, descdrop);
8904 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8905 __func__));
8906 m_freem(m0);
8907 }
8908
8909 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8910 /* No more slots; notify upper layer. */
8911 txq->txq_flags |= WM_TXQ_NO_SPACE;
8912 }
8913
8914 if (sent) {
8915 /* Set a watchdog timer in case the chip flakes out. */
8916 txq->txq_lastsent = time_uptime;
8917 txq->txq_sending = true;
8918 }
8919 }
8920
8921 static void
8922 wm_deferred_start_locked(struct wm_txqueue *txq)
8923 {
8924 struct wm_softc *sc = txq->txq_sc;
8925 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8926 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
8927 int qid = wmq->wmq_id;
8928
8929 KASSERT(mutex_owned(txq->txq_lock));
8930 KASSERT(!txq->txq_stopping);
8931
8932 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
8933 		/* XXX needed for ALTQ or a single-CPU system */
8934 if (qid == 0)
8935 wm_nq_start_locked(ifp);
8936 wm_nq_transmit_locked(ifp, txq);
8937 } else {
8938 		/* XXX needed for ALTQ or a single-CPU system */
8939 if (qid == 0)
8940 wm_start_locked(ifp);
8941 wm_transmit_locked(ifp, txq);
8942 }
8943 }
8944
8945 /* Interrupt */
8946
8947 /*
8948 * wm_txeof:
8949 *
8950 * Helper; handle transmit interrupts.
8951 */
8952 static bool
8953 wm_txeof(struct wm_txqueue *txq, u_int limit)
8954 {
8955 struct wm_softc *sc = txq->txq_sc;
8956 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8957 struct wm_txsoft *txs;
8958 int count = 0;
8959 int i;
8960 uint8_t status;
8961 bool more = false;
8962
8963 KASSERT(mutex_owned(txq->txq_lock));
8964
8965 if (txq->txq_stopping)
8966 return false;
8967
8968 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
8969
8970 /*
8971 * Go through the Tx list and free mbufs for those
8972 * frames which have been transmitted.
8973 */
8974 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
8975 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
8976 txs = &txq->txq_soft[i];
8977
8978 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
8979 device_xname(sc->sc_dev), i));
8980
8981 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
8982 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8983
8984 status =
8985 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
8986 if ((status & WTX_ST_DD) == 0) {
8987 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
8988 BUS_DMASYNC_PREREAD);
8989 break;
8990 }
8991
8992 if (limit-- == 0) {
8993 more = true;
8994 DPRINTF(sc, WM_DEBUG_TX,
8995 ("%s: TX: loop limited, job %d is not processed\n",
8996 device_xname(sc->sc_dev), i));
8997 break;
8998 }
8999
9000 count++;
9001 DPRINTF(sc, WM_DEBUG_TX,
9002 ("%s: TX: job %d done: descs %d..%d\n",
9003 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
9004 txs->txs_lastdesc));
9005
9006 /*
9007 * XXX We should probably be using the statistics
9008 * XXX registers, but I don't know if they exist
9009 * XXX on chips before the i82544.
9010 */
9011
9012 #ifdef WM_EVENT_COUNTERS
9013 if (status & WTX_ST_TU)
9014 WM_Q_EVCNT_INCR(txq, underrun);
9015 #endif /* WM_EVENT_COUNTERS */
9016
9017 		/*
9018 		 * The documents for 82574 and newer say the status field has
9019 		 * neither the EC (Excessive Collision) bit nor the LC (Late
9020 		 * Collision) bit (both are reserved). Refer to the "PCIe GbE
9021 		 * Controller Open Source Software Developer's Manual", the
9022 		 * 82574 datasheet and newer.
9023 		 *
9024 		 * XXX I saw the LC bit set on an I218 even though the media was
9025 		 * full duplex, so the bit might have another meaning (no doc).
9026 		 */
9027
9028 if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
9029 && ((sc->sc_type < WM_T_82574)
9030 || (sc->sc_type == WM_T_80003))) {
9031 if_statinc(ifp, if_oerrors);
9032 if (status & WTX_ST_LC)
9033 log(LOG_WARNING, "%s: late collision\n",
9034 device_xname(sc->sc_dev));
9035 else if (status & WTX_ST_EC) {
9036 if_statadd(ifp, if_collisions,
9037 TX_COLLISION_THRESHOLD + 1);
9038 log(LOG_WARNING, "%s: excessive collisions\n",
9039 device_xname(sc->sc_dev));
9040 }
9041 } else
9042 if_statinc(ifp, if_opackets);
9043
9044 txq->txq_packets++;
9045 txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
9046
9047 txq->txq_free += txs->txs_ndesc;
9048 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
9049 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
9050 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
9051 m_freem(txs->txs_mbuf);
9052 txs->txs_mbuf = NULL;
9053 }
9054
9055 /* Update the dirty transmit buffer pointer. */
9056 txq->txq_sdirty = i;
9057 DPRINTF(sc, WM_DEBUG_TX,
9058 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
9059
9060 if (count != 0)
9061 rnd_add_uint32(&sc->rnd_source, count);
9062
9063 /*
9064 * If there are no more pending transmissions, cancel the watchdog
9065 * timer.
9066 */
9067 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
9068 txq->txq_sending = false;
9069
9070 return more;
9071 }
9072
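/*
 * The Rx descriptor accessors below hide three descriptor layouts:
 * the legacy format (wrx_*), the 82574 extended format (erx_ctx) and
 * the NEWQUEUE advanced format (nqrx_ctx). Callers never touch the raw
 * descriptors directly; for example (sketch only):
 *
 *	status = wm_rxdesc_get_status(rxq, idx);
 *	if (!wm_rxdesc_is_eop(rxq, status))
 *		(the frame continues in the next descriptor)
 *
 * works unchanged on all supported chips.
 */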
9073 static inline uint32_t
9074 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
9075 {
9076 struct wm_softc *sc = rxq->rxq_sc;
9077
9078 if (sc->sc_type == WM_T_82574)
9079 return EXTRXC_STATUS(
9080 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9081 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9082 return NQRXC_STATUS(
9083 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9084 else
9085 return rxq->rxq_descs[idx].wrx_status;
9086 }
9087
9088 static inline uint32_t
9089 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
9090 {
9091 struct wm_softc *sc = rxq->rxq_sc;
9092
9093 if (sc->sc_type == WM_T_82574)
9094 return EXTRXC_ERROR(
9095 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9096 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9097 return NQRXC_ERROR(
9098 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9099 else
9100 return rxq->rxq_descs[idx].wrx_errors;
9101 }
9102
9103 static inline uint16_t
9104 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
9105 {
9106 struct wm_softc *sc = rxq->rxq_sc;
9107
9108 if (sc->sc_type == WM_T_82574)
9109 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
9110 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9111 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
9112 else
9113 return rxq->rxq_descs[idx].wrx_special;
9114 }
9115
9116 static inline int
9117 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
9118 {
9119 struct wm_softc *sc = rxq->rxq_sc;
9120
9121 if (sc->sc_type == WM_T_82574)
9122 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
9123 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9124 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
9125 else
9126 return rxq->rxq_descs[idx].wrx_len;
9127 }
9128
9129 #ifdef WM_DEBUG
9130 static inline uint32_t
9131 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
9132 {
9133 struct wm_softc *sc = rxq->rxq_sc;
9134
9135 if (sc->sc_type == WM_T_82574)
9136 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
9137 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9138 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
9139 else
9140 return 0;
9141 }
9142
9143 static inline uint8_t
9144 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
9145 {
9146 struct wm_softc *sc = rxq->rxq_sc;
9147
9148 if (sc->sc_type == WM_T_82574)
9149 return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
9150 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9151 return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
9152 else
9153 return 0;
9154 }
9155 #endif /* WM_DEBUG */
9156
9157 static inline bool
9158 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
9159 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9160 {
9161
9162 if (sc->sc_type == WM_T_82574)
9163 return (status & ext_bit) != 0;
9164 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9165 return (status & nq_bit) != 0;
9166 else
9167 return (status & legacy_bit) != 0;
9168 }
9169
9170 static inline bool
9171 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
9172 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9173 {
9174
9175 if (sc->sc_type == WM_T_82574)
9176 return (error & ext_bit) != 0;
9177 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9178 return (error & nq_bit) != 0;
9179 else
9180 return (error & legacy_bit) != 0;
9181 }
9182
9183 static inline bool
9184 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
9185 {
9186
9187 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9188 WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
9189 return true;
9190 else
9191 return false;
9192 }
9193
9194 static inline bool
9195 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
9196 {
9197 struct wm_softc *sc = rxq->rxq_sc;
9198
9199 /* XXX missing error bit for newqueue? */
9200 if (wm_rxdesc_is_set_error(sc, errors,
9201 WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
9202 EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
9203 | EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
9204 NQRXC_ERROR_RXE)) {
9205 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
9206 EXTRXC_ERROR_SE, 0))
9207 log(LOG_WARNING, "%s: symbol error\n",
9208 device_xname(sc->sc_dev));
9209 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
9210 EXTRXC_ERROR_SEQ, 0))
9211 log(LOG_WARNING, "%s: receive sequence error\n",
9212 device_xname(sc->sc_dev));
9213 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
9214 EXTRXC_ERROR_CE, 0))
9215 log(LOG_WARNING, "%s: CRC error\n",
9216 device_xname(sc->sc_dev));
9217 return true;
9218 }
9219
9220 return false;
9221 }
9222
9223 static inline bool
9224 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
9225 {
9226 struct wm_softc *sc = rxq->rxq_sc;
9227
9228 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
9229 NQRXC_STATUS_DD)) {
9230 /* We have processed all of the receive descriptors. */
9231 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
9232 return false;
9233 }
9234
9235 return true;
9236 }
9237
9238 static inline bool
9239 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
9240 uint16_t vlantag, struct mbuf *m)
9241 {
9242
9243 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9244 WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
9245 vlan_set_tag(m, le16toh(vlantag));
9246 }
9247
9248 return true;
9249 }
9250
9251 static inline void
9252 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
9253 uint32_t errors, struct mbuf *m)
9254 {
9255 struct wm_softc *sc = rxq->rxq_sc;
9256
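	/*
	 * A set IXSM bit tells software to ignore the checksum
	 * indications for this packet, so leave the csum flags clear
	 * in that case.
	 */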
9257 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
9258 if (wm_rxdesc_is_set_status(sc, status,
9259 WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
9260 WM_Q_EVCNT_INCR(rxq, ipsum);
9261 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
9262 if (wm_rxdesc_is_set_error(sc, errors,
9263 WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
9264 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
9265 }
9266 if (wm_rxdesc_is_set_status(sc, status,
9267 WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
9268 /*
9269 * Note: we don't know if this was TCP or UDP,
9270 * so we just set both bits, and expect the
9271 * upper layers to deal.
9272 */
9273 WM_Q_EVCNT_INCR(rxq, tusum);
9274 m->m_pkthdr.csum_flags |=
9275 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9276 M_CSUM_TCPv6 | M_CSUM_UDPv6;
9277 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
9278 EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
9279 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
9280 }
9281 }
9282 }
9283
9284 /*
9285 * wm_rxeof:
9286 *
9287 * Helper; handle receive interrupts.
9288 */
9289 static bool
9290 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
9291 {
9292 struct wm_softc *sc = rxq->rxq_sc;
9293 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9294 struct wm_rxsoft *rxs;
9295 struct mbuf *m;
9296 int i, len;
9297 int count = 0;
9298 uint32_t status, errors;
9299 uint16_t vlantag;
9300 bool more = false;
9301
9302 KASSERT(mutex_owned(rxq->rxq_lock));
9303
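	/*
	 * Walk the ring from the last processed slot until we find a
	 * descriptor that the hardware still owns (no DD bit) or the
	 * processing limit is reached.
	 */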
9304 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
9305 rxs = &rxq->rxq_soft[i];
9306
9307 DPRINTF(sc, WM_DEBUG_RX,
9308 ("%s: RX: checking descriptor %d\n",
9309 device_xname(sc->sc_dev), i));
9310 wm_cdrxsync(rxq, i,
9311 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9312
9313 status = wm_rxdesc_get_status(rxq, i);
9314 errors = wm_rxdesc_get_errors(rxq, i);
9315 len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
9316 vlantag = wm_rxdesc_get_vlantag(rxq, i);
9317 #ifdef WM_DEBUG
9318 uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
9319 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
9320 #endif
9321
9322 if (!wm_rxdesc_dd(rxq, i, status))
9323 break;
9324
9325 if (limit-- == 0) {
9326 more = true;
9327 DPRINTF(sc, WM_DEBUG_RX,
9328 ("%s: RX: loop limited, descriptor %d is not processed\n",
9329 device_xname(sc->sc_dev), i));
9330 break;
9331 }
9332
9333 count++;
9334 if (__predict_false(rxq->rxq_discard)) {
9335 DPRINTF(sc, WM_DEBUG_RX,
9336 ("%s: RX: discarding contents of descriptor %d\n",
9337 device_xname(sc->sc_dev), i));
9338 wm_init_rxdesc(rxq, i);
9339 if (wm_rxdesc_is_eop(rxq, status)) {
9340 /* Reset our state. */
9341 DPRINTF(sc, WM_DEBUG_RX,
9342 ("%s: RX: resetting rxdiscard -> 0\n",
9343 device_xname(sc->sc_dev)));
9344 rxq->rxq_discard = 0;
9345 }
9346 continue;
9347 }
9348
9349 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9350 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
9351
9352 m = rxs->rxs_mbuf;
9353
9354 /*
9355 * Add a new receive buffer to the ring, unless of
9356 * course the length is zero. Treat the latter as a
9357 * failed mapping.
9358 */
9359 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
9360 /*
9361 * Failed, throw away what we've done so
9362 * far, and discard the rest of the packet.
9363 */
9364 if_statinc(ifp, if_ierrors);
9365 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9366 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
9367 wm_init_rxdesc(rxq, i);
9368 if (!wm_rxdesc_is_eop(rxq, status))
9369 rxq->rxq_discard = 1;
9370 if (rxq->rxq_head != NULL)
9371 m_freem(rxq->rxq_head);
9372 WM_RXCHAIN_RESET(rxq);
9373 DPRINTF(sc, WM_DEBUG_RX,
9374 ("%s: RX: Rx buffer allocation failed, "
9375 "dropping packet%s\n", device_xname(sc->sc_dev),
9376 rxq->rxq_discard ? " (discard)" : ""));
9377 continue;
9378 }
9379
9380 m->m_len = len;
9381 rxq->rxq_len += len;
9382 DPRINTF(sc, WM_DEBUG_RX,
9383 ("%s: RX: buffer at %p len %d\n",
9384 device_xname(sc->sc_dev), m->m_data, len));
9385
9386 /* If this is not the end of the packet, keep looking. */
9387 if (!wm_rxdesc_is_eop(rxq, status)) {
9388 WM_RXCHAIN_LINK(rxq, m);
9389 DPRINTF(sc, WM_DEBUG_RX,
9390 ("%s: RX: not yet EOP, rxlen -> %d\n",
9391 device_xname(sc->sc_dev), rxq->rxq_len));
9392 continue;
9393 }
9394
9395 /*
9396 		 * Okay, we have the entire packet now. The chip is configured
9397 		 * to include the FCS except on I35[04] and I21[01] (not all
9398 		 * chips can be configured to strip it), so we normally need to
9399 		 * trim it. Those chips have an erratum where the RCTL_SECRC
9400 		 * bit in the RCTL register is always set, so we don't trim it
9401 		 * on them. PCH2 and newer chips also don't include the FCS
9402 		 * when jumbo frames are used, to work around an erratum.
9403 		 * We may need to adjust the length of the previous mbuf in
9404 		 * the chain if the current mbuf is too short.
9405 */
9406 if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
9407 if (m->m_len < ETHER_CRC_LEN) {
9408 rxq->rxq_tail->m_len
9409 -= (ETHER_CRC_LEN - m->m_len);
9410 m->m_len = 0;
9411 } else
9412 m->m_len -= ETHER_CRC_LEN;
9413 len = rxq->rxq_len - ETHER_CRC_LEN;
9414 } else
9415 len = rxq->rxq_len;
9416
9417 WM_RXCHAIN_LINK(rxq, m);
9418
9419 *rxq->rxq_tailp = NULL;
9420 m = rxq->rxq_head;
9421
9422 WM_RXCHAIN_RESET(rxq);
9423
9424 DPRINTF(sc, WM_DEBUG_RX,
9425 ("%s: RX: have entire packet, len -> %d\n",
9426 device_xname(sc->sc_dev), len));
9427
9428 /* If an error occurred, update stats and drop the packet. */
9429 if (wm_rxdesc_has_errors(rxq, errors)) {
9430 m_freem(m);
9431 continue;
9432 }
9433
9434 /* No errors. Receive the packet. */
9435 m_set_rcvif(m, ifp);
9436 m->m_pkthdr.len = len;
9437 /*
9438 		 * TODO
9439 		 * The rsshash and rsstype should be saved to this mbuf.
9440 */
9441 DPRINTF(sc, WM_DEBUG_RX,
9442 ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
9443 device_xname(sc->sc_dev), rsstype, rsshash));
9444
9445 /*
9446 * If VLANs are enabled, VLAN packets have been unwrapped
9447 * for us. Associate the tag with the packet.
9448 */
9449 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
9450 continue;
9451
9452 /* Set up checksum info for this packet. */
9453 wm_rxdesc_ensure_checksum(rxq, status, errors, m);
9454
9455 rxq->rxq_packets++;
9456 rxq->rxq_bytes += len;
9457 /* Pass it on. */
9458 if_percpuq_enqueue(sc->sc_ipq, m);
9459
9460 if (rxq->rxq_stopping)
9461 break;
9462 }
9463 rxq->rxq_ptr = i;
9464
9465 if (count != 0)
9466 rnd_add_uint32(&sc->rnd_source, count);
9467
9468 DPRINTF(sc, WM_DEBUG_RX,
9469 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
9470
9471 return more;
9472 }
9473
9474 /*
9475 * wm_linkintr_gmii:
9476 *
9477 * Helper; handle link interrupts for GMII.
9478 */
9479 static void
9480 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
9481 {
9482 device_t dev = sc->sc_dev;
9483 uint32_t status, reg;
9484 bool link;
9485 int rv;
9486
9487 KASSERT(WM_CORE_LOCKED(sc));
9488
9489 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
9490 __func__));
9491
9492 if ((icr & ICR_LSC) == 0) {
9493 if (icr & ICR_RXSEQ)
9494 DPRINTF(sc, WM_DEBUG_LINK,
9495 ("%s: LINK Receive sequence error\n",
9496 device_xname(dev)));
9497 return;
9498 }
9499
9500 /* Link status changed */
9501 status = CSR_READ(sc, WMREG_STATUS);
9502 link = status & STATUS_LU;
9503 if (link) {
9504 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9505 device_xname(dev),
9506 (status & STATUS_FD) ? "FDX" : "HDX"));
9507 if (wm_phy_need_linkdown_discard(sc)) {
9508 DPRINTF(sc, WM_DEBUG_LINK,
9509 ("%s: linkintr: Clear linkdown discard flag\n",
9510 device_xname(dev)));
9511 wm_clear_linkdown_discard(sc);
9512 }
9513 } else {
9514 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9515 device_xname(dev)));
9516 if (wm_phy_need_linkdown_discard(sc)) {
9517 DPRINTF(sc, WM_DEBUG_LINK,
9518 ("%s: linkintr: Set linkdown discard flag\n",
9519 device_xname(dev)));
9520 wm_set_linkdown_discard(sc);
9521 }
9522 }
9523 if ((sc->sc_type == WM_T_ICH8) && (link == false))
9524 wm_gig_downshift_workaround_ich8lan(sc);
9525
9526 if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
9527 wm_kmrn_lock_loss_workaround_ich8lan(sc);
9528
9529 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
9530 device_xname(dev)));
9531 mii_pollstat(&sc->sc_mii);
9532 if (sc->sc_type == WM_T_82543) {
9533 int miistatus, active;
9534
9535 /*
9536 		 * With the 82543, we need to force the MAC's speed
9537 		 * and duplex to match the PHY's current speed and
9538 		 * duplex configuration.
9539 */
9540 miistatus = sc->sc_mii.mii_media_status;
9541
9542 if (miistatus & IFM_ACTIVE) {
9543 active = sc->sc_mii.mii_media_active;
9544 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9545 switch (IFM_SUBTYPE(active)) {
9546 case IFM_10_T:
9547 sc->sc_ctrl |= CTRL_SPEED_10;
9548 break;
9549 case IFM_100_TX:
9550 sc->sc_ctrl |= CTRL_SPEED_100;
9551 break;
9552 case IFM_1000_T:
9553 sc->sc_ctrl |= CTRL_SPEED_1000;
9554 break;
9555 default:
9556 /*
9557 * Fiber?
9558 				 * Should not enter here.
9559 */
9560 device_printf(dev, "unknown media (%x)\n",
9561 active);
9562 break;
9563 }
9564 if (active & IFM_FDX)
9565 sc->sc_ctrl |= CTRL_FD;
9566 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9567 }
9568 } else if (sc->sc_type == WM_T_PCH) {
9569 wm_k1_gig_workaround_hv(sc,
9570 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9571 }
9572
9573 /*
9574 * When connected at 10Mbps half-duplex, some parts are excessively
9575 * aggressive resulting in many collisions. To avoid this, increase
9576 * the IPG and reduce Rx latency in the PHY.
9577 */
9578 if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
9579 && link) {
9580 uint32_t tipg_reg;
9581 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
9582 bool fdx;
9583 uint16_t emi_addr, emi_val;
9584
9585 tipg_reg = CSR_READ(sc, WMREG_TIPG);
9586 tipg_reg &= ~TIPG_IPGT_MASK;
9587 fdx = status & STATUS_FD;
9588
9589 if (!fdx && (speed == STATUS_SPEED_10)) {
9590 tipg_reg |= 0xff;
9591 /* Reduce Rx latency in analog PHY */
9592 emi_val = 0;
9593 } else if ((sc->sc_type >= WM_T_PCH_SPT) &&
9594 fdx && speed != STATUS_SPEED_1000) {
9595 tipg_reg |= 0xc;
9596 emi_val = 1;
9597 } else {
9598 /* Roll back the default values */
9599 tipg_reg |= 0x08;
9600 emi_val = 1;
9601 }
9602
9603 CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
9604
9605 rv = sc->phy.acquire(sc);
9606 if (rv)
9607 return;
9608
9609 if (sc->sc_type == WM_T_PCH2)
9610 emi_addr = I82579_RX_CONFIG;
9611 else
9612 emi_addr = I217_RX_CONFIG;
9613 rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
9614
9615 if (sc->sc_type >= WM_T_PCH_LPT) {
9616 uint16_t phy_reg;
9617
9618 sc->phy.readreg_locked(dev, 2,
9619 I217_PLL_CLOCK_GATE_REG, &phy_reg);
9620 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
9621 if (speed == STATUS_SPEED_100
9622 || speed == STATUS_SPEED_10)
9623 phy_reg |= 0x3e8;
9624 else
9625 phy_reg |= 0xfa;
9626 sc->phy.writereg_locked(dev, 2,
9627 I217_PLL_CLOCK_GATE_REG, phy_reg);
9628
9629 if (speed == STATUS_SPEED_1000) {
9630 sc->phy.readreg_locked(dev, 2,
9631 HV_PM_CTRL, &phy_reg);
9632
9633 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
9634
9635 sc->phy.writereg_locked(dev, 2,
9636 HV_PM_CTRL, phy_reg);
9637 }
9638 }
9639 sc->phy.release(sc);
9640
9641 if (rv)
9642 return;
9643
9644 if (sc->sc_type >= WM_T_PCH_SPT) {
9645 uint16_t data, ptr_gap;
9646
9647 if (speed == STATUS_SPEED_1000) {
9648 rv = sc->phy.acquire(sc);
9649 if (rv)
9650 return;
9651
9652 rv = sc->phy.readreg_locked(dev, 2,
9653 I82579_UNKNOWN1, &data);
9654 if (rv) {
9655 sc->phy.release(sc);
9656 return;
9657 }
9658
9659 ptr_gap = (data & (0x3ff << 2)) >> 2;
9660 if (ptr_gap < 0x18) {
9661 data &= ~(0x3ff << 2);
9662 data |= (0x18 << 2);
9663 rv = sc->phy.writereg_locked(dev,
9664 2, I82579_UNKNOWN1, data);
9665 }
9666 sc->phy.release(sc);
9667 if (rv)
9668 return;
9669 } else {
9670 rv = sc->phy.acquire(sc);
9671 if (rv)
9672 return;
9673
9674 rv = sc->phy.writereg_locked(dev, 2,
9675 I82579_UNKNOWN1, 0xc023);
9676 sc->phy.release(sc);
9677 if (rv)
9678 return;
9679
9680 }
9681 }
9682 }
9683
9684 /*
9685 * I217 Packet Loss issue:
9686 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
9687 	 * on power up.
9688 	 * Set the Beacon Duration for the I217 to 8 usec.
9689 */
9690 if (sc->sc_type >= WM_T_PCH_LPT) {
9691 reg = CSR_READ(sc, WMREG_FEXTNVM4);
9692 reg &= ~FEXTNVM4_BEACON_DURATION;
9693 reg |= FEXTNVM4_BEACON_DURATION_8US;
9694 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
9695 }
9696
9697 /* Work-around I218 hang issue */
9698 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
9699 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
9700 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
9701 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
9702 wm_k1_workaround_lpt_lp(sc, link);
9703
9704 if (sc->sc_type >= WM_T_PCH_LPT) {
9705 /*
9706 * Set platform power management values for Latency
9707 * Tolerance Reporting (LTR)
9708 */
9709 wm_platform_pm_pch_lpt(sc,
9710 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9711 }
9712
9713 /* Clear link partner's EEE ability */
9714 sc->eee_lp_ability = 0;
9715
9716 /* FEXTNVM6 K1-off workaround */
9717 if (sc->sc_type == WM_T_PCH_SPT) {
9718 reg = CSR_READ(sc, WMREG_FEXTNVM6);
9719 if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
9720 reg |= FEXTNVM6_K1_OFF_ENABLE;
9721 else
9722 reg &= ~FEXTNVM6_K1_OFF_ENABLE;
9723 CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
9724 }
9725
9726 if (!link)
9727 return;
9728
9729 switch (sc->sc_type) {
9730 case WM_T_PCH2:
9731 wm_k1_workaround_lv(sc);
9732 /* FALLTHROUGH */
9733 case WM_T_PCH:
9734 if (sc->sc_phytype == WMPHY_82578)
9735 wm_link_stall_workaround_hv(sc);
9736 break;
9737 default:
9738 break;
9739 }
9740
9741 /* Enable/Disable EEE after link up */
9742 if (sc->sc_phytype > WMPHY_82579)
9743 wm_set_eee_pchlan(sc);
9744 }
9745
9746 /*
9747 * wm_linkintr_tbi:
9748 *
9749 * Helper; handle link interrupts for TBI mode.
9750 */
9751 static void
9752 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
9753 {
9754 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9755 uint32_t status;
9756
9757 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9758 __func__));
9759
9760 status = CSR_READ(sc, WMREG_STATUS);
9761 if (icr & ICR_LSC) {
9762 wm_check_for_link(sc);
9763 if (status & STATUS_LU) {
9764 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9765 device_xname(sc->sc_dev),
9766 (status & STATUS_FD) ? "FDX" : "HDX"));
9767 /*
9768 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
9769 			 * automatically, so refresh the cached sc->sc_ctrl.
9770 */
9771
9772 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9773 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9774 sc->sc_fcrtl &= ~FCRTL_XONE;
9775 if (status & STATUS_FD)
9776 sc->sc_tctl |=
9777 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9778 else
9779 sc->sc_tctl |=
9780 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9781 if (sc->sc_ctrl & CTRL_TFCE)
9782 sc->sc_fcrtl |= FCRTL_XONE;
9783 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9784 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9785 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
9786 sc->sc_tbi_linkup = 1;
9787 if_link_state_change(ifp, LINK_STATE_UP);
9788 } else {
9789 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9790 device_xname(sc->sc_dev)));
9791 sc->sc_tbi_linkup = 0;
9792 if_link_state_change(ifp, LINK_STATE_DOWN);
9793 }
9794 /* Update LED */
9795 wm_tbi_serdes_set_linkled(sc);
9796 } else if (icr & ICR_RXSEQ)
9797 DPRINTF(sc, WM_DEBUG_LINK,
9798 ("%s: LINK: Receive sequence error\n",
9799 device_xname(sc->sc_dev)));
9800 }
9801
9802 /*
9803 * wm_linkintr_serdes:
9804 *
9805  *	Helper; handle link interrupts for SERDES mode.
9806 */
9807 static void
9808 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
9809 {
9810 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9811 struct mii_data *mii = &sc->sc_mii;
9812 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9813 uint32_t pcs_adv, pcs_lpab, reg;
9814
9815 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9816 __func__));
9817
9818 if (icr & ICR_LSC) {
9819 /* Check PCS */
9820 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9821 if ((reg & PCS_LSTS_LINKOK) != 0) {
9822 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
9823 device_xname(sc->sc_dev)));
9824 mii->mii_media_status |= IFM_ACTIVE;
9825 sc->sc_tbi_linkup = 1;
9826 if_link_state_change(ifp, LINK_STATE_UP);
9827 } else {
9828 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9829 device_xname(sc->sc_dev)));
9830 mii->mii_media_status |= IFM_NONE;
9831 sc->sc_tbi_linkup = 0;
9832 if_link_state_change(ifp, LINK_STATE_DOWN);
9833 wm_tbi_serdes_set_linkled(sc);
9834 return;
9835 }
9836 mii->mii_media_active |= IFM_1000_SX;
9837 if ((reg & PCS_LSTS_FDX) != 0)
9838 mii->mii_media_active |= IFM_FDX;
9839 else
9840 mii->mii_media_active |= IFM_HDX;
9841 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9842 /* Check flow */
9843 reg = CSR_READ(sc, WMREG_PCS_LSTS);
9844 if ((reg & PCS_LSTS_AN_COMP) == 0) {
9845 DPRINTF(sc, WM_DEBUG_LINK,
9846 ("XXX LINKOK but not ACOMP\n"));
9847 return;
9848 }
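			/*
			 * Resolve flow control from the advertised and link
			 * partner pause bits, following the standard IEEE
			 * 802.3 Annex 28B priority resolution.
			 */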
9849 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9850 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9851 DPRINTF(sc, WM_DEBUG_LINK,
9852 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
9853 if ((pcs_adv & TXCW_SYM_PAUSE)
9854 && (pcs_lpab & TXCW_SYM_PAUSE)) {
9855 mii->mii_media_active |= IFM_FLOW
9856 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9857 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9858 && (pcs_adv & TXCW_ASYM_PAUSE)
9859 && (pcs_lpab & TXCW_SYM_PAUSE)
9860 && (pcs_lpab & TXCW_ASYM_PAUSE))
9861 mii->mii_media_active |= IFM_FLOW
9862 | IFM_ETH_TXPAUSE;
9863 else if ((pcs_adv & TXCW_SYM_PAUSE)
9864 && (pcs_adv & TXCW_ASYM_PAUSE)
9865 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9866 && (pcs_lpab & TXCW_ASYM_PAUSE))
9867 mii->mii_media_active |= IFM_FLOW
9868 | IFM_ETH_RXPAUSE;
9869 }
9870 /* Update LED */
9871 wm_tbi_serdes_set_linkled(sc);
9872 } else
9873 DPRINTF(sc, WM_DEBUG_LINK,
9874 ("%s: LINK: Receive sequence error\n",
9875 device_xname(sc->sc_dev)));
9876 }
9877
9878 /*
9879 * wm_linkintr:
9880 *
9881 * Helper; handle link interrupts.
9882 */
9883 static void
9884 wm_linkintr(struct wm_softc *sc, uint32_t icr)
9885 {
9886
9887 KASSERT(WM_CORE_LOCKED(sc));
9888
9889 if (sc->sc_flags & WM_F_HAS_MII)
9890 wm_linkintr_gmii(sc, icr);
9891 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9892 && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
9893 wm_linkintr_serdes(sc, icr);
9894 else
9895 wm_linkintr_tbi(sc, icr);
9896 }
9897
9898
9899 static inline void
9900 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
9901 {
9902
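	/*
	 * Defer the remaining Tx/Rx processing either to the per-queue
	 * workqueue or to softint context, as selected at runtime.
	 */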
9903 if (wmq->wmq_txrx_use_workqueue)
9904 workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
9905 else
9906 softint_schedule(wmq->wmq_si);
9907 }
9908
9909 static inline void
9910 wm_legacy_intr_disable(struct wm_softc *sc)
9911 {
9912
9913 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
9914 }
9915
9916 static inline void
9917 wm_legacy_intr_enable(struct wm_softc *sc)
9918 {
9919
9920 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
9921 }
9922
9923 /*
9924 * wm_intr_legacy:
9925 *
9926 * Interrupt service routine for INTx and MSI.
9927 */
9928 static int
9929 wm_intr_legacy(void *arg)
9930 {
9931 struct wm_softc *sc = arg;
9932 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9933 struct wm_queue *wmq = &sc->sc_queue[0];
9934 struct wm_txqueue *txq = &wmq->wmq_txq;
9935 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9936 u_int txlimit = sc->sc_tx_intr_process_limit;
9937 u_int rxlimit = sc->sc_rx_intr_process_limit;
9938 uint32_t icr, rndval = 0;
9939 bool more = false;
9940
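	/*
	 * Reading ICR acknowledges (clears) the pending causes. If none
	 * of the bits we care about are set, the interrupt was not ours;
	 * the INTx line may be shared.
	 */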
9941 icr = CSR_READ(sc, WMREG_ICR);
9942 if ((icr & sc->sc_icr) == 0)
9943 return 0;
9944
9945 DPRINTF(sc, WM_DEBUG_TX,
9946 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
9947 if (rndval == 0)
9948 rndval = icr;
9949
9950 mutex_enter(txq->txq_lock);
9951
9952 if (txq->txq_stopping) {
9953 mutex_exit(txq->txq_lock);
9954 return 1;
9955 }
9956
9957 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9958 if (icr & ICR_TXDW) {
9959 DPRINTF(sc, WM_DEBUG_TX,
9960 ("%s: TX: got TXDW interrupt\n",
9961 device_xname(sc->sc_dev)));
9962 WM_Q_EVCNT_INCR(txq, txdw);
9963 }
9964 #endif
9965 if (txlimit > 0) {
9966 more |= wm_txeof(txq, txlimit);
9967 if (!IF_IS_EMPTY(&ifp->if_snd))
9968 more = true;
9969 } else
9970 more = true;
9971 mutex_exit(txq->txq_lock);
9972
9973 mutex_enter(rxq->rxq_lock);
9974
9975 if (rxq->rxq_stopping) {
9976 mutex_exit(rxq->rxq_lock);
9977 return 1;
9978 }
9979
9980 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9981 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
9982 DPRINTF(sc, WM_DEBUG_RX,
9983 ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
9984 device_xname(sc->sc_dev),
9985 icr & (ICR_RXDMT0 | ICR_RXT0)));
9986 WM_Q_EVCNT_INCR(rxq, intr);
9987 }
9988 #endif
9989 if (rxlimit > 0) {
9990 /*
9991 		 * wm_rxeof() does *not* call upper layer functions directly;
9992 		 * if_percpuq_enqueue() just calls softint_schedule(), so it
9993 		 * is safe to call wm_rxeof() in interrupt context.
9994 */
9995 more = wm_rxeof(rxq, rxlimit);
9996 } else
9997 more = true;
9998
9999 mutex_exit(rxq->rxq_lock);
10000
10001 WM_CORE_LOCK(sc);
10002
10003 if (sc->sc_core_stopping) {
10004 WM_CORE_UNLOCK(sc);
10005 return 1;
10006 }
10007
10008 if (icr & (ICR_LSC | ICR_RXSEQ)) {
10009 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10010 wm_linkintr(sc, icr);
10011 }
10012 if ((icr & ICR_GPI(0)) != 0)
10013 device_printf(sc->sc_dev, "got module interrupt\n");
10014
10015 WM_CORE_UNLOCK(sc);
10016
10017 if (icr & ICR_RXO) {
10018 #if defined(WM_DEBUG)
10019 log(LOG_WARNING, "%s: Receive overrun\n",
10020 device_xname(sc->sc_dev));
10021 #endif /* defined(WM_DEBUG) */
10022 }
10023
10024 rnd_add_uint32(&sc->rnd_source, rndval);
10025
10026 if (more) {
10027 /* Try to get more packets going. */
10028 wm_legacy_intr_disable(sc);
10029 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10030 wm_sched_handle_queue(sc, wmq);
10031 }
10032
10033 return 1;
10034 }
10035
10036 static inline void
10037 wm_txrxintr_disable(struct wm_queue *wmq)
10038 {
10039 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10040
10041 if (__predict_false(!wm_is_using_msix(sc))) {
10042 wm_legacy_intr_disable(sc);
10043 return;
10044 }
10045
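	/*
	 * The 82574 masks the per-queue causes via IMC, the 82575 via
	 * the EITR queue bits in EIMC, and later chips mask the MSI-X
	 * vector directly through EIMC.
	 */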
10046 if (sc->sc_type == WM_T_82574)
10047 CSR_WRITE(sc, WMREG_IMC,
10048 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
10049 else if (sc->sc_type == WM_T_82575)
10050 CSR_WRITE(sc, WMREG_EIMC,
10051 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10052 else
10053 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
10054 }
10055
10056 static inline void
10057 wm_txrxintr_enable(struct wm_queue *wmq)
10058 {
10059 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10060
10061 wm_itrs_calculate(sc, wmq);
10062
10063 if (__predict_false(!wm_is_using_msix(sc))) {
10064 wm_legacy_intr_enable(sc);
10065 return;
10066 }
10067
10068 /*
10069 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
10070 	 * here. It does not matter whether RXQ(0) or RXQ(1) enables
10071 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
10072 	 * while its wm_handle_queue(wmq) is running.
10073 */
10074 if (sc->sc_type == WM_T_82574)
10075 CSR_WRITE(sc, WMREG_IMS,
10076 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
10077 else if (sc->sc_type == WM_T_82575)
10078 CSR_WRITE(sc, WMREG_EIMS,
10079 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10080 else
10081 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
10082 }
10083
10084 static int
10085 wm_txrxintr_msix(void *arg)
10086 {
10087 struct wm_queue *wmq = arg;
10088 struct wm_txqueue *txq = &wmq->wmq_txq;
10089 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10090 struct wm_softc *sc = txq->txq_sc;
10091 u_int txlimit = sc->sc_tx_intr_process_limit;
10092 u_int rxlimit = sc->sc_rx_intr_process_limit;
10093 bool txmore;
10094 bool rxmore;
10095
10096 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
10097
10098 DPRINTF(sc, WM_DEBUG_TX,
10099 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
10100
10101 wm_txrxintr_disable(wmq);
10102
10103 mutex_enter(txq->txq_lock);
10104
10105 if (txq->txq_stopping) {
10106 mutex_exit(txq->txq_lock);
10107 return 1;
10108 }
10109
10110 WM_Q_EVCNT_INCR(txq, txdw);
10111 if (txlimit > 0) {
10112 txmore = wm_txeof(txq, txlimit);
10113 		/* wm_deferred_start_locked() is called from wm_handle_queue(). */
10114 } else
10115 txmore = true;
10116 mutex_exit(txq->txq_lock);
10117
10118 DPRINTF(sc, WM_DEBUG_RX,
10119 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
10120 mutex_enter(rxq->rxq_lock);
10121
10122 if (rxq->rxq_stopping) {
10123 mutex_exit(rxq->rxq_lock);
10124 return 1;
10125 }
10126
10127 WM_Q_EVCNT_INCR(rxq, intr);
10128 if (rxlimit > 0) {
10129 rxmore = wm_rxeof(rxq, rxlimit);
10130 } else
10131 rxmore = true;
10132 mutex_exit(rxq->rxq_lock);
10133
10134 wm_itrs_writereg(sc, wmq);
10135
10136 if (txmore || rxmore) {
10137 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10138 wm_sched_handle_queue(sc, wmq);
10139 } else
10140 wm_txrxintr_enable(wmq);
10141
10142 return 1;
10143 }
10144
10145 static void
10146 wm_handle_queue(void *arg)
10147 {
10148 struct wm_queue *wmq = arg;
10149 struct wm_txqueue *txq = &wmq->wmq_txq;
10150 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10151 struct wm_softc *sc = txq->txq_sc;
10152 u_int txlimit = sc->sc_tx_process_limit;
10153 u_int rxlimit = sc->sc_rx_process_limit;
10154 bool txmore;
10155 bool rxmore;
10156
10157 mutex_enter(txq->txq_lock);
10158 if (txq->txq_stopping) {
10159 mutex_exit(txq->txq_lock);
10160 return;
10161 }
10162 txmore = wm_txeof(txq, txlimit);
10163 wm_deferred_start_locked(txq);
10164 mutex_exit(txq->txq_lock);
10165
10166 mutex_enter(rxq->rxq_lock);
10167 if (rxq->rxq_stopping) {
10168 mutex_exit(rxq->rxq_lock);
10169 return;
10170 }
10171 WM_Q_EVCNT_INCR(rxq, defer);
10172 rxmore = wm_rxeof(rxq, rxlimit);
10173 mutex_exit(rxq->rxq_lock);
10174
10175 if (txmore || rxmore) {
10176 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10177 wm_sched_handle_queue(sc, wmq);
10178 } else
10179 wm_txrxintr_enable(wmq);
10180 }
10181
10182 static void
10183 wm_handle_queue_work(struct work *wk, void *context)
10184 {
10185 struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
10186
10187 /*
10188 	 * An "enqueued" flag is not required here.
10189 */
10190 wm_handle_queue(wmq);
10191 }
10192
10193 /*
10194 * wm_linkintr_msix:
10195 *
10196 * Interrupt service routine for link status change for MSI-X.
10197 */
10198 static int
10199 wm_linkintr_msix(void *arg)
10200 {
10201 struct wm_softc *sc = arg;
10202 uint32_t reg;
10203 bool has_rxo;
10204
10205 reg = CSR_READ(sc, WMREG_ICR);
10206 WM_CORE_LOCK(sc);
10207 DPRINTF(sc, WM_DEBUG_LINK,
10208 ("%s: LINK: got link intr. ICR = %08x\n",
10209 device_xname(sc->sc_dev), reg));
10210
10211 if (sc->sc_core_stopping)
10212 goto out;
10213
10214 if ((reg & ICR_LSC) != 0) {
10215 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10216 wm_linkintr(sc, ICR_LSC);
10217 }
10218 if ((reg & ICR_GPI(0)) != 0)
10219 device_printf(sc->sc_dev, "got module interrupt\n");
10220
10221 /*
10222 * XXX 82574 MSI-X mode workaround
10223 *
10224 	 * In MSI-X mode, the 82574 reports a receive overrun (RXO) on the
10225 	 * ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor the
10226 	 * ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
10227 	 * interrupts by writing WMREG_ICS to get receive packets processed.
10228 */
10229 if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
10230 #if defined(WM_DEBUG)
10231 log(LOG_WARNING, "%s: Receive overrun\n",
10232 device_xname(sc->sc_dev));
10233 #endif /* defined(WM_DEBUG) */
10234
10235 has_rxo = true;
10236 /*
10237 		 * The RXO interrupt fires at a very high rate when receive
10238 		 * traffic is heavy, so use polling mode for ICR_OTHER just
10239 		 * as for the Tx/Rx interrupts. ICR_OTHER is re-enabled at
10240 		 * the end of wm_txrxintr_msix(), which is kicked by both
10241 		 * the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
10242 */
10243 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
10244
10245 CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
10246 }
10247
10250 out:
10251 WM_CORE_UNLOCK(sc);
10252
10253 if (sc->sc_type == WM_T_82574) {
10254 if (!has_rxo)
10255 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
10256 else
10257 CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
10258 } else if (sc->sc_type == WM_T_82575)
10259 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
10260 else
10261 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
10262
10263 return 1;
10264 }
10265
10266 /*
10267 * Media related.
10268 * GMII, SGMII, TBI (and SERDES)
10269 */
10270
10271 /* Common */
10272
10273 /*
10274 * wm_tbi_serdes_set_linkled:
10275 *
10276 * Update the link LED on TBI and SERDES devices.
10277 */
10278 static void
10279 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
10280 {
10281
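	/* The link LED is driven through software-controllable pin 0. */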
10282 if (sc->sc_tbi_linkup)
10283 sc->sc_ctrl |= CTRL_SWDPIN(0);
10284 else
10285 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
10286
10287 /* 82540 or newer devices are active low */
10288 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
10289
10290 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10291 }
10292
10293 /* GMII related */
10294
10295 /*
10296 * wm_gmii_reset:
10297 *
10298 * Reset the PHY.
10299 */
10300 static void
10301 wm_gmii_reset(struct wm_softc *sc)
10302 {
10303 uint32_t reg;
10304 int rv;
10305
10306 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10307 device_xname(sc->sc_dev), __func__));
10308
10309 rv = sc->phy.acquire(sc);
10310 if (rv != 0) {
10311 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10312 __func__);
10313 return;
10314 }
10315
10316 switch (sc->sc_type) {
10317 case WM_T_82542_2_0:
10318 case WM_T_82542_2_1:
10319 /* null */
10320 break;
10321 case WM_T_82543:
10322 /*
10323 	 * With the 82543, we need to force the MAC's speed and duplex
10324 	 * to match the PHY's speed and duplex configuration.
10325 * In addition, we need to perform a hardware reset on the PHY
10326 * to take it out of reset.
10327 */
10328 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10329 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10330
10331 /* The PHY reset pin is active-low. */
10332 reg = CSR_READ(sc, WMREG_CTRL_EXT);
10333 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
10334 CTRL_EXT_SWDPIN(4));
10335 reg |= CTRL_EXT_SWDPIO(4);
10336
10337 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10338 CSR_WRITE_FLUSH(sc);
10339 delay(10*1000);
10340
10341 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
10342 CSR_WRITE_FLUSH(sc);
10343 delay(150);
10344 #if 0
10345 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
10346 #endif
10347 delay(20*1000); /* XXX extra delay to get PHY ID? */
10348 break;
10349 case WM_T_82544: /* Reset 10000us */
10350 case WM_T_82540:
10351 case WM_T_82545:
10352 case WM_T_82545_3:
10353 case WM_T_82546:
10354 case WM_T_82546_3:
10355 case WM_T_82541:
10356 case WM_T_82541_2:
10357 case WM_T_82547:
10358 case WM_T_82547_2:
10359 case WM_T_82571: /* Reset 100us */
10360 case WM_T_82572:
10361 case WM_T_82573:
10362 case WM_T_82574:
10363 case WM_T_82575:
10364 case WM_T_82576:
10365 case WM_T_82580:
10366 case WM_T_I350:
10367 case WM_T_I354:
10368 case WM_T_I210:
10369 case WM_T_I211:
10370 case WM_T_82583:
10371 case WM_T_80003:
10372 /* Generic reset */
10373 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10374 CSR_WRITE_FLUSH(sc);
10375 delay(20000);
10376 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10377 CSR_WRITE_FLUSH(sc);
10378 delay(20000);
10379
10380 if ((sc->sc_type == WM_T_82541)
10381 || (sc->sc_type == WM_T_82541_2)
10382 || (sc->sc_type == WM_T_82547)
10383 || (sc->sc_type == WM_T_82547_2)) {
10384 			/* Workarounds for IGP are done in igp_reset(). */
10385 /* XXX add code to set LED after phy reset */
10386 }
10387 break;
10388 case WM_T_ICH8:
10389 case WM_T_ICH9:
10390 case WM_T_ICH10:
10391 case WM_T_PCH:
10392 case WM_T_PCH2:
10393 case WM_T_PCH_LPT:
10394 case WM_T_PCH_SPT:
10395 case WM_T_PCH_CNP:
10396 /* Generic reset */
10397 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10398 CSR_WRITE_FLUSH(sc);
10399 delay(100);
10400 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10401 CSR_WRITE_FLUSH(sc);
10402 delay(150);
10403 break;
10404 default:
10405 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
10406 __func__);
10407 break;
10408 }
10409
10410 sc->phy.release(sc);
10411
10412 /* get_cfg_done */
10413 wm_get_cfg_done(sc);
10414
10415 /* Extra setup */
10416 switch (sc->sc_type) {
10417 case WM_T_82542_2_0:
10418 case WM_T_82542_2_1:
10419 case WM_T_82543:
10420 case WM_T_82544:
10421 case WM_T_82540:
10422 case WM_T_82545:
10423 case WM_T_82545_3:
10424 case WM_T_82546:
10425 case WM_T_82546_3:
10426 case WM_T_82541_2:
10427 case WM_T_82547_2:
10428 case WM_T_82571:
10429 case WM_T_82572:
10430 case WM_T_82573:
10431 case WM_T_82574:
10432 case WM_T_82583:
10433 case WM_T_82575:
10434 case WM_T_82576:
10435 case WM_T_82580:
10436 case WM_T_I350:
10437 case WM_T_I354:
10438 case WM_T_I210:
10439 case WM_T_I211:
10440 case WM_T_80003:
10441 /* Null */
10442 break;
10443 case WM_T_82541:
10444 case WM_T_82547:
10445 		/* XXX Actively configure the LED after PHY reset. */
10446 break;
10447 case WM_T_ICH8:
10448 case WM_T_ICH9:
10449 case WM_T_ICH10:
10450 case WM_T_PCH:
10451 case WM_T_PCH2:
10452 case WM_T_PCH_LPT:
10453 case WM_T_PCH_SPT:
10454 case WM_T_PCH_CNP:
10455 wm_phy_post_reset(sc);
10456 break;
10457 default:
10458 panic("%s: unknown type\n", __func__);
10459 break;
10460 }
10461 }
10462
10463 /*
10464 * Set up sc_phytype and mii_{read|write}reg.
10465 *
10466  * To identify the PHY type, the correct read/write functions must
10467  * be selected, and to select them the PCI ID or the MAC type is
10468  * required, without accessing the PHY registers.
10469  *
10470  * On the first call of this function, the PHY ID is not yet known.
10471  * Check the PCI ID or the MAC type. The list of PCI IDs may not be
10472  * complete, so the result might be incorrect.
10473  *
10474  * On the second call, the PHY OUI and model are used to identify
10475  * the PHY type. This may still be imperfect because entries may be
10476  * missing from the comparison, but it is better than the first call.
10477  *
10478  * If the newly detected result differs from the previous assumption,
10479  * a diagnostic message is printed.
10480 */
10481 static void
10482 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
10483 uint16_t phy_model)
10484 {
10485 device_t dev = sc->sc_dev;
10486 struct mii_data *mii = &sc->sc_mii;
10487 uint16_t new_phytype = WMPHY_UNKNOWN;
10488 uint16_t doubt_phytype = WMPHY_UNKNOWN;
10489 mii_readreg_t new_readreg;
10490 mii_writereg_t new_writereg;
10491 bool dodiag = true;
10492
10493 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10494 device_xname(sc->sc_dev), __func__));
10495
10496 /*
10497 	 * A 1000BASE-T SFP uses SGMII, so the first assumed PHY type is
10498 	 * always incorrect; don't print diagnostic output on the second call.
10499 */
10500 if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
10501 dodiag = false;
10502
10503 if (mii->mii_readreg == NULL) {
10504 /*
10505 * This is the first call of this function. For ICH and PCH
10506 * variants, it's difficult to determine the PHY access method
10507 * by sc_type, so use the PCI product ID for some devices.
10508 */
10509
10510 switch (sc->sc_pcidevid) {
10511 case PCI_PRODUCT_INTEL_PCH_M_LM:
10512 case PCI_PRODUCT_INTEL_PCH_M_LC:
10513 /* 82577 */
10514 new_phytype = WMPHY_82577;
10515 break;
10516 case PCI_PRODUCT_INTEL_PCH_D_DM:
10517 case PCI_PRODUCT_INTEL_PCH_D_DC:
10518 /* 82578 */
10519 new_phytype = WMPHY_82578;
10520 break;
10521 case PCI_PRODUCT_INTEL_PCH2_LV_LM:
10522 case PCI_PRODUCT_INTEL_PCH2_LV_V:
10523 /* 82579 */
10524 new_phytype = WMPHY_82579;
10525 break;
10526 case PCI_PRODUCT_INTEL_82801H_82567V_3:
10527 case PCI_PRODUCT_INTEL_82801I_BM:
10528 case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
10529 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
10530 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
10531 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
10532 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
10533 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
10534 /* ICH8, 9, 10 with 82567 */
10535 new_phytype = WMPHY_BM;
10536 break;
10537 default:
10538 break;
10539 }
10540 } else {
10541 /* It's not the first call. Use PHY OUI and model */
10542 switch (phy_oui) {
10543 case MII_OUI_ATTANSIC: /* atphy(4) */
10544 switch (phy_model) {
10545 case MII_MODEL_ATTANSIC_AR8021:
10546 new_phytype = WMPHY_82578;
10547 break;
10548 default:
10549 break;
10550 }
10551 break;
10552 case MII_OUI_xxMARVELL:
10553 switch (phy_model) {
10554 case MII_MODEL_xxMARVELL_I210:
10555 new_phytype = WMPHY_I210;
10556 break;
10557 case MII_MODEL_xxMARVELL_E1011:
10558 case MII_MODEL_xxMARVELL_E1000_3:
10559 case MII_MODEL_xxMARVELL_E1000_5:
10560 case MII_MODEL_xxMARVELL_E1112:
10561 new_phytype = WMPHY_M88;
10562 break;
10563 case MII_MODEL_xxMARVELL_E1149:
10564 new_phytype = WMPHY_BM;
10565 break;
10566 case MII_MODEL_xxMARVELL_E1111:
10567 case MII_MODEL_xxMARVELL_I347:
10568 case MII_MODEL_xxMARVELL_E1512:
10569 case MII_MODEL_xxMARVELL_E1340M:
10570 case MII_MODEL_xxMARVELL_E1543:
10571 new_phytype = WMPHY_M88;
10572 break;
10573 case MII_MODEL_xxMARVELL_I82563:
10574 new_phytype = WMPHY_GG82563;
10575 break;
10576 default:
10577 break;
10578 }
10579 break;
10580 case MII_OUI_INTEL:
10581 switch (phy_model) {
10582 case MII_MODEL_INTEL_I82577:
10583 new_phytype = WMPHY_82577;
10584 break;
10585 case MII_MODEL_INTEL_I82579:
10586 new_phytype = WMPHY_82579;
10587 break;
10588 case MII_MODEL_INTEL_I217:
10589 new_phytype = WMPHY_I217;
10590 break;
10591 case MII_MODEL_INTEL_I82580:
10592 new_phytype = WMPHY_82580;
10593 break;
10594 case MII_MODEL_INTEL_I350:
10595 new_phytype = WMPHY_I350;
10596 break;
10597 default:
10598 break;
10599 }
10600 break;
10601 case MII_OUI_yyINTEL:
10602 switch (phy_model) {
10603 case MII_MODEL_yyINTEL_I82562G:
10604 case MII_MODEL_yyINTEL_I82562EM:
10605 case MII_MODEL_yyINTEL_I82562ET:
10606 new_phytype = WMPHY_IFE;
10607 break;
10608 case MII_MODEL_yyINTEL_IGP01E1000:
10609 new_phytype = WMPHY_IGP;
10610 break;
10611 case MII_MODEL_yyINTEL_I82566:
10612 new_phytype = WMPHY_IGP_3;
10613 break;
10614 default:
10615 break;
10616 }
10617 break;
10618 default:
10619 break;
10620 }
10621
10622 if (dodiag) {
10623 if (new_phytype == WMPHY_UNKNOWN)
10624 aprint_verbose_dev(dev,
10625 "%s: Unknown PHY model. OUI=%06x, "
10626 "model=%04x\n", __func__, phy_oui,
10627 phy_model);
10628
10629 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10630 && (sc->sc_phytype != new_phytype)) {
10631 aprint_error_dev(dev, "Previously assumed PHY "
10632 				    "type (%u) was incorrect. PHY type from PHY "
10633 "ID = %u\n", sc->sc_phytype, new_phytype);
10634 }
10635 }
10636 }
10637
10638 /* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
10639 if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
10640 /* SGMII */
10641 new_readreg = wm_sgmii_readreg;
10642 new_writereg = wm_sgmii_writereg;
10643 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
10644 /* BM2 (phyaddr == 1) */
10645 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10646 && (new_phytype != WMPHY_BM)
10647 && (new_phytype != WMPHY_UNKNOWN))
10648 doubt_phytype = new_phytype;
10649 new_phytype = WMPHY_BM;
10650 new_readreg = wm_gmii_bm_readreg;
10651 new_writereg = wm_gmii_bm_writereg;
10652 } else if (sc->sc_type >= WM_T_PCH) {
10653 /* All PCH* use _hv_ */
10654 new_readreg = wm_gmii_hv_readreg;
10655 new_writereg = wm_gmii_hv_writereg;
10656 } else if (sc->sc_type >= WM_T_ICH8) {
10657 /* non-82567 ICH8, 9 and 10 */
10658 new_readreg = wm_gmii_i82544_readreg;
10659 new_writereg = wm_gmii_i82544_writereg;
10660 } else if (sc->sc_type >= WM_T_80003) {
10661 /* 80003 */
10662 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10663 && (new_phytype != WMPHY_GG82563)
10664 && (new_phytype != WMPHY_UNKNOWN))
10665 doubt_phytype = new_phytype;
10666 new_phytype = WMPHY_GG82563;
10667 new_readreg = wm_gmii_i80003_readreg;
10668 new_writereg = wm_gmii_i80003_writereg;
10669 } else if (sc->sc_type >= WM_T_I210) {
10670 /* I210 and I211 */
10671 if ((sc->sc_phytype != WMPHY_UNKNOWN)
10672 && (new_phytype != WMPHY_I210)
10673 && (new_phytype != WMPHY_UNKNOWN))
10674 doubt_phytype = new_phytype;
10675 new_phytype = WMPHY_I210;
10676 new_readreg = wm_gmii_gs40g_readreg;
10677 new_writereg = wm_gmii_gs40g_writereg;
10678 } else if (sc->sc_type >= WM_T_82580) {
10679 /* 82580, I350 and I354 */
10680 new_readreg = wm_gmii_82580_readreg;
10681 new_writereg = wm_gmii_82580_writereg;
10682 } else if (sc->sc_type >= WM_T_82544) {
10683 /* 82544, 0, [56], [17], 8257[1234] and 82583 */
10684 new_readreg = wm_gmii_i82544_readreg;
10685 new_writereg = wm_gmii_i82544_writereg;
10686 } else {
10687 new_readreg = wm_gmii_i82543_readreg;
10688 new_writereg = wm_gmii_i82543_writereg;
10689 }
10690
10691 if (new_phytype == WMPHY_BM) {
10692 /* All BM use _bm_ */
10693 new_readreg = wm_gmii_bm_readreg;
10694 new_writereg = wm_gmii_bm_writereg;
10695 }
10696 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
10697 /* All PCH* use _hv_ */
10698 new_readreg = wm_gmii_hv_readreg;
10699 new_writereg = wm_gmii_hv_writereg;
10700 }
10701
10702 /* Diag output */
10703 if (dodiag) {
10704 if (doubt_phytype != WMPHY_UNKNOWN)
10705 aprint_error_dev(dev, "Assumed new PHY type was "
10706 "incorrect. old = %u, new = %u\n", sc->sc_phytype,
10707 new_phytype);
10708 else if ((sc->sc_phytype != WMPHY_UNKNOWN)
10709 && (sc->sc_phytype != new_phytype))
10710 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
10711 "was incorrect. New PHY type = %u\n",
10712 sc->sc_phytype, new_phytype);
10713
10714 if ((mii->mii_readreg != NULL) &&
10715 (new_phytype == WMPHY_UNKNOWN))
10716 aprint_error_dev(dev, "PHY type is still unknown.\n");
10717
10718 if ((mii->mii_readreg != NULL) &&
10719 (mii->mii_readreg != new_readreg))
10720 aprint_error_dev(dev, "Previously assumed PHY "
10721 "read/write function was incorrect.\n");
10722 }
10723
10724 /* Update now */
10725 sc->sc_phytype = new_phytype;
10726 mii->mii_readreg = new_readreg;
10727 mii->mii_writereg = new_writereg;
10728 if (new_readreg == wm_gmii_hv_readreg) {
10729 sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
10730 sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
10731 } else if (new_readreg == wm_sgmii_readreg) {
10732 sc->phy.readreg_locked = wm_sgmii_readreg_locked;
10733 sc->phy.writereg_locked = wm_sgmii_writereg_locked;
10734 } else if (new_readreg == wm_gmii_i82544_readreg) {
10735 sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
10736 sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
10737 }
10738 }
10739
10740 /*
10741 * wm_get_phy_id_82575:
10742 *
10743 * Return PHY ID. Return -1 if it failed.
10744 */
10745 static int
10746 wm_get_phy_id_82575(struct wm_softc *sc)
10747 {
10748 uint32_t reg;
10749 int phyid = -1;
10750
10751 /* XXX */
10752 if ((sc->sc_flags & WM_F_SGMII) == 0)
10753 return -1;
10754
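	/*
	 * When the SGMII PHY is reached over MDIO, the PHY address is
	 * taken from MDIC on the 82575/82576 and from MDICNFG on the
	 * 82580 and newer.
	 */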
10755 if (wm_sgmii_uses_mdio(sc)) {
10756 switch (sc->sc_type) {
10757 case WM_T_82575:
10758 case WM_T_82576:
10759 reg = CSR_READ(sc, WMREG_MDIC);
10760 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
10761 break;
10762 case WM_T_82580:
10763 case WM_T_I350:
10764 case WM_T_I354:
10765 case WM_T_I210:
10766 case WM_T_I211:
10767 reg = CSR_READ(sc, WMREG_MDICNFG);
10768 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
10769 break;
10770 default:
10771 return -1;
10772 }
10773 }
10774
10775 return phyid;
10776 }
10777
10778 /*
10779 * wm_gmii_mediainit:
10780 *
10781 * Initialize media for use on 1000BASE-T devices.
10782 */
10783 static void
10784 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
10785 {
10786 device_t dev = sc->sc_dev;
10787 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10788 struct mii_data *mii = &sc->sc_mii;
10789
10790 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
10791 device_xname(sc->sc_dev), __func__));
10792
10793 /* We have GMII. */
10794 sc->sc_flags |= WM_F_HAS_MII;
10795
10796 if (sc->sc_type == WM_T_80003)
10797 sc->sc_tipg = TIPG_1000T_80003_DFLT;
10798 else
10799 sc->sc_tipg = TIPG_1000T_DFLT;
10800
10801 /*
10802 * Let the chip set speed/duplex on its own based on
10803 * signals from the PHY.
10804 * XXXbouyer - I'm not sure this is right for the 80003,
10805 * the em driver only sets CTRL_SLU here - but it seems to work.
10806 */
10807 sc->sc_ctrl |= CTRL_SLU;
10808 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10809
10810 /* Initialize our media structures and probe the GMII. */
10811 mii->mii_ifp = ifp;
10812
10813 mii->mii_statchg = wm_gmii_statchg;
10814
10815 	/* Switch PHY control from SMBus to PCIe. */
10816 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
10817 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
10818 || (sc->sc_type == WM_T_PCH_CNP))
10819 wm_init_phy_workarounds_pchlan(sc);
10820
10821 wm_gmii_reset(sc);
10822
10823 sc->sc_ethercom.ec_mii = &sc->sc_mii;
10824 ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
10825 wm_gmii_mediastatus, sc->sc_core_lock);
10826
10827 /* Setup internal SGMII PHY for SFP */
10828 wm_sgmii_sfp_preconfig(sc);
10829
10830 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
10831 || (sc->sc_type == WM_T_82580)
10832 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
10833 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
10834 if ((sc->sc_flags & WM_F_SGMII) == 0) {
10835 /* Attach only one port */
10836 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
10837 MII_OFFSET_ANY, MIIF_DOPAUSE);
10838 } else {
10839 int i, id;
10840 uint32_t ctrl_ext;
10841
10842 id = wm_get_phy_id_82575(sc);
10843 if (id != -1) {
10844 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
10845 id, MII_OFFSET_ANY, MIIF_DOPAUSE);
10846 }
10847 if ((id == -1)
10848 || (LIST_FIRST(&mii->mii_phys) == NULL)) {
10849 /* Power on sgmii phy if it is disabled */
10850 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10851 CSR_WRITE(sc, WMREG_CTRL_EXT,
10852 ctrl_ext &~ CTRL_EXT_SWDPIN(3));
10853 CSR_WRITE_FLUSH(sc);
10854 delay(300*1000); /* XXX too long */
10855
10856 /*
10857 				 * Scan PHY addresses from 1 to 8.
10858 				 *
10859 				 * I2C access may fail with the I2C register's
10860 				 * ERROR bit set, so suppress error messages
10861 				 * while scanning.
10862 */
10863 sc->phy.no_errprint = true;
10864 for (i = 1; i < 8; i++)
10865 mii_attach(sc->sc_dev, &sc->sc_mii,
10866 0xffffffff, i, MII_OFFSET_ANY,
10867 MIIF_DOPAUSE);
10868 sc->phy.no_errprint = false;
10869
10870 /* Restore previous sfp cage power state */
10871 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10872 }
10873 }
10874 } else
10875 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10876 MII_OFFSET_ANY, MIIF_DOPAUSE);
10877
10878 /*
10879  * If the MAC is a PCH2 or newer variant and no MII PHY was detected,
10880  * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
10881 */
10882 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
10883 || (sc->sc_type == WM_T_PCH_SPT)
10884 || (sc->sc_type == WM_T_PCH_CNP))
10885 && (LIST_FIRST(&mii->mii_phys) == NULL)) {
10886 wm_set_mdio_slow_mode_hv(sc);
10887 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10888 MII_OFFSET_ANY, MIIF_DOPAUSE);
10889 }
10890
10891 /*
10892 * (For ICH8 variants)
10893 * If PHY detection failed, use BM's r/w function and retry.
10894 */
10895 if (LIST_FIRST(&mii->mii_phys) == NULL) {
10896 /* if failed, retry with *_bm_* */
10897 aprint_verbose_dev(dev, "Assumed PHY access function "
10898 "(type = %d) might be incorrect. Use BM and retry.\n",
10899 sc->sc_phytype);
10900 sc->sc_phytype = WMPHY_BM;
10901 mii->mii_readreg = wm_gmii_bm_readreg;
10902 mii->mii_writereg = wm_gmii_bm_writereg;
10903
10904 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10905 MII_OFFSET_ANY, MIIF_DOPAUSE);
10906 }
10907
10908 if (LIST_FIRST(&mii->mii_phys) == NULL) {
10909 		/* No PHY was found. */
10910 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
10911 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
10912 sc->sc_phytype = WMPHY_NONE;
10913 } else {
10914 struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
10915
10916 /*
10917 		 * PHY found! Check the PHY type again with the second call
10918 		 * of wm_gmii_setup_phytype().
10919 */
10920 wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
10921 child->mii_mpd_model);
10922
10923 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
10924 }
10925 }
10926
10927 /*
10928 * wm_gmii_mediachange: [ifmedia interface function]
10929 *
10930 * Set hardware to newly-selected media on a 1000BASE-T device.
10931 */
10932 static int
10933 wm_gmii_mediachange(struct ifnet *ifp)
10934 {
10935 struct wm_softc *sc = ifp->if_softc;
10936 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10937 uint32_t reg;
10938 int rc;
10939
10940 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
10941 device_xname(sc->sc_dev), __func__));
10942 if ((ifp->if_flags & IFF_UP) == 0)
10943 return 0;
10944
10945 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
10946 if ((sc->sc_type == WM_T_82580)
10947 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
10948 || (sc->sc_type == WM_T_I211)) {
10949 reg = CSR_READ(sc, WMREG_PHPM);
10950 reg &= ~PHPM_GO_LINK_D;
10951 CSR_WRITE(sc, WMREG_PHPM, reg);
10952 }
10953
10954 /* Disable D0 LPLU. */
10955 wm_lplu_d0_disable(sc);
10956
10957 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
10958 sc->sc_ctrl |= CTRL_SLU;
10959 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10960 || (sc->sc_type > WM_T_82543)) {
10961 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
10962 } else {
10963 sc->sc_ctrl &= ~CTRL_ASDE;
10964 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10965 if (ife->ifm_media & IFM_FDX)
10966 sc->sc_ctrl |= CTRL_FD;
10967 switch (IFM_SUBTYPE(ife->ifm_media)) {
10968 case IFM_10_T:
10969 sc->sc_ctrl |= CTRL_SPEED_10;
10970 break;
10971 case IFM_100_TX:
10972 sc->sc_ctrl |= CTRL_SPEED_100;
10973 break;
10974 case IFM_1000_T:
10975 sc->sc_ctrl |= CTRL_SPEED_1000;
10976 break;
10977 case IFM_NONE:
10978 /* There is no specific setting for IFM_NONE */
10979 break;
10980 default:
10981 panic("wm_gmii_mediachange: bad media 0x%x",
10982 ife->ifm_media);
10983 }
10984 }
10985 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10986 CSR_WRITE_FLUSH(sc);
10987
10988 if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
10989 wm_serdes_mediachange(ifp);
10990
10991 if (sc->sc_type <= WM_T_82543)
10992 wm_gmii_reset(sc);
10993 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
10994 && ((sc->sc_flags & WM_F_SGMII) != 0)) {
10995 		/* Allow time for the SFP cage to power up the PHY. */
10996 delay(300 * 1000);
10997 wm_gmii_reset(sc);
10998 }
10999
11000 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
11001 return 0;
11002 return rc;
11003 }
11004
11005 /*
11006 * wm_gmii_mediastatus: [ifmedia interface function]
11007 *
11008 * Get the current interface media status on a 1000BASE-T device.
11009 */
11010 static void
11011 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11012 {
11013 struct wm_softc *sc = ifp->if_softc;
11014
11015 ether_mediastatus(ifp, ifmr);
11016 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
11017 | sc->sc_flowflags;
11018 }
11019
11020 #define MDI_IO CTRL_SWDPIN(2)
11021 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
11022 #define MDI_CLK CTRL_SWDPIN(3)
11023
11024 static void
11025 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
11026 {
11027 uint32_t i, v;
11028
11029 v = CSR_READ(sc, WMREG_CTRL);
11030 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11031 v |= MDI_DIR | CTRL_SWDPIO(3);
11032
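	/*
	 * Bit-bang the MDIO interface: shift the data out on MDI_IO,
	 * MSB first, pulsing MDI_CLK with roughly 10us setup and hold.
	 */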
11033 for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
11034 if (data & i)
11035 v |= MDI_IO;
11036 else
11037 v &= ~MDI_IO;
11038 CSR_WRITE(sc, WMREG_CTRL, v);
11039 CSR_WRITE_FLUSH(sc);
11040 delay(10);
11041 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11042 CSR_WRITE_FLUSH(sc);
11043 delay(10);
11044 CSR_WRITE(sc, WMREG_CTRL, v);
11045 CSR_WRITE_FLUSH(sc);
11046 delay(10);
11047 }
11048 }
11049
11050 static uint16_t
11051 wm_i82543_mii_recvbits(struct wm_softc *sc)
11052 {
11053 uint32_t v, i;
11054 uint16_t data = 0;
11055
11056 v = CSR_READ(sc, WMREG_CTRL);
11057 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11058 v |= CTRL_SWDPIO(3);
11059
11060 CSR_WRITE(sc, WMREG_CTRL, v);
11061 CSR_WRITE_FLUSH(sc);
11062 delay(10);
11063 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11064 CSR_WRITE_FLUSH(sc);
11065 delay(10);
11066 CSR_WRITE(sc, WMREG_CTRL, v);
11067 CSR_WRITE_FLUSH(sc);
11068 delay(10);
11069
11070 for (i = 0; i < 16; i++) {
11071 data <<= 1;
11072 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11073 CSR_WRITE_FLUSH(sc);
11074 delay(10);
11075 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
11076 data |= 1;
11077 CSR_WRITE(sc, WMREG_CTRL, v);
11078 CSR_WRITE_FLUSH(sc);
11079 delay(10);
11080 }
11081
11082 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11083 CSR_WRITE_FLUSH(sc);
11084 delay(10);
11085 CSR_WRITE(sc, WMREG_CTRL, v);
11086 CSR_WRITE_FLUSH(sc);
11087 delay(10);
11088
11089 return data;
11090 }
11091
11092 #undef MDI_IO
11093 #undef MDI_DIR
11094 #undef MDI_CLK
11095
11096 /*
11097 * wm_gmii_i82543_readreg: [mii interface function]
11098 *
11099 * Read a PHY register on the GMII (i82543 version).
11100 */
11101 static int
11102 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
11103 {
11104 struct wm_softc *sc = device_private(dev);
11105
11106 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11107 wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
11108 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
11109 *val = wm_i82543_mii_recvbits(sc) & 0xffff;
11110
11111 DPRINTF(sc, WM_DEBUG_GMII,
11112 ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
11113 device_xname(dev), phy, reg, *val));
11114
11115 return 0;
11116 }
11117
11118 /*
11119 * wm_gmii_i82543_writereg: [mii interface function]
11120 *
11121 * Write a PHY register on the GMII (i82543 version).
11122 */
11123 static int
11124 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
11125 {
11126 struct wm_softc *sc = device_private(dev);
11127
11128 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11129 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
11130 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
11131 (MII_COMMAND_START << 30), 32);
11132
11133 return 0;
11134 }
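
/*
 * Illustrative sketch, not driver code: how the clause 22 MDIO read
 * frame sent by wm_gmii_i82543_readreg() above is composed. After a
 * preamble of 32 ones, the 14 bits clocked out are start (2 bits),
 * opcode (2 bits), PHY address (5 bits) and register address (5 bits),
 * MSB first; wm_i82543_mii_recvbits() then handles the turnaround and
 * the 16 data bits. The helper name is hypothetical.
 */
#if 0
static uint32_t
wm_example_mdio_read_frame(int phy, int reg)
{

	/* ST=01 (MII_COMMAND_START), OP=10 (MII_COMMAND_READ). */
	return reg | (phy << 5) | (MII_COMMAND_READ << 10) |
	    (MII_COMMAND_START << 12);
}
#endif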
11135
11136 /*
11137 * wm_gmii_mdic_readreg: [mii interface function]
11138 *
11139 * Read a PHY register on the GMII.
11140 */
11141 static int
11142 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
11143 {
11144 struct wm_softc *sc = device_private(dev);
11145 uint32_t mdic = 0;
11146 int i;
11147
11148 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11149 && (reg > MII_ADDRMASK)) {
11150 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11151 __func__, sc->sc_phytype, reg);
11152 reg &= MII_ADDRMASK;
11153 }
11154
11155 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
11156 MDIC_REGADD(reg));
11157
11158 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11159 delay(50);
11160 mdic = CSR_READ(sc, WMREG_MDIC);
11161 if (mdic & MDIC_READY)
11162 break;
11163 }
11164
11165 if ((mdic & MDIC_READY) == 0) {
11166 DPRINTF(sc, WM_DEBUG_GMII,
11167 ("%s: MDIC read timed out: phy %d reg %d\n",
11168 device_xname(dev), phy, reg));
11169 return ETIMEDOUT;
11170 } else if (mdic & MDIC_E) {
11171 /* This is normal if no PHY is present. */
11172 DPRINTF(sc, WM_DEBUG_GMII,
11173 ("%s: MDIC read error: phy %d reg %d\n",
11174 device_xname(dev), phy, reg));
11175 return -1;
11176 } else
11177 *val = MDIC_DATA(mdic);
11178
11179 /*
11180 * Allow some time after each MDIC transaction to avoid
11181 * reading duplicate data in the next MDIC transaction.
11182 */
11183 if (sc->sc_type == WM_T_PCH2)
11184 delay(100);
11185
11186 return 0;
11187 }
11188
11189 /*
11190 * wm_gmii_mdic_writereg: [mii interface function]
11191 *
11192 * Write a PHY register on the GMII.
11193 */
11194 static int
11195 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
11196 {
11197 struct wm_softc *sc = device_private(dev);
11198 uint32_t mdic = 0;
11199 int i;
11200
11201 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11202 && (reg > MII_ADDRMASK)) {
11203 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11204 __func__, sc->sc_phytype, reg);
11205 reg &= MII_ADDRMASK;
11206 }
11207
11208 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
11209 MDIC_REGADD(reg) | MDIC_DATA(val));
11210
11211 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11212 delay(50);
11213 mdic = CSR_READ(sc, WMREG_MDIC);
11214 if (mdic & MDIC_READY)
11215 break;
11216 }
11217
11218 if ((mdic & MDIC_READY) == 0) {
11219 DPRINTF(sc, WM_DEBUG_GMII,
11220 ("%s: MDIC write timed out: phy %d reg %d\n",
11221 device_xname(dev), phy, reg));
11222 return ETIMEDOUT;
11223 } else if (mdic & MDIC_E) {
11224 DPRINTF(sc, WM_DEBUG_GMII,
11225 ("%s: MDIC write error: phy %d reg %d\n",
11226 device_xname(dev), phy, reg));
11227 return -1;
11228 }
11229
11230 /*
11231 * Allow some time after each MDIC transaction to avoid
11232 * reading duplicate data in the next MDIC transaction.
11233 */
11234 if (sc->sc_type == WM_T_PCH2)
11235 delay(100);
11236
11237 return 0;
11238 }
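
/*
 * Illustrative sketch, not driver code: a read-modify-write cycle
 * built from the two MDIC accessors above. The helper name is
 * hypothetical; the register and bits are whatever the caller needs.
 */
#if 0
static int
wm_example_mdic_rmw(device_t dev, int phy, int reg, uint16_t setbits)
{
	uint16_t val;
	int rv;

	if ((rv = wm_gmii_mdic_readreg(dev, phy, reg, &val)) != 0)
		return rv;
	return wm_gmii_mdic_writereg(dev, phy, reg, val | setbits);
}
#endif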
11239
11240 /*
11241 * wm_gmii_i82544_readreg: [mii interface function]
11242 *
11243 * Read a PHY register on the GMII.
11244 */
11245 static int
11246 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
11247 {
11248 struct wm_softc *sc = device_private(dev);
11249 int rv;
11250
11251 if (sc->phy.acquire(sc)) {
11252 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11253 return -1;
11254 }
11255
11256 rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
11257
11258 sc->phy.release(sc);
11259
11260 return rv;
11261 }
11262
11263 static int
11264 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11265 {
11266 struct wm_softc *sc = device_private(dev);
11267 int rv;
11268
11269 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11270 switch (sc->sc_phytype) {
11271 case WMPHY_IGP:
11272 case WMPHY_IGP_2:
11273 case WMPHY_IGP_3:
11274 rv = wm_gmii_mdic_writereg(dev, phy,
11275 IGPHY_PAGE_SELECT, reg);
11276 if (rv != 0)
11277 return rv;
11278 break;
11279 default:
11280 #ifdef WM_DEBUG
11281 device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
11282 __func__, sc->sc_phytype, reg);
11283 #endif
11284 break;
11285 }
11286 }
11287
11288 return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11289 }
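
/*
 * Illustrative sketch, not driver code: how a caller encodes an IGP
 * multi-page register for the locked accessor above. The page lives
 * in the bits above MII_ADDRMASK; IGPHY_PAGE_SELECT is written with
 * the full value and the MDIC access then uses only the low 5 bits.
 * The fragment assumes dev and phy in scope; the page and register
 * numbers are hypothetical.
 */
#if 0
	uint16_t val;

	/* Page 3, register 17. */
	wm_gmii_i82544_readreg_locked(dev, phy, (3 << 5) | 17, &val);
#endif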
11290
11291 /*
11292 * wm_gmii_i82544_writereg: [mii interface function]
11293 *
11294 * Write a PHY register on the GMII.
11295 */
11296 static int
11297 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
11298 {
11299 struct wm_softc *sc = device_private(dev);
11300 int rv;
11301
11302 if (sc->phy.acquire(sc)) {
11303 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11304 return -1;
11305 }
11306
11307 rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
11308 sc->phy.release(sc);
11309
11310 return rv;
11311 }
11312
11313 static int
11314 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11315 {
11316 struct wm_softc *sc = device_private(dev);
11317 int rv;
11318
11319 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11320 switch (sc->sc_phytype) {
11321 case WMPHY_IGP:
11322 case WMPHY_IGP_2:
11323 case WMPHY_IGP_3:
11324 rv = wm_gmii_mdic_writereg(dev, phy,
11325 IGPHY_PAGE_SELECT, reg);
11326 if (rv != 0)
11327 return rv;
11328 break;
11329 default:
11330 #ifdef WM_DEBUG
11331 device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
11332 __func__, sc->sc_phytype, reg);
11333 #endif
11334 break;
11335 }
11336 }
11337
11338 return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11339 }
11340
11341 /*
11342 * wm_gmii_i80003_readreg: [mii interface function]
11343 *
11344 * Read a PHY register on the Kumeran interface.
11345 * This could be handled by the PHY layer if we didn't have to lock the
11346 * resource ...
11347 */
11348 static int
11349 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
11350 {
11351 struct wm_softc *sc = device_private(dev);
11352 int page_select;
11353 uint16_t temp, temp2;
11354 int rv = 0;
11355
11356 if (phy != 1) /* Only one PHY on the Kumeran bus */
11357 return -1;
11358
11359 if (sc->phy.acquire(sc)) {
11360 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11361 return -1;
11362 }
11363
11364 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11365 page_select = GG82563_PHY_PAGE_SELECT;
11366 else {
11367 /*
11368 * Use Alternative Page Select register to access registers
11369 * 30 and 31.
11370 */
11371 page_select = GG82563_PHY_PAGE_SELECT_ALT;
11372 }
11373 temp = reg >> GG82563_PAGE_SHIFT;
11374 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11375 goto out;
11376
11377 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11378 /*
11379 * Wait an additional 200us to work around a bug in the ready
11380 * bit of the MDIC register.
11381 */
11382 delay(200);
11383 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11384 if ((rv != 0) || (temp2 != temp)) {
11385 device_printf(dev, "%s failed\n", __func__);
11386 rv = -1;
11387 goto out;
11388 }
11389 delay(200);
11390 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11391 delay(200);
11392 } else
11393 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11394
11395 out:
11396 sc->phy.release(sc);
11397 return rv;
11398 }
11399
11400 /*
11401 * wm_gmii_i80003_writereg: [mii interface function]
11402 *
11403 * Write a PHY register on the Kumeran interface.
11404 * This could be handled by the PHY layer if we didn't have to lock the
11405 * resource ...
11406 */
11407 static int
11408 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
11409 {
11410 struct wm_softc *sc = device_private(dev);
11411 int page_select, rv;
11412 uint16_t temp, temp2;
11413
11414 if (phy != 1) /* Only one PHY on the Kumeran bus */
11415 return -1;
11416
11417 if (sc->phy.acquire(sc)) {
11418 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11419 return -1;
11420 }
11421
11422 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11423 page_select = GG82563_PHY_PAGE_SELECT;
11424 else {
11425 /*
11426 * Use Alternative Page Select register to access registers
11427 * 30 and 31.
11428 */
11429 page_select = GG82563_PHY_PAGE_SELECT_ALT;
11430 }
11431 temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
11432 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11433 goto out;
11434
11435 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11436 /*
11437 * Wait an additional 200us to work around a bug in the ready
11438 * bit of the MDIC register.
11439 */
11440 delay(200);
11441 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11442 if ((rv != 0) || (temp2 != temp)) {
11443 device_printf(dev, "%s failed\n", __func__);
11444 rv = -1;
11445 goto out;
11446 }
11447 delay(200);
11448 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11449 delay(200);
11450 } else
11451 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11452
11453 out:
11454 sc->phy.release(sc);
11455 return rv;
11456 }
11457
11458 /*
11459 * wm_gmii_bm_readreg: [mii interface function]
11460 *
11461 * Read a PHY register on the BM PHY.
11462 * This could be handled by the PHY layer if we didn't have to lock the
11463 * resource ...
11464 */
11465 static int
11466 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
11467 {
11468 struct wm_softc *sc = device_private(dev);
11469 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11470 int rv;
11471
11472 if (sc->phy.acquire(sc)) {
11473 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11474 return -1;
11475 }
11476
11477 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11478 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11479 || (reg == 31)) ? 1 : phy;
11480 /* Page 800 works differently than the rest so it has its own func */
11481 if (page == BM_WUC_PAGE) {
11482 rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11483 goto release;
11484 }
11485
11486 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11487 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11488 && (sc->sc_type != WM_T_82583))
11489 rv = wm_gmii_mdic_writereg(dev, phy,
11490 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11491 else
11492 rv = wm_gmii_mdic_writereg(dev, phy,
11493 BME1000_PHY_PAGE_SELECT, page);
11494 if (rv != 0)
11495 goto release;
11496 }
11497
11498 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11499
11500 release:
11501 sc->phy.release(sc);
11502 return rv;
11503 }
11504
11505 /*
11506 * wm_gmii_bm_writereg: [mii interface function]
11507 *
11508 * Write a PHY register on the BM PHY.
11509 * This could be handled by the PHY layer if we didn't have to lock the
11510 * resource ...
11511 */
11512 static int
11513 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
11514 {
11515 struct wm_softc *sc = device_private(dev);
11516 uint16_t page = reg >> BME1000_PAGE_SHIFT;
11517 int rv;
11518
11519 if (sc->phy.acquire(sc)) {
11520 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11521 return -1;
11522 }
11523
11524 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11525 phy = ((page >= 768) || ((page == 0) && (reg == 25))
11526 || (reg == 31)) ? 1 : phy;
11527 /* Page 800 works differently than the rest so it has its own func */
11528 if (page == BM_WUC_PAGE) {
11529 rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
11530 goto release;
11531 }
11532
11533 if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11534 if ((phy == 1) && (sc->sc_type != WM_T_82574)
11535 && (sc->sc_type != WM_T_82583))
11536 rv = wm_gmii_mdic_writereg(dev, phy,
11537 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11538 else
11539 rv = wm_gmii_mdic_writereg(dev, phy,
11540 BME1000_PHY_PAGE_SELECT, page);
11541 if (rv != 0)
11542 goto release;
11543 }
11544
11545 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11546
11547 release:
11548 sc->phy.release(sc);
11549 return rv;
11550 }
11551
11552 /*
11553 * wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
11554 * @dev: pointer to the HW structure
11555 * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
11556 *
11557 * Assumes semaphore already acquired and phy_reg points to a valid memory
11558 * address to store contents of the BM_WUC_ENABLE_REG register.
11559 */
11560 static int
11561 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11562 {
11563 #ifdef WM_DEBUG
11564 struct wm_softc *sc = device_private(dev);
11565 #endif
11566 uint16_t temp;
11567 int rv;
11568
11569 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11570 device_xname(dev), __func__));
11571
11572 if (!phy_regp)
11573 return -1;
11574
11575 /* All page select, port ctrl and wakeup registers use phy address 1 */
11576
11577 /* Select Port Control Registers page */
11578 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11579 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11580 if (rv != 0)
11581 return rv;
11582
11583 /* Read WUCE and save it */
11584 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
11585 if (rv != 0)
11586 return rv;
11587
11588 /* Enable both PHY wakeup mode and Wakeup register page writes.
11589 * Prevent a power state change by disabling ME and Host PHY wakeup.
11590 */
11591 temp = *phy_regp;
11592 temp |= BM_WUC_ENABLE_BIT;
11593 temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
11594
11595 if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
11596 return rv;
11597
11598 /* Select Host Wakeup Registers page - caller now able to write
11599 * registers on the Wakeup registers page
11600 */
11601 return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11602 BM_WUC_PAGE << IGP3_PAGE_SHIFT);
11603 }
11604
11605 /*
11606 * wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
11607 * @dev: pointer to the HW structure
11608 * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
11609 *
11610 * Restore BM_WUC_ENABLE_REG to its original value.
11611 *
11612 * Assumes semaphore already acquired and *phy_reg is the contents of the
11613 * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
11614 * caller.
11615 */
11616 static int
11617 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11618 {
11619 #ifdef WM_DEBUG
11620 struct wm_softc *sc = device_private(dev);
11621 #endif
11622
11623 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11624 device_xname(dev), __func__));
11625
11626 if (!phy_regp)
11627 return -1;
11628
11629 /* Select Port Control Registers page */
11630 wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11631 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11632
11633 /* Restore 769.17 to its original value */
11634 wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
11635
11636 return 0;
11637 }
11638
11639 /*
11640 * wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
11641 * @sc: pointer to the HW structure
11642 * @offset: register offset to be read or written
11643 * @val: pointer to the data to read or write
11644 * @rd: determines if operation is read or write
11645 * @page_set: BM_WUC_PAGE already set and access enabled
11646 *
11647 * Read the PHY register at offset and store the retrieved information in
11648 * data, or write data to PHY register at offset. Note the procedure to
11649 * access the PHY wakeup registers is different than reading the other PHY
11650 * registers. It works as such:
11651 * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
11652 * 2) Set page to 800 for host (801 for manageability)
11653 * 3) Write the address using the address opcode (0x11)
11654 * 4) Read or write the data using the data opcode (0x12)
11655 * 5) Restore 769.17.2 to its original value
11656 *
11657 * Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
11658 * step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
11659 *
11660 * Assumes semaphore is already acquired. When page_set==TRUE, assumes
11661 * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
11662 * is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
11663 */
11664 static int
11665 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
11666 bool page_set)
11667 {
11668 struct wm_softc *sc = device_private(dev);
11669 uint16_t regnum = BM_PHY_REG_NUM(offset);
11670 uint16_t page = BM_PHY_REG_PAGE(offset);
11671 uint16_t wuce;
11672 int rv = 0;
11673
11674 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11675 device_xname(dev), __func__));
11676 /* XXX Gig must be disabled for MDIO accesses to page 800 */
11677 if ((sc->sc_type == WM_T_PCH)
11678 && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
11679 device_printf(dev,
11680 "Attempting to access page %d while gig enabled.\n", page);
11681 }
11682
11683 if (!page_set) {
11684 /* Enable access to PHY wakeup registers */
11685 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
11686 if (rv != 0) {
11687 device_printf(dev,
11688 "%s: Could not enable PHY wakeup reg access\n",
11689 __func__);
11690 return rv;
11691 }
11692 }
11693 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
11694 device_xname(sc->sc_dev), __func__, page, regnum));
11695
11696 /*
11697 * 2) Access PHY wakeup register.
11698 * See wm_access_phy_wakeup_reg_bm.
11699 */
11700
11701 /* Write the Wakeup register page offset value using opcode 0x11 */
11702 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
11703 if (rv != 0)
11704 return rv;
11705
11706 if (rd) {
11707 /* Read the Wakeup register page value using opcode 0x12 */
11708 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
11709 } else {
11710 /* Write the Wakeup register page value using opcode 0x12 */
11711 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
11712 }
11713 if (rv != 0)
11714 return rv;
11715
11716 if (!page_set)
11717 rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
11718
11719 return rv;
11720 }
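
/*
 * Illustrative sketch, not driver code: reading one wakeup register
 * through the helper above. With page_set == false the helper runs
 * steps 1, 2 and 5 itself, so the caller only needs to hold the PHY
 * semaphore. A BM_PHY_REG(page, reg) encoding macro is assumed here,
 * dev is assumed in scope and the register chosen is a placeholder.
 */
#if 0
	uint16_t wuc;
	int rv;

	rv = wm_access_phy_wakeup_reg_bm(dev, BM_PHY_REG(BM_WUC_PAGE, 1),
	    &wuc, true, false);
#endif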
11721
11722 /*
11723 * wm_gmii_hv_readreg: [mii interface function]
11724 *
11725 * Read a PHY register on the HV PHY (PCH and newer).
11726 * This could be handled by the PHY layer if we didn't have to lock the
11727 * resource ...
11728 */
11729 static int
11730 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
11731 {
11732 struct wm_softc *sc = device_private(dev);
11733 int rv;
11734
11735 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11736 device_xname(dev), __func__));
11737 if (sc->phy.acquire(sc)) {
11738 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11739 return -1;
11740 }
11741
11742 rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
11743 sc->phy.release(sc);
11744 return rv;
11745 }
11746
11747 static int
11748 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11749 {
11750 uint16_t page = BM_PHY_REG_PAGE(reg);
11751 uint16_t regnum = BM_PHY_REG_NUM(reg);
11752 int rv;
11753
11754 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
11755
11756 /* Page 800 works differently than the rest so it has its own func */
11757 if (page == BM_WUC_PAGE)
11758 return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11759
11760 /*
11761 * Pages between 1 and 767 work differently than the rest and
11762 * would need their own function; they are not supported here.
11763 */
11764 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11765 device_printf(dev, "%s: unsupported page %d\n", __func__, page);
11766 return -1;
11767 }
11768
11769 /*
11770 * XXX I21[789] documents say that the SMBus Address register is at
11771 * PHY address 01, Page 0 (not 768), Register 26.
11772 */
11773 if (page == HV_INTC_FC_PAGE_START)
11774 page = 0;
11775
11776 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11777 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11778 page << BME1000_PAGE_SHIFT);
11779 if (rv != 0)
11780 return rv;
11781 }
11782
11783 return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
11784 }
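
/*
 * Illustrative sketch, not driver code: the PHY address rewriting
 * performed by the locked accessor above. Any page at or beyond
 * HV_INTC_FC_PAGE_START (768) is steered to PHY address 1 no matter
 * which address the caller passed. A BM_PHY_REG(page, reg) encoding
 * macro is assumed; dev is assumed in scope.
 */
#if 0
	uint16_t val;

	/* 769.17 (BM_WUC_ENABLE_REG) ends up on PHY address 1. */
	wm_gmii_hv_readreg_locked(dev, 2, BM_PHY_REG(769, 17), &val);
#endif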
11785
11786 /*
11787 * wm_gmii_hv_writereg: [mii interface function]
11788 *
11789 * Write a PHY register on the HV PHY (PCH and newer).
11790 * This could be handled by the PHY layer if we didn't have to lock the
11791 * resource ...
11792 */
11793 static int
11794 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
11795 {
11796 struct wm_softc *sc = device_private(dev);
11797 int rv;
11798
11799 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11800 device_xname(dev), __func__));
11801
11802 if (sc->phy.acquire(sc)) {
11803 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11804 return -1;
11805 }
11806
11807 rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
11808 sc->phy.release(sc);
11809
11810 return rv;
11811 }
11812
11813 static int
11814 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11815 {
11816 struct wm_softc *sc = device_private(dev);
11817 uint16_t page = BM_PHY_REG_PAGE(reg);
11818 uint16_t regnum = BM_PHY_REG_NUM(reg);
11819 int rv;
11820
11821 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
11822
11823 /* Page 800 works differently than the rest so it has its own func */
11824 if (page == BM_WUC_PAGE)
11825 return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
11826 false);
11827
11828 /*
11829 * Pages between 1 and 767 work differently than the rest and
11830 * would need their own function; they are not supported here.
11831 */
11832 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11833 device_printf(dev, "%s: unsupported page %d\n", __func__, page);
11834 return -1;
11835 }
11836
11838 /*
11839 * XXX I21[789] documents say that the SMBus Address register
11840 * is at PHY address 01, Page 0 (not 768), Register 26.
11841 */
11842 if (page == HV_INTC_FC_PAGE_START)
11843 page = 0;
11844
11845 /*
11846 * XXX Workaround MDIO accesses being disabled after entering
11847 * IEEE Power Down (whenever bit 11 of the PHY control
11848 * register is set)
11849 */
11850 if (sc->sc_phytype == WMPHY_82578) {
11851 struct mii_softc *child;
11852
11853 child = LIST_FIRST(&sc->sc_mii.mii_phys);
11854 if ((child != NULL) && (child->mii_mpd_rev >= 1)
11855 && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
11856 && ((val & (1 << 11)) != 0)) {
11857 device_printf(dev, "XXX need workaround\n");
11858 }
11859 }
11860
11861 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11862 rv = wm_gmii_mdic_writereg(dev, 1,
11863 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11864 if (rv != 0)
11865 return rv;
11866 }
11868
11869 return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
11870 }
11871
11872 /*
11873 * wm_gmii_82580_readreg: [mii interface function]
11874 *
11875 * Read a PHY register on the 82580 and I350.
11876 * This could be handled by the PHY layer if we didn't have to lock the
11877 * resource ...
11878 */
11879 static int
11880 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
11881 {
11882 struct wm_softc *sc = device_private(dev);
11883 int rv;
11884
11885 if (sc->phy.acquire(sc) != 0) {
11886 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11887 return -1;
11888 }
11889
11890 #ifdef DIAGNOSTIC
11891 if (reg > MII_ADDRMASK) {
11892 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11893 __func__, sc->sc_phytype, reg);
11894 reg &= MII_ADDRMASK;
11895 }
11896 #endif
11897 rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
11898
11899 sc->phy.release(sc);
11900 return rv;
11901 }
11902
11903 /*
11904 * wm_gmii_82580_writereg: [mii interface function]
11905 *
11906 * Write a PHY register on the 82580 and I350.
11907 * This could be handled by the PHY layer if we didn't have to lock the
11908 * resource ...
11909 */
11910 static int
11911 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
11912 {
11913 struct wm_softc *sc = device_private(dev);
11914 int rv;
11915
11916 if (sc->phy.acquire(sc) != 0) {
11917 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11918 return -1;
11919 }
11920
11921 #ifdef DIAGNOSTIC
11922 if (reg > MII_ADDRMASK) {
11923 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11924 __func__, sc->sc_phytype, reg);
11925 reg &= MII_ADDRMASK;
11926 }
11927 #endif
11928 rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
11929
11930 sc->phy.release(sc);
11931 return rv;
11932 }
11933
11934 /*
11935 * wm_gmii_gs40g_readreg: [mii interface function]
11936 *
11937 * Read a PHY register on the I210 and I211.
11938 * This could be handled by the PHY layer if we didn't have to lock the
11939 * resource ...
11940 */
11941 static int
11942 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
11943 {
11944 struct wm_softc *sc = device_private(dev);
11945 int page, offset;
11946 int rv;
11947
11948 /* Acquire semaphore */
11949 if (sc->phy.acquire(sc)) {
11950 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11951 return -1;
11952 }
11953
11954 /* Page select */
11955 page = reg >> GS40G_PAGE_SHIFT;
11956 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11957 if (rv != 0)
11958 goto release;
11959
11960 /* Read reg */
11961 offset = reg & GS40G_OFFSET_MASK;
11962 rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
11963
11964 release:
11965 sc->phy.release(sc);
11966 return rv;
11967 }
11968
11969 /*
11970 * wm_gmii_gs40g_writereg: [mii interface function]
11971 *
11972 * Write a PHY register on the I210 and I211.
11973 * This could be handled by the PHY layer if we didn't have to lock the
11974 * resource ...
11975 */
11976 static int
11977 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
11978 {
11979 struct wm_softc *sc = device_private(dev);
11980 uint16_t page;
11981 int offset, rv;
11982
11983 /* Acquire semaphore */
11984 if (sc->phy.acquire(sc)) {
11985 device_printf(dev, "%s: failed to get semaphore\n", __func__);
11986 return -1;
11987 }
11988
11989 /* Page select */
11990 page = reg >> GS40G_PAGE_SHIFT;
11991 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11992 if (rv != 0)
11993 goto release;
11994
11995 /* Write reg */
11996 offset = reg & GS40G_OFFSET_MASK;
11997 rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
11998
11999 release:
12000 /* Release semaphore */
12001 sc->phy.release(sc);
12002 return rv;
12003 }
12004
12005 /*
12006 * wm_gmii_statchg: [mii interface function]
12007 *
12008 * Callback from MII layer when media changes.
12009 */
12010 static void
12011 wm_gmii_statchg(struct ifnet *ifp)
12012 {
12013 struct wm_softc *sc = ifp->if_softc;
12014 struct mii_data *mii = &sc->sc_mii;
12015
12016 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
12017 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12018 sc->sc_fcrtl &= ~FCRTL_XONE;
12019
12020 /* Get flow control negotiation result. */
12021 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
12022 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
12023 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
12024 mii->mii_media_active &= ~IFM_ETH_FMASK;
12025 }
12026
12027 if (sc->sc_flowflags & IFM_FLOW) {
12028 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
12029 sc->sc_ctrl |= CTRL_TFCE;
12030 sc->sc_fcrtl |= FCRTL_XONE;
12031 }
12032 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
12033 sc->sc_ctrl |= CTRL_RFCE;
12034 }
12035
12036 if (mii->mii_media_active & IFM_FDX) {
12037 DPRINTF(sc, WM_DEBUG_LINK,
12038 ("%s: LINK: statchg: FDX\n", ifp->if_xname));
12039 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12040 } else {
12041 DPRINTF(sc, WM_DEBUG_LINK,
12042 ("%s: LINK: statchg: HDX\n", ifp->if_xname));
12043 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12044 }
12045
12046 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12047 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12048 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
12049 : WMREG_FCRTL, sc->sc_fcrtl);
12050 if (sc->sc_type == WM_T_80003) {
12051 switch (IFM_SUBTYPE(mii->mii_media_active)) {
12052 case IFM_1000_T:
12053 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12054 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
12055 sc->sc_tipg = TIPG_1000T_80003_DFLT;
12056 break;
12057 default:
12058 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12059 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
12060 sc->sc_tipg = TIPG_10_100_80003_DFLT;
12061 break;
12062 }
12063 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
12064 }
12065 }
12066
12067 /* Kumeran related (80003, ICH* and PCH*) */
12068
12069 /*
12070 * wm_kmrn_readreg:
12071 *
12072 * Read a Kumeran register.
12073 */
12074 static int
12075 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
12076 {
12077 int rv;
12078
12079 if (sc->sc_type == WM_T_80003)
12080 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12081 else
12082 rv = sc->phy.acquire(sc);
12083 if (rv != 0) {
12084 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12085 __func__);
12086 return rv;
12087 }
12088
12089 rv = wm_kmrn_readreg_locked(sc, reg, val);
12090
12091 if (sc->sc_type == WM_T_80003)
12092 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12093 else
12094 sc->phy.release(sc);
12095
12096 return rv;
12097 }
12098
12099 static int
12100 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
12101 {
12102
12103 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12104 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
12105 KUMCTRLSTA_REN);
12106 CSR_WRITE_FLUSH(sc);
12107 delay(2);
12108
12109 *val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
12110
12111 return 0;
12112 }
12113
12114 /*
12115 * wm_kmrn_writereg:
12116 *
12117 * Write a Kumeran register.
12118 */
12119 static int
12120 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
12121 {
12122 int rv;
12123
12124 if (sc->sc_type == WM_T_80003)
12125 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12126 else
12127 rv = sc->phy.acquire(sc);
12128 if (rv != 0) {
12129 device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12130 __func__);
12131 return rv;
12132 }
12133
12134 rv = wm_kmrn_writereg_locked(sc, reg, val);
12135
12136 if (sc->sc_type == WM_T_80003)
12137 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12138 else
12139 sc->phy.release(sc);
12140
12141 return rv;
12142 }
12143
12144 static int
12145 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
12146 {
12147
12148 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12149 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
12150
12151 return 0;
12152 }
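
/*
 * Illustrative sketch, not driver code: a read-modify-write of a
 * Kumeran register using the helpers above, which take care of the
 * 80003 vs. ICH/PCH semaphore difference. The helper name is
 * hypothetical.
 */
#if 0
static int
wm_example_kmrn_rmw(struct wm_softc *sc, int reg, uint16_t setbits)
{
	uint16_t val;
	int rv;

	if ((rv = wm_kmrn_readreg(sc, reg, &val)) != 0)
		return rv;
	return wm_kmrn_writereg(sc, reg, val | setbits);
}
#endif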
12153
12154 /*
12155 * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
12156 * This access method is different from IEEE MMD.
12157 */
12158 static int
12159 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
12160 {
12161 struct wm_softc *sc = device_private(dev);
12162 int rv;
12163
12164 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
12165 if (rv != 0)
12166 return rv;
12167
12168 if (rd)
12169 rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
12170 else
12171 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
12172 return rv;
12173 }
12174
12175 static int
12176 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
12177 {
12178
12179 return wm_access_emi_reg_locked(dev, reg, val, true);
12180 }
12181
12182 static int
12183 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
12184 {
12185
12186 return wm_access_emi_reg_locked(dev, reg, &val, false);
12187 }
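
/*
 * Illustrative sketch, not driver code: the EMI helpers above form an
 * address/data indirection pair; I82579_EMI_ADDR selects the location
 * and I82579_EMI_DATA then moves the data. The fragment assumes dev
 * in scope and the PHY semaphore held; the EMI register number is a
 * placeholder.
 */
#if 0
	uint16_t val;

	wm_read_emi_reg_locked(dev, 0x10, &val);
	wm_write_emi_reg_locked(dev, 0x10, val | 0x0001);
#endif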
12188
12189 /* SGMII related */
12190
12191 /*
12192 * wm_sgmii_uses_mdio
12193 *
12194 * Check whether the transaction is to the internal PHY or the external
12195 * MDIO interface. Return true if it's MDIO.
12196 */
12197 static bool
12198 wm_sgmii_uses_mdio(struct wm_softc *sc)
12199 {
12200 uint32_t reg;
12201 bool ismdio = false;
12202
12203 switch (sc->sc_type) {
12204 case WM_T_82575:
12205 case WM_T_82576:
12206 reg = CSR_READ(sc, WMREG_MDIC);
12207 ismdio = ((reg & MDIC_DEST) != 0);
12208 break;
12209 case WM_T_82580:
12210 case WM_T_I350:
12211 case WM_T_I354:
12212 case WM_T_I210:
12213 case WM_T_I211:
12214 reg = CSR_READ(sc, WMREG_MDICNFG);
12215 ismdio = ((reg & MDICNFG_DEST) != 0);
12216 break;
12217 default:
12218 break;
12219 }
12220
12221 return ismdio;
12222 }
12223
12224 /* Set up internal SGMII PHY for SFP */
12225 static void
12226 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
12227 {
12228 uint16_t id1, id2, phyreg;
12229 int i, rv;
12230
12231 if (((sc->sc_flags & WM_F_SGMII) == 0)
12232 || ((sc->sc_flags & WM_F_SFP) == 0))
12233 return;
12234
12235 for (i = 0; i < MII_NPHY; i++) {
12236 sc->phy.no_errprint = true;
12237 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
12238 if (rv != 0)
12239 continue;
12240 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
12241 if (rv != 0)
12242 continue;
12243 if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
12244 continue;
12245 sc->phy.no_errprint = false;
12246
12247 sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
12248 phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
12249 phyreg |= ESSR_SGMII_WOC_COPPER;
12250 sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
12251 break;
12252 }
12254 }
12255
12256 /*
12257 * wm_sgmii_readreg: [mii interface function]
12258 *
12259 * Read a PHY register on the SGMII
12260 * This could be handled by the PHY layer if we didn't have to lock the
12261 * resource ...
12262 */
12263 static int
12264 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
12265 {
12266 struct wm_softc *sc = device_private(dev);
12267 int rv;
12268
12269 if (sc->phy.acquire(sc)) {
12270 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12271 return -1;
12272 }
12273
12274 rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
12275
12276 sc->phy.release(sc);
12277 return rv;
12278 }
12279
12280 static int
12281 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12282 {
12283 struct wm_softc *sc = device_private(dev);
12284 uint32_t i2ccmd;
12285 int i, rv = 0;
12286
12287 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12288 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12289 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12290
12291 /* Poll the ready bit */
12292 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12293 delay(50);
12294 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12295 if (i2ccmd & I2CCMD_READY)
12296 break;
12297 }
12298 if ((i2ccmd & I2CCMD_READY) == 0) {
12299 device_printf(dev, "I2CCMD Read did not complete\n");
12300 rv = ETIMEDOUT;
12301 }
12302 if ((i2ccmd & I2CCMD_ERROR) != 0) {
12303 if (!sc->phy.no_errprint)
12304 device_printf(dev, "I2CCMD Error bit set\n");
12305 rv = EIO;
12306 }
12307
12308 *val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
12309
12310 return rv;
12311 }
12312
12313 /*
12314 * wm_sgmii_writereg: [mii interface function]
12315 *
12316 * Write a PHY register on the SGMII.
12317 * This could be handled by the PHY layer if we didn't have to lock the
12318 * resource ...
12319 */
12320 static int
12321 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
12322 {
12323 struct wm_softc *sc = device_private(dev);
12324 int rv;
12325
12326 if (sc->phy.acquire(sc) != 0) {
12327 device_printf(dev, "%s: failed to get semaphore\n", __func__);
12328 return -1;
12329 }
12330
12331 rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
12332
12333 sc->phy.release(sc);
12334
12335 return rv;
12336 }
12337
12338 static int
12339 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12340 {
12341 struct wm_softc *sc = device_private(dev);
12342 uint32_t i2ccmd;
12343 uint16_t swapdata;
12344 int rv = 0;
12345 int i;
12346
12347 /* Swap the data bytes for the I2C interface */
12348 swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
12349 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12350 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
12351 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12352
12353 /* Poll the ready bit */
12354 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12355 delay(50);
12356 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12357 if (i2ccmd & I2CCMD_READY)
12358 break;
12359 }
12360 if ((i2ccmd & I2CCMD_READY) == 0) {
12361 device_printf(dev, "I2CCMD Write did not complete\n");
12362 rv = ETIMEDOUT;
12363 }
12364 if ((i2ccmd & I2CCMD_ERROR) != 0) {
12365 device_printf(dev, "I2CCMD Error bit set\n");
12366 rv = EIO;
12367 }
12368
12369 return rv;
12370 }
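
/*
 * Illustrative sketch, not driver code: the byte swap both SGMII
 * accessors above apply. The I2CCMD data field is big-endian on the
 * wire, so a 16-bit host value is swapped both on read and on write.
 * The helper name is hypothetical.
 */
#if 0
static uint16_t
wm_example_i2c_swap(uint16_t v)
{

	return ((v >> 8) & 0x00ff) | ((v << 8) & 0xff00);
}
#endif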
12371
12372 /* TBI related */
12373
12374 static bool
12375 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
12376 {
12377 bool sig;
12378
12379 sig = ctrl & CTRL_SWDPIN(1);
12380
12381 /*
12382 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
12383 * detect a signal, 1 if they don't.
12384 */
12385 if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
12386 sig = !sig;
12387
12388 return sig;
12389 }
12390
12391 /*
12392 * wm_tbi_mediainit:
12393 *
12394 * Initialize media for use on 1000BASE-X devices.
12395 */
12396 static void
12397 wm_tbi_mediainit(struct wm_softc *sc)
12398 {
12399 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12400 const char *sep = "";
12401
12402 if (sc->sc_type < WM_T_82543)
12403 sc->sc_tipg = TIPG_WM_DFLT;
12404 else
12405 sc->sc_tipg = TIPG_LG_DFLT;
12406
12407 sc->sc_tbi_serdes_anegticks = 5;
12408
12409 /* Initialize our media structures */
12410 sc->sc_mii.mii_ifp = ifp;
12411 sc->sc_ethercom.ec_mii = &sc->sc_mii;
12412
12413 ifp->if_baudrate = IF_Gbps(1);
12414 if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
12415 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12416 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12417 wm_serdes_mediachange, wm_serdes_mediastatus,
12418 sc->sc_core_lock);
12419 } else {
12420 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12421 wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
12422 }
12423
12424 /*
12425 * SWD Pins:
12426 *
12427 * 0 = Link LED (output)
12428 * 1 = Loss Of Signal (input)
12429 */
12430 sc->sc_ctrl |= CTRL_SWDPIO(0);
12431
12432 /* XXX Perhaps this is only for TBI */
12433 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12434 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
12435
12436 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
12437 sc->sc_ctrl &= ~CTRL_LRST;
12438
12439 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12440
12441 #define ADD(ss, mm, dd) \
12442 do { \
12443 aprint_normal("%s%s", sep, ss); \
12444 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
12445 sep = ", "; \
12446 } while (/*CONSTCOND*/0)
12447
12448 aprint_normal_dev(sc->sc_dev, "");
12449
12450 if (sc->sc_type == WM_T_I354) {
12451 uint32_t status;
12452
12453 status = CSR_READ(sc, WMREG_STATUS);
12454 if (((status & STATUS_2P5_SKU) != 0)
12455 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12456 ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
12457 } else
12458 ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
12459 } else if (sc->sc_type == WM_T_82545) {
12460 /* Only 82545 is LX (XXX except SFP) */
12461 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12462 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12463 } else if (sc->sc_sfptype != 0) {
12464 /* XXX wm(4) fiber/serdes don't use ifm_data */
12465 switch (sc->sc_sfptype) {
12466 default:
12467 case SFF_SFP_ETH_FLAGS_1000SX:
12468 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12469 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12470 break;
12471 case SFF_SFP_ETH_FLAGS_1000LX:
12472 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12473 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12474 break;
12475 case SFF_SFP_ETH_FLAGS_1000CX:
12476 ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
12477 ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
12478 break;
12479 case SFF_SFP_ETH_FLAGS_1000T:
12480 ADD("1000baseT", IFM_1000_T, 0);
12481 ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
12482 break;
12483 case SFF_SFP_ETH_FLAGS_100FX:
12484 ADD("100baseFX", IFM_100_FX, ANAR_TX);
12485 ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
12486 break;
12487 }
12488 } else {
12489 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12490 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12491 }
12492 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
12493 aprint_normal("\n");
12494
12495 #undef ADD
12496
12497 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
12498 }
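
/*
 * Illustrative sketch, not driver code: what a single ADD()
 * invocation above expands to, separator bookkeeping aside.
 */
#if 0
	aprint_normal("%s%s", sep, "1000baseSX");
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_1000_SX,
	    ANAR_X_HD, NULL);
#endif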
12499
12500 /*
12501 * wm_tbi_mediachange: [ifmedia interface function]
12502 *
12503 * Set hardware to newly-selected media on a 1000BASE-X device.
12504 */
12505 static int
12506 wm_tbi_mediachange(struct ifnet *ifp)
12507 {
12508 struct wm_softc *sc = ifp->if_softc;
12509 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12510 uint32_t status, ctrl;
12511 bool signal;
12512 int i;
12513
12514 KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
12515 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12516 /* XXX need some work for >= 82571 and < 82575 */
12517 if (sc->sc_type < WM_T_82575)
12518 return 0;
12519 }
12520
12521 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12522 || (sc->sc_type >= WM_T_82575))
12523 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12524
12525 sc->sc_ctrl &= ~CTRL_LRST;
12526 sc->sc_txcw = TXCW_ANE;
12527 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12528 sc->sc_txcw |= TXCW_FD | TXCW_HD;
12529 else if (ife->ifm_media & IFM_FDX)
12530 sc->sc_txcw |= TXCW_FD;
12531 else
12532 sc->sc_txcw |= TXCW_HD;
12533
12534 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
12535 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
12536
12537 DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
12538 device_xname(sc->sc_dev), sc->sc_txcw));
12539 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12540 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12541 CSR_WRITE_FLUSH(sc);
12542 delay(1000);
12543
12544 ctrl = CSR_READ(sc, WMREG_CTRL);
12545 signal = wm_tbi_havesignal(sc, ctrl);
12546
12547 DPRINTF(sc, WM_DEBUG_LINK,
12548 ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
12549
12550 if (signal) {
12551 /* Have signal; wait for the link to come up. */
12552 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
12553 delay(10000);
12554 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
12555 break;
12556 }
12557
12558 DPRINTF(sc, WM_DEBUG_LINK,
12559 ("%s: i = %d after waiting for link\n",
12560 device_xname(sc->sc_dev), i));
12561
12562 status = CSR_READ(sc, WMREG_STATUS);
12563 DPRINTF(sc, WM_DEBUG_LINK,
12564 ("%s: status after final read = 0x%x, STATUS_LU = %#"
12565 __PRIxBIT "\n",
12566 device_xname(sc->sc_dev), status, STATUS_LU));
12567 if (status & STATUS_LU) {
12568 /* Link is up. */
12569 DPRINTF(sc, WM_DEBUG_LINK,
12570 ("%s: LINK: set media -> link up %s\n",
12571 device_xname(sc->sc_dev),
12572 (status & STATUS_FD) ? "FDX" : "HDX"));
12573
12574 /*
12575 * NOTE: CTRL will update TFCE and RFCE automatically,
12576 * so we should update sc->sc_ctrl
12577 */
12578 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
12579 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12580 sc->sc_fcrtl &= ~FCRTL_XONE;
12581 if (status & STATUS_FD)
12582 sc->sc_tctl |=
12583 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12584 else
12585 sc->sc_tctl |=
12586 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12587 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
12588 sc->sc_fcrtl |= FCRTL_XONE;
12589 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12590 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
12591 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
12592 sc->sc_tbi_linkup = 1;
12593 } else {
12594 if (i == WM_LINKUP_TIMEOUT)
12595 wm_check_for_link(sc);
12596 /* Link is down. */
12597 DPRINTF(sc, WM_DEBUG_LINK,
12598 ("%s: LINK: set media -> link down\n",
12599 device_xname(sc->sc_dev)));
12600 sc->sc_tbi_linkup = 0;
12601 }
12602 } else {
12603 DPRINTF(sc, WM_DEBUG_LINK,
12604 ("%s: LINK: set media -> no signal\n",
12605 device_xname(sc->sc_dev)));
12606 sc->sc_tbi_linkup = 0;
12607 }
12608
12609 wm_tbi_serdes_set_linkled(sc);
12610
12611 return 0;
12612 }
12613
12614 /*
12615 * wm_tbi_mediastatus: [ifmedia interface function]
12616 *
12617 * Get the current interface media status on a 1000BASE-X device.
12618 */
12619 static void
12620 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
12621 {
12622 struct wm_softc *sc = ifp->if_softc;
12623 uint32_t ctrl, status;
12624
12625 ifmr->ifm_status = IFM_AVALID;
12626 ifmr->ifm_active = IFM_ETHER;
12627
12628 status = CSR_READ(sc, WMREG_STATUS);
12629 if ((status & STATUS_LU) == 0) {
12630 ifmr->ifm_active |= IFM_NONE;
12631 return;
12632 }
12633
12634 ifmr->ifm_status |= IFM_ACTIVE;
12635 /* Only 82545 is LX */
12636 if (sc->sc_type == WM_T_82545)
12637 ifmr->ifm_active |= IFM_1000_LX;
12638 else
12639 ifmr->ifm_active |= IFM_1000_SX;
12640 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
12641 ifmr->ifm_active |= IFM_FDX;
12642 else
12643 ifmr->ifm_active |= IFM_HDX;
12644 ctrl = CSR_READ(sc, WMREG_CTRL);
12645 if (ctrl & CTRL_RFCE)
12646 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
12647 if (ctrl & CTRL_TFCE)
12648 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
12649 }
12650
12651 /* XXX TBI only */
12652 static int
12653 wm_check_for_link(struct wm_softc *sc)
12654 {
12655 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12656 uint32_t rxcw;
12657 uint32_t ctrl;
12658 uint32_t status;
12659 bool signal;
12660
12661 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
12662 device_xname(sc->sc_dev), __func__));
12663
12664 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12665 /* XXX need some work for >= 82571 */
12666 if (sc->sc_type >= WM_T_82571) {
12667 sc->sc_tbi_linkup = 1;
12668 return 0;
12669 }
12670 }
12671
12672 rxcw = CSR_READ(sc, WMREG_RXCW);
12673 ctrl = CSR_READ(sc, WMREG_CTRL);
12674 status = CSR_READ(sc, WMREG_STATUS);
12675 signal = wm_tbi_havesignal(sc, ctrl);
12676
12677 DPRINTF(sc, WM_DEBUG_LINK,
12678 ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
12679 device_xname(sc->sc_dev), __func__, signal,
12680 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
12681
12682 /*
12683 * SWDPIN LU RXCW
12684 * 0 0 0
12685 * 0 0 1 (should not happen)
12686 * 0 1 0 (should not happen)
12687 * 0 1 1 (should not happen)
12688 * 1 0 0 Disable autonego and force linkup
12689 * 1 0 1 got /C/ but not linkup yet
12690 * 1 1 0 (linkup)
12691 * 1 1 1 If IFM_AUTO, back to autonego
12692 *
12693 */
12694 if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
12695 DPRINTF(sc, WM_DEBUG_LINK,
12696 ("%s: %s: force linkup and fullduplex\n",
12697 device_xname(sc->sc_dev), __func__));
12698 sc->sc_tbi_linkup = 0;
12699 /* Disable auto-negotiation in the TXCW register */
12700 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
12701
12702 /*
12703 * Force link-up and also force full-duplex.
12704 *
12705 * NOTE: TFCE and RFCE in CTRL were updated automatically,
12706 * so we should update sc->sc_ctrl
12707 */
12708 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
12709 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12710 } else if (((status & STATUS_LU) != 0)
12711 && ((rxcw & RXCW_C) != 0)
12712 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
12713 sc->sc_tbi_linkup = 1;
12714 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
12715 device_xname(sc->sc_dev), __func__));
12716 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12717 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
12718 } else if (signal && ((rxcw & RXCW_C) != 0)) {
12719 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
12720 device_xname(sc->sc_dev), __func__));
12721 } else {
12722 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
12723 device_xname(sc->sc_dev), __func__, rxcw, ctrl,
12724 status));
12725 }
12726
12727 return 0;
12728 }
12729
12730 /*
12731 * wm_tbi_tick:
12732 *
12733 * Check the link on TBI devices.
12734 * This function acts as mii_tick().
12735 */
12736 static void
12737 wm_tbi_tick(struct wm_softc *sc)
12738 {
12739 struct mii_data *mii = &sc->sc_mii;
12740 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12741 uint32_t status;
12742
12743 KASSERT(WM_CORE_LOCKED(sc));
12744
12745 status = CSR_READ(sc, WMREG_STATUS);
12746
12747 /* XXX is this needed? */
12748 (void)CSR_READ(sc, WMREG_RXCW);
12749 (void)CSR_READ(sc, WMREG_CTRL);
12750
12751 /* set link status */
12752 if ((status & STATUS_LU) == 0) {
12753 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
12754 device_xname(sc->sc_dev)));
12755 sc->sc_tbi_linkup = 0;
12756 } else if (sc->sc_tbi_linkup == 0) {
12757 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
12758 device_xname(sc->sc_dev),
12759 (status & STATUS_FD) ? "FDX" : "HDX"));
12760 sc->sc_tbi_linkup = 1;
12761 sc->sc_tbi_serdes_ticks = 0;
12762 }
12763
12764 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
12765 goto setled;
12766
12767 if ((status & STATUS_LU) == 0) {
12768 sc->sc_tbi_linkup = 0;
12769 /* If the timer expired, retry autonegotiation */
12770 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12771 && (++sc->sc_tbi_serdes_ticks
12772 >= sc->sc_tbi_serdes_anegticks)) {
12773 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
12774 device_xname(sc->sc_dev), __func__));
12775 sc->sc_tbi_serdes_ticks = 0;
12776 /*
12777 * Reset the link, and let autonegotiation do
12778 * its thing
12779 */
12780 sc->sc_ctrl |= CTRL_LRST;
12781 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12782 CSR_WRITE_FLUSH(sc);
12783 delay(1000);
12784 sc->sc_ctrl &= ~CTRL_LRST;
12785 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12786 CSR_WRITE_FLUSH(sc);
12787 delay(1000);
12788 CSR_WRITE(sc, WMREG_TXCW,
12789 sc->sc_txcw & ~TXCW_ANE);
12790 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12791 }
12792 }
12793
12794 setled:
12795 wm_tbi_serdes_set_linkled(sc);
12796 }
12797
12798 /* SERDES related */
12799 static void
12800 wm_serdes_power_up_link_82575(struct wm_softc *sc)
12801 {
12802 uint32_t reg;
12803
12804 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12805 && ((sc->sc_flags & WM_F_SGMII) == 0))
12806 return;
12807
12808 /* Enable PCS to turn on link */
12809 reg = CSR_READ(sc, WMREG_PCS_CFG);
12810 reg |= PCS_CFG_PCS_EN;
12811 CSR_WRITE(sc, WMREG_PCS_CFG, reg);
12812
12813 /* Power up the laser */
12814 reg = CSR_READ(sc, WMREG_CTRL_EXT);
12815 reg &= ~CTRL_EXT_SWDPIN(3);
12816 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12817
12818 /* Flush the write to verify completion */
12819 CSR_WRITE_FLUSH(sc);
12820 delay(1000);
12821 }
12822
12823 static int
12824 wm_serdes_mediachange(struct ifnet *ifp)
12825 {
12826 struct wm_softc *sc = ifp->if_softc;
12827 bool pcs_autoneg = true; /* XXX */
12828 uint32_t ctrl_ext, pcs_lctl, reg;
12829
12830 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12831 && ((sc->sc_flags & WM_F_SGMII) == 0))
12832 return 0;
12833
12834 /* XXX Currently, this function is not called on 8257[12] */
12835 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12836 || (sc->sc_type >= WM_T_82575))
12837 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12838
12839 /* Power on the SFP cage if present */
12840 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12841 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
12842 ctrl_ext |= CTRL_EXT_I2C_ENA;
12843 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12844
12845 sc->sc_ctrl |= CTRL_SLU;
12846
12847 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
12848 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
12849
12850 reg = CSR_READ(sc, WMREG_CONNSW);
12851 reg |= CONNSW_ENRGSRC;
12852 CSR_WRITE(sc, WMREG_CONNSW, reg);
12853 }
12854
12855 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
12856 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
12857 case CTRL_EXT_LINK_MODE_SGMII:
12858 /* SGMII mode lets the phy handle forcing speed/duplex */
12859 pcs_autoneg = true;
12860 /* Autoneg timeout should be disabled for SGMII mode */
12861 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
12862 break;
12863 case CTRL_EXT_LINK_MODE_1000KX:
12864 pcs_autoneg = false;
12865 /* FALLTHROUGH */
12866 default:
12867 if ((sc->sc_type == WM_T_82575)
12868 || (sc->sc_type == WM_T_82576)) {
12869 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
12870 pcs_autoneg = false;
12871 }
12872 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
12873 | CTRL_FRCFDX;
12874
12875 /* Set speed of 1000/Full if speed/duplex is forced */
12876 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
12877 }
12878 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12879
12880 pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
12881 PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
12882
12883 if (pcs_autoneg) {
12884 /* Set PCS register for autoneg */
12885 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
12886
12887 /* Disable force flow control for autoneg */
12888 pcs_lctl &= ~PCS_LCTL_FORCE_FC;
12889
12890 /* Configure flow control advertisement for autoneg */
12891 reg = CSR_READ(sc, WMREG_PCS_ANADV);
12892 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
12893 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
12894 CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
12895 } else
12896 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
12897
12898 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
12899
12900 return 0;
12901 }
12902
12903 static void
12904 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
12905 {
12906 struct wm_softc *sc = ifp->if_softc;
12907 struct mii_data *mii = &sc->sc_mii;
12908 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12909 uint32_t pcs_adv, pcs_lpab, reg;
12910
12911 ifmr->ifm_status = IFM_AVALID;
12912 ifmr->ifm_active = IFM_ETHER;
12913
12914 /* Check PCS */
12915 reg = CSR_READ(sc, WMREG_PCS_LSTS);
12916 if ((reg & PCS_LSTS_LINKOK) == 0) {
12917 ifmr->ifm_active |= IFM_NONE;
12918 sc->sc_tbi_linkup = 0;
12919 goto setled;
12920 }
12921
12922 sc->sc_tbi_linkup = 1;
12923 ifmr->ifm_status |= IFM_ACTIVE;
12924 if (sc->sc_type == WM_T_I354) {
12925 uint32_t status;
12926
12927 status = CSR_READ(sc, WMREG_STATUS);
12928 if (((status & STATUS_2P5_SKU) != 0)
12929 && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12930 ifmr->ifm_active |= IFM_2500_KX;
12931 } else
12932 ifmr->ifm_active |= IFM_1000_KX;
12933 } else {
12934 switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
12935 case PCS_LSTS_SPEED_10:
12936 ifmr->ifm_active |= IFM_10_T; /* XXX */
12937 break;
12938 case PCS_LSTS_SPEED_100:
12939 ifmr->ifm_active |= IFM_100_FX; /* XXX */
12940 break;
12941 case PCS_LSTS_SPEED_1000:
12942 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12943 break;
12944 default:
12945 device_printf(sc->sc_dev, "Unknown speed\n");
12946 ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12947 break;
12948 }
12949 }
12950 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
12951 if ((reg & PCS_LSTS_FDX) != 0)
12952 ifmr->ifm_active |= IFM_FDX;
12953 else
12954 ifmr->ifm_active |= IFM_HDX;
12955 mii->mii_media_active &= ~IFM_ETH_FMASK;
12956 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
12957 /* Check flow */
12958 reg = CSR_READ(sc, WMREG_PCS_LSTS);
12959 if ((reg & PCS_LSTS_AN_COMP) == 0) {
12960 DPRINTF(sc, WM_DEBUG_LINK,
12961 ("XXX LINKOK but not ACOMP\n"));
12962 goto setled;
12963 }
12964 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
12965 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
12966 DPRINTF(sc, WM_DEBUG_LINK,
12967 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
12968 if ((pcs_adv & TXCW_SYM_PAUSE)
12969 && (pcs_lpab & TXCW_SYM_PAUSE)) {
12970 mii->mii_media_active |= IFM_FLOW
12971 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
12972 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
12973 && (pcs_adv & TXCW_ASYM_PAUSE)
12974 && (pcs_lpab & TXCW_SYM_PAUSE)
12975 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12976 mii->mii_media_active |= IFM_FLOW
12977 | IFM_ETH_TXPAUSE;
12978 } else if ((pcs_adv & TXCW_SYM_PAUSE)
12979 && (pcs_adv & TXCW_ASYM_PAUSE)
12980 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
12981 && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12982 mii->mii_media_active |= IFM_FLOW
12983 | IFM_ETH_RXPAUSE;
12984 }
12985 }
12986 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
12987 | (mii->mii_media_active & IFM_ETH_FMASK);
12988 setled:
12989 wm_tbi_serdes_set_linkled(sc);
12990 }
12991
12992 /*
12993 * wm_serdes_tick:
12994 *
12995 * Check the link on serdes devices.
12996 */
12997 static void
12998 wm_serdes_tick(struct wm_softc *sc)
12999 {
13000 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
13001 struct mii_data *mii = &sc->sc_mii;
13002 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13003 uint32_t reg;
13004
13005 KASSERT(WM_CORE_LOCKED(sc));
13006
13007 mii->mii_media_status = IFM_AVALID;
13008 mii->mii_media_active = IFM_ETHER;
13009
13010 /* Check PCS */
13011 reg = CSR_READ(sc, WMREG_PCS_LSTS);
13012 if ((reg & PCS_LSTS_LINKOK) != 0) {
13013 mii->mii_media_status |= IFM_ACTIVE;
13014 sc->sc_tbi_linkup = 1;
13015 sc->sc_tbi_serdes_ticks = 0;
13016 mii->mii_media_active |= IFM_1000_SX; /* XXX */
13017 if ((reg & PCS_LSTS_FDX) != 0)
13018 mii->mii_media_active |= IFM_FDX;
13019 else
13020 mii->mii_media_active |= IFM_HDX;
13021 } else {
13022 mii->mii_media_status |= IFM_NONE;
13023 sc->sc_tbi_linkup = 0;
13024 /* If the timer expired, retry autonegotiation */
13025 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13026 && (++sc->sc_tbi_serdes_ticks
13027 >= sc->sc_tbi_serdes_anegticks)) {
13028 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13029 device_xname(sc->sc_dev), __func__));
13030 sc->sc_tbi_serdes_ticks = 0;
13031 /* XXX */
13032 wm_serdes_mediachange(ifp);
13033 }
13034 }
13035
13036 wm_tbi_serdes_set_linkled(sc);
13037 }
13038
13039 /* SFP related */
13040
13041 static int
13042 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
13043 {
13044 uint32_t i2ccmd;
13045 int i;
13046
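	/*
	 * Build a one-byte read command: the offset selects the byte
	 * within the SFP module's ID EEPROM and the controller runs the
	 * I2C transaction itself; we only poll for completion below.
	 */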
13047 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
13048 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13049
13050 /* Poll the ready bit */
13051 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13052 delay(50);
13053 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13054 if (i2ccmd & I2CCMD_READY)
13055 break;
13056 }
13057 if ((i2ccmd & I2CCMD_READY) == 0)
13058 return -1;
13059 if ((i2ccmd & I2CCMD_ERROR) != 0)
13060 return -1;
13061
13062 *data = i2ccmd & 0x00ff;
13063
13064 return 0;
13065 }
13066
13067 static uint32_t
13068 wm_sfp_get_media_type(struct wm_softc *sc)
13069 {
13070 uint32_t ctrl_ext;
13071 uint8_t val = 0;
13072 int timeout = 3;
13073 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
13074 int rv = -1;
13075
13076 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13077 ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13078 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
13079 CSR_WRITE_FLUSH(sc);
13080
13081 /* Read SFP module data */
13082 while (timeout) {
13083 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
13084 if (rv == 0)
13085 break;
13086 delay(100*1000); /* XXX too big */
13087 timeout--;
13088 }
13089 if (rv != 0)
13090 goto out;
13091
13092 switch (val) {
13093 case SFF_SFP_ID_SFF:
13094 aprint_normal_dev(sc->sc_dev,
13095 "Module/Connector soldered to board\n");
13096 break;
13097 case SFF_SFP_ID_SFP:
13098 sc->sc_flags |= WM_F_SFP;
13099 break;
13100 case SFF_SFP_ID_UNKNOWN:
13101 goto out;
13102 default:
13103 break;
13104 }
13105
13106 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
13107 if (rv != 0)
13108 goto out;
13109
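	/*
	 * The flags byte carries the SFF Ethernet compliance codes;
	 * the checks below map them onto the driver's media types.
	 */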
13110 sc->sc_sfptype = val;
13111 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
13112 mediatype = WM_MEDIATYPE_SERDES;
13113 else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
13114 sc->sc_flags |= WM_F_SGMII;
13115 mediatype = WM_MEDIATYPE_COPPER;
13116 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
13117 sc->sc_flags |= WM_F_SGMII;
13118 mediatype = WM_MEDIATYPE_SERDES;
13119 } else {
13120 device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
13121 __func__, sc->sc_sfptype);
13122 sc->sc_sfptype = 0; /* XXX unknown */
13123 }
13124
13125 out:
13126 /* Restore I2C interface setting */
13127 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13128
13129 return mediatype;
13130 }
13131
13132 /*
13133 * NVM related.
13134 * Microwire, SPI (w/wo EERD) and Flash.
13135 */
13136
13137 /* Both spi and uwire */
13138
13139 /*
13140 * wm_eeprom_sendbits:
13141 *
13142 * Send a series of bits to the EEPROM.
13143 */
13144 static void
13145 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
13146 {
13147 uint32_t reg;
13148 int x;
13149
13150 reg = CSR_READ(sc, WMREG_EECD);
13151
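	/*
	 * Clock the bits out MSB first: present each bit on DI, then
	 * pulse SK high and low, pausing briefly around each edge.
	 */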
13152 for (x = nbits; x > 0; x--) {
13153 if (bits & (1U << (x - 1)))
13154 reg |= EECD_DI;
13155 else
13156 reg &= ~EECD_DI;
13157 CSR_WRITE(sc, WMREG_EECD, reg);
13158 CSR_WRITE_FLUSH(sc);
13159 delay(2);
13160 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13161 CSR_WRITE_FLUSH(sc);
13162 delay(2);
13163 CSR_WRITE(sc, WMREG_EECD, reg);
13164 CSR_WRITE_FLUSH(sc);
13165 delay(2);
13166 }
13167 }
13168
13169 /*
13170 * wm_eeprom_recvbits:
13171 *
13172 * Receive a series of bits from the EEPROM.
13173 */
13174 static void
13175 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
13176 {
13177 uint32_t reg, val;
13178 int x;
13179
13180 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
13181
13182 val = 0;
13183 for (x = nbits; x > 0; x--) {
13184 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13185 CSR_WRITE_FLUSH(sc);
13186 delay(2);
13187 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
13188 val |= (1U << (x - 1));
13189 CSR_WRITE(sc, WMREG_EECD, reg);
13190 CSR_WRITE_FLUSH(sc);
13191 delay(2);
13192 }
13193 *valp = val;
13194 }
13195
13196 /* Microwire */
13197
13198 /*
13199 * wm_nvm_read_uwire:
13200 *
13201 * Read a word from the EEPROM using the MicroWire protocol.
13202 */
13203 static int
13204 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13205 {
13206 uint32_t reg, val;
13207 int i;
13208
13209 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13210 device_xname(sc->sc_dev), __func__));
13211
13212 if (sc->nvm.acquire(sc) != 0)
13213 return -1;
13214
13215 for (i = 0; i < wordcnt; i++) {
13216 /* Clear SK and DI. */
13217 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
13218 CSR_WRITE(sc, WMREG_EECD, reg);
13219
13220 /*
13221 * XXX: workaround for a bug in qemu-0.12.x and prior
13222 * and Xen.
13223 *
13224 * We use this workaround only for 82540 because qemu's
13225 * e1000 act as 82540.
13226 */
13227 if (sc->sc_type == WM_T_82540) {
13228 reg |= EECD_SK;
13229 CSR_WRITE(sc, WMREG_EECD, reg);
13230 reg &= ~EECD_SK;
13231 CSR_WRITE(sc, WMREG_EECD, reg);
13232 CSR_WRITE_FLUSH(sc);
13233 delay(2);
13234 }
13235 /* XXX: end of workaround */
13236
13237 /* Set CHIP SELECT. */
13238 reg |= EECD_CS;
13239 CSR_WRITE(sc, WMREG_EECD, reg);
13240 CSR_WRITE_FLUSH(sc);
13241 delay(2);
13242
13243 /* Shift in the READ command. */
13244 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
13245
13246 /* Shift in address. */
13247 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
13248
13249 /* Shift out the data. */
13250 wm_eeprom_recvbits(sc, &val, 16);
13251 data[i] = val & 0xffff;
13252
13253 /* Clear CHIP SELECT. */
13254 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
13255 CSR_WRITE(sc, WMREG_EECD, reg);
13256 CSR_WRITE_FLUSH(sc);
13257 delay(2);
13258 }
13259
13260 sc->nvm.release(sc);
13261 return 0;
13262 }
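
/*
 * For reference, a sketch of one Microwire READ transaction as produced
 * above: with 8 address bits, reading word 3 shifts out the 3-bit READ
 * opcode, then the address bits 00000011, and then clocks in 16 data
 * bits, MSB first.
 */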
13263
13264 /* SPI */
13265
13266 /*
13267 * Set SPI and FLASH related information from the EECD register.
13268 * For 82541 and 82547, the word size is taken from EEPROM.
13269 */
13270 static int
13271 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
13272 {
13273 int size;
13274 uint32_t reg;
13275 uint16_t data;
13276
13277 reg = CSR_READ(sc, WMREG_EECD);
13278 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
13279
13280 /* Read the size of NVM from EECD by default */
13281 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13282 switch (sc->sc_type) {
13283 case WM_T_82541:
13284 case WM_T_82541_2:
13285 case WM_T_82547:
13286 case WM_T_82547_2:
13287 /* Set dummy value to access EEPROM */
13288 sc->sc_nvm_wordsize = 64;
13289 if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
13290 aprint_error_dev(sc->sc_dev,
13291 "%s: failed to read EEPROM size\n", __func__);
13292 }
13293 reg = data;
13294 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13295 if (size == 0)
13296 size = 6; /* 64 word size */
13297 else
13298 size += NVM_WORD_SIZE_BASE_SHIFT + 1;
13299 break;
13300 case WM_T_80003:
13301 case WM_T_82571:
13302 case WM_T_82572:
13303 case WM_T_82573: /* SPI case */
13304 case WM_T_82574: /* SPI case */
13305 case WM_T_82583: /* SPI case */
13306 size += NVM_WORD_SIZE_BASE_SHIFT;
13307 if (size > 14)
13308 size = 14;
13309 break;
13310 case WM_T_82575:
13311 case WM_T_82576:
13312 case WM_T_82580:
13313 case WM_T_I350:
13314 case WM_T_I354:
13315 case WM_T_I210:
13316 case WM_T_I211:
13317 size += NVM_WORD_SIZE_BASE_SHIFT;
13318 if (size > 15)
13319 size = 15;
13320 break;
13321 default:
13322 aprint_error_dev(sc->sc_dev,
13323 "%s: unknown device(%d)?\n", __func__, sc->sc_type);
13324 return -1;
13326 }
13327
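	/*
	 * The size field is an exponent: for example, if
	 * NVM_WORD_SIZE_BASE_SHIFT is 6, a raw field of 1 on an 82571
	 * yields 1 << 7 = 128 words (256 bytes).
	 */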
13328 sc->sc_nvm_wordsize = 1 << size;
13329
13330 return 0;
13331 }
13332
13333 /*
13334 * wm_nvm_ready_spi:
13335 *
13336 * Wait for a SPI EEPROM to be ready for commands.
13337 */
13338 static int
13339 wm_nvm_ready_spi(struct wm_softc *sc)
13340 {
13341 uint32_t val;
13342 int usec;
13343
13344 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13345 device_xname(sc->sc_dev), __func__));
13346
13347 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
13348 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
13349 wm_eeprom_recvbits(sc, &val, 8);
13350 if ((val & SPI_SR_RDY) == 0)
13351 break;
13352 }
13353 if (usec >= SPI_MAX_RETRIES) {
13354 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
13355 return -1;
13356 }
13357 return 0;
13358 }
13359
13360 /*
13361 * wm_nvm_read_spi:
13362 *
13363  *	Read a word from the EEPROM using the SPI protocol.
13364 */
13365 static int
13366 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13367 {
13368 uint32_t reg, val;
13369 int i;
13370 uint8_t opc;
13371 int rv = 0;
13372
13373 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13374 device_xname(sc->sc_dev), __func__));
13375
13376 if (sc->nvm.acquire(sc) != 0)
13377 return -1;
13378
13379 /* Clear SK and CS. */
13380 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
13381 CSR_WRITE(sc, WMREG_EECD, reg);
13382 CSR_WRITE_FLUSH(sc);
13383 delay(2);
13384
13385 if ((rv = wm_nvm_ready_spi(sc)) != 0)
13386 goto out;
13387
13388 /* Toggle CS to flush commands. */
13389 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
13390 CSR_WRITE_FLUSH(sc);
13391 delay(2);
13392 CSR_WRITE(sc, WMREG_EECD, reg);
13393 CSR_WRITE_FLUSH(sc);
13394 delay(2);
13395
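	/*
	 * Small SPI parts use 8 address bits plus an A8 bit carried in
	 * the opcode; words at or above 128 have byte addresses of 0x100
	 * and up, so those need A8 set.
	 */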
13396 opc = SPI_OPC_READ;
13397 if (sc->sc_nvm_addrbits == 8 && word >= 128)
13398 opc |= SPI_OPC_A8;
13399
13400 wm_eeprom_sendbits(sc, opc, 8);
13401 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
13402
13403 for (i = 0; i < wordcnt; i++) {
13404 wm_eeprom_recvbits(sc, &val, 16);
13405 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
13406 }
13407
13408 /* Raise CS and clear SK. */
13409 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
13410 CSR_WRITE(sc, WMREG_EECD, reg);
13411 CSR_WRITE_FLUSH(sc);
13412 delay(2);
13413
13414 out:
13415 sc->nvm.release(sc);
13416 return rv;
13417 }
13418
13419 /* Reading with EERD */
13420
13421 static int
13422 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
13423 {
13424 uint32_t attempts = 100000;
13425 uint32_t i, reg = 0;
13426 int32_t done = -1;
13427
13428 for (i = 0; i < attempts; i++) {
13429 reg = CSR_READ(sc, rw);
13430
13431 if (reg & EERD_DONE) {
13432 done = 0;
13433 break;
13434 }
13435 delay(5);
13436 }
13437
13438 return done;
13439 }
13440
13441 static int
13442 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
13443 {
13444 int i, eerd = 0;
13445 int rv = 0;
13446
13447 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13448 device_xname(sc->sc_dev), __func__));
13449
13450 if (sc->nvm.acquire(sc) != 0)
13451 return -1;
13452
13453 for (i = 0; i < wordcnt; i++) {
13454 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
13455 CSR_WRITE(sc, WMREG_EERD, eerd);
13456 rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
13457 if (rv != 0) {
13458 aprint_error_dev(sc->sc_dev, "EERD polling failed: "
13459 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
13460 break;
13461 }
13462 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
13463 }
13464
13465 sc->nvm.release(sc);
13466 return rv;
13467 }
13468
13469 /* Flash */
13470
13471 static int
13472 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
13473 {
13474 uint32_t eecd;
13475 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
13476 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
13477 uint32_t nvm_dword = 0;
13478 uint8_t sig_byte = 0;
13479 int rv;
13480
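	/*
	 * Each flash bank stores a signature in the upper byte of its
	 * NVM signature word; the bank whose signature matches
	 * ICH_NVM_SIG_VALUE is the one the hardware treats as valid.
	 */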
13481 switch (sc->sc_type) {
13482 case WM_T_PCH_SPT:
13483 case WM_T_PCH_CNP:
13484 bank1_offset = sc->sc_ich8_flash_bank_size * 2;
13485 act_offset = ICH_NVM_SIG_WORD * 2;
13486
13487 /* Set bank to 0 in case flash read fails. */
13488 *bank = 0;
13489
13490 /* Check bank 0 */
13491 rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
13492 if (rv != 0)
13493 return rv;
13494 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13495 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13496 *bank = 0;
13497 return 0;
13498 }
13499
13500 		/* Check bank 1 */
13501 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
13502 		    &nvm_dword);
		if (rv != 0)
			return rv;
13503 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13504 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13505 *bank = 1;
13506 return 0;
13507 }
13508 aprint_error_dev(sc->sc_dev,
13509 "%s: no valid NVM bank present (%u)\n", __func__, *bank);
13510 return -1;
13511 case WM_T_ICH8:
13512 case WM_T_ICH9:
13513 eecd = CSR_READ(sc, WMREG_EECD);
13514 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
13515 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
13516 return 0;
13517 }
13518 /* FALLTHROUGH */
13519 default:
13520 /* Default to 0 */
13521 *bank = 0;
13522
13523 /* Check bank 0 */
13524 wm_read_ich8_byte(sc, act_offset, &sig_byte);
13525 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13526 *bank = 0;
13527 return 0;
13528 }
13529
13530 /* Check bank 1 */
13531 wm_read_ich8_byte(sc, act_offset + bank1_offset,
13532 &sig_byte);
13533 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13534 *bank = 1;
13535 return 0;
13536 }
13537 }
13538
13539 DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
13540 device_xname(sc->sc_dev)));
13541 return -1;
13542 }
13543
13544 /******************************************************************************
13545 * This function does initial flash setup so that a new read/write/erase cycle
13546 * can be started.
13547 *
13548 * sc - The pointer to the hw structure
13549 ****************************************************************************/
13550 static int32_t
13551 wm_ich8_cycle_init(struct wm_softc *sc)
13552 {
13553 uint16_t hsfsts;
13554 int32_t error = 1;
13555 int32_t i = 0;
13556
13557 if (sc->sc_type >= WM_T_PCH_SPT)
13558 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
13559 else
13560 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13561
13562 	/* Check that the Flash Descriptor Valid bit is set in HW status */
13563 if ((hsfsts & HSFSTS_FLDVAL) == 0)
13564 return error;
13565
13566 /* Clear FCERR in Hw status by writing 1 */
13567 /* Clear DAEL in Hw status by writing a 1 */
13568 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
13569
13570 if (sc->sc_type >= WM_T_PCH_SPT)
13571 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
13572 else
13573 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13574
13575 	/*
13576 	 * Either we should have a hardware SPI cycle-in-progress bit to
13577 	 * check against in order to start a new cycle, or the FDONE bit
13578 	 * should be changed in the hardware so that it is 1 after a hardware
13579 	 * reset, which could then be used to tell whether a cycle is in
13580 	 * progress or has completed.  We should also have some software
13581 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
13582 	 * so that accesses by two threads are serialized, or some way to
13583 	 * keep two threads from starting a cycle at the same time.
13584 	 */
13585
13586 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
13587 /*
13588 * There is no cycle running at present, so we can start a
13589 * cycle
13590 */
13591
13592 /* Begin by setting Flash Cycle Done. */
13593 hsfsts |= HSFSTS_DONE;
13594 if (sc->sc_type >= WM_T_PCH_SPT)
13595 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13596 hsfsts & 0xffffUL);
13597 else
13598 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13599 error = 0;
13600 } else {
13601 /*
13602 * Otherwise poll for sometime so the current cycle has a
13603 * chance to end before giving up.
13604 */
13605 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
13606 if (sc->sc_type >= WM_T_PCH_SPT)
13607 hsfsts = ICH8_FLASH_READ32(sc,
13608 ICH_FLASH_HSFSTS) & 0xffffUL;
13609 else
13610 hsfsts = ICH8_FLASH_READ16(sc,
13611 ICH_FLASH_HSFSTS);
13612 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
13613 error = 0;
13614 break;
13615 }
13616 delay(1);
13617 }
13618 if (error == 0) {
13619 /*
13620 			 * The previous cycle completed within the timeout;
13621 			 * now set the Flash Cycle Done.
13622 */
13623 hsfsts |= HSFSTS_DONE;
13624 if (sc->sc_type >= WM_T_PCH_SPT)
13625 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13626 hsfsts & 0xffffUL);
13627 else
13628 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
13629 hsfsts);
13630 }
13631 }
13632 return error;
13633 }
13634
13635 /******************************************************************************
13636 * This function starts a flash cycle and waits for its completion
13637 *
13638 * sc - The pointer to the hw structure
13639 ****************************************************************************/
13640 static int32_t
13641 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
13642 {
13643 uint16_t hsflctl;
13644 uint16_t hsfsts;
13645 int32_t error = 1;
13646 uint32_t i = 0;
13647
13648 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
13649 if (sc->sc_type >= WM_T_PCH_SPT)
13650 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
13651 else
13652 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
13653 hsflctl |= HSFCTL_GO;
13654 if (sc->sc_type >= WM_T_PCH_SPT)
13655 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13656 (uint32_t)hsflctl << 16);
13657 else
13658 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
13659
13660 /* Wait till FDONE bit is set to 1 */
13661 do {
13662 if (sc->sc_type >= WM_T_PCH_SPT)
13663 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
13664 & 0xffffUL;
13665 else
13666 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13667 if (hsfsts & HSFSTS_DONE)
13668 break;
13669 delay(1);
13670 i++;
13671 } while (i < timeout);
13672 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
13673 error = 0;
13674
13675 return error;
13676 }
13677
13678 /******************************************************************************
13679 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
13680 *
13681 * sc - The pointer to the hw structure
13682 * index - The index of the byte or word to read.
13683 * size - Size of data to read, 1=byte 2=word, 4=dword
13684 * data - Pointer to the word to store the value read.
13685 *****************************************************************************/
13686 static int32_t
13687 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
13688 uint32_t size, uint32_t *data)
13689 {
13690 uint16_t hsfsts;
13691 uint16_t hsflctl;
13692 uint32_t flash_linear_address;
13693 uint32_t flash_data = 0;
13694 int32_t error = 1;
13695 int32_t count = 0;
13696
13697 	if (size < 1 || size > 4 || data == NULL ||
13698 index > ICH_FLASH_LINEAR_ADDR_MASK)
13699 return error;
13700
13701 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
13702 sc->sc_ich8_flash_base;
13703
13704 do {
13705 delay(1);
13706 /* Steps */
13707 error = wm_ich8_cycle_init(sc);
13708 if (error)
13709 break;
13710
13711 if (sc->sc_type >= WM_T_PCH_SPT)
13712 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
13713 >> 16;
13714 else
13715 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
13716 		/* BCOUNT encodes the byte count minus one: 0 = 1 byte, 3 = 4 bytes */
13717 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
13718 & HSFCTL_BCOUNT_MASK;
13719 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
13720 if (sc->sc_type >= WM_T_PCH_SPT) {
13721 /*
13722 * In SPT, This register is in Lan memory space, not
13723 * flash. Therefore, only 32 bit access is supported.
13724 */
13725 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13726 (uint32_t)hsflctl << 16);
13727 } else
13728 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
13729
13730 /*
13731 * Write the last 24 bits of index into Flash Linear address
13732 * field in Flash Address
13733 */
13734 		/* TODO: maybe check the index against the size of the flash */
13735
13736 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
13737
13738 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
13739
13740 		/*
13741 		 * If FCERR is set, clear it and retry the whole
13742 		 * sequence a few more times; otherwise read the
13743 		 * result out of the Flash Data0 register, least
13744 		 * significant byte first.
13745 		 */
13746 if (error == 0) {
13747 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
13748 if (size == 1)
13749 *data = (uint8_t)(flash_data & 0x000000FF);
13750 else if (size == 2)
13751 *data = (uint16_t)(flash_data & 0x0000FFFF);
13752 else if (size == 4)
13753 *data = (uint32_t)flash_data;
13754 break;
13755 } else {
13756 /*
13757 * If we've gotten here, then things are probably
13758 * completely hosed, but if the error condition is
13759 * detected, it won't hurt to give it another try...
13760 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
13761 */
13762 if (sc->sc_type >= WM_T_PCH_SPT)
13763 hsfsts = ICH8_FLASH_READ32(sc,
13764 ICH_FLASH_HSFSTS) & 0xffffUL;
13765 else
13766 hsfsts = ICH8_FLASH_READ16(sc,
13767 ICH_FLASH_HSFSTS);
13768
13769 if (hsfsts & HSFSTS_ERR) {
13770 /* Repeat for some time before giving up. */
13771 continue;
13772 } else if ((hsfsts & HSFSTS_DONE) == 0)
13773 break;
13774 }
13775 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
13776
13777 return error;
13778 }
13779
13780 /******************************************************************************
13781 * Reads a single byte from the NVM using the ICH8 flash access registers.
13782 *
13783 * sc - pointer to wm_hw structure
13784 * index - The index of the byte to read.
13785 * data - Pointer to a byte to store the value read.
13786 *****************************************************************************/
13787 static int32_t
13788 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
13789 {
13790 int32_t status;
13791 uint32_t word = 0;
13792
13793 status = wm_read_ich8_data(sc, index, 1, &word);
13794 if (status == 0)
13795 *data = (uint8_t)word;
13796 else
13797 *data = 0;
13798
13799 return status;
13800 }
13801
13802 /******************************************************************************
13803 * Reads a word from the NVM using the ICH8 flash access registers.
13804 *
13805 * sc - pointer to wm_hw structure
13806 * index - The starting byte index of the word to read.
13807 * data - Pointer to a word to store the value read.
13808 *****************************************************************************/
13809 static int32_t
13810 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
13811 {
13812 int32_t status;
13813 uint32_t word = 0;
13814
13815 status = wm_read_ich8_data(sc, index, 2, &word);
13816 if (status == 0)
13817 *data = (uint16_t)word;
13818 else
13819 *data = 0;
13820
13821 return status;
13822 }
13823
13824 /******************************************************************************
13825 * Reads a dword from the NVM using the ICH8 flash access registers.
13826 *
13827 * sc - pointer to wm_hw structure
13828 * index - The starting byte index of the word to read.
13829 * data - Pointer to a word to store the value read.
13830 *****************************************************************************/
13831 static int32_t
13832 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
13833 {
13834 int32_t status;
13835
13836 status = wm_read_ich8_data(sc, index, 4, data);
13837 return status;
13838 }
13839
13840 /******************************************************************************
13841 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
13842 * register.
13843 *
13844 * sc - Struct containing variables accessed by shared code
13845 * offset - offset of word in the EEPROM to read
13846 * data - word read from the EEPROM
13847 * words - number of words to read
13848 *****************************************************************************/
13849 static int
13850 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
13851 {
13852 int32_t rv = 0;
13853 uint32_t flash_bank = 0;
13854 uint32_t act_offset = 0;
13855 uint32_t bank_offset = 0;
13856 uint16_t word = 0;
13857 uint16_t i = 0;
13858
13859 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13860 device_xname(sc->sc_dev), __func__));
13861
13862 if (sc->nvm.acquire(sc) != 0)
13863 return -1;
13864
13865 /*
13866 * We need to know which is the valid flash bank. In the event
13867 * that we didn't allocate eeprom_shadow_ram, we may not be
13868 * managing flash_bank. So it cannot be trusted and needs
13869 * to be updated with each read.
13870 */
13871 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13872 if (rv) {
13873 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13874 device_xname(sc->sc_dev)));
13875 flash_bank = 0;
13876 }
13877
13878 /*
13879 * Adjust offset appropriately if we're on bank 1 - adjust for word
13880 * size
13881 */
13882 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13883
13884 for (i = 0; i < words; i++) {
13885 /* The NVM part needs a byte offset, hence * 2 */
13886 act_offset = bank_offset + ((offset + i) * 2);
13887 rv = wm_read_ich8_word(sc, act_offset, &word);
13888 if (rv) {
13889 aprint_error_dev(sc->sc_dev,
13890 "%s: failed to read NVM\n", __func__);
13891 break;
13892 }
13893 data[i] = word;
13894 }
13895
13896 sc->nvm.release(sc);
13897 return rv;
13898 }
13899
13900 /******************************************************************************
13901 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
13902 * register.
13903 *
13904 * sc - Struct containing variables accessed by shared code
13905 * offset - offset of word in the EEPROM to read
13906 * data - word read from the EEPROM
13907 * words - number of words to read
13908 *****************************************************************************/
13909 static int
13910 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
13911 {
13912 int32_t rv = 0;
13913 uint32_t flash_bank = 0;
13914 uint32_t act_offset = 0;
13915 uint32_t bank_offset = 0;
13916 uint32_t dword = 0;
13917 uint16_t i = 0;
13918
13919 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13920 device_xname(sc->sc_dev), __func__));
13921
13922 if (sc->nvm.acquire(sc) != 0)
13923 return -1;
13924
13925 /*
13926 * We need to know which is the valid flash bank. In the event
13927 * that we didn't allocate eeprom_shadow_ram, we may not be
13928 * managing flash_bank. So it cannot be trusted and needs
13929 * to be updated with each read.
13930 */
13931 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13932 if (rv) {
13933 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13934 device_xname(sc->sc_dev)));
13935 flash_bank = 0;
13936 }
13937
13938 /*
13939 * Adjust offset appropriately if we're on bank 1 - adjust for word
13940 * size
13941 */
13942 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13943
13944 for (i = 0; i < words; i++) {
13945 /* The NVM part needs a byte offset, hence * 2 */
13946 act_offset = bank_offset + ((offset + i) * 2);
13947 /* but we must read dword aligned, so mask ... */
13948 rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
13949 if (rv) {
13950 aprint_error_dev(sc->sc_dev,
13951 "%s: failed to read NVM\n", __func__);
13952 break;
13953 }
13954 /* ... and pick out low or high word */
13955 if ((act_offset & 0x2) == 0)
13956 data[i] = (uint16_t)(dword & 0xFFFF);
13957 else
13958 data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
13959 }
13960
13961 sc->nvm.release(sc);
13962 return rv;
13963 }
13964
13965 /* iNVM */
13966
13967 static int
13968 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
13969 {
13970 int32_t rv = 0;
13971 uint32_t invm_dword;
13972 uint16_t i;
13973 uint8_t record_type, word_address;
13974
13975 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13976 device_xname(sc->sc_dev), __func__));
13977
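	/*
	 * The iNVM is a stream of dword records: skip over autoload
	 * structures by advancing past their payload dwords, and stop at
	 * a word-autoload record matching the requested address.
	 */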
13978 for (i = 0; i < INVM_SIZE; i++) {
13979 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
13980 /* Get record type */
13981 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
13982 if (record_type == INVM_UNINITIALIZED_STRUCTURE)
13983 break;
13984 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
13985 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
13986 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
13987 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
13988 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
13989 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
13990 if (word_address == address) {
13991 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
13992 rv = 0;
13993 break;
13994 }
13995 }
13996 }
13997
13998 return rv;
13999 }
14000
14001 static int
14002 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
14003 {
14004 int rv = 0;
14005 int i;
14006
14007 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14008 device_xname(sc->sc_dev), __func__));
14009
14010 if (sc->nvm.acquire(sc) != 0)
14011 return -1;
14012
14013 for (i = 0; i < words; i++) {
14014 switch (offset + i) {
14015 case NVM_OFF_MACADDR:
14016 case NVM_OFF_MACADDR1:
14017 case NVM_OFF_MACADDR2:
14018 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14019 if (rv != 0) {
14020 data[i] = 0xffff;
14021 rv = -1;
14022 }
14023 break;
14024 case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
14025 rv = wm_nvm_read_word_invm(sc, offset, data);
14026 if (rv != 0) {
14027 *data = INVM_DEFAULT_AL;
14028 rv = 0;
14029 }
14030 break;
14031 case NVM_OFF_CFG2:
14032 rv = wm_nvm_read_word_invm(sc, offset, data);
14033 if (rv != 0) {
14034 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
14035 rv = 0;
14036 }
14037 break;
14038 case NVM_OFF_CFG4:
14039 rv = wm_nvm_read_word_invm(sc, offset, data);
14040 if (rv != 0) {
14041 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
14042 rv = 0;
14043 }
14044 break;
14045 case NVM_OFF_LED_1_CFG:
14046 rv = wm_nvm_read_word_invm(sc, offset, data);
14047 if (rv != 0) {
14048 *data = NVM_LED_1_CFG_DEFAULT_I211;
14049 rv = 0;
14050 }
14051 break;
14052 case NVM_OFF_LED_0_2_CFG:
14053 rv = wm_nvm_read_word_invm(sc, offset, data);
14054 if (rv != 0) {
14055 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
14056 rv = 0;
14057 }
14058 break;
14059 case NVM_OFF_ID_LED_SETTINGS:
14060 rv = wm_nvm_read_word_invm(sc, offset, data);
14061 if (rv != 0) {
14062 *data = ID_LED_RESERVED_FFFF;
14063 rv = 0;
14064 }
14065 break;
14066 default:
14067 DPRINTF(sc, WM_DEBUG_NVM,
14068 ("NVM word 0x%02x is not mapped.\n", offset));
14069 *data = NVM_RESERVED_WORD;
14070 break;
14071 }
14072 }
14073
14074 sc->nvm.release(sc);
14075 return rv;
14076 }
14077
14078 /* Lock, detect NVM type, validate checksum, get version and read */
14079
14080 static int
14081 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
14082 {
14083 uint32_t eecd = 0;
14084
14085 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
14086 || sc->sc_type == WM_T_82583) {
14087 eecd = CSR_READ(sc, WMREG_EECD);
14088
14089 /* Isolate bits 15 & 16 */
14090 eecd = ((eecd >> 15) & 0x03);
14091
14092 /* If both bits are set, device is Flash type */
14093 if (eecd == 0x03)
14094 return 0;
14095 }
14096 return 1;
14097 }
14098
14099 static int
14100 wm_nvm_flash_presence_i210(struct wm_softc *sc)
14101 {
14102 uint32_t eec;
14103
14104 eec = CSR_READ(sc, WMREG_EEC);
14105 if ((eec & EEC_FLASH_DETECTED) != 0)
14106 return 1;
14107
14108 return 0;
14109 }
14110
14111 /*
14112 * wm_nvm_validate_checksum
14113 *
14114 * The checksum is defined as the sum of the first 64 (16 bit) words.
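 * All of them, including the stored checksum word itself, must sum to
 * the constant NVM_CHECKSUM.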
14115 */
14116 static int
14117 wm_nvm_validate_checksum(struct wm_softc *sc)
14118 {
14119 uint16_t checksum;
14120 uint16_t eeprom_data;
14121 #ifdef WM_DEBUG
14122 uint16_t csum_wordaddr, valid_checksum;
14123 #endif
14124 int i;
14125
14126 checksum = 0;
14127
14128 /* Don't check for I211 */
14129 if (sc->sc_type == WM_T_I211)
14130 return 0;
14131
14132 #ifdef WM_DEBUG
14133 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
14134 || (sc->sc_type == WM_T_PCH_CNP)) {
14135 csum_wordaddr = NVM_OFF_COMPAT;
14136 valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
14137 } else {
14138 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
14139 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
14140 }
14141
14142 /* Dump EEPROM image for debug */
14143 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
14144 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
14145 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
14146 /* XXX PCH_SPT? */
14147 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
14148 if ((eeprom_data & valid_checksum) == 0)
14149 DPRINTF(sc, WM_DEBUG_NVM,
14150 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
14151 device_xname(sc->sc_dev), eeprom_data,
14152 valid_checksum));
14153 }
14154
14155 if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
14156 printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
14157 for (i = 0; i < NVM_SIZE; i++) {
14158 if (wm_nvm_read(sc, i, 1, &eeprom_data))
14159 printf("XXXX ");
14160 else
14161 printf("%04hx ", eeprom_data);
14162 if (i % 8 == 7)
14163 printf("\n");
14164 }
14165 }
14166
14167 #endif /* WM_DEBUG */
14168
14169 for (i = 0; i < NVM_SIZE; i++) {
14170 if (wm_nvm_read(sc, i, 1, &eeprom_data))
14171 return 1;
14172 checksum += eeprom_data;
14173 }
14174
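	/*
	 * A mismatch is only reported, not treated as fatal; presumably
	 * some otherwise-working NVM images fail this check.
	 */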
14175 if (checksum != (uint16_t) NVM_CHECKSUM) {
14176 #ifdef WM_DEBUG
14177 printf("%s: NVM checksum mismatch (%04x != %04x)\n",
14178 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
14179 #endif
14180 }
14181
14182 return 0;
14183 }
14184
14185 static void
14186 wm_nvm_version_invm(struct wm_softc *sc)
14187 {
14188 uint32_t dword;
14189
14190 /*
14191 	 * Linux's code to decode the version is very strange, so we don't
14192 	 * follow that algorithm and just use word 61 as the document
14193 	 * describes.  Perhaps it's not perfect, though...
14194 *
14195 * Example:
14196 *
14197 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
14198 */
14199 dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
14200 dword = __SHIFTOUT(dword, INVM_VER_1);
14201 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
14202 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
14203 }
14204
14205 static void
14206 wm_nvm_version(struct wm_softc *sc)
14207 {
14208 uint16_t major, minor, build, patch;
14209 uint16_t uid0, uid1;
14210 uint16_t nvm_data;
14211 uint16_t off;
14212 bool check_version = false;
14213 bool check_optionrom = false;
14214 bool have_build = false;
14215 bool have_uid = true;
14216
14217 /*
14218 * Version format:
14219 *
14220 * XYYZ
14221 * X0YZ
14222 * X0YY
14223 *
14224 * Example:
14225 *
14226 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10)
14227 * 82571 0x50a6 5.10.6?
14228 * 82572 0x506a 5.6.10?
14229 * 82572EI 0x5069 5.6.9?
14230 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4)
14231 * 0x2013 2.1.3?
14232 * 82583 0x10a0 1.10.0? (document says it's default value)
14233 * ICH8+82567 0x0040 0.4.0?
14234 * ICH9+82566 0x1040 1.4.0?
14235 *ICH10+82567 0x0043 0.4.3?
14236 * PCH+82577 0x00c1 0.12.1?
14237 * PCH2+82579 0x00d3 0.13.3?
14238 * 0x00d4 0.13.4?
14239 * LPT+I218 0x0023 0.2.3?
14240 * SPT+I219 0x0084 0.8.4?
14241 * CNP+I219 0x0054 0.5.4?
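	 *
	 * For example, 0x50a2 decodes as major 5 (the top nibble), minor
	 * 0x0a converted from BCD to decimal 10, and build 2; that is how
	 * the 5.10.2 guess above is derived.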
14242 */
14243
14244 /*
14245 * XXX
14246 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
14247 * I've never seen real 82574 hardware with such small SPI ROM.
14248 */
14249 if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
14250 || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
14251 have_uid = false;
14252
14253 switch (sc->sc_type) {
14254 case WM_T_82571:
14255 case WM_T_82572:
14256 case WM_T_82574:
14257 case WM_T_82583:
14258 check_version = true;
14259 check_optionrom = true;
14260 have_build = true;
14261 break;
14262 case WM_T_ICH8:
14263 case WM_T_ICH9:
14264 case WM_T_ICH10:
14265 case WM_T_PCH:
14266 case WM_T_PCH2:
14267 case WM_T_PCH_LPT:
14268 case WM_T_PCH_SPT:
14269 case WM_T_PCH_CNP:
14270 check_version = true;
14271 have_build = true;
14272 have_uid = false;
14273 break;
14274 case WM_T_82575:
14275 case WM_T_82576:
14276 case WM_T_82580:
14277 if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
14278 check_version = true;
14279 break;
14280 case WM_T_I211:
14281 wm_nvm_version_invm(sc);
14282 have_uid = false;
14283 goto printver;
14284 case WM_T_I210:
14285 if (!wm_nvm_flash_presence_i210(sc)) {
14286 wm_nvm_version_invm(sc);
14287 have_uid = false;
14288 goto printver;
14289 }
14290 /* FALLTHROUGH */
14291 case WM_T_I350:
14292 case WM_T_I354:
14293 check_version = true;
14294 check_optionrom = true;
14295 break;
14296 default:
14297 return;
14298 }
14299 if (check_version
14300 && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
14301 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
14302 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
14303 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
14304 build = nvm_data & NVM_BUILD_MASK;
14305 have_build = true;
14306 } else
14307 minor = nvm_data & 0x00ff;
14308
14309 /* Decimal */
14310 minor = (minor / 16) * 10 + (minor % 16);
14311 sc->sc_nvm_ver_major = major;
14312 sc->sc_nvm_ver_minor = minor;
14313
14314 printver:
14315 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
14316 sc->sc_nvm_ver_minor);
14317 if (have_build) {
14318 sc->sc_nvm_ver_build = build;
14319 aprint_verbose(".%d", build);
14320 }
14321 }
14322
14323 	/* Assume the Option ROM area is above NVM_SIZE */
14324 if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
14325 && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
14326 /* Option ROM Version */
14327 if ((off != 0x0000) && (off != 0xffff)) {
14328 int rv;
14329
14330 off += NVM_COMBO_VER_OFF;
14331 rv = wm_nvm_read(sc, off + 1, 1, &uid1);
14332 rv |= wm_nvm_read(sc, off, 1, &uid0);
14333 if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
14334 && (uid1 != 0) && (uid1 != 0xffff)) {
14335 /* 16bits */
14336 major = uid0 >> 8;
14337 build = (uid0 << 8) | (uid1 >> 8);
14338 patch = uid1 & 0x00ff;
14339 aprint_verbose(", option ROM Version %d.%d.%d",
14340 major, build, patch);
14341 }
14342 }
14343 }
14344
14345 if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
14346 aprint_verbose(", Image Unique ID %08x",
14347 ((uint32_t)uid1 << 16) | uid0);
14348 }
14349
14350 /*
14351 * wm_nvm_read:
14352 *
14353 * Read data from the serial EEPROM.
14354 */
14355 static int
14356 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14357 {
14358 int rv;
14359
14360 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14361 device_xname(sc->sc_dev), __func__));
14362
14363 if (sc->sc_flags & WM_F_EEPROM_INVALID)
14364 return -1;
14365
14366 rv = sc->nvm.read(sc, word, wordcnt, data);
14367
14368 return rv;
14369 }
14370
14371 /*
14372 * Hardware semaphores.
14373  * Very complex...
14374 */
14375
14376 static int
14377 wm_get_null(struct wm_softc *sc)
14378 {
14379
14380 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14381 device_xname(sc->sc_dev), __func__));
14382 return 0;
14383 }
14384
14385 static void
14386 wm_put_null(struct wm_softc *sc)
14387 {
14388
14389 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14390 device_xname(sc->sc_dev), __func__));
14391 return;
14392 }
14393
14394 static int
14395 wm_get_eecd(struct wm_softc *sc)
14396 {
14397 uint32_t reg;
14398 int x;
14399
14400 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14401 device_xname(sc->sc_dev), __func__));
14402
14403 reg = CSR_READ(sc, WMREG_EECD);
14404
14405 /* Request EEPROM access. */
14406 reg |= EECD_EE_REQ;
14407 CSR_WRITE(sc, WMREG_EECD, reg);
14408
14409 	/* ... and wait for it to be granted. */
14410 for (x = 0; x < 1000; x++) {
14411 reg = CSR_READ(sc, WMREG_EECD);
14412 if (reg & EECD_EE_GNT)
14413 break;
14414 delay(5);
14415 }
14416 if ((reg & EECD_EE_GNT) == 0) {
14417 aprint_error_dev(sc->sc_dev,
14418 "could not acquire EEPROM GNT\n");
14419 reg &= ~EECD_EE_REQ;
14420 CSR_WRITE(sc, WMREG_EECD, reg);
14421 return -1;
14422 }
14423
14424 return 0;
14425 }
14426
14427 static void
14428 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
14429 {
14430
14431 *eecd |= EECD_SK;
14432 CSR_WRITE(sc, WMREG_EECD, *eecd);
14433 CSR_WRITE_FLUSH(sc);
14434 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14435 delay(1);
14436 else
14437 delay(50);
14438 }
14439
14440 static void
14441 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
14442 {
14443
14444 *eecd &= ~EECD_SK;
14445 CSR_WRITE(sc, WMREG_EECD, *eecd);
14446 CSR_WRITE_FLUSH(sc);
14447 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14448 delay(1);
14449 else
14450 delay(50);
14451 }
14452
14453 static void
14454 wm_put_eecd(struct wm_softc *sc)
14455 {
14456 uint32_t reg;
14457
14458 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14459 device_xname(sc->sc_dev), __func__));
14460
14461 /* Stop nvm */
14462 reg = CSR_READ(sc, WMREG_EECD);
14463 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
14464 /* Pull CS high */
14465 reg |= EECD_CS;
14466 		wm_nvm_eec_clock_lower(sc, &reg);
14467 } else {
14468 /* CS on Microwire is active-high */
14469 reg &= ~(EECD_CS | EECD_DI);
14470 CSR_WRITE(sc, WMREG_EECD, reg);
14471 		wm_nvm_eec_clock_raise(sc, &reg);
14472 		wm_nvm_eec_clock_lower(sc, &reg);
14473 }
14474
14475 reg = CSR_READ(sc, WMREG_EECD);
14476 reg &= ~EECD_EE_REQ;
14477 CSR_WRITE(sc, WMREG_EECD, reg);
14478
14479 return;
14480 }
14481
14482 /*
14483 * Get hardware semaphore.
14484 * Same as e1000_get_hw_semaphore_generic()
14485 */
14486 static int
14487 wm_get_swsm_semaphore(struct wm_softc *sc)
14488 {
14489 int32_t timeout;
14490 uint32_t swsm;
14491
14492 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14493 device_xname(sc->sc_dev), __func__));
14494 KASSERT(sc->sc_nvm_wordsize > 0);
14495
14496 retry:
14497 /* Get the SW semaphore. */
14498 timeout = sc->sc_nvm_wordsize + 1;
14499 while (timeout) {
14500 swsm = CSR_READ(sc, WMREG_SWSM);
14501
14502 if ((swsm & SWSM_SMBI) == 0)
14503 break;
14504
14505 delay(50);
14506 timeout--;
14507 }
14508
14509 if (timeout == 0) {
14510 if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
14511 /*
14512 * In rare circumstances, the SW semaphore may already
14513 * be held unintentionally. Clear the semaphore once
14514 * before giving up.
14515 */
14516 sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
14517 wm_put_swsm_semaphore(sc);
14518 goto retry;
14519 }
14520 aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
14521 return 1;
14522 }
14523
14524 /* Get the FW semaphore. */
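	/*
	 * SWESMBI can only be tested by writing it and reading it back:
	 * if the bit reads back as set, the write won and the driver,
	 * not the firmware, owns the SW/FW semaphore.
	 */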
14525 timeout = sc->sc_nvm_wordsize + 1;
14526 while (timeout) {
14527 swsm = CSR_READ(sc, WMREG_SWSM);
14528 swsm |= SWSM_SWESMBI;
14529 CSR_WRITE(sc, WMREG_SWSM, swsm);
14530 /* If we managed to set the bit we got the semaphore. */
14531 swsm = CSR_READ(sc, WMREG_SWSM);
14532 if (swsm & SWSM_SWESMBI)
14533 break;
14534
14535 delay(50);
14536 timeout--;
14537 }
14538
14539 if (timeout == 0) {
14540 aprint_error_dev(sc->sc_dev,
14541 "could not acquire SWSM SWESMBI\n");
14542 /* Release semaphores */
14543 wm_put_swsm_semaphore(sc);
14544 return 1;
14545 }
14546 return 0;
14547 }
14548
14549 /*
14550 * Put hardware semaphore.
14551 * Same as e1000_put_hw_semaphore_generic()
14552 */
14553 static void
14554 wm_put_swsm_semaphore(struct wm_softc *sc)
14555 {
14556 uint32_t swsm;
14557
14558 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14559 device_xname(sc->sc_dev), __func__));
14560
14561 swsm = CSR_READ(sc, WMREG_SWSM);
14562 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
14563 CSR_WRITE(sc, WMREG_SWSM, swsm);
14564 }
14565
14566 /*
14567 * Get SW/FW semaphore.
14568 * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
14569 */
14570 static int
14571 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
14572 {
14573 uint32_t swfw_sync;
14574 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
14575 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
14576 int timeout;
14577
14578 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14579 device_xname(sc->sc_dev), __func__));
14580
14581 if (sc->sc_type == WM_T_80003)
14582 timeout = 50;
14583 else
14584 timeout = 200;
14585
14586 while (timeout) {
14587 if (wm_get_swsm_semaphore(sc)) {
14588 aprint_error_dev(sc->sc_dev,
14589 "%s: failed to get semaphore\n",
14590 __func__);
14591 return 1;
14592 }
14593 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
14594 if ((swfw_sync & (swmask | fwmask)) == 0) {
14595 swfw_sync |= swmask;
14596 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
14597 wm_put_swsm_semaphore(sc);
14598 return 0;
14599 }
14600 wm_put_swsm_semaphore(sc);
14601 delay(5000);
14602 timeout--;
14603 }
14604 device_printf(sc->sc_dev,
14605 "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
14606 mask, swfw_sync);
14607 return 1;
14608 }
14609
14610 static void
14611 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
14612 {
14613 uint32_t swfw_sync;
14614
14615 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14616 device_xname(sc->sc_dev), __func__));
14617
14618 while (wm_get_swsm_semaphore(sc) != 0)
14619 continue;
14620
14621 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
14622 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
14623 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
14624
14625 wm_put_swsm_semaphore(sc);
14626 }
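
/*
 * For reference, a sketch of how callers are expected to pair the
 * acquire and release (SWFW_PHY0_SM is just an example mask):
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM) != 0)
 *		return -1;
 *	... access the resource guarded by the mask ...
 *	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
 */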
14627
14628 static int
14629 wm_get_nvm_80003(struct wm_softc *sc)
14630 {
14631 int rv;
14632
14633 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14634 device_xname(sc->sc_dev), __func__));
14635
14636 if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
14637 aprint_error_dev(sc->sc_dev,
14638 "%s: failed to get semaphore(SWFW)\n", __func__);
14639 return rv;
14640 }
14641
14642 if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14643 && (rv = wm_get_eecd(sc)) != 0) {
14644 aprint_error_dev(sc->sc_dev,
14645 "%s: failed to get semaphore(EECD)\n", __func__);
14646 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
14647 return rv;
14648 }
14649
14650 return 0;
14651 }
14652
14653 static void
14654 wm_put_nvm_80003(struct wm_softc *sc)
14655 {
14656
14657 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14658 device_xname(sc->sc_dev), __func__));
14659
14660 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14661 wm_put_eecd(sc);
14662 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
14663 }
14664
14665 static int
14666 wm_get_nvm_82571(struct wm_softc *sc)
14667 {
14668 int rv;
14669
14670 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14671 device_xname(sc->sc_dev), __func__));
14672
14673 if ((rv = wm_get_swsm_semaphore(sc)) != 0)
14674 return rv;
14675
14676 switch (sc->sc_type) {
14677 case WM_T_82573:
14678 break;
14679 default:
14680 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14681 rv = wm_get_eecd(sc);
14682 break;
14683 }
14684
14685 if (rv != 0) {
14686 aprint_error_dev(sc->sc_dev,
14687 "%s: failed to get semaphore\n",
14688 __func__);
14689 wm_put_swsm_semaphore(sc);
14690 }
14691
14692 return rv;
14693 }
14694
14695 static void
14696 wm_put_nvm_82571(struct wm_softc *sc)
14697 {
14698
14699 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14700 device_xname(sc->sc_dev), __func__));
14701
14702 switch (sc->sc_type) {
14703 case WM_T_82573:
14704 break;
14705 default:
14706 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14707 wm_put_eecd(sc);
14708 break;
14709 }
14710
14711 wm_put_swsm_semaphore(sc);
14712 }
14713
14714 static int
14715 wm_get_phy_82575(struct wm_softc *sc)
14716 {
14717
14718 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14719 device_xname(sc->sc_dev), __func__));
14720 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
14721 }
14722
14723 static void
14724 wm_put_phy_82575(struct wm_softc *sc)
14725 {
14726
14727 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14728 device_xname(sc->sc_dev), __func__));
14729 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
14730 }
14731
14732 static int
14733 wm_get_swfwhw_semaphore(struct wm_softc *sc)
14734 {
14735 uint32_t ext_ctrl;
14736 int timeout = 200;
14737
14738 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14739 device_xname(sc->sc_dev), __func__));
14740
14741 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14742 for (timeout = 0; timeout < 200; timeout++) {
14743 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14744 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14745 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14746
14747 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14748 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14749 return 0;
14750 delay(5000);
14751 }
14752 device_printf(sc->sc_dev,
14753 "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
14754 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14755 return 1;
14756 }
14757
14758 static void
14759 wm_put_swfwhw_semaphore(struct wm_softc *sc)
14760 {
14761 uint32_t ext_ctrl;
14762
14763 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14764 device_xname(sc->sc_dev), __func__));
14765
14766 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14767 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14768 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14769
14770 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14771 }
14772
14773 static int
14774 wm_get_swflag_ich8lan(struct wm_softc *sc)
14775 {
14776 uint32_t ext_ctrl;
14777 int timeout;
14778
14779 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14780 device_xname(sc->sc_dev), __func__));
14781 mutex_enter(sc->sc_ich_phymtx);
14782 for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
14783 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14784 if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
14785 break;
14786 delay(1000);
14787 }
14788 if (timeout >= WM_PHY_CFG_TIMEOUT) {
14789 device_printf(sc->sc_dev,
14790 "SW has already locked the resource\n");
14791 goto out;
14792 }
14793
14794 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14795 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14796 for (timeout = 0; timeout < 1000; timeout++) {
14797 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14798 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14799 break;
14800 delay(1000);
14801 }
14802 if (timeout >= 1000) {
14803 device_printf(sc->sc_dev, "failed to acquire semaphore\n");
14804 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14805 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14806 goto out;
14807 }
14808 return 0;
14809
14810 out:
14811 mutex_exit(sc->sc_ich_phymtx);
14812 return 1;
14813 }
14814
14815 static void
14816 wm_put_swflag_ich8lan(struct wm_softc *sc)
14817 {
14818 uint32_t ext_ctrl;
14819
14820 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14821 device_xname(sc->sc_dev), __func__));
14822 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14823 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
14824 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14825 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14826 } else
14827 device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
14828
14829 mutex_exit(sc->sc_ich_phymtx);
14830 }
14831
14832 static int
14833 wm_get_nvm_ich8lan(struct wm_softc *sc)
14834 {
14835
14836 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14837 device_xname(sc->sc_dev), __func__));
14838 mutex_enter(sc->sc_ich_nvmmtx);
14839
14840 return 0;
14841 }
14842
14843 static void
14844 wm_put_nvm_ich8lan(struct wm_softc *sc)
14845 {
14846
14847 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14848 device_xname(sc->sc_dev), __func__));
14849 mutex_exit(sc->sc_ich_nvmmtx);
14850 }
14851
14852 static int
14853 wm_get_hw_semaphore_82573(struct wm_softc *sc)
14854 {
14855 int i = 0;
14856 uint32_t reg;
14857
14858 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14859 device_xname(sc->sc_dev), __func__));
14860
14861 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14862 do {
14863 CSR_WRITE(sc, WMREG_EXTCNFCTR,
14864 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
14865 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14866 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
14867 break;
14868 delay(2*1000);
14869 i++;
14870 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
14871
14872 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
14873 wm_put_hw_semaphore_82573(sc);
14874 log(LOG_ERR, "%s: Driver can't access the PHY\n",
14875 device_xname(sc->sc_dev));
14876 return -1;
14877 }
14878
14879 return 0;
14880 }
14881
14882 static void
14883 wm_put_hw_semaphore_82573(struct wm_softc *sc)
14884 {
14885 uint32_t reg;
14886
14887 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14888 device_xname(sc->sc_dev), __func__));
14889
14890 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14891 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14892 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
14893 }
14894
14895 /*
14896 * Management mode and power management related subroutines.
14897 * BMC, AMT, suspend/resume and EEE.
14898 */
14899
14900 #ifdef WM_WOL
14901 static int
14902 wm_check_mng_mode(struct wm_softc *sc)
14903 {
14904 int rv;
14905
14906 switch (sc->sc_type) {
14907 case WM_T_ICH8:
14908 case WM_T_ICH9:
14909 case WM_T_ICH10:
14910 case WM_T_PCH:
14911 case WM_T_PCH2:
14912 case WM_T_PCH_LPT:
14913 case WM_T_PCH_SPT:
14914 case WM_T_PCH_CNP:
14915 rv = wm_check_mng_mode_ich8lan(sc);
14916 break;
14917 case WM_T_82574:
14918 case WM_T_82583:
14919 rv = wm_check_mng_mode_82574(sc);
14920 break;
14921 case WM_T_82571:
14922 case WM_T_82572:
14923 case WM_T_82573:
14924 case WM_T_80003:
14925 rv = wm_check_mng_mode_generic(sc);
14926 break;
14927 default:
14928 		/* Nothing to do */
14929 rv = 0;
14930 break;
14931 }
14932
14933 return rv;
14934 }
14935
14936 static int
14937 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
14938 {
14939 uint32_t fwsm;
14940
14941 fwsm = CSR_READ(sc, WMREG_FWSM);
14942
14943 if (((fwsm & FWSM_FW_VALID) != 0)
14944 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14945 return 1;
14946
14947 return 0;
14948 }
14949
14950 static int
14951 wm_check_mng_mode_82574(struct wm_softc *sc)
14952 {
14953 uint16_t data;
14954
14955 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
14956
14957 if ((data & NVM_CFG2_MNGM_MASK) != 0)
14958 return 1;
14959
14960 return 0;
14961 }
14962
14963 static int
14964 wm_check_mng_mode_generic(struct wm_softc *sc)
14965 {
14966 uint32_t fwsm;
14967
14968 fwsm = CSR_READ(sc, WMREG_FWSM);
14969
14970 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
14971 return 1;
14972
14973 return 0;
14974 }
14975 #endif /* WM_WOL */
14976
14977 static int
14978 wm_enable_mng_pass_thru(struct wm_softc *sc)
14979 {
14980 uint32_t manc, fwsm, factps;
14981
14982 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
14983 return 0;
14984
14985 manc = CSR_READ(sc, WMREG_MANC);
14986
14987 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
14988 device_xname(sc->sc_dev), manc));
14989 if ((manc & MANC_RECV_TCO_EN) == 0)
14990 return 0;
14991
14992 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
14993 fwsm = CSR_READ(sc, WMREG_FWSM);
14994 factps = CSR_READ(sc, WMREG_FACTPS);
14995 if (((factps & FACTPS_MNGCG) == 0)
14996 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14997 return 1;
14998 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
14999 uint16_t data;
15000
15001 factps = CSR_READ(sc, WMREG_FACTPS);
15002 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15003 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
15004 device_xname(sc->sc_dev), factps, data));
15005 if (((factps & FACTPS_MNGCG) == 0)
15006 && ((data & NVM_CFG2_MNGM_MASK)
15007 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
15008 return 1;
15009 } else if (((manc & MANC_SMBUS_EN) != 0)
15010 && ((manc & MANC_ASF_EN) == 0))
15011 return 1;
15012
15013 return 0;
15014 }
15015
15016 static bool
15017 wm_phy_resetisblocked(struct wm_softc *sc)
15018 {
15019 bool blocked = false;
15020 uint32_t reg;
15021 int i = 0;
15022
15023 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15024 device_xname(sc->sc_dev), __func__));
15025
15026 switch (sc->sc_type) {
15027 case WM_T_ICH8:
15028 case WM_T_ICH9:
15029 case WM_T_ICH10:
15030 case WM_T_PCH:
15031 case WM_T_PCH2:
15032 case WM_T_PCH_LPT:
15033 case WM_T_PCH_SPT:
15034 case WM_T_PCH_CNP:
15035 do {
15036 reg = CSR_READ(sc, WMREG_FWSM);
15037 if ((reg & FWSM_RSPCIPHY) == 0) {
15038 blocked = true;
15039 delay(10*1000);
15040 continue;
15041 }
15042 blocked = false;
15043 } while (blocked && (i++ < 30));
15044 		return blocked;
15046 case WM_T_82571:
15047 case WM_T_82572:
15048 case WM_T_82573:
15049 case WM_T_82574:
15050 case WM_T_82583:
15051 case WM_T_80003:
15052 		reg = CSR_READ(sc, WMREG_MANC);
15053 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
15054 			return true;
15055 		else
15056 			return false;
15058 default:
15059 /* No problem */
15060 break;
15061 }
15062
15063 return false;
15064 }
15065
15066 static void
15067 wm_get_hw_control(struct wm_softc *sc)
15068 {
15069 uint32_t reg;
15070
15071 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15072 device_xname(sc->sc_dev), __func__));
15073
15074 if (sc->sc_type == WM_T_82573) {
15075 reg = CSR_READ(sc, WMREG_SWSM);
15076 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
15077 } else if (sc->sc_type >= WM_T_82571) {
15078 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15079 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
15080 }
15081 }
15082
15083 static void
15084 wm_release_hw_control(struct wm_softc *sc)
15085 {
15086 uint32_t reg;
15087
15088 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15089 device_xname(sc->sc_dev), __func__));
15090
15091 if (sc->sc_type == WM_T_82573) {
15092 reg = CSR_READ(sc, WMREG_SWSM);
15093 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
15094 } else if (sc->sc_type >= WM_T_82571) {
15095 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15096 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
15097 }
15098 }
15099
15100 static void
15101 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
15102 {
15103 uint32_t reg;
15104
15105 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15106 device_xname(sc->sc_dev), __func__));
15107
15108 if (sc->sc_type < WM_T_PCH2)
15109 return;
15110
15111 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15112
15113 if (gate)
15114 reg |= EXTCNFCTR_GATE_PHY_CFG;
15115 else
15116 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
15117
15118 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15119 }
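
/*
 * Typical usage of the gate, as in wm_init_phy_workarounds_pchlan() below:
 *
 *	wm_gate_hw_phy_config_ich8lan(sc, true);	(gate)
 *	... reset and configure the PHY ...
 *	wm_gate_hw_phy_config_ich8lan(sc, false);	(ungate)
 */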
15120
15121 static int
15122 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
15123 {
15124 uint32_t fwsm, reg;
15125 int rv = 0;
15126
15127 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15128 device_xname(sc->sc_dev), __func__));
15129
15130 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
15131 wm_gate_hw_phy_config_ich8lan(sc, true);
15132
15133 /* Disable ULP */
15134 wm_ulp_disable(sc);
15135
15136 /* Acquire PHY semaphore */
15137 rv = sc->phy.acquire(sc);
15138 if (rv != 0) {
15139 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
15140 device_xname(sc->sc_dev), __func__));
15141 return -1;
15142 }
15143
15144 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
15145 * inaccessible and resetting the PHY is not blocked, toggle the
15146 * LANPHYPC Value bit to force the interconnect to PCIe mode.
15147 */
15148 fwsm = CSR_READ(sc, WMREG_FWSM);
15149 switch (sc->sc_type) {
15150 case WM_T_PCH_LPT:
15151 case WM_T_PCH_SPT:
15152 case WM_T_PCH_CNP:
15153 if (wm_phy_is_accessible_pchlan(sc))
15154 break;
15155
15156 /* Before toggling LANPHYPC, see if PHY is accessible by
15157 * forcing MAC to SMBus mode first.
15158 */
15159 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15160 reg |= CTRL_EXT_FORCE_SMBUS;
15161 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15162 #if 0
15163 /* XXX Isn't this required??? */
15164 CSR_WRITE_FLUSH(sc);
15165 #endif
15166 /* Wait 50 milliseconds for MAC to finish any retries
15167 * that it might be trying to perform from previous
15168 * attempts to acknowledge any phy read requests.
15169 */
15170 delay(50 * 1000);
15171 /* FALLTHROUGH */
15172 case WM_T_PCH2:
15173 if (wm_phy_is_accessible_pchlan(sc) == true)
15174 break;
15175 /* FALLTHROUGH */
15176 case WM_T_PCH:
15177 if (sc->sc_type == WM_T_PCH)
15178 if ((fwsm & FWSM_FW_VALID) != 0)
15179 break;
15180
15181 if (wm_phy_resetisblocked(sc) == true) {
15182 device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
15183 break;
15184 }
15185
15186 /* Toggle LANPHYPC Value bit */
15187 wm_toggle_lanphypc_pch_lpt(sc);
15188
15189 if (sc->sc_type >= WM_T_PCH_LPT) {
15190 if (wm_phy_is_accessible_pchlan(sc) == true)
15191 break;
15192
15193 /* Toggling LANPHYPC brings the PHY out of SMBus mode
15194 * so ensure that the MAC is also out of SMBus mode
15195 */
15196 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15197 reg &= ~CTRL_EXT_FORCE_SMBUS;
15198 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15199
15200 if (wm_phy_is_accessible_pchlan(sc) == true)
15201 break;
15202 rv = -1;
15203 }
15204 break;
15205 default:
15206 break;
15207 }
15208
15209 /* Release semaphore */
15210 sc->phy.release(sc);
15211
15212 if (rv == 0) {
15213 /* Check to see if able to reset PHY. Print error if not */
15214 if (wm_phy_resetisblocked(sc)) {
15215 device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
15216 goto out;
15217 }
15218
15219 		/* Reset the PHY before any access to it. Doing so ensures
15220 * that the PHY is in a known good state before we read/write
15221 * PHY registers. The generic reset is sufficient here,
15222 * because we haven't determined the PHY type yet.
15223 */
15224 		if ((rv = wm_reset_phy(sc)) != 0)
15225 goto out;
15226
15227 		/* On a successful reset, possibly need to wait for the PHY
15228 		 * to quiesce to an accessible state before returning control
15229 		 * to the calling function.  If the PHY does not quiesce, just
15230 		 * report that the PHY reset is still blocked, as this is the
15231 		 * condition that the PHY is in.
15232 		 */
15233 if (wm_phy_resetisblocked(sc))
15234 			device_printf(sc->sc_dev, "XXX reset is blocked(5)\n");
15235 }
15236
15237 out:
15238 /* Ungate automatic PHY configuration on non-managed 82579 */
15239 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
15240 delay(10*1000);
15241 wm_gate_hw_phy_config_ich8lan(sc, false);
15242 }
15243
15244 	return rv;
15245 }
15246
15247 static void
15248 wm_init_manageability(struct wm_softc *sc)
15249 {
15250
15251 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15252 device_xname(sc->sc_dev), __func__));
15253 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15254 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
15255 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15256
15257 /* Disable hardware interception of ARP */
15258 manc &= ~MANC_ARP_EN;
15259
15260 /* Enable receiving management packets to the host */
15261 if (sc->sc_type >= WM_T_82571) {
15262 manc |= MANC_EN_MNG2HOST;
15263 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
15264 CSR_WRITE(sc, WMREG_MANC2H, manc2h);
15265 }
15266
15267 CSR_WRITE(sc, WMREG_MANC, manc);
15268 }
15269 }
15270
15271 static void
15272 wm_release_manageability(struct wm_softc *sc)
15273 {
15274
15275 if (sc->sc_flags & WM_F_HAS_MANAGE) {
15276 uint32_t manc = CSR_READ(sc, WMREG_MANC);
15277
15278 manc |= MANC_ARP_EN;
15279 if (sc->sc_type >= WM_T_82571)
15280 manc &= ~MANC_EN_MNG2HOST;
15281
15282 CSR_WRITE(sc, WMREG_MANC, manc);
15283 }
15284 }
15285
15286 static void
15287 wm_get_wakeup(struct wm_softc *sc)
15288 {
15289
15290 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
15291 switch (sc->sc_type) {
15292 case WM_T_82573:
15293 case WM_T_82583:
15294 sc->sc_flags |= WM_F_HAS_AMT;
15295 /* FALLTHROUGH */
15296 case WM_T_80003:
15297 case WM_T_82575:
15298 case WM_T_82576:
15299 case WM_T_82580:
15300 case WM_T_I350:
15301 case WM_T_I354:
15302 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
15303 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
15304 /* FALLTHROUGH */
15305 case WM_T_82541:
15306 case WM_T_82541_2:
15307 case WM_T_82547:
15308 case WM_T_82547_2:
15309 case WM_T_82571:
15310 case WM_T_82572:
15311 case WM_T_82574:
15312 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15313 break;
15314 case WM_T_ICH8:
15315 case WM_T_ICH9:
15316 case WM_T_ICH10:
15317 case WM_T_PCH:
15318 case WM_T_PCH2:
15319 case WM_T_PCH_LPT:
15320 case WM_T_PCH_SPT:
15321 case WM_T_PCH_CNP:
15322 sc->sc_flags |= WM_F_HAS_AMT;
15323 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15324 break;
15325 default:
15326 break;
15327 }
15328
15329 /* 1: HAS_MANAGE */
15330 if (wm_enable_mng_pass_thru(sc) != 0)
15331 sc->sc_flags |= WM_F_HAS_MANAGE;
15332
15333 /*
15334 	 * Note that the WOL flag is set after the EEPROM-related reset
15335 	 * is done.
15336 */
15337 }
15338
15339 /*
15340 * Unconfigure Ultra Low Power mode.
15341  * Only for I217 and newer, excluding some I217/I218 variants (see below).
15342 */
15343 static int
15344 wm_ulp_disable(struct wm_softc *sc)
15345 {
15346 uint32_t reg;
15347 uint16_t phyreg;
15348 int i = 0, rv = 0;
15349
15350 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15351 device_xname(sc->sc_dev), __func__));
15352 	/* Exclude old devices and some I217/I218 variants */
15353 if ((sc->sc_type < WM_T_PCH_LPT)
15354 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
15355 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
15356 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
15357 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
15358 return 0;
15359
15360 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
15361 /* Request ME un-configure ULP mode in the PHY */
15362 reg = CSR_READ(sc, WMREG_H2ME);
15363 reg &= ~H2ME_ULP;
15364 reg |= H2ME_ENFORCE_SETTINGS;
15365 CSR_WRITE(sc, WMREG_H2ME, reg);
15366
15367 		/* Poll up to 300msec (30 * 10ms) for ME to clear ULP_CFG_DONE. */
15368 while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
15369 if (i++ == 30) {
15370 device_printf(sc->sc_dev, "%s timed out\n",
15371 __func__);
15372 return -1;
15373 }
15374 delay(10 * 1000);
15375 }
15376 reg = CSR_READ(sc, WMREG_H2ME);
15377 reg &= ~H2ME_ENFORCE_SETTINGS;
15378 CSR_WRITE(sc, WMREG_H2ME, reg);
15379
15380 return 0;
15381 }
15382
15383 /* Acquire semaphore */
15384 rv = sc->phy.acquire(sc);
15385 if (rv != 0) {
15386 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
15387 device_xname(sc->sc_dev), __func__));
15388 return -1;
15389 }
15390
15391 /* Toggle LANPHYPC */
15392 wm_toggle_lanphypc_pch_lpt(sc);
15393
15394 /* Unforce SMBus mode in PHY */
15395 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
15396 if (rv != 0) {
15397 uint32_t reg2;
15398
15399 aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
15400 __func__);
15401 reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
15402 reg2 |= CTRL_EXT_FORCE_SMBUS;
15403 CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
15404 delay(50 * 1000);
15405
15406 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
15407 &phyreg);
15408 if (rv != 0)
15409 goto release;
15410 }
15411 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
15412 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
15413
15414 /* Unforce SMBus mode in MAC */
15415 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15416 reg &= ~CTRL_EXT_FORCE_SMBUS;
15417 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15418
15419 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
15420 if (rv != 0)
15421 goto release;
15422 phyreg |= HV_PM_CTRL_K1_ENA;
15423 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
15424
15425 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
15426 &phyreg);
15427 if (rv != 0)
15428 goto release;
15429 phyreg &= ~(I218_ULP_CONFIG1_IND
15430 | I218_ULP_CONFIG1_STICKY_ULP
15431 | I218_ULP_CONFIG1_RESET_TO_SMBUS
15432 | I218_ULP_CONFIG1_WOL_HOST
15433 | I218_ULP_CONFIG1_INBAND_EXIT
15434 | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
15435 | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
15436 | I218_ULP_CONFIG1_DIS_SMB_PERST);
15437 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15438 phyreg |= I218_ULP_CONFIG1_START;
15439 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15440
15441 reg = CSR_READ(sc, WMREG_FEXTNVM7);
15442 reg &= ~FEXTNVM7_DIS_SMB_PERST;
15443 CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
15444
15445 release:
15446 /* Release semaphore */
15447 sc->phy.release(sc);
15448 wm_gmii_reset(sc);
15449 delay(50 * 1000);
15450
15451 return rv;
15452 }
15453
15454 /* WOL in the newer chipset interfaces (pchlan) */
15455 static int
15456 wm_enable_phy_wakeup(struct wm_softc *sc)
15457 {
15458 device_t dev = sc->sc_dev;
15459 uint32_t mreg, moff;
15460 uint16_t wuce, wuc, wufc, preg;
15461 int i, rv;
15462
15463 KASSERT(sc->sc_type >= WM_T_PCH);
15464
15465 /* Copy MAC RARs to PHY RARs */
15466 wm_copy_rx_addrs_to_phy_ich8lan(sc);
15467
15468 /* Activate PHY wakeup */
15469 rv = sc->phy.acquire(sc);
15470 if (rv != 0) {
15471 device_printf(dev, "%s: failed to acquire semaphore\n",
15472 __func__);
15473 return rv;
15474 }
15475
15476 /*
15477 * Enable access to PHY wakeup registers.
15478 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
15479 */
15480 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
15481 if (rv != 0) {
15482 device_printf(dev,
15483 "%s: Could not enable PHY wakeup reg access\n", __func__);
15484 goto release;
15485 }
15486
15487 /* Copy MAC MTA to PHY MTA */
15488 for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
15489 uint16_t lo, hi;
15490
15491 mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
15492 lo = (uint16_t)(mreg & 0xffff);
15493 hi = (uint16_t)((mreg >> 16) & 0xffff);
15494 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
15495 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
15496 }
15497
15498 /* Configure PHY Rx Control register */
15499 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
15500 mreg = CSR_READ(sc, WMREG_RCTL);
15501 if (mreg & RCTL_UPE)
15502 preg |= BM_RCTL_UPE;
15503 if (mreg & RCTL_MPE)
15504 preg |= BM_RCTL_MPE;
15505 preg &= ~(BM_RCTL_MO_MASK);
15506 moff = __SHIFTOUT(mreg, RCTL_MO);
15507 if (moff != 0)
15508 preg |= moff << BM_RCTL_MO_SHIFT;
15509 if (mreg & RCTL_BAM)
15510 preg |= BM_RCTL_BAM;
15511 if (mreg & RCTL_PMCF)
15512 preg |= BM_RCTL_PMCF;
15513 mreg = CSR_READ(sc, WMREG_CTRL);
15514 if (mreg & CTRL_RFCE)
15515 preg |= BM_RCTL_RFCE;
15516 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
15517
15518 wuc = WUC_APME | WUC_PME_EN;
15519 wufc = WUFC_MAG;
15520 /* Enable PHY wakeup in MAC register */
15521 CSR_WRITE(sc, WMREG_WUC,
15522 WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
15523 CSR_WRITE(sc, WMREG_WUFC, wufc);
15524
15525 /* Configure and enable PHY wakeup in PHY registers */
15526 wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
15527 wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
15528
15529 wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
15530 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15531
15532 release:
15533 sc->phy.release(sc);
15534
15535 	return rv;
15536 }
15537
15538 /* Power down workaround on D3 */
15539 static void
15540 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
15541 {
15542 uint32_t reg;
15543 uint16_t phyreg;
15544 int i;
15545
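	/*
	 * Try the VR shutdown write at most twice: if the read-back check
	 * below fails on the first pass, reset the PHY and retry once.
	 */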
15546 for (i = 0; i < 2; i++) {
15547 /* Disable link */
15548 reg = CSR_READ(sc, WMREG_PHY_CTRL);
15549 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15550 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15551
15552 /*
15553 * Call gig speed drop workaround on Gig disable before
15554 * accessing any PHY registers
15555 */
15556 if (sc->sc_type == WM_T_ICH8)
15557 wm_gig_downshift_workaround_ich8lan(sc);
15558
15559 /* Write VR power-down enable */
15560 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15561 phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15562 phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
15563 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
15564
15565 /* Read it back and test */
15566 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15567 phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15568 if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
15569 break;
15570
15571 /* Issue PHY reset and repeat at most one more time */
15572 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
15573 }
15574 }
15575
15576 /*
15577 * wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
15578 * @sc: pointer to the HW structure
15579 *
15580 * During S0 to Sx transition, it is possible the link remains at gig
15581 * instead of negotiating to a lower speed. Before going to Sx, set
15582 * 'Gig Disable' to force link speed negotiation to a lower speed based on
15583 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
15584 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
15585 * needs to be written.
15586  * Parts that support (and are linked to a partner which supports) EEE in
15587 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
15588 * than 10Mbps w/o EEE.
15589 */
15590 static void
15591 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
15592 {
15593 device_t dev = sc->sc_dev;
15594 struct ethercom *ec = &sc->sc_ethercom;
15595 uint32_t phy_ctrl;
15596 int rv;
15597
15598 phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
15599 phy_ctrl |= PHY_CTRL_GBE_DIS;
15600
15601 KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
15602
15603 if (sc->sc_phytype == WMPHY_I217) {
15604 uint16_t devid = sc->sc_pcidevid;
15605
15606 if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
15607 (devid == PCI_PRODUCT_INTEL_I218_V) ||
15608 (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
15609 (devid == PCI_PRODUCT_INTEL_I218_V3) ||
15610 (sc->sc_type >= WM_T_PCH_SPT))
15611 CSR_WRITE(sc, WMREG_FEXTNVM6,
15612 CSR_READ(sc, WMREG_FEXTNVM6)
15613 & ~FEXTNVM6_REQ_PLL_CLK);
15614
15615 if (sc->phy.acquire(sc) != 0)
15616 goto out;
15617
15618 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15619 uint16_t eee_advert;
15620
15621 rv = wm_read_emi_reg_locked(dev,
15622 I217_EEE_ADVERTISEMENT, &eee_advert);
15623 if (rv)
15624 goto release;
15625
15626 /*
15627 * Disable LPLU if both link partners support 100BaseT
15628 * EEE and 100Full is advertised on both ends of the
15629 * link, and enable Auto Enable LPI since there will
15630 * be no driver to enable LPI while in Sx.
15631 */
15632 if ((eee_advert & AN_EEEADVERT_100_TX) &&
15633 (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
15634 uint16_t anar, phy_reg;
15635
15636 sc->phy.readreg_locked(dev, 2, MII_ANAR,
15637 &anar);
15638 if (anar & ANAR_TX_FD) {
15639 phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
15640 PHY_CTRL_NOND0A_LPLU);
15641
15642 /* Set Auto Enable LPI after link up */
15643 sc->phy.readreg_locked(dev, 2,
15644 I217_LPI_GPIO_CTRL, &phy_reg);
15645 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
15646 sc->phy.writereg_locked(dev, 2,
15647 I217_LPI_GPIO_CTRL, phy_reg);
15648 }
15649 }
15650 }
15651
15652 /*
15653 * For i217 Intel Rapid Start Technology support,
15654 * when the system is going into Sx and no manageability engine
15655 * is present, the driver must configure proxy to reset only on
15656 * power good. LPI (Low Power Idle) state must also reset only
15657 * on power good, as well as the MTA (Multicast table array).
15658 * The SMBus release must also be disabled on LCD reset.
15659 */
15660
15661 /*
15662 * Enable MTA to reset for Intel Rapid Start Technology
15663 * Support
15664 */
15665
15666 release:
15667 sc->phy.release(sc);
15668 }
15669 out:
15670 CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
15671
15672 if (sc->sc_type == WM_T_ICH8)
15673 wm_gig_downshift_workaround_ich8lan(sc);
15674
15675 if (sc->sc_type >= WM_T_PCH) {
15676 wm_oem_bits_config_ich8lan(sc, false);
15677
15678 /* Reset PHY to activate OEM bits on 82577/8 */
15679 if (sc->sc_type == WM_T_PCH)
15680 wm_reset_phy(sc);
15681
15682 if (sc->phy.acquire(sc) != 0)
15683 return;
15684 wm_write_smbus_addr(sc);
15685 sc->phy.release(sc);
15686 }
15687 }
15688
15689 /*
15690 * wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
15691 * @sc: pointer to the HW structure
15692 *
15693 * During Sx to S0 transitions on non-managed devices or managed devices
15694 * on which PHY resets are not blocked, if the PHY registers cannot be
15695  * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
15696 * the PHY.
15697 * On i217, setup Intel Rapid Start Technology.
15698 */
15699 static int
15700 wm_resume_workarounds_pchlan(struct wm_softc *sc)
15701 {
15702 device_t dev = sc->sc_dev;
15703 int rv;
15704
15705 if (sc->sc_type < WM_T_PCH2)
15706 return 0;
15707
15708 rv = wm_init_phy_workarounds_pchlan(sc);
15709 if (rv != 0)
15710 return -1;
15711
15712 	/* For i217 Intel Rapid Start Technology support, when the system
15713 	 * is transitioning from Sx and no manageability engine is present,
15714 	 * configure SMBus to restore on reset, disable proxy, and enable
15715 * the reset on MTA (Multicast table array).
15716 */
15717 if (sc->sc_phytype == WMPHY_I217) {
15718 uint16_t phy_reg;
15719
15720 if (sc->phy.acquire(sc) != 0)
15721 return -1;
15722
15723 /* Clear Auto Enable LPI after link up */
15724 sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
15725 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
15726 sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
15727
15728 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
15729 /* Restore clear on SMB if no manageability engine
15730 * is present
15731 */
15732 rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
15733 &phy_reg);
15734 if (rv != 0)
15735 goto release;
15736 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
15737 sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
15738
15739 /* Disable Proxy */
15740 sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
15741 }
15742 		/* Enable reset on MTA */
15743 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
15744 		if (rv != 0)
15745 			goto release;
15746 		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
15747 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
15748
15749 release:
15750 sc->phy.release(sc);
15751 return rv;
15752 }
15753
15754 return 0;
15755 }
15756
15757 static void
15758 wm_enable_wakeup(struct wm_softc *sc)
15759 {
15760 uint32_t reg, pmreg;
15761 pcireg_t pmode;
15762 int rv = 0;
15763
15764 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15765 device_xname(sc->sc_dev), __func__));
15766
15767 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
15768 &pmreg, NULL) == 0)
15769 return;
15770
15771 if ((sc->sc_flags & WM_F_WOL) == 0)
15772 goto pme;
15773
15774 /* Advertise the wakeup capability */
15775 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
15776 | CTRL_SWDPIN(3));
15777
15778 /* Keep the laser running on fiber adapters */
15779 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
15780 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
15781 reg = CSR_READ(sc, WMREG_CTRL_EXT);
15782 reg |= CTRL_EXT_SWDPIN(3);
15783 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15784 }
15785
15786 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
15787 (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
15788 (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
15789 (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
15790 wm_suspend_workarounds_ich8lan(sc);
15791
15792 #if 0 /* For the multicast packet */
15793 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
15794 	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_WUFC, reg);	/* XXX reg was computed but never written */
15795 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
15796 #endif
15797
15798 if (sc->sc_type >= WM_T_PCH) {
15799 rv = wm_enable_phy_wakeup(sc);
15800 if (rv != 0)
15801 goto pme;
15802 } else {
15803 /* Enable wakeup by the MAC */
15804 CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
15805 CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
15806 }
15807
15808 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
15809 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
15810 || (sc->sc_type == WM_T_PCH2))
15811 && (sc->sc_phytype == WMPHY_IGP_3))
15812 wm_igp3_phy_powerdown_workaround_ich8lan(sc);
15813
15814 pme:
15815 /* Request PME */
15816 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
15817 pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
15818 if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
15819 /* For WOL */
15820 pmode |= PCI_PMCSR_PME_EN;
15821 } else {
15822 /* Disable WOL */
15823 pmode &= ~PCI_PMCSR_PME_EN;
15824 }
15825 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
15826 }
15827
15828 /* Disable ASPM L0s and/or L1 for workaround */
15829 static void
15830 wm_disable_aspm(struct wm_softc *sc)
15831 {
15832 pcireg_t reg, mask = 0;
15833 	const char *str = "";
15834
15835 /*
15836 * Only for PCIe device which has PCIe capability in the PCI config
15837 * space.
15838 */
15839 if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
15840 return;
15841
15842 switch (sc->sc_type) {
15843 case WM_T_82571:
15844 case WM_T_82572:
15845 /*
15846 * 8257[12] Errata 13: Device Does Not Support PCIe Active
15847 * State Power management L1 State (ASPM L1).
15848 */
15849 mask = PCIE_LCSR_ASPM_L1;
15850 str = "L1 is";
15851 break;
15852 case WM_T_82573:
15853 case WM_T_82574:
15854 case WM_T_82583:
15855 /*
15856 * The 82573 disappears when PCIe ASPM L0s is enabled.
15857 *
15858 	 * The 82574 and 82583 do not support PCIe ASPM L0s with
15859 	 * some chipsets. The 82574 and 82583 documents say that
15860 	 * disabling L0s with those specific chipsets is sufficient,
15861 	 * but we follow what the Intel em driver does.
15862 *
15863 * References:
15864 * Errata 8 of the Specification Update of i82573.
15865 * Errata 20 of the Specification Update of i82574.
15866 * Errata 9 of the Specification Update of i82583.
15867 */
15868 mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
15869 str = "L0s and L1 are";
15870 break;
15871 default:
15872 return;
15873 }
15874
15875 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
15876 sc->sc_pcixe_capoff + PCIE_LCSR);
15877 reg &= ~mask;
15878 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
15879 sc->sc_pcixe_capoff + PCIE_LCSR, reg);
15880
15881 /* Print only in wm_attach() */
15882 if ((sc->sc_flags & WM_F_ATTACHED) == 0)
15883 aprint_verbose_dev(sc->sc_dev,
15884 "ASPM %s disabled to workaround the errata.\n", str);
15885 }
15886
15887 /* LPLU */
15888
15889 static void
15890 wm_lplu_d0_disable(struct wm_softc *sc)
15891 {
15892 struct mii_data *mii = &sc->sc_mii;
15893 uint32_t reg;
15894 uint16_t phyval;
15895
15896 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15897 device_xname(sc->sc_dev), __func__));
15898
15899 if (sc->sc_phytype == WMPHY_IFE)
15900 return;
15901
15902 switch (sc->sc_type) {
15903 case WM_T_82571:
15904 case WM_T_82572:
15905 case WM_T_82573:
15906 case WM_T_82575:
15907 case WM_T_82576:
15908 mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
15909 phyval &= ~PMR_D0_LPLU;
15910 mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
15911 break;
15912 case WM_T_82580:
15913 case WM_T_I350:
15914 case WM_T_I210:
15915 case WM_T_I211:
15916 reg = CSR_READ(sc, WMREG_PHPM);
15917 reg &= ~PHPM_D0A_LPLU;
15918 CSR_WRITE(sc, WMREG_PHPM, reg);
15919 break;
15920 case WM_T_82574:
15921 case WM_T_82583:
15922 case WM_T_ICH8:
15923 case WM_T_ICH9:
15924 case WM_T_ICH10:
15925 reg = CSR_READ(sc, WMREG_PHY_CTRL);
15926 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
15927 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15928 CSR_WRITE_FLUSH(sc);
15929 break;
15930 case WM_T_PCH:
15931 case WM_T_PCH2:
15932 case WM_T_PCH_LPT:
15933 case WM_T_PCH_SPT:
15934 case WM_T_PCH_CNP:
15935 wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
15936 phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
15937 if (wm_phy_resetisblocked(sc) == false)
15938 phyval |= HV_OEM_BITS_ANEGNOW;
15939 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
15940 break;
15941 default:
15942 break;
15943 }
15944 }
15945
15946 /* EEE */
15947
15948 static int
15949 wm_set_eee_i350(struct wm_softc *sc)
15950 {
15951 struct ethercom *ec = &sc->sc_ethercom;
15952 uint32_t ipcnfg, eeer;
15953 uint32_t ipcnfg_mask
15954 = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
15955 uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
15956
15957 KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
15958
15959 ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
15960 eeer = CSR_READ(sc, WMREG_EEER);
15961
15962 /* Enable or disable per user setting */
15963 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15964 ipcnfg |= ipcnfg_mask;
15965 eeer |= eeer_mask;
15966 } else {
15967 ipcnfg &= ~ipcnfg_mask;
15968 eeer &= ~eeer_mask;
15969 }
15970
15971 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
15972 CSR_WRITE(sc, WMREG_EEER, eeer);
15973 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
15974 CSR_READ(sc, WMREG_EEER); /* XXX flush? */
15975
15976 return 0;
15977 }
15978
15979 static int
15980 wm_set_eee_pchlan(struct wm_softc *sc)
15981 {
15982 device_t dev = sc->sc_dev;
15983 struct ethercom *ec = &sc->sc_ethercom;
15984 uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
15985 int rv = 0;
15986
15987 switch (sc->sc_phytype) {
15988 case WMPHY_82579:
15989 lpa = I82579_EEE_LP_ABILITY;
15990 pcs_status = I82579_EEE_PCS_STATUS;
15991 adv_addr = I82579_EEE_ADVERTISEMENT;
15992 break;
15993 case WMPHY_I217:
15994 lpa = I217_EEE_LP_ABILITY;
15995 pcs_status = I217_EEE_PCS_STATUS;
15996 adv_addr = I217_EEE_ADVERTISEMENT;
15997 break;
15998 default:
15999 return 0;
16000 }
16001
16002 if (sc->phy.acquire(sc)) {
16003 device_printf(dev, "%s: failed to get semaphore\n", __func__);
16004 return 0;
16005 }
16006
16007 rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
16008 if (rv != 0)
16009 goto release;
16010
16011 /* Clear bits that enable EEE in various speeds */
16012 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
16013
16014 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16015 /* Save off link partner's EEE ability */
16016 rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
16017 if (rv != 0)
16018 goto release;
16019
16020 /* Read EEE advertisement */
16021 if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
16022 goto release;
16023
16024 /*
16025 * Enable EEE only for speeds in which the link partner is
16026 * EEE capable and for which we advertise EEE.
16027 */
16028 if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
16029 lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
16030 if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
16031 sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
16032 if ((data & ANLPAR_TX_FD) != 0)
16033 lpi_ctrl |= I82579_LPI_CTRL_EN_100;
16034 else {
16035 /*
16036 * EEE is not supported in 100Half, so ignore
16037 				 * the partner's 100Mbps EEE ability if full-duplex
16038 * is not advertised.
16039 */
16040 sc->eee_lp_ability
16041 &= ~AN_EEEADVERT_100_TX;
16042 }
16043 }
16044 }
16045
16046 if (sc->sc_phytype == WMPHY_82579) {
16047 rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
16048 if (rv != 0)
16049 goto release;
16050
16051 data &= ~I82579_LPI_PLL_SHUT_100;
16052 rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
16053 }
16054
16055 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
16056 if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
16057 goto release;
16058
16059 rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
16060 release:
16061 sc->phy.release(sc);
16062
16063 return rv;
16064 }
16065
16066 static int
16067 wm_set_eee(struct wm_softc *sc)
16068 {
16069 struct ethercom *ec = &sc->sc_ethercom;
16070
16071 if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
16072 return 0;
16073
16074 if (sc->sc_type == WM_T_I354) {
16075 /* I354 uses an external PHY */
16076 return 0; /* not yet */
16077 } else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
16078 return wm_set_eee_i350(sc);
16079 else if (sc->sc_type >= WM_T_PCH2)
16080 return wm_set_eee_pchlan(sc);
16081
16082 return 0;
16083 }
16084
16085 /*
16086 * Workarounds (mainly PHY related).
16087  * Basically, PHY workarounds are implemented in the PHY drivers.
16088 */
16089
16090 /* Workaround for 82566 Kumeran PCS lock loss */
16091 static int
16092 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
16093 {
16094 struct mii_data *mii = &sc->sc_mii;
16095 uint32_t status = CSR_READ(sc, WMREG_STATUS);
16096 int i, reg, rv;
16097 uint16_t phyreg;
16098
16099 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16100 device_xname(sc->sc_dev), __func__));
16101
16102 /* If the link is not up, do nothing */
16103 if ((status & STATUS_LU) == 0)
16104 return 0;
16105
16106 	/* Nothing to do if the link speed is not 1Gbps */
16107 if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
16108 return 0;
16109
16110 for (i = 0; i < 10; i++) {
16111 /* read twice */
16112 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16113 if (rv != 0)
16114 return rv;
16115 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16116 if (rv != 0)
16117 return rv;
16118
16119 if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
16120 goto out; /* GOOD! */
16121
16122 /* Reset the PHY */
16123 wm_reset_phy(sc);
16124 delay(5*1000);
16125 }
16126
16127 /* Disable GigE link negotiation */
16128 reg = CSR_READ(sc, WMREG_PHY_CTRL);
16129 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16130 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16131
16132 /*
16133 * Call gig speed drop workaround on Gig disable before accessing
16134 * any PHY registers.
16135 */
16136 wm_gig_downshift_workaround_ich8lan(sc);
16137
16138 out:
16139 return 0;
16140 }
16141
16142 /*
16143 * wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
16144 * @sc: pointer to the HW structure
16145 *
16146  * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
16147 * LPLU, Gig disable, MDIC PHY reset):
16148 * 1) Set Kumeran Near-end loopback
16149 * 2) Clear Kumeran Near-end loopback
16150 * Should only be called for ICH8[m] devices with any 1G Phy.
16151 */
16152 static void
16153 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
16154 {
16155 uint16_t kmreg;
16156
16157 /* Only for igp3 */
16158 if (sc->sc_phytype == WMPHY_IGP_3) {
16159 if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
16160 return;
16161 kmreg |= KUMCTRLSTA_DIAG_NELPBK;
16162 if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
16163 return;
16164 kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
16165 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
16166 }
16167 }
16168
16169 /*
16170 * Workaround for pch's PHYs
16171 * XXX should be moved to new PHY driver?
16172 */
16173 static int
16174 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
16175 {
16176 device_t dev = sc->sc_dev;
16177 struct mii_data *mii = &sc->sc_mii;
16178 struct mii_softc *child;
16179 uint16_t phy_data, phyrev = 0;
16180 int phytype = sc->sc_phytype;
16181 int rv;
16182
16183 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16184 device_xname(dev), __func__));
16185 KASSERT(sc->sc_type == WM_T_PCH);
16186
16187 /* Set MDIO slow mode before any other MDIO access */
16188 if (phytype == WMPHY_82577)
16189 if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
16190 return rv;
16191
16192 child = LIST_FIRST(&mii->mii_phys);
16193 if (child != NULL)
16194 phyrev = child->mii_mpd_rev;
16195
16196 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
16197 if ((child != NULL) &&
16198 (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
16199 ((phytype == WMPHY_82578) && (phyrev == 1)))) {
16200 /* Disable generation of early preamble (0x4431) */
16201 rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16202 &phy_data);
16203 if (rv != 0)
16204 return rv;
16205 phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
16206 BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
16207 rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16208 phy_data);
16209 if (rv != 0)
16210 return rv;
16211
16212 /* Preamble tuning for SSC */
16213 rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
16214 if (rv != 0)
16215 return rv;
16216 }
16217
16218 /* 82578 */
16219 if (phytype == WMPHY_82578) {
16220 /*
16221 * Return registers to default by doing a soft reset then
16222 * writing 0x3140 to the control register
16223 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
16224 */
16225 if ((child != NULL) && (phyrev < 2)) {
16226 PHY_RESET(child);
16227 rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
16228 if (rv != 0)
16229 return rv;
16230 }
16231 }
16232
16233 /* Select page 0 */
16234 if ((rv = sc->phy.acquire(sc)) != 0)
16235 return rv;
16236 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
16237 sc->phy.release(sc);
16238 if (rv != 0)
16239 return rv;
16240
16241 /*
16242 * Configure the K1 Si workaround during phy reset assuming there is
16243 * link so that it disables K1 if link is in 1Gbps.
16244 */
16245 if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
16246 return rv;
16247
16248 /* Workaround for link disconnects on a busy hub in half duplex */
16249 rv = sc->phy.acquire(sc);
16250 if (rv)
16251 return rv;
16252 rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
16253 if (rv)
16254 goto release;
16255 rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
16256 phy_data & 0x00ff);
16257 if (rv)
16258 goto release;
16259
16260 /* Set MSE higher to enable link to stay up when noise is high */
16261 rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
16262 release:
16263 sc->phy.release(sc);
16264
16265 return rv;
16266 }
16267
16268 /*
16269 * wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
16270 * @sc: pointer to the HW structure
16271 */
16272 static void
16273 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
16274 {
16275
16276 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16277 device_xname(sc->sc_dev), __func__));
16278
16279 if (sc->phy.acquire(sc) != 0)
16280 return;
16281
16282 wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16283
16284 sc->phy.release(sc);
16285 }
16286
16287 static void
16288 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
16289 {
16290 device_t dev = sc->sc_dev;
16291 uint32_t mac_reg;
16292 uint16_t i, wuce;
16293 int count;
16294
16295 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16296 device_xname(dev), __func__));
16297
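	/*
	 * The BM_RAR_* registers live in the PHY's wakeup register space,
	 * so bracket the copies with enable/disable of wakeup register
	 * access; wuce holds state saved by the enable call for the
	 * disable call to restore.
	 */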
16298 if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
16299 return;
16300
16301 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
16302 count = wm_rar_count(sc);
16303 for (i = 0; i < count; i++) {
16304 uint16_t lo, hi;
16305 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
16306 lo = (uint16_t)(mac_reg & 0xffff);
16307 hi = (uint16_t)((mac_reg >> 16) & 0xffff);
16308 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
16309 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
16310
16311 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
16312 lo = (uint16_t)(mac_reg & 0xffff);
16313 hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
16314 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
16315 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
16316 }
16317
16318 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
16319 }
16320
16321 /*
16322 * wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
16323  *	with 82579 PHY
 * @sc: pointer to the HW structure
16324  * @enable: flag to enable/disable workaround when enabling/disabling jumbos
16325 */
16326 static int
16327 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
16328 {
16329 device_t dev = sc->sc_dev;
16330 int rar_count;
16331 int rv;
16332 uint32_t mac_reg;
16333 uint16_t dft_ctrl, data;
16334 uint16_t i;
16335
16336 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16337 device_xname(dev), __func__));
16338
16339 if (sc->sc_type < WM_T_PCH2)
16340 return 0;
16341
16342 /* Acquire PHY semaphore */
16343 rv = sc->phy.acquire(sc);
16344 if (rv != 0)
16345 return rv;
16346
16347 /* Disable Rx path while enabling/disabling workaround */
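	/* Per the comments here, bit 14 of I82579_DFT_CTRL gates the Rx
	 * path: it is set while the workaround is being changed and
	 * cleared again at the end of this function.
	 */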
16348 rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
16349 if (rv != 0)
16350 goto out;
16351 rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16352 dft_ctrl | (1 << 14));
16353 if (rv != 0)
16354 goto out;
16355
16356 if (enable) {
16357 /* Write Rx addresses (rar_entry_count for RAL/H, and
16358 * SHRAL/H) and initial CRC values to the MAC
16359 */
16360 rar_count = wm_rar_count(sc);
16361 for (i = 0; i < rar_count; i++) {
16362 uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
16363 uint32_t addr_high, addr_low;
16364
16365 addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
16366 if (!(addr_high & RAL_AV))
16367 continue;
16368 addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
16369 mac_addr[0] = (addr_low & 0xFF);
16370 mac_addr[1] = ((addr_low >> 8) & 0xFF);
16371 mac_addr[2] = ((addr_low >> 16) & 0xFF);
16372 mac_addr[3] = ((addr_low >> 24) & 0xFF);
16373 mac_addr[4] = (addr_high & 0xFF);
16374 mac_addr[5] = ((addr_high >> 8) & 0xFF);
16375
16376 CSR_WRITE(sc, WMREG_PCH_RAICC(i),
16377 ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
16378 }
16379
16380 /* Write Rx addresses to the PHY */
16381 wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16382 }
16383
16384 /*
16385 * If enable ==
16386 * true: Enable jumbo frame workaround in the MAC.
16387 * false: Write MAC register values back to h/w defaults.
16388 */
16389 mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
16390 if (enable) {
16391 mac_reg &= ~(1 << 14);
16392 mac_reg |= (7 << 15);
16393 } else
16394 mac_reg &= ~(0xf << 14);
16395 CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
16396
16397 mac_reg = CSR_READ(sc, WMREG_RCTL);
16398 if (enable) {
16399 mac_reg |= RCTL_SECRC;
16400 sc->sc_rctl |= RCTL_SECRC;
16401 sc->sc_flags |= WM_F_CRC_STRIP;
16402 } else {
16403 mac_reg &= ~RCTL_SECRC;
16404 sc->sc_rctl &= ~RCTL_SECRC;
16405 sc->sc_flags &= ~WM_F_CRC_STRIP;
16406 }
16407 CSR_WRITE(sc, WMREG_RCTL, mac_reg);
16408
16409 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
16410 if (rv != 0)
16411 goto out;
16412 if (enable)
16413 data |= 1 << 0;
16414 else
16415 data &= ~(1 << 0);
16416 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
16417 if (rv != 0)
16418 goto out;
16419
16420 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
16421 if (rv != 0)
16422 goto out;
16423 /*
16424 	 * XXX Both FreeBSD and Linux write the same value in the enable case
16425 	 * and in the disable case. Is that correct?
16426 */
16427 data &= ~(0xf << 8);
16428 data |= (0xb << 8);
16429 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
16430 if (rv != 0)
16431 goto out;
16432
16433 /*
16434 * If enable ==
16435 * true: Enable jumbo frame workaround in the PHY.
16436 * false: Write PHY register values back to h/w defaults.
16437 */
16438 rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
16439 if (rv != 0)
16440 goto out;
16441 data &= ~(0x7F << 5);
16442 if (enable)
16443 data |= (0x37 << 5);
16444 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
16445 if (rv != 0)
16446 goto out;
16447
16448 rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
16449 if (rv != 0)
16450 goto out;
16451 if (enable)
16452 data &= ~(1 << 13);
16453 else
16454 data |= (1 << 13);
16455 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
16456 if (rv != 0)
16457 goto out;
16458
16459 rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
16460 if (rv != 0)
16461 goto out;
16462 data &= ~(0x3FF << 2);
16463 if (enable)
16464 data |= (I82579_TX_PTR_GAP << 2);
16465 else
16466 data |= (0x8 << 2);
16467 rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
16468 if (rv != 0)
16469 goto out;
16470
16471 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
16472 enable ? 0xf100 : 0x7e00);
16473 if (rv != 0)
16474 goto out;
16475
16476 rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
16477 if (rv != 0)
16478 goto out;
16479 if (enable)
16480 data |= 1 << 10;
16481 else
16482 data &= ~(1 << 10);
16483 rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
16484 if (rv != 0)
16485 goto out;
16486
16487 /* Re-enable Rx path after enabling/disabling workaround */
16488 rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16489 dft_ctrl & ~(1 << 14));
16490
16491 out:
16492 sc->phy.release(sc);
16493
16494 return rv;
16495 }
16496
16497 /*
16498 * wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
16499 * done after every PHY reset.
16500 */
16501 static int
16502 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
16503 {
16504 device_t dev = sc->sc_dev;
16505 int rv;
16506
16507 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16508 device_xname(dev), __func__));
16509 KASSERT(sc->sc_type == WM_T_PCH2);
16510
16511 /* Set MDIO slow mode before any other MDIO access */
16512 rv = wm_set_mdio_slow_mode_hv(sc);
16513 if (rv != 0)
16514 return rv;
16515
16516 rv = sc->phy.acquire(sc);
16517 if (rv != 0)
16518 return rv;
16519 /* Set MSE higher to enable link to stay up when noise is high */
16520 rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
16521 if (rv != 0)
16522 goto release;
16523 /* Drop link after 5 times MSE threshold was reached */
16524 rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
16525 release:
16526 sc->phy.release(sc);
16527
16528 return rv;
16529 }
16530
16531 /**
16532  * wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 * @sc: pointer to the HW structure
16533  * @link: link up bool flag
16534 *
16535  * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
16536  * preventing further DMA write requests. Work around the issue by disabling
16537  * the de-assertion of the clock request when in 1Gbps mode.
16538 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
16539 * speeds in order to avoid Tx hangs.
16540 **/
16541 static int
16542 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
16543 {
16544 uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
16545 uint32_t status = CSR_READ(sc, WMREG_STATUS);
16546 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
16547 uint16_t phyreg;
16548
16549 if (link && (speed == STATUS_SPEED_1000)) {
16550 		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
16551 		rv = wm_kmrn_readreg_locked(sc,
16552 		    KUMCTRLSTA_OFFSET_K1_CONFIG, &phyreg);
16553 if (rv != 0)
16554 goto release;
16555 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
16556 phyreg & ~KUMCTRLSTA_K1_ENABLE);
16557 if (rv != 0)
16558 goto release;
16559 delay(20);
16560 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
16561
16562 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
16563 &phyreg);
16564 release:
16565 sc->phy.release(sc);
16566 return rv;
16567 }
16568
16569 fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
16570
16571 struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
16572 if (((child != NULL) && (child->mii_mpd_rev > 5))
16573 || !link
16574 || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
16575 goto update_fextnvm6;
16576
16577 wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
16578
16579 /* Clear link status transmit timeout */
16580 phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
16581 if (speed == STATUS_SPEED_100) {
16582 /* Set inband Tx timeout to 5x10us for 100Half */
16583 phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
16584
16585 /* Do not extend the K1 entry latency for 100Half */
16586 fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
16587 } else {
16588 /* Set inband Tx timeout to 50x10us for 10Full/Half */
16589 phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
16590
16591 /* Extend the K1 entry latency for 10 Mbps */
16592 fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
16593 }
16594
16595 wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
16596
16597 update_fextnvm6:
16598 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
16599 return 0;
16600 }
16601
16602 /*
16603 * wm_k1_gig_workaround_hv - K1 Si workaround
16604 * @sc: pointer to the HW structure
16605 * @link: link up bool flag
16606 *
16607 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
16608 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
16609 * If link is down, the function will restore the default K1 setting located
16610 * in the NVM.
16611 */
16612 static int
16613 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
16614 {
16615 int k1_enable = sc->sc_nvm_k1_enabled;
16616
16617 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16618 device_xname(sc->sc_dev), __func__));
16619
16620 if (sc->phy.acquire(sc) != 0)
16621 return -1;
16622
16623 if (link) {
16624 k1_enable = 0;
16625
16626 /* Link stall fix for link up */
16627 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
16628 0x0100);
16629 } else {
16630 /* Link stall fix for link down */
16631 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
16632 0x4100);
16633 }
16634
16635 wm_configure_k1_ich8lan(sc, k1_enable);
16636 sc->phy.release(sc);
16637
16638 return 0;
16639 }
16640
16641 /*
16642 * wm_k1_workaround_lv - K1 Si workaround
16643 * @sc: pointer to the HW structure
16644 *
16645 * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
16646 * Disable K1 for 1000 and 100 speeds
16647 */
16648 static int
16649 wm_k1_workaround_lv(struct wm_softc *sc)
16650 {
16651 uint32_t reg;
16652 uint16_t phyreg;
16653 int rv;
16654
16655 if (sc->sc_type != WM_T_PCH2)
16656 return 0;
16657
16658 /* Set K1 beacon duration based on 10Mbps speed */
16659 rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
16660 if (rv != 0)
16661 return rv;
16662
16663 if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
16664 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
16665 if (phyreg &
16666 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
16667 /* LV 1G/100 Packet drop issue wa */
16668 rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
16669 &phyreg);
16670 if (rv != 0)
16671 return rv;
16672 phyreg &= ~HV_PM_CTRL_K1_ENA;
16673 rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
16674 phyreg);
16675 if (rv != 0)
16676 return rv;
16677 } else {
16678 /* For 10Mbps */
16679 reg = CSR_READ(sc, WMREG_FEXTNVM4);
16680 reg &= ~FEXTNVM4_BEACON_DURATION;
16681 reg |= FEXTNVM4_BEACON_DURATION_16US;
16682 CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
16683 }
16684 }
16685
16686 return 0;
16687 }
16688
16689 /*
16690 * wm_link_stall_workaround_hv - Si workaround
16691 * @sc: pointer to the HW structure
16692 *
16693 * This function works around a Si bug where the link partner can get
16694 * a link up indication before the PHY does. If small packets are sent
16695  * by the link partner, they can be placed in the packet buffer without
16696  * being properly accounted for by the PHY and will stall, preventing
16697 * further packets from being received. The workaround is to clear the
16698 * packet buffer after the PHY detects link up.
16699 */
16700 static int
16701 wm_link_stall_workaround_hv(struct wm_softc *sc)
16702 {
16703 uint16_t phyreg;
16704
16705 if (sc->sc_phytype != WMPHY_82578)
16706 return 0;
16707
16708 	/* Do not apply the workaround if the PHY is in loopback (BMCR bit 14 set) */
16709 wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
16710 if ((phyreg & BMCR_LOOP) != 0)
16711 return 0;
16712
16713 /* Check if link is up and at 1Gbps */
16714 wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
16715 phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
16716 | BM_CS_STATUS_SPEED_MASK;
16717 if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
16718 | BM_CS_STATUS_SPEED_1000))
16719 return 0;
16720
16721 delay(200 * 1000); /* XXX too big */
16722
16723 /* Flush the packets in the fifo buffer */
16724 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
16725 HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
16726 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
16727 HV_MUX_DATA_CTRL_GEN_TO_MAC);
16728
16729 return 0;
16730 }
16731
16732 static int
16733 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
16734 {
16735 int rv;
16736 uint16_t reg;
16737
16738 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
16739 if (rv != 0)
16740 return rv;
16741
16742 return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
16743 reg | HV_KMRN_MDIO_SLOW);
16744 }
16745
16746 /*
16747 * wm_configure_k1_ich8lan - Configure K1 power state
16748 * @sc: pointer to the HW structure
16749  * @k1_enable: K1 state to configure
16750 *
16751 * Configure the K1 power state based on the provided parameter.
16752 * Assumes semaphore already acquired.
16753 */
16754 static void
16755 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
16756 {
16757 uint32_t ctrl, ctrl_ext, tmp;
16758 uint16_t kmreg;
16759 int rv;
16760
16761 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
16762
16763 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
16764 if (rv != 0)
16765 return;
16766
16767 if (k1_enable)
16768 kmreg |= KUMCTRLSTA_K1_ENABLE;
16769 else
16770 kmreg &= ~KUMCTRLSTA_K1_ENABLE;
16771
16772 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
16773 if (rv != 0)
16774 return;
16775
16776 delay(20);
16777
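	/*
	 * Briefly (20us) force the MAC speed settings (CTRL_FRCSPD with the
	 * speed bits cleared, plus CTRL_EXT_SPD_BYPS) while the new K1
	 * setting settles, then restore the original CTRL/CTRL_EXT values.
	 */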
16778 ctrl = CSR_READ(sc, WMREG_CTRL);
16779 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
16780
16781 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
16782 tmp |= CTRL_FRCSPD;
16783
16784 CSR_WRITE(sc, WMREG_CTRL, tmp);
16785 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
16786 CSR_WRITE_FLUSH(sc);
16787 delay(20);
16788
16789 CSR_WRITE(sc, WMREG_CTRL, ctrl);
16790 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
16791 CSR_WRITE_FLUSH(sc);
16792 delay(20);
16793
16794 return;
16795 }
16796
16797 /* special case - for 82575 - need to do manual init ... */
16798 static void
16799 wm_reset_init_script_82575(struct wm_softc *sc)
16800 {
16801 /*
16802 	 * Remark: this is untested code - we have no board without EEPROM.
16803 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
16804 */
16805
16806 /* SerDes configuration via SERDESCTRL */
16807 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
16808 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
16809 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
16810 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
16811
16812 /* CCM configuration via CCMCTL register */
16813 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
16814 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
16815
16816 /* PCIe lanes configuration */
16817 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
16818 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
16819 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
16820 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
16821
16822 /* PCIe PLL Configuration */
16823 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
16824 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
16825 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
16826 }
16827
16828 static void
16829 wm_reset_mdicnfg_82580(struct wm_softc *sc)
16830 {
16831 uint32_t reg;
16832 uint16_t nvmword;
16833 int rv;
16834
16835 if (sc->sc_type != WM_T_82580)
16836 return;
16837 if ((sc->sc_flags & WM_F_SGMII) == 0)
16838 return;
16839
16840 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
16841 + NVM_OFF_CFG3_PORTA, 1, &nvmword);
16842 if (rv != 0) {
16843 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
16844 __func__);
16845 return;
16846 }
16847
16848 reg = CSR_READ(sc, WMREG_MDICNFG);
16849 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
16850 reg |= MDICNFG_DEST;
16851 if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
16852 reg |= MDICNFG_COM_MDIO;
16853 CSR_WRITE(sc, WMREG_MDICNFG, reg);
16854 }
16855
16856 #define MII_INVALIDID(x) (((x) == 0x0000) || ((x) == 0xffff))
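/* An MII PHY ID of all zeros or all ones means no PHY responded. */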
16857
16858 static bool
16859 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
16860 {
16861 uint32_t reg;
16862 uint16_t id1, id2;
16863 int i, rv;
16864
16865 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16866 device_xname(sc->sc_dev), __func__));
16867 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
16868
16869 id1 = id2 = 0xffff;
16870 for (i = 0; i < 2; i++) {
16871 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
16872 &id1);
16873 if ((rv != 0) || MII_INVALIDID(id1))
16874 continue;
16875 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
16876 &id2);
16877 if ((rv != 0) || MII_INVALIDID(id2))
16878 continue;
16879 break;
16880 }
16881 if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
16882 goto out;
16883
	/*
	 * In case the PHY needs to be in MDIO slow mode,
	 * set slow mode and try to get the PHY ID again.
	 */
	rv = 0;
	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
		sc->phy.acquire(sc);
	}
	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev,
		    "XXX: PHY is not accessible, returning false\n");
		return false;
	}
out:
	if (sc->sc_type >= WM_T_PCH_LPT) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			uint16_t phyreg;

			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, phyreg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
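		/*
		 * Worked example of the encoding loop below (illustrative
		 * only): for lat_ns = 40000, the value exceeds the 10-bit
		 * maximum (1023), so it is divided (rounding up) by 2^5
		 * per step:
		 *	scale 1: howmany(40000, 32) = 1250	(still > 1023)
		 *	scale 2: howmany(1250, 32)  = 40	(fits)
		 * giving lat_enc = scale 2, value 40, i.e. 40 * 2^10 =
		 * 40960 ns, the smallest representable latency >= the
		 * requested one.
		 */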
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d "
			    "(rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies are the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}
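
/*
 * Illustrative sketch (not part of the driver): decode a 16-bit LTR
 * latency field as produced above back into nanoseconds. This assumes
 * the LTRV_VALUE/LTRV_SCALE masks used in wm_platform_pm_pch_lpt();
 * the helper name is hypothetical and nothing calls it.
 */
static inline uint64_t
wm_ltr_decode_ns(uint16_t lat_enc)
{
	uint64_t value = __SHIFTOUT(lat_enc, LTRV_VALUE);
	uint64_t scale = __SHIFTOUT(lat_enc, LTRV_SCALE);

	/* Each scale step multiplies the 10-bit value by 2^5. */
	return value << (5 * scale);
}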

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 *
 * Note that on NetBSD, this function is called in both the FLASH and
 * the iNVM case.
 */
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	uint16_t phyval;
	bool wa_done = false;
	int i, rv = 0;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return -1;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
		/*
		 * The default value of the Initialization Control Word 1
		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
		 */
		nvmword = INVM_DEFAULT_AL;
	}
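	/* The workaround ORs the PLL-fix bits into the autoload word. */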
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

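		/*
		 * Bounce the function through D3hot and back to D0,
		 * presumably so the autoload runs again with the override
		 * value written above.
		 */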
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}

/* Sysctl functions */
static int
wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}
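
/*
 * Illustrative sketch (not from this file): the two handlers above are
 * read-only nodes and would typically be attached at attach time with
 * sysctl_createv(9), e.g. under a per-queue subtree. The node name and
 * the "qnode" parent below are hypothetical.
 *
 *	sysctl_createv(&sc->sc_sysctllog, 0, &qnode, NULL,
 *	    CTLFLAG_READONLY, CTLTYPE_INT, "txq_tdh",
 *	    SYSCTL_DESCR("TX descriptor head"),
 *	    wm_sysctl_tdh_handler, 0, (void *)txq, 0,
 *	    CTL_CREATE, CTL_EOL);
 */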

#ifdef WM_DEBUG
static int
wm_sysctl_debug(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
	uint32_t dflags;
	int error;

	dflags = sc->sc_debug;
	node.sysctl_data = &dflags;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error || newp == NULL)
		return error;

	sc->sc_debug = dflags;
	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));

	return 0;
}
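
/*
 * Usage note (illustrative): writing to this node updates sc_debug and
 * dumps TARC0/TDT0, e.g. something like
 *	sysctl -w hw.wm0.debug=1
 * where the exact node path depends on how the node was created.
 */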
#endif
